mission.py
#!/usr/bin/env python3
import time
import threading
import csv
import os
from picam_lib import PicamImpl, FilterContWheel, FilterPosWheel
from mycelium_utils import DronekitConnector
# MANUAL indicates mission has not started or has finished
# HOLD indicates robot is stationary or mission is paused
# AUTO indicates robot is moving to next waypoint
class FilterMissionExec:
def __init__(self, filter_servo_pin, filter_wheel_count, connection_string):
self.vehicle = DronekitConnector(connection_string)
self.camera = PicamImpl()
self.filter = FilterContWheel(filter_servo_pin, filter_wheel_count)
self.filter.detect_c_threshold()
self.mode = None
self.wp = None
self.capture = False
self.filter_capture = False
self.close_mission = False
self.mission_complete = False
self.threads = {}
self._init_threads()
def _init_threads(self):
self.threads['check_state_t'] = threading.Thread(target=self._check_state_thread)
self.threads['capture_t'] = threading.Thread(target=self._capture_thread)
self.threads['check_state_t'].start()
def start(self):
self.threads['capture_t'].start()
def stop(self):
self.close_mission = True
time.sleep(2)
        # join the Thread objects, not the dict keys
        for t in self.threads.values():
            try:
                t.join()
            except RuntimeError:
                pass
self.vehicle.disconnect()
self.camera.disconnect()
def is_mission_complete(self):
return self.mission_complete
def _check_state_thread(self):
while not self.close_mission:
self.mode = self.vehicle.get_mode()
# Camera will capture frames when mission has started and mode is not manual
if self.mode == 'MANUAL':
self.capture = False
else:
self.capture = True
new_wp = self.vehicle.mission.next
if new_wp != self.wp:
# Trigger filter capture at waypoint
self.filter_capture = True
self.wp = new_wp
if self.wp == self.vehicle.mission.count and self.mode == 'MANUAL':
self.mission_complete = True
def _capture_thread(self):
while not self.close_mission:
try:
if self.filter_capture:
self._process_filters()
self.filter_capture = False
if self.capture:
self.camera.capture_single()
            except Exception:
                # Ignore transient camera/filter errors so the capture loop keeps running
                pass
def _process_filters(self):
for i in range(self.filter.filter_count):
self.filter.rotate_to_next(0.5)
self.camera.capture_single("f%d"%i)
# Captures images when waypoint reached
# AUTO to trigger start
# MANUAL to trigger mission end
class WaypointMissionExec:
NO_FILTER = 0
CONT_FILTER = 1
POS_FILTER = 2
def __init__(self,
connection_string,
filter_servo_pin=None,
filter_count=None,
mode=NO_FILTER,
capture_time=2,
**kwargs):
self.vehicle = DronekitConnector(connection_string)
self.vehicle.get_mission(update=True)
self.camera = PicamImpl()
self.capture_time = capture_time
self.mode = None
self.mission_started = False
self.wp = None
self.waypoint_capture = False
self.close_mission = False
self.threads = {}
self._init_threads()
if mode == self.CONT_FILTER:
self.filter = FilterContWheel(filter_servo_pin, filter_count, **kwargs)
elif mode == self.POS_FILTER:
self.filter = FilterPosWheel(filter_servo_pin)
else:
self.filter = None
def _init_threads(self):
self.threads['check_state_t'] = threading.Thread(target=self._check_state_thread)
self.threads['capture_t'] = threading.Thread(target=self._capture_thread)
self.threads['check_state_t'].start()
def start(self):
self.threads['capture_t'].start()
def stop(self):
self.close_mission = True
time.sleep(2)
        # join the Thread objects, not the dict keys
        for t in self.threads.values():
            try:
                t.join()
            except RuntimeError:
                pass
self.vehicle.disconnect()
self.camera.disconnect()
def is_mission_complete(self):
return self.close_mission
def _check_state_thread(self):
while not self.close_mission:
self.mode = self.vehicle.get_mode()
# Camera will capture frames when mission has started and mode is not manual
if self.mode == 'AUTO':
self.mission_started = True
            elif not self.mission_started:
                continue  # Wait until the robot has entered AUTO and the mission has started
new_wp = self.vehicle.mission.next
if new_wp != self.wp:
# Trigger filter capture at waypoint
self.waypoint_capture = True
self.wp = new_wp
if self.wp == self.vehicle.mission.count and self.mode == 'MANUAL':
self.close_mission = True
def _capture_thread(self):
filename = self.camera.save_dir + 'waypoint_mission.csv'
file_exists = os.path.exists(filename)
with open(filename, 'a+', newline='') as csvfile:
csvwriter = csv.writer(csvfile, delimiter=',')
if not file_exists:
header = ['image_timestamp', 'gps_lat', 'gps_lon']
csvwriter.writerow(header)
while not self.close_mission:
try:
if self.waypoint_capture:
                        # self.mode holds the flight-mode string; the filter object decides the capture style
                        if self.filter is None:
images = self._capture_no_filter()
else:
images = self._process_filters()
                        csvwriter.writerows(images)  # one CSV row per captured image
self.waypoint_capture = False
                except Exception:
                    # Ignore transient camera/GPS errors so the capture loop keeps running
                    pass
def _capture_no_filter(self):
images = []
start = time.time()
while time.time() < start + self.capture_time:
filename = self.camera.capture_single()
images.append([filename]+self.vehicle.get_gps())
return images
def _process_filters(self):
images = []
for i in range(self.filter.filter_count):
self.filter.rotate_to_next(0.5)
filename = self.camera.capture_single("f%d"%i)
images.append([filename]+self.vehicle.get_gps())
return images
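# Illustrative usage sketch (not part of the original mission code): running a
# waypoint mission with a continuous-rotation filter wheel. The pin, filter count
# and connection string are placeholder values.
def _example_run_waypoint_mission():
    mission = WaypointMissionExec("/dev/ttyACM0",
                                  filter_servo_pin=18,
                                  filter_count=4,
                                  mode=WaypointMissionExec.CONT_FILTER,
                                  capture_time=2)
    mission.start()
    try:
        while not mission.is_mission_complete():
            time.sleep(1)
    finally:
        mission.stop()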
# Manual trigger of camera capture running through all filters for positional servo filter
class FilterCapture:
def __init__(self,
filter_servo_pin,
connection_string=None,
connect_robot=False):
self.camera = PicamImpl()
self.close_mission = False
self.filter = FilterPosWheel(filter_servo_pin)
if connect_robot and connection_string is not None:
try:
self.vehicle = DronekitConnector(connection_string)
except:
self.vehicle = None
else:
self.vehicle = None
def run_once_gps_log(self):
filename = self.camera.save_dir + 'waypoint_mission.csv'
file_exists = os.path.exists(filename)
with open(filename, 'a+', newline='') as csvfile:
csvwriter = csv.writer(csvfile, delimiter=',')
if not file_exists:
header = ['image_timestamp', 'gps_lat', 'gps_lon']
csvwriter.writerow(header)
            while not self.close_mission:
                try:
                    # _process_filters already performs one full sweep of the wheel
                    images = self._process_filters()
                    csvwriter.writerows(images)
                except Exception:
                    pass
def run_once(self):
filters = len(self.filter.filter_angles)
for i in range(filters):
self.camera.capture_single("f%d"%i)
self.filter.rotate_to_next()
def _process_filters(self):
images = []
for i in range(self.filter.filter_count):
filename = self.camera.capture_single("f%d"%i)
if self.vehicle:
gps_data = self.vehicle.get_gps()
else:
gps_data = []
images.append([filename]+gps_data)
self.filter.rotate_to_next()
return images
    def stop(self):
        self.camera.disconnect()
        self.filter.stop()
        if self.vehicle is not None:
            self.vehicle.disconnect()
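# Illustrative usage sketch (not part of the original mission code): a manually
# triggered sweep through all filter positions, with an optional flight-controller
# connection for GPS logging. Pin and connection string are placeholder values.
def _example_filter_capture():
    capture = FilterCapture(filter_servo_pin=18,
                            connection_string="/dev/ttyACM0",
                            connect_robot=True)
    try:
        capture.run_once()
    finally:
        capture.stop()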
pm_db.py
import json
import base64
import random
from cryptography.hazmat.primitives.kdf.scrypt import Scrypt
from cryptography.fernet import Fernet
import getpass
import os
import threading
import difflib
import string
import secrets
import pyperclip
import time
from inputimeout import inputimeout, TimeoutOccurred
import keyboard as kb
import sys
from god_key_hasher import *
divider = "-----------------------------------------------------------------------------------------------------------------------\n"
lockImg = """
^jEQBQDj^
r#@@@@@@@@@#r
?@@@#x_`_v#@@@x
g@@@! !@@@Q
Q@@@_ _@@@B
rgg@@@@QgggggQ@@@@ggr
Y@@@@@@@@@@@@@@@@@@@Y
Y@@@@@@@Qx^xQ@@@@@@@Y
Y@@@@@@@^ ~@@@@@@@Y
Y@@@@@@@@r r#@@@@@@@Y
Y@@@@@@@@c,c@@@@@@@@Y
Y@@@@@@@@@@@@@@@@@@@Y
v###################v
"""
checkImg = """
`xx.
'k#@@@h`
_m@@@@@@Q,
"M@@@@@@$*
`xk< =N@@@@@@9=
T#@@@Qr ^g@@@@@@5,
y@@@@@@Bv ?Q@@@@@@s-
`V#@@@@@#B@@@@@@w'
`}#@@@@@@@@#T`
vB@@@@Bx
)ER)
"""
vaultImg = """
!wdEEEEEEEEEEEEEEEEEEEEEEEEEEEEdw~
M@@ZzzzzzzzzzzzzzzzzzzzzzzzzzzzzZ@@6`
\@@: !vvxvvvvvvvvvvvvvvvvvvvvvxv~ :@@L
x@@` 0@@@@@@@@@@@@@@@@@@@@@@@@@@Q `@@c
x@@` $@@@@@@@@@@@@@@@@@@@@@@@@@@Q `@@c
x@@` $@@@@@@@@@@@@@@@@@@@@@@@@#Tr `@@c
x@@` $@@@@#I)!,,~L6@@@@@@@@@@@m `@@c
x@@` $@@@v`L$@###M!-6@@@@@@@@@3 `@@c
x@@` $@@)`8@x` ,d@zT@@@@@@@@@@MT `@@c
x@@` $@@ r@3 !@@@@@@@Q `@@c
x@@` $@@r`Q@\` _Z@z}#@@@@@@@@0-` `@@c
x@@` $@@@)`T8@B##Z~-d@@@@@@@@@m `@@c
x@@` $@@@@Bz*:,,!xd@@@@@@@@@@@E` `@@c
x@@` $@@@@@@@@@@@@@@@@@@@@@@@@@@Q `@@c
x@@` $@@@@@@@@@@@@@@@@@@@@@@@@@@Q `@@c
x@@` $@@@@@@@@@@@@@@@@@@@@@@@@@@Q `@@c
\@@: !LLLLLLLLLLLLLLLLLLLLLLLLLL> :@@L
`d@@MwwwwwwwwwwwwwwwwwwwwwwwwwwwwM@@E`
~z6Q@@@@@@$0$$$$0$$0$$0$@@@@@@B6z>
,EEEEEd ZEEEEE!
"""
# Global Variables
timeoutGlobalCode = "*TIMEOUT*"
def main():
# RUN PROGRAM
# Check if vault exists
    try:
        file = open("pm_db.mmf", "r+")
        file.close()
    except OSError:
        # Vault file could not be opened; continue to first-time setup
print(vaultImg)
print("\nVAULT SETUP\n\nCould not find pm_db.mmf in local directory, continuing to vault setup.")
print(vaultSetup())
# RUN LOGIN
os.system("cls" if os.name == "nt" else "clear")
print(lockImg)
hashed_pass = False
cSALT, cVERIFIER, dataBase = fileSetup()
while not hashed_pass:
entered_pass = getpass.getpass("Enter Master Key: ")
hashed_pass = verify_password(
entered_pass, cSALT, cVERIFIER
) # Require password to be entered
if not hashed_pass:
print("Incorrect master password. Try again.\n")
if hashed_pass:
del entered_pass
main_pwd_manager(hashed_pass, dataBase)
del hashed_pass
del cSALT
del cVERIFIER
del dataBase
def main_pwd_manager(hashed_pass, contents):
os.system("cls" if os.name == "nt" else "clear")
db = json.loads(decrypt_data(contents, hashed_pass).decode("utf-8"))
timedOut = False
while not timedOut:
os.system("cls" if os.name == "nt" else "clear")
print(checkImg)
print(divider)
        print(
            "\n(a)dd profile | (f)ind profile data | (e)dit profile data | (r)ead all profiles | (d)elete profile data\n(g)enerate password | (c)hange master password | e(x)it\n"
        )
user_cmd = timeoutInput("What would you like to do? ")
print("\n")
# Ensure user input is lowercase
if user_cmd != timeoutGlobalCode:
user_cmd = user_cmd.lower()
# Add Profile
if user_cmd == "a":
timedOut = addProfile(hashed_pass, db)
# READ PROFILE
if user_cmd == "f":
timedOut = findProfileData(hashed_pass, db)
# READ ALL PROFILES
if user_cmd == "r":
timedOut = readAllProfiles(hashed_pass, db)
# EDIT PROFILE
if user_cmd == "e":
timedOut = editProfileData(hashed_pass, db)
# DELETE PROFILE
if user_cmd == "d":
timedOut = deleteProfileData(hashed_pass, db)
# GENERATE PASSWORD
if user_cmd == "g":
timedOut = pwdGenerate(hashed_pass, db)
# CHANGE MASTER PASSWORD
if user_cmd == "c":
timedOut = changeMasterPassword(hashed_pass, db)
# EXIT PROGRAM AND RETURN TO TERMINAL
if user_cmd == "x":
os.system("cls" if os.name == "nt" else "clear")
timedOut = True
# EXIT BECAUSE OF TIMEOUT
if user_cmd == timeoutGlobalCode:
timeoutCleanup()
timedOut = True
# CLEANUP SENSITIVE INFO ON TIMEOUT
del hashed_pass
del contents
del db
def changeMasterPassword(hashed_pass, db):
# CHANGE MASTER PASSWORD
displayHeader("CHANGE MASTER PASSWORD")
password_provided = timeoutInput("What would you like your master password to be (type and submit (.c) to cancel)? ")
if password_provided != ".c" and password_provided != "" and password_provided != " " and password_provided != timeoutGlobalCode:
password = password_provided.encode() # Convert to type bytes
salt = os.urandom(random.randint(16, 256))
kdf = Scrypt(
salt=salt,
length=32,
n=2 ** 14,
r=8,
p=1,
)
hashed_entered_pass = base64.urlsafe_b64encode(kdf.derive(password)) # Can only use kdf once
try:
            for domain in list(db.keys()):
                # decrypt the username and password with the original master password
                username = str(
                    decrypt_data(
                        bytes(db[domain]["username"], encoding="utf-8"), hashed_pass
                    ).decode("utf-8")
                )
                password = str(
                    decrypt_data(
                        bytes(db[domain]["password"], encoding="utf-8"), hashed_pass
                    ).decode("utf-8")
                )
                # re-encrypt and save them with the new master password
                db[domain] = {
                    "username": str(encrypt_data(username, hashed_entered_pass).decode("utf-8")),
                    "password": str(encrypt_data(password, hashed_entered_pass).decode("utf-8")),
                }
                del username
                del password
file = open("SALT.txt", "wb")
file.write(salt)
file.close()
del salt
file = open("VERIFIER.txt", "wb")
file.write(encrypt_data("entered_master_correct", hashed_entered_pass))
file.close()
# finally overwrite the database file with everything encrypted with the new password
overwrite_db(encrypt_data(json.dumps(db), hashed_entered_pass).decode("utf-8"))
del hashed_entered_pass
del hashed_pass
os.system("cls" if os.name == "nt" else "clear")
print("Master password changed successfully! Log in again to access the password manager.")
timeoutInput("\nPress enter to logout..")
return True
except:
print("Could not change master password (Error code: 01)")
userContinue = timeoutInput("\nPress enter to return to menu...")
if userContinue != timeoutGlobalCode:
return False
else:
return True
else:
if password_provided != timeoutGlobalCode:
userContinue = timeoutInput("\nPress enter to return to menu...")
if userContinue != timeoutGlobalCode:
return False
else:
return True
else:
return True
def addProfile(hashed_pass, db):
# ADD PROFILE
displayHeader("ADD A PROFILE")
print("Type and submit (.c) to cancel.")
    add_domain = timeoutInput("Website domain name: ")
    add_user = None
    add_password = None
    if add_domain != ".c" and add_domain != timeoutGlobalCode:
        add_user = timeoutInput("Username: ")
        if add_user != ".c" and add_user != timeoutGlobalCode:
            add_password = timeoutInput("Password: ")
            if add_password != ".c" and add_password != timeoutGlobalCode:
                db[add_domain] = {
                    "username": str(encrypt_data(add_user, hashed_pass).decode("utf-8")),
                    "password": str(encrypt_data(add_password, hashed_pass).decode("utf-8")),
                }
                overwrite_db(encrypt_data(json.dumps(db), hashed_pass).decode("utf-8"))
                print("Created " + add_domain + " profile successfully!")
    if ".c" in (add_domain, add_user, add_password):
        print("Operation canceled.")
        return False
    if timeoutGlobalCode in (add_domain, add_user, add_password):
        return True
    return False
def findProfileData(hashed_pass, db):
displayHeader("FIND A PROFILE")
print("Type and submit (.c) to cancel.")
read_domain = timeoutInput("What's the domain you're looking for? ")
if read_domain != ".c" and read_domain != timeoutGlobalCode:
try:
domains = list(db.keys())
matches = difflib.get_close_matches(read_domain, domains)
if matches:
print("\nClosest match:\n")
i = 1
for d in matches:
domain_info = db[d]
username = str(
decrypt_data(
bytes(domain_info["username"], encoding="utf-8"),
hashed_pass,
).decode("utf-8")
)
print("PROFILE " + str(i) + ": " + d)
del d
print("Username: " + username + "\n")
del domain_info
del username
i = i + 1
userContinue = timeoutInput("\nSelect the password to be copied to your clipboard (ex: 1), or type (.c) to cancel: ")
if userContinue.isdigit() == True:
if int(userContinue) > 0:
try:
password = str(
decrypt_data(
bytes(db[str(matches[int(userContinue) - 1])]["password"], encoding="utf-8"),
hashed_pass,
).decode("utf-8")
)
print("\n" + to_clipboard(password))
del password
except:
print("\nUnable to find profile corresponding to " + str(userContinue) + ".")
else:
print("\nThere are no profiles corresponding to that number.")
if userContinue.isdigit() == False:
if userContinue != timeoutGlobalCode:
return False
else:
return True
else:
print("Could not find a match. Try viewing all saved profiles.")
except:
print("Error finding profile.")
userContinue = timeoutInput("\nPress enter to return to menu...")
if userContinue != timeoutGlobalCode:
return False
else:
return True
if read_domain == ".c":
print("Operation canceled.")
print("\nReturning to Menu")
return False
if read_domain == timeoutGlobalCode:
return True
def editProfileData(hashed_pass, db):
displayHeader("EDIT A PROFILE")
edit_domain = timeoutInput("Website domain name (submit (.c) to cancel): ")
if edit_domain != ".c" and edit_domain != timeoutGlobalCode:
try:
domain_info = db[edit_domain]
curr_user = str(
decrypt_data(
bytes(domain_info["username"], encoding="utf-8"), hashed_pass
).decode("utf-8")
)
curr_password = str(
decrypt_data(
bytes(domain_info["password"], encoding="utf-8"), hashed_pass
).decode("utf-8")
)
edit_user = timeoutInput("New Username (press enter to keep the current: " + curr_user + "): ")
if edit_user == ".c" or edit_user == " " or edit_user == "":
edit_user = curr_user
if edit_user == timeoutGlobalCode:
return True
edit_password = timeoutInput("New Password (press enter to keep the current: " + curr_password + "): ")
if edit_password == ".c" or edit_password == " " or edit_user == "":
edit_password = curr_password
if edit_password == timeoutGlobalCode:
return True
db[edit_domain] = {
"username": str(encrypt_data(edit_user, hashed_pass).decode("utf-8")),
"password": str(
encrypt_data(edit_password, hashed_pass).decode("utf-8")
),
}
overwrite_db(encrypt_data(json.dumps(db), hashed_pass).decode("utf-8"))
print("Updated " + edit_domain + " profile successfully!")
del edit_domain
del curr_user
del edit_user
del curr_password
del edit_password
del db
userContinue = timeoutInput("\nPress enter to return to menu...")
if userContinue != timeoutGlobalCode:
print("Returning to menu")
return False
else:
return True
except:
print("This domain does not exist, changing to adding to new profile")
userContinue = timeoutInput("\nPress enter to return to menu...")
if userContinue != timeoutGlobalCode:
print("Returning to menu")
return False
else:
return True
if edit_domain != timeoutGlobalCode:
print("Returning to menu")
return False
else:
return True
def readAllProfiles(hashed_pass, db):
displayHeader("READING ALL PROFILES")
try:
i = 0
domains = list(db.keys())
for e in db:
i = i + 1
username = str(
decrypt_data(
bytes(db[e]["username"], encoding="utf-8"), hashed_pass
).decode("utf-8")
)
print("PROFILE " + str(i) + ": " + e)
print("Username: " + username)
del e
del username
print(divider)
if i == 0:
print("No saved profiles")
if i > 0:
userContinue = timeoutInput("\nSelect the password to be copied to your clipboard (ex: 1), or type (.c) to cancel: ")
if userContinue.isdigit() == True:
if int(userContinue) > 0:
try:
password = str(
decrypt_data(
bytes(db[str(domains[int(userContinue) - 1])]["password"], encoding="utf-8"),
hashed_pass,
).decode("utf-8")
)
print("\n" + to_clipboard(password))
del password
except:
print("\nUnable to find profile corresponding to " + str(userContinue) + ".")
else:
print("\nThere are no profiles corresponding to that number.")
if userContinue.isdigit() == False and userContinue != timeoutGlobalCode:
return False
if userContinue == timeoutGlobalCode:
return True
except:
print("Could not load all profiles")
userContinue = timeoutInput("\nPress enter to return to menu...")
if userContinue != timeoutGlobalCode:
print("Returning to menu")
return False
else:
return True
def deleteProfileData(hashed_pass, db):
displayHeader("DELETE A PROFILE")
del_domain = timeoutInput("Write the exact saved domain name (type (.c) to cancel): ")
if del_domain != ".c" and del_domain != timeoutGlobalCode:
try:
del db[del_domain]
overwrite_db(encrypt_data(json.dumps(db), hashed_pass).decode("utf-8"))
print("Deleted " + del_domain + " profile successfully!")
userContinue = timeoutInput("\nPress enter to return to menu...")
if userContinue != timeoutGlobalCode:
print("Returning to menu")
return False
else:
return True
except:
print("Unable to find " + del_domain)
userContinue = timeoutInput("\nPress enter to return to menu...")
if userContinue != timeoutGlobalCode:
print("Returning to menu")
return False
else:
return True
else:
if del_domain != timeoutGlobalCode:
print("Returning to menu")
return False
else:
return True
def pwdGenerate(hashed_pass, db):
displayHeader("GENERATE RANDOM PASSWORD")
pass_length = str(timeoutInput("Password length (type (.c) to cancel): "))
if pass_length != ".c" and pass_length != timeoutGlobalCode:
try:
if int(pass_length) < 6:
pass_length = str(12)
print("\nPasswords must be at least 6 characters long.")
print(to_clipboard(str(generate_password(int(pass_length)))))
userContinue = timeoutInput("\nPress enter to return to menu...")
if userContinue != timeoutGlobalCode:
print("Returning to menu")
return False
else:
return True
except:
print("Unable to generate password.")
userContinue = timeoutInput("\nPress enter to return to menu...")
if userContinue != timeoutGlobalCode:
print("Returning to menu")
return False
else:
return True
else:
if pass_length != timeoutGlobalCode:
print("Returning to menu")
return False
else:
return True
def fileSetup():
    with open("SALT.txt", "rb") as readfile:
        cSALT = readfile.read()
    with open("VERIFIER.txt", "rb") as readfile:
        cVERIFIER = readfile.read()
    with open("pm_db.mmf", "rb") as readfile:
        dataBase = readfile.read()
    return cSALT, cVERIFIER, dataBase
def displayHeader(title):
os.system("cls" if os.name == "nt" else "clear")
print(checkImg)
print(divider)
print(str(title) + "\n")
# Clear clipboard once the user pastes (ctrl+v)
def clear_clipboard_timer():
kb.wait('ctrl+v')
time.sleep(0.1) # Without sleep, clipboard will automatically clear before user actually pastes content
pyperclip.copy("")
# Put string in clipboard
def to_clipboard(input_to_copy):
pyperclip.copy(str(input_to_copy))
del input_to_copy
threading.Thread(target=clear_clipboard_timer).start()
return "Password was saved to clipboard. It will be removed from your clipboard as soon as you paste it."
# TIMEOUT
def timeoutCleanup():
os.system("cls" if os.name == "nt" else "clear")
print(lockImg)
print(
"\n\nYour session expired. For your security, the program has automatically exited. All submitted data is still saved."
)
    sys.exit()
def timeoutInput(caption):
try:
user_input = inputimeout(prompt=caption, timeout=90)
except TimeoutOccurred:
user_input = timeoutGlobalCode
timeoutCleanup()
return(user_input)
# CRYPTOGRAPHY FUNCTIONS
# Generate random password - user cannot request passwords that are less than 6 characters
# use secrets instead of random (secrets is safer)
def generate_password(length=12):
if length < 6:
length = 12
    # pick integer positions so the password gets an uppercase letter, a lowercase letter and a symbol
    uppercase_loc = secrets.randbelow(length)  # random location of uppercase
    symbol_loc = secrets.randbelow(length)  # random location of symbols
    lowercase_loc = secrets.randbelow(length)  # random location of lowercase
password = ""
pool = string.ascii_letters + string.punctuation # the selection of characters used
for i in range(length):
if i == uppercase_loc: # this is to ensure there is at least one uppercase
password += secrets.choice(string.ascii_uppercase)
        elif i == lowercase_loc: # this is to ensure there is at least one lowercase
password += secrets.choice(string.ascii_lowercase)
elif i == symbol_loc: # this is to ensure there is at least one symbol
password += secrets.choice(string.punctuation)
else: # adds a random character from pool
password += secrets.choice(pool)
return password
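# Illustrative check (not part of the original manager): generate_password draws from
# letters and punctuation and reserves positions for an uppercase letter, a lowercase
# letter and a symbol (the three positions are picked independently, so they can
# occasionally collide).
def _example_generate_password():
    pwd = generate_password(16)
    assert len(pwd) == 16
    assert not any(c in string.digits for c in pwd)  # the character pool contains no digits
    return pwd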
def encrypt_data(data, hashed_pass):
    # Fernet expects a 32-byte urlsafe base64 key (the scrypt-derived hash)
    f = Fernet(hashed_pass)
    return f.encrypt(data.encode())
def decrypt_data(token, hashed_pass):
    f = Fernet(hashed_pass)
    return f.decrypt(token)
def verify_password(password_provided, cSALT, cVERIFIER):
verifier = cVERIFIER
# Hash password for later comparison
password = password_provided.encode() # Convert to type bytes
salt = cSALT
kdf = Scrypt(
salt=salt,
length=32,
n=2**14,
r=8,
p=1,
)
hashed_entered_pass = base64.urlsafe_b64encode(
kdf.derive(password)
) # Can only use kdf once
try:
pass_verifier = decrypt_data(verifier, hashed_entered_pass)
if pass_verifier == b"entered_master_correct":
return hashed_entered_pass
except:
return False
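# Illustrative round trip (not part of the manager's flow): the vault key is the
# scrypt hash of the master password, urlsafe-base64 encoded so it can serve as a
# Fernet key. The salt and passphrase below are placeholder values.
def _example_key_round_trip():
    salt = os.urandom(16)
    kdf = Scrypt(salt=salt, length=32, n=2 ** 14, r=8, p=1)
    key = base64.urlsafe_b64encode(kdf.derive(b"example master password"))
    token = encrypt_data("entered_master_correct", key)
    assert decrypt_data(token, key) == b"entered_master_correct"
    return token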
# PROFILE OPERATIONS
def overwrite_db(new_contents):
    with open("pm_db.mmf", "w+") as file:
        file.write(new_contents)
if __name__ == "__main__":
main()
test_insert.py
import pytest
from milvus import DataType, ParamError, BaseException
from utils import *
from constants import *
ADD_TIMEOUT = 60
uid = "test_insert"
field_name = default_float_vec_field_name
binary_field_name = default_binary_vec_field_name
default_single_query = {
"bool": {
"must": [
{"vector": {field_name: {"topk": 10, "query": gen_vectors(1, default_dim), "metric_type": "L2",
"params": {"nprobe": 10}}}}
]
}
}
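# Illustrative flow (mirrors what the smoke tests below exercise; gen_entities,
# default_nb and the `connect`/`collection` fixtures come from the shared utils,
# constants and conftest of this suite): insert entities, flush so they become
# searchable, load the collection and run the default single query defined above.
def _example_insert_then_search(connect, collection):
    ids = connect.insert(collection, gen_entities(default_nb))
    connect.flush([collection])
    connect.load_collection(collection)
    res = connect.search(collection, default_single_query)
    return ids, res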
class TestInsertBase:
"""
******************************************************************
The following cases are used to test `insert` function
******************************************************************
"""
@pytest.fixture(
scope="function",
params=gen_simple_index()
)
def get_simple_index(self, request, connect):
# if str(connect._cmd("mode")) == "CPU":
if request.param["index_type"] in index_cpu_not_support():
pytest.skip("CPU not support index_type: ivf_sq8h")
logging.getLogger().info(request.param)
return request.param
@pytest.fixture(
scope="function",
params=gen_single_filter_fields()
)
def get_filter_field(self, request):
yield request.param
@pytest.fixture(
scope="function",
params=gen_single_vector_fields()
)
def get_vector_field(self, request):
yield request.param
@pytest.mark.tags(CaseLabel.tags_smoke)
def test_insert_with_empty_entity(self, connect, collection):
'''
target: test insert with empty entity list
method: set empty entity list as insert method params
expected: raises a ParamError exception
'''
entities = []
with pytest.raises(ParamError) as e:
connect.insert(collection, entities)
@pytest.mark.tags(CaseLabel.tags_smoke)
def test_insert_with_None(self, connect, collection):
'''
target: test insert with None
method: set None as insert method params
expected: raises a ParamError
'''
entity = None
with pytest.raises(Exception) as e:
connect.insert(collection, entity)
@pytest.mark.timeout(ADD_TIMEOUT)
@pytest.mark.tags(CaseLabel.tags_smoke)
def test_insert_collection_not_existed(self, connect):
'''
target: test insert, with collection not existed
method: insert entity into a random named collection
expected: raise a BaseException
'''
collection_name = gen_unique_str(uid)
with pytest.raises(BaseException) as e:
connect.insert(collection_name, default_entities)
@pytest.mark.level(2)
def test_insert_without_connect(self, dis_connect, collection):
'''
target: test insert entities without connection
method: create collection and insert entities in it, check if inserted successfully
expected: raise exception
'''
with pytest.raises(Exception) as e:
dis_connect.insert(collection, default_entities)
@pytest.mark.timeout(ADD_TIMEOUT)
def test_insert_drop_collection(self, connect, collection):
'''
target: test delete collection after insert entities
method: insert entities and drop collection
expected: has_collection false
'''
ids = connect.insert(collection, default_entity)
assert len(ids) == 1
connect.drop_collection(collection)
assert connect.has_collection(collection) == False
@pytest.mark.timeout(ADD_TIMEOUT)
@pytest.mark.tags(CaseLabel.tags_smoke)
def test_insert_flush_drop_collection(self, connect, collection):
'''
target: test drop collection after insert entities for a while
method: insert entities, sleep, and delete collection
expected: has_collection false
'''
ids = connect.insert(collection, default_entity)
assert len(ids) == 1
connect.flush([collection])
connect.drop_collection(collection)
assert connect.has_collection(collection) == False
@pytest.mark.timeout(ADD_TIMEOUT)
def test_insert_create_index(self, connect, collection, get_simple_index):
'''
        target: test building an index after inserting entities
        method: insert entities, then build the index
expected: no error raised
'''
ids = connect.insert(collection, default_entities)
assert len(ids) == default_nb
connect.flush([collection])
connect.create_index(collection, field_name, get_simple_index)
if get_simple_index["index_type"] != "FLAT":
index = connect.describe_index(collection, "")
create_target_index(get_simple_index, field_name)
assert index == get_simple_index
@pytest.mark.timeout(ADD_TIMEOUT)
@pytest.mark.tags(CaseLabel.tags_smoke)
def test_insert_after_create_index(self, connect, collection, get_simple_index):
'''
        target: test inserting entities after an index has been built
        method: build the index, then insert entities
expected: no error raised
'''
connect.create_index(collection, field_name, get_simple_index)
ids = connect.insert(collection, default_entities)
assert len(ids) == default_nb
if get_simple_index["index_type"] != "FLAT":
index = connect.describe_index(collection, "")
create_target_index(get_simple_index, field_name)
assert index == get_simple_index
@pytest.mark.timeout(ADD_TIMEOUT)
@pytest.mark.tags(CaseLabel.tags_smoke)
def test_insert_search(self, connect, collection):
'''
target: test search entity after insert entity after a while
method: insert entity, sleep, and search collection
expected: no error raised
'''
ids = connect.insert(collection, default_entities)
connect.flush([collection])
connect.load_collection(collection)
res = connect.search(collection, default_single_query)
assert len(res[0]) == default_top_k
def _test_insert_segment_row_count(self, connect, collection):
nb = default_segment_row_limit + 1
res_ids = connect.insert(collection, gen_entities(nb))
connect.flush([collection])
assert len(res_ids) == nb
stats = connect.get_collection_stats(collection)
assert len(stats['partitions'][0]['segments']) == 2
for segment in stats['partitions'][0]['segments']:
assert segment['row_count'] in [default_segment_row_limit, 1]
@pytest.fixture(
scope="function",
params=[
1,
2000
],
)
def insert_count(self, request):
yield request.param
@pytest.mark.timeout(ADD_TIMEOUT)
@pytest.mark.tags(CaseLabel.tags_smoke)
def test_insert_ids(self, connect, id_collection, insert_count):
'''
target: test insert entities in collection, use customize ids
method: create collection and insert entities in it, check the ids returned and the collection length after entities inserted
        expected: the returned ids match and the collection row count equals nb
'''
nb = insert_count
ids = [i for i in range(nb)]
res_ids = connect.insert(id_collection, gen_entities(nb), ids)
connect.flush([id_collection])
assert len(res_ids) == nb
assert res_ids == ids
stats = connect.get_collection_stats(id_collection)
assert stats[row_count] == nb
@pytest.mark.timeout(ADD_TIMEOUT)
def test_insert_the_same_ids(self, connect, id_collection, insert_count):
'''
target: test insert vectors in collection, use customize the same ids
method: create collection and insert vectors in it, check the ids returned and the collection length after vectors inserted
        expected: the returned ids match and the collection row count equals nb
'''
nb = insert_count
ids = [1 for i in range(nb)]
res_ids = connect.insert(id_collection, gen_entities(nb), ids)
connect.flush([id_collection])
assert len(res_ids) == nb
assert res_ids == ids
stats = connect.get_collection_stats(id_collection)
assert stats[row_count] == nb
@pytest.mark.timeout(ADD_TIMEOUT)
@pytest.mark.tags(CaseLabel.tags_smoke)
def test_insert_ids_fields(self, connect, get_filter_field, get_vector_field):
'''
        target: test creating collections with different fields and inserting entities with customized ids
method: create collection with diff fields: metric/field_type/..., insert, and count
expected: row count correct
'''
nb = 5
filter_field = get_filter_field
vector_field = get_vector_field
collection_name = gen_unique_str("test_collection")
fields = {
"fields": [filter_field, vector_field],
"auto_id": False
}
connect.create_collection(collection_name, fields)
ids = [i for i in range(nb)]
entities = gen_entities_by_fields(fields["fields"], nb, default_dim)
logging.getLogger().info(entities)
res_ids = connect.insert(collection_name, entities, ids)
assert res_ids == ids
connect.flush([collection_name])
stats = connect.get_collection_stats(collection_name)
assert stats[row_count] == nb
@pytest.mark.timeout(ADD_TIMEOUT)
def test_insert_ids_not_match(self, connect, id_collection, insert_count):
'''
target: test insert entities in collection without ids
        method: create id_collection and insert entities without ids
expected: exception raised
'''
nb = insert_count
with pytest.raises(Exception) as e:
connect.insert(id_collection, gen_entities(nb))
# TODO
@pytest.mark.timeout(ADD_TIMEOUT)
@pytest.mark.tags(CaseLabel.tags_smoke)
def test_insert_twice_ids_no_ids(self, connect, id_collection):
'''
target: check the result of insert, with params ids and no ids
method: test insert vectors twice, use customize ids first, and then use no ids
expected: BaseException raised
'''
ids = [i for i in range(default_nb)]
connect.insert(id_collection, default_entities, ids)
with pytest.raises(Exception) as e:
connect.insert(id_collection, default_entities)
@pytest.mark.timeout(ADD_TIMEOUT)
def test_insert_not_ids(self, connect, id_collection):
'''
        target: check the result of inserting into an id_collection without ids
        method: insert entities without providing customized ids
expected: error raised
'''
with pytest.raises(Exception) as e:
connect.insert(id_collection, default_entities)
@pytest.mark.timeout(ADD_TIMEOUT)
@pytest.mark.tags(CaseLabel.tags_smoke)
def test_insert_ids_length_not_match_batch(self, connect, id_collection):
'''
target: test insert vectors in collection, use customize ids, len(ids) != len(vectors)
method: create collection and insert vectors in it
expected: raise an exception
'''
ids = [i for i in range(1, default_nb)]
logging.getLogger().info(len(ids))
with pytest.raises(Exception) as e:
connect.insert(id_collection, default_entities, ids)
@pytest.mark.timeout(ADD_TIMEOUT)
def test_insert_ids_length_not_match_single(self, connect, id_collection):
'''
target: test insert vectors in collection, use customize ids, len(ids) != len(vectors)
method: create collection and insert vectors in it
expected: raise an exception
'''
ids = [i for i in range(1, default_nb)]
logging.getLogger().info(len(ids))
with pytest.raises(BaseException) as e:
connect.insert(id_collection, default_entity, ids)
@pytest.mark.timeout(ADD_TIMEOUT)
@pytest.mark.tags(CaseLabel.tags_smoke)
def test_insert_partition(self, connect, collection):
'''
target: test insert entities in collection created before
method: create collection and insert entities in it, with the partition_tag param
        expected: the collection row count equals nb
'''
connect.create_partition(collection, default_tag)
ids = connect.insert(collection, default_entities, partition_tag=default_tag)
assert len(ids) == default_nb
assert connect.has_partition(collection, default_tag)
connect.flush([collection])
stats = connect.get_collection_stats(collection)
assert stats[row_count] == default_nb
# TODO
@pytest.mark.timeout(ADD_TIMEOUT)
@pytest.mark.tags(CaseLabel.tags_smoke)
def test_insert_partition_with_ids(self, connect, id_collection):
'''
target: test insert entities in collection created before, insert with ids
method: create collection and insert entities in it, with the partition_tag param
        expected: the returned ids match the customized ids
'''
connect.create_partition(id_collection, default_tag)
ids = [i for i in range(default_nb)]
res_ids = connect.insert(id_collection, gen_entities(default_nb), ids=ids, partition_tag=default_tag)
assert res_ids == ids
logging.getLogger().info(connect.describe_collection(id_collection))
@pytest.mark.timeout(ADD_TIMEOUT)
@pytest.mark.tags(CaseLabel.tags_smoke)
def test_insert_default_partition(self, connect, collection):
'''
target: test insert entities into default partition
        method: insert entities into the collection's default partition
expected: the collection row count equals to nb
'''
ids = connect.insert(collection, default_entities, partition_tag=default_partition_name)
assert len(ids) == default_nb
connect.flush([collection])
stats = connect.get_collection_stats(collection)
assert stats[row_count] == default_nb
@pytest.mark.timeout(ADD_TIMEOUT)
def test_insert_partition_not_existed(self, connect, collection):
'''
target: test insert entities in collection created before
method: create collection and insert entities in it, with the not existed partition_tag param
expected: error raised
'''
tag = gen_unique_str()
with pytest.raises(Exception) as e:
connect.insert(collection, default_entities, partition_tag=tag)
@pytest.mark.timeout(ADD_TIMEOUT)
def test_insert_partition_repeatedly(self, connect, collection):
'''
target: test insert entities in collection created before
        method: create collection and insert entities into it repeatedly, with the partition_tag param
        expected: the collection row count equals twice nb
'''
connect.create_partition(collection, default_tag)
ids = connect.insert(collection, default_entities, partition_tag=default_tag)
ids = connect.insert(collection, default_entities, partition_tag=default_tag)
connect.flush([collection])
res = connect.get_collection_stats(collection)
assert res[row_count] == 2 * default_nb
@pytest.mark.tags(CaseLabel.tags_smoke)
def test_insert_dim_not_matched(self, connect, collection):
'''
target: test insert entities, the vector dimension is not equal to the collection dimension
method: the entities dimension is half of the collection dimension, check the status
expected: error raised
'''
vectors = gen_vectors(default_nb, int(default_dim) // 2)
insert_entities = copy.deepcopy(default_entities)
insert_entities[-1]["values"] = vectors
with pytest.raises(Exception) as e:
connect.insert(collection, insert_entities)
@pytest.mark.tags(CaseLabel.tags_smoke)
def test_insert_with_field_name_not_match(self, connect, collection):
'''
target: test insert entities, with the entity field name updated
method: update entity field name
expected: error raised
'''
tmp_entity = update_field_name(copy.deepcopy(default_entity), "int64", "int64new")
with pytest.raises(Exception):
connect.insert(collection, tmp_entity)
@pytest.mark.level(2)
def test_insert_with_field_type_not_match(self, connect, collection):
'''
target: test insert entities, with the entity field type updated
method: update entity field type
expected: error raised
'''
tmp_entity = update_field_type(copy.deepcopy(default_entity), "int64", DataType.FLOAT)
with pytest.raises(Exception):
connect.insert(collection, tmp_entity)
@pytest.mark.level(2)
def test_insert_with_field_value_not_match(self, connect, collection):
'''
target: test insert entities, with the entity field value updated
method: update entity field value
expected: error raised
'''
tmp_entity = update_field_value(copy.deepcopy(default_entity), DataType.FLOAT, 's')
with pytest.raises(Exception):
connect.insert(collection, tmp_entity)
@pytest.mark.tags(CaseLabel.tags_smoke)
def test_insert_with_field_more(self, connect, collection):
'''
target: test insert entities, with more fields than collection schema
method: add entity field
expected: error raised
'''
tmp_entity = add_field(copy.deepcopy(default_entity))
with pytest.raises(Exception):
connect.insert(collection, tmp_entity)
@pytest.mark.tags(CaseLabel.tags_smoke)
def test_insert_with_field_vector_more(self, connect, collection):
'''
target: test insert entities, with more fields than collection schema
method: add entity vector field
expected: error raised
'''
tmp_entity = add_vector_field(default_nb, default_dim)
with pytest.raises(Exception):
connect.insert(collection, tmp_entity)
@pytest.mark.tags(CaseLabel.tags_smoke)
def test_insert_with_field_less(self, connect, collection):
'''
target: test insert entities, with less fields than collection schema
method: remove entity field
expected: error raised
'''
tmp_entity = remove_field(copy.deepcopy(default_entity))
with pytest.raises(Exception):
connect.insert(collection, tmp_entity)
@pytest.mark.tags(CaseLabel.tags_smoke)
def test_insert_with_field_vector_less(self, connect, collection):
'''
target: test insert entities, with less fields than collection schema
method: remove entity vector field
expected: error raised
'''
tmp_entity = remove_vector_field(copy.deepcopy(default_entity))
with pytest.raises(Exception):
connect.insert(collection, tmp_entity)
@pytest.mark.tags(CaseLabel.tags_smoke)
def test_insert_with_no_field_vector_value(self, connect, collection):
'''
target: test insert entities, with no vector field value
method: remove entity values of vector field
expected: error raised
'''
tmp_entity = copy.deepcopy(default_entity)
del tmp_entity[-1]["values"]
with pytest.raises(Exception):
connect.insert(collection, tmp_entity)
@pytest.mark.tags(CaseLabel.tags_smoke)
def test_insert_with_no_field_vector_type(self, connect, collection):
'''
target: test insert entities, with no vector field type
method: remove entity vector field
expected: error raised
'''
tmp_entity = copy.deepcopy(default_entity)
del tmp_entity[-1]["type"]
with pytest.raises(Exception):
connect.insert(collection, tmp_entity)
@pytest.mark.tags(CaseLabel.tags_smoke)
def test_insert_with_no_field_vector_name(self, connect, collection):
'''
target: test insert entities, with no vector field name
method: remove entity vector field
expected: error raised
'''
tmp_entity = copy.deepcopy(default_entity)
del tmp_entity[-1]["name"]
with pytest.raises(Exception):
connect.insert(collection, tmp_entity)
# todo fix timeout
@pytest.mark.level(2)
@pytest.mark.timeout(30)
def test_collection_insert_rows_count_multi_threading(self, args, collection):
'''
target: test collection rows_count is correct or not with multi threading
method: create collection and insert entities in it(idmap),
assert the value returned by count_entities method is equal to length of entities
expected: the count is equal to the length of entities
'''
if args["handler"] == "HTTP":
pytest.skip("Skip test in http mode")
thread_num = 8
threads = []
milvus = get_milvus(host=args["ip"], port=args["port"], handler=args["handler"], try_connect=False)
def insert(thread_i):
logging.getLogger().info("In thread-%d" % thread_i)
res_ids = milvus.insert(collection, default_entities)
milvus.flush([collection])
for i in range(thread_num):
x = threading.Thread(target=insert, args=(i,))
threads.append(x)
x.start()
for th in threads:
th.join()
stats = milvus.get_collection_stats(collection)
assert stats[row_count] == thread_num * default_nb
# TODO: unable to set config
@pytest.mark.level(2)
def _test_insert_disable_auto_flush(self, connect, collection):
'''
target: test insert entities, with disable autoflush
method: disable autoflush and insert, get entity
expected: the count is equal to 0
'''
delete_nums = 500
disable_flush(connect)
ids = connect.insert(collection, default_entities)
res = connect.get_entity_by_id(collection, ids[:delete_nums])
assert len(res) == delete_nums
assert res[0] is None
class TestInsertBinary:
@pytest.fixture(
scope="function",
params=gen_binary_index()
)
def get_binary_index(self, request):
request.param["metric_type"] = "JACCARD"
return request.param
@pytest.mark.tags(CaseLabel.tags_smoke)
def test_insert_binary_entities(self, connect, binary_collection):
'''
target: test insert entities in binary collection
method: create collection and insert binary entities in it
expected: the collection row count equals to nb
'''
ids = connect.insert(binary_collection, default_binary_entities)
assert len(ids) == default_nb
connect.flush([binary_collection])
stats = connect.get_collection_stats(binary_collection)
assert stats[row_count] == default_nb
@pytest.mark.tags(CaseLabel.tags_smoke)
def test_insert_binary_partition(self, connect, binary_collection):
'''
target: test insert entities and create partition tag
method: create collection and insert binary entities in it, with the partition_tag param
expected: the collection row count equals to nb
'''
connect.create_partition(binary_collection, default_tag)
ids = connect.insert(binary_collection, default_binary_entities, partition_tag=default_tag)
assert len(ids) == default_nb
assert connect.has_partition(binary_collection, default_tag)
connect.flush([binary_collection])
stats = connect.get_collection_stats(binary_collection)
assert stats[row_count] == default_nb
def test_insert_binary_multi_times(self, connect, binary_collection):
'''
target: test insert entities multi times and final flush
method: create collection and insert binary entity multi and final flush
expected: the collection row count equals to nb
'''
for i in range(default_nb):
ids = connect.insert(binary_collection, default_binary_entity)
assert len(ids) == 1
connect.flush([binary_collection])
stats = connect.get_collection_stats(binary_collection)
assert stats[row_count] == default_nb
@pytest.mark.tags(CaseLabel.tags_smoke)
def test_insert_binary_after_create_index(self, connect, binary_collection, get_binary_index):
'''
target: test insert binary entities after build index
method: build index and insert entities
expected: no error raised
'''
connect.create_index(binary_collection, binary_field_name, get_binary_index)
ids = connect.insert(binary_collection, default_binary_entities)
assert len(ids) == default_nb
connect.flush([binary_collection])
index = connect.describe_index(binary_collection, "")
create_target_index(get_binary_index, binary_field_name)
assert index == get_binary_index
@pytest.mark.timeout(ADD_TIMEOUT)
def test_insert_binary_create_index(self, connect, binary_collection, get_binary_index):
'''
        target: test building an index after inserting binary entities
        method: insert binary entities, then build the index
expected: no error raised
'''
ids = connect.insert(binary_collection, default_binary_entities)
assert len(ids) == default_nb
connect.flush([binary_collection])
connect.create_index(binary_collection, binary_field_name, get_binary_index)
index = connect.describe_index(binary_collection, "")
create_target_index(get_binary_index, binary_field_name)
assert index == get_binary_index
@pytest.mark.tags(CaseLabel.tags_smoke)
def test_insert_binary_search(self, connect, binary_collection):
'''
target: test search vector after insert vector after a while
method: insert vector, sleep, and search collection
expected: no error raised
'''
ids = connect.insert(binary_collection, default_binary_entities)
connect.flush([binary_collection])
query, vecs = gen_query_vectors(binary_field_name, default_binary_entities, default_top_k, 1,
metric_type="JACCARD")
connect.load_collection(binary_collection)
res = connect.search(binary_collection, query)
logging.getLogger().debug(res)
assert len(res[0]) == default_top_k
class TestInsertAsync:
@pytest.fixture(scope="function", autouse=True)
def skip_http_check(self, args):
if args["handler"] == "HTTP":
pytest.skip("skip in http mode")
@pytest.fixture(
scope="function",
params=[
1,
1000
],
)
def insert_count(self, request):
yield request.param
def check_status(self, result):
logging.getLogger().info("In callback check status")
assert not result
def check_result(self, result):
logging.getLogger().info("In callback check results")
assert result
@pytest.mark.tags(CaseLabel.tags_smoke)
def test_insert_async(self, connect, collection, insert_count):
'''
target: test insert vectors with different length of vectors
method: set different vectors as insert method params
expected: length of ids is equal to the length of vectors
'''
nb = insert_count
future = connect.insert(collection, gen_entities(nb), _async=True)
ids = future.result()
connect.flush([collection])
assert len(ids) == nb
@pytest.mark.level(2)
def test_insert_async_false(self, connect, collection, insert_count):
'''
target: test insert vectors with different length of vectors
method: set different vectors as insert method params
expected: length of ids is equal to the length of vectors
'''
nb = insert_count
ids = connect.insert(collection, gen_entities(nb), _async=False)
# ids = future.result()
connect.flush([collection])
assert len(ids) == nb
@pytest.mark.tags(CaseLabel.tags_smoke)
def test_insert_async_callback(self, connect, collection, insert_count):
'''
target: test insert vectors with different length of vectors
method: set different vectors as insert method params
expected: length of ids is equal to the length of vectors
'''
nb = insert_count
future = connect.insert(collection, gen_entities(nb), _async=True, _callback=self.check_result)
future.done()
ids = future.result()
assert len(ids) == nb
@pytest.mark.level(2)
def test_insert_async_long(self, connect, collection):
'''
target: test insert vectors with different length of vectors
method: set different vectors as insert method params
expected: length of ids is equal to the length of vectors
'''
nb = 50000
future = connect.insert(collection, gen_entities(nb), _async=True, _callback=self.check_result)
ids = future.result()
assert len(ids) == nb
connect.flush([collection])
stats = connect.get_collection_stats(collection)
logging.getLogger().info(stats)
assert stats[row_count] == nb
@pytest.mark.level(2)
def test_insert_async_callback_timeout(self, connect, collection):
'''
target: test insert vectors with different length of vectors
method: set different vectors as insert method params
expected: length of ids is equal to the length of vectors
'''
nb = 100000
future = connect.insert(collection, gen_entities(nb), _async=True, _callback=self.check_status, timeout=1)
with pytest.raises(Exception) as e:
result = future.result()
@pytest.mark.tags(CaseLabel.tags_smoke)
def test_insert_async_invalid_params(self, connect):
'''
target: test insert vectors with different length of vectors
method: set different vectors as insert method params
expected: length of ids is equal to the length of vectors
'''
collection_new = gen_unique_str()
future = connect.insert(collection_new, default_entities, _async=True)
future.done()
with pytest.raises(Exception) as e:
ids = future.result()
# 1339
def test_insert_async_invalid_params_raise_exception(self, connect, collection):
'''
target: test insert vectors with different length of vectors
method: set different vectors as insert method params
expected: length of ids is equal to the length of vectors
'''
entities = []
future = connect.insert(collection, entities, _async=True)
future.done()
with pytest.raises(Exception) as e:
future.result()
class TestInsertMultiCollections:
"""
******************************************************************
The following cases are used to test `insert` function
******************************************************************
"""
@pytest.fixture(
scope="function",
params=gen_simple_index()
)
def get_simple_index(self, request, connect):
logging.getLogger().info(request.param)
# if str(connect._cmd("mode")) == "CPU":
# if request.param["index_type"] in index_cpu_not_support():
# pytest.skip("sq8h not support in CPU mode")
return request.param
def test_insert_entity_multi_collections(self, connect):
'''
target: test insert entities
method: create 10 collections and insert entities into them in turn
expected: row count
'''
collection_num = 10
collection_list = []
for i in range(collection_num):
collection_name = gen_unique_str(uid)
collection_list.append(collection_name)
connect.create_collection(collection_name, default_fields)
ids = connect.insert(collection_name, default_entities)
connect.flush([collection_name])
assert len(ids) == default_nb
stats = connect.get_collection_stats(collection_name)
assert stats[row_count] == default_nb
for i in range(collection_num):
connect.drop_collection(collection_list[i])
@pytest.mark.timeout(ADD_TIMEOUT)
@pytest.mark.tags(CaseLabel.tags_smoke)
def test_drop_collection_insert_entity_another(self, connect, collection):
'''
target: test insert vector to collection_1 after collection_2 deleted
method: delete collection_2 and insert vector to collection_1
expected: row count equals the length of entities inserted
'''
collection_name = gen_unique_str(uid)
connect.create_collection(collection_name, default_fields)
connect.drop_collection(collection)
ids = connect.insert(collection_name, default_entity)
connect.flush([collection_name])
assert len(ids) == 1
@pytest.mark.timeout(ADD_TIMEOUT)
@pytest.mark.tags(CaseLabel.tags_smoke)
def test_create_index_insert_entity_another(self, connect, collection, get_simple_index):
'''
target: test insert vector to collection_2 after build index for collection_1
method: build index and insert vector
expected: status ok
'''
collection_name = gen_unique_str(uid)
connect.create_collection(collection_name, default_fields)
connect.create_index(collection, field_name, get_simple_index)
ids = connect.insert(collection_name, default_entity)
assert len(ids) == 1
if get_simple_index["index_type"] != "FLAT":
index = connect.describe_index(collection, "")
create_target_index(get_simple_index, field_name)
assert index == get_simple_index
connect.drop_collection(collection_name)
@pytest.mark.timeout(ADD_TIMEOUT)
@pytest.mark.tags(CaseLabel.tags_smoke)
def test_insert_entity_create_index_another(self, connect, collection, get_simple_index):
'''
target: test insert vector to collection_2 after build index for collection_1
method: build index and insert vector
expected: status ok
'''
collection_name = gen_unique_str(uid)
connect.create_collection(collection_name, default_fields)
ids = connect.insert(collection, default_entity)
connect.flush([collection])
connect.create_index(collection_name, field_name, get_simple_index)
if get_simple_index["index_type"] != "FLAT":
index = connect.describe_index(collection_name, "")
create_target_index(get_simple_index, field_name)
assert index == get_simple_index
stats = connect.get_collection_stats(collection)
assert stats[row_count] == 1
@pytest.mark.timeout(ADD_TIMEOUT)
def test_insert_entity_sleep_create_index_another(self, connect, collection, get_simple_index):
'''
target: test insert vector to collection_2 after build index for collection_1 for a while
method: build index and insert vector
expected: status ok
'''
collection_name = gen_unique_str(uid)
connect.create_collection(collection_name, default_fields)
ids = connect.insert(collection, default_entity)
connect.flush([collection])
connect.create_index(collection_name, field_name, get_simple_index)
stats = connect.get_collection_stats(collection)
assert stats[row_count] == 1
@pytest.mark.timeout(ADD_TIMEOUT)
def test_search_entity_insert_entity_another(self, connect, collection):
'''
target: test insert entity to collection_1 after search collection_2
method: search collection and insert entity
expected: status ok
'''
collection_name = gen_unique_str(uid)
connect.create_collection(collection_name, default_fields)
connect.load_collection(collection)
res = connect.search(collection, default_single_query)
assert len(res[0]) == 0
connect.insert(collection_name, default_entity)
connect.flush([collection_name])
stats = connect.get_collection_stats(collection_name)
assert stats[row_count] == 1
@pytest.mark.timeout(ADD_TIMEOUT)
@pytest.mark.tags(CaseLabel.tags_smoke)
def test_insert_entity_search_entity_another(self, connect, collection):
'''
target: test insert entity to collection_1 after search collection_2
method: search collection and insert entity
expected: status ok
'''
collection_name = gen_unique_str(uid)
connect.create_collection(collection_name, default_fields)
ids = connect.insert(collection, default_entity)
connect.flush([collection])
connect.load_collection(collection_name)
res = connect.search(collection_name, default_single_query)
stats = connect.get_collection_stats(collection)
assert stats[row_count] == 1
@pytest.mark.timeout(ADD_TIMEOUT)
def test_insert_entity_sleep_search_entity_another(self, connect, collection):
'''
target: test insert entity to collection_1 after search collection_2 a while
method: search collection, sleep, and insert entity
expected: status ok
'''
collection_name = gen_unique_str(uid)
connect.create_collection(collection_name, default_fields)
ids = connect.insert(collection, default_entity)
connect.flush([collection])
connect.load_collection(collection_name)
res = connect.search(collection_name, default_single_query)
assert len(res[0]) == 0
@pytest.mark.timeout(ADD_TIMEOUT)
def _test_insert_entity_during_release_collection(self, connect, collection):
'''
target: test insert entity during release
method: release collection async, then do insert operation
expected: insert ok
'''
for i in range(10):
connect.insert(collection, default_entities)
connect.flush([collection])
connect.load_collection(collection)
def release():
connect.release_collection(collection)
t = threading.Thread(target=release, args=(collection,))
t.start()
ids = connect.insert(collection, default_entities)
assert len(ids) == default_nb
class TestInsertInvalid(object):
"""
Test inserting vectors with invalid collection names
"""
@pytest.fixture(
scope="function",
params=gen_invalid_strs()
)
def get_collection_name(self, request):
yield request.param
@pytest.fixture(
scope="function",
params=gen_invalid_strs()
)
def get_tag_name(self, request):
yield request.param
@pytest.fixture(
scope="function",
params=gen_invalid_strs()
)
def get_field_name(self, request):
yield request.param
@pytest.fixture(
scope="function",
params=gen_invalid_strs()
)
def get_field_type(self, request):
yield request.param
@pytest.fixture(
scope="function",
params=gen_invalid_strs()
)
def get_field_int_value(self, request):
yield request.param
@pytest.fixture(
scope="function",
params=gen_invalid_ints()
)
def get_entity_id(self, request):
yield request.param
@pytest.fixture(
scope="function",
params=gen_invalid_vectors()
)
def get_field_vectors_value(self, request):
yield request.param
def test_insert_ids_invalid(self, connect, id_collection, get_entity_id):
        '''
        target: test insert with customized ids that are not int64
        method: create collection and insert entities into it
        expected: raise an exception
        '''
entity_id = get_entity_id
ids = [entity_id for _ in range(default_nb)]
with pytest.raises(Exception):
connect.insert(id_collection, default_entities, ids)
def test_insert_with_invalid_collection_name(self, connect, get_collection_name):
collection_name = get_collection_name
with pytest.raises(Exception):
connect.insert(collection_name, default_entity)
def test_insert_with_invalid_partition_name(self, connect, collection, get_tag_name):
tag_name = get_tag_name
connect.create_partition(collection, default_tag)
if tag_name is not None:
with pytest.raises(Exception):
connect.insert(collection, default_entity, partition_tag=tag_name)
else:
connect.insert(collection, default_entity, partition_tag=tag_name)
def test_insert_with_invalid_field_name(self, connect, collection, get_field_name):
tmp_entity = update_field_name(copy.deepcopy(default_entity), "int64", get_field_name)
with pytest.raises(Exception):
connect.insert(collection, tmp_entity)
def test_insert_with_invalid_field_type(self, connect, collection, get_field_type):
field_type = get_field_type
tmp_entity = update_field_type(copy.deepcopy(default_entity), 'float', field_type)
with pytest.raises(Exception):
connect.insert(collection, tmp_entity)
def test_insert_with_invalid_field_value(self, connect, collection, get_field_int_value):
field_value = get_field_int_value
tmp_entity = update_field_type(copy.deepcopy(default_entity), 'int64', field_value)
with pytest.raises(Exception):
connect.insert(collection, tmp_entity)
def test_insert_with_invalid_field_entity_value(self, connect, collection, get_field_vectors_value):
tmp_entity = copy.deepcopy(default_entity)
src_vector = tmp_entity[-1]["values"]
src_vector[0][1] = get_field_vectors_value
with pytest.raises(Exception):
connect.insert(collection, tmp_entity)
class TestInsertInvalidBinary(object):
"""
Test inserting vectors with invalid collection names
"""
@pytest.fixture(
scope="function",
params=gen_invalid_strs()
)
def get_collection_name(self, request):
yield request.param
@pytest.fixture(
scope="function",
params=gen_invalid_strs()
)
def get_tag_name(self, request):
yield request.param
@pytest.fixture(
scope="function",
params=gen_invalid_strs()
)
def get_field_name(self, request):
yield request.param
@pytest.fixture(
scope="function",
params=gen_invalid_strs()
)
def get_field_type(self, request):
yield request.param
@pytest.fixture(
scope="function",
params=gen_invalid_strs()
)
def get_field_int_value(self, request):
yield request.param
@pytest.fixture(
scope="function",
params=gen_invalid_ints()
)
def get_entity_id(self, request):
yield request.param
@pytest.fixture(
scope="function",
params=gen_invalid_vectors()
)
def get_field_vectors_value(self, request):
yield request.param
@pytest.mark.level(2)
def test_insert_with_invalid_field_name(self, connect, binary_collection, get_field_name):
tmp_entity = update_field_name(copy.deepcopy(default_binary_entity), "int64", get_field_name)
with pytest.raises(Exception):
connect.insert(binary_collection, tmp_entity)
@pytest.mark.level(2)
def test_insert_with_invalid_field_value(self, connect, binary_collection, get_field_int_value):
tmp_entity = update_field_type(copy.deepcopy(default_binary_entity), 'int64', get_field_int_value)
with pytest.raises(Exception):
connect.insert(binary_collection, tmp_entity)
@pytest.mark.level(2)
def test_insert_with_invalid_field_entity_value(self, connect, binary_collection, get_field_vectors_value):
tmp_entity = copy.deepcopy(default_binary_entity)
src_vectors = tmp_entity[-1]["values"]
src_vectors[0] = get_field_vectors_value
with pytest.raises(Exception):
connect.insert(binary_collection, tmp_entity)
@pytest.mark.level(2)
def test_insert_ids_invalid(self, connect, binary_id_collection, get_entity_id):
        '''
        target: test insert with customized ids that are not int64
        method: create collection and insert entities into it
        expected: raise an exception
        '''
entity_id = get_entity_id
ids = [entity_id for _ in range(default_nb)]
with pytest.raises(Exception):
connect.insert(binary_id_collection, default_binary_entities, ids)
@pytest.mark.level(2)
def test_insert_with_invalid_field_type(self, connect, binary_collection, get_field_type):
field_type = get_field_type
tmp_entity = update_field_type(copy.deepcopy(default_binary_entity), 'int64', field_type)
with pytest.raises(Exception):
connect.insert(binary_collection, tmp_entity)
@pytest.mark.level(2)
def test_insert_with_invalid_field_entities_value(self, connect, binary_collection, get_field_vectors_value):
tmp_entities = copy.deepcopy(default_binary_entities)
src_vector = tmp_entities[-1]["values"]
src_vector[1] = get_field_vectors_value
with pytest.raises(Exception):
connect.insert(binary_collection, tmp_entities)
|
webqq_client.py
|
# -*- coding: utf-8 -*-
"""
用于内嵌到其他程序中,为其他程序添加发送到qq功能
参数格式:
_参数名_=_{{{{参数内容}}}}_
发送方式允许任意基于tcp的方式,如http等(比如说出现在url中、post表单中、http头、cookies、UA中)
本py使用的发送方式是urt-8的raw socket数据
参数名、内容、标示符_={} 都允许不编码(gbk/utf-8)或urlencode
只要发送的数据中出现上述格式的串,即会被解析
一个例子:
若想要发送一条信息'hello world'到QQ 345678901 (假设webqq消息服务器是127.0.0.1 端口为默认的34567)
则准备发送的内容为:
_token_=_{{{{sometoken}}}}_
_cmd_=_{{{{sendtoqq}}}}_
_msg_=_{{{{hello world}}}}_
_target_=_{{{{345678901}}}}_
发送方式:
0.在其他python程序中发送信息(支持py2.6 2.7 3.4 3.5+)
from webqq_client import WebqqClient
...下面的代码请看本文件底部的demo..
1.以raw socket发送就是上面的样子,直接发送(换行只是为了阅读方便)
2.以浏览器请求的方式发送
在浏览器中直接访问
http://127.0.0.1:34567/?_token_=_{{{{sometoken}}}}_&_cmd_=_{{{{sendtoqq}}}}_&_msg_=_{{{{hello world}}}}_&_target_=_{{{{345678901}}}}_
即可
3.以curl发送(注意{}的转义):
curl "http://127.0.0.1:34567/?_token_=_\{\{\{\{sometoken\}\}\}\}_&_cmd_=_\{\{\{\{sendtoqq\}\}\}\}_&_msg_=_\{\{\{\{hello world\}\}\}\}_&_target_=_\{\{\{\{Xno0Pu7bnCB\}\}\}\}"
参数说明(目前服务器仅支持2个API):
1.发送到QQ:
token: 就是token,你在运行服务端程序时指定
cmd: sendtoqq 固定值,表示命令为发送到QQ
msg: 消息内容
target: 目标QQ号
2.发送到讨论组:
token: 就是token,你在运行服务端程序时指定
cmd: sendtodis 固定值,表示命令为发送到讨论组
msg: 消息内容
target: 目标讨论组的名称,请尽可能取得独特一点,建议不要纯数字,不保证对字母、数字外的符号支持
"""
import socket
import threading
DEFAULT_PORT = 34567
__VERSION__ = '0.2.0'
def assembly_payload(paras):
"""
将dict参数组装为服务器可以理解的参数
:param paras: dict
"""
buffer = []
for key in paras:
buffer.append('_%s_=_{{{{%s}}}}_&' % (str(key), str(paras[key])))
return (''.join(buffer)).encode()
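# A minimal illustration of the wire format produced by assembly_payload() above
# (illustrative only, not part of the original module; dict insertion order is preserved on Python 3.7+):
#
#   assembly_payload({'cmd': 'handshake', 'token': 'sometoken'})
#   -> b'_cmd_=_{{{{handshake}}}}_&_token_=_{{{{sometoken}}}}_&'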
class WebqqClient:
def _send_and_receive(self, payload):
"""
send bytes to server and receive echo
:type payload: bytes
"""
try:
self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.socket.connect((self.server, self.port))
except:
return False
self.socket.send(payload)
buffer = []
while True:
d = self.socket.recv(1024)
buffer.append(d)
if len(d) < 1024:
break
self.socket.close()
data = b''.join(buffer)
try:
data = data.decode(encoding='utf-8')
except:
data = data.decode(encoding='gbk')
return data
def handshake(self):
payload = assembly_payload({
'token': self.token,
'cmd': 'handshake'
})
result = self._send_and_receive(payload)
if 'handshakeOK' in result:
return True
def send_to_qq(self, msg_content, target_qq=None):
target_qq = target_qq if target_qq is not None else self.target
if target_qq is None:
            print('[ERR] a target QQ must be given')
return False
payload = assembly_payload({
'token': self.token,
'cmd': 'sendtoqq',
'msg': msg_content,
'target': target_qq
})
result = self._send_and_receive(payload)
if 'thank you' in result:
return True
else:
return False
def send_to_discuss(self, msg_content, target_discuss_name=None):
target_discuss_name = target_discuss_name if target_discuss_name is not None else self.target
if target_discuss_name is None:
            print('[ERR] a target discuss name must be given')
return False
payload = assembly_payload({
'token': self.token,
'cmd': 'sendtodis',
'msg': msg_content,
'target': target_discuss_name
})
result = self._send_and_receive(payload)
if 'thank you' in result:
return True
else:
return False
def send_to_discuss_mt(self, msg_content, target_discuss_name=None):
"""
an multi-threads version of send_to_discuss(), avoid lagging
"""
s = threading.Thread(target=self.send_to_discuss, args=(msg_content, target_discuss_name))
s.start()
def write(self, stream):
"""
若初始化时指定了 token,target,default_target_type 那么这个类可以当成一个file-like object使用
消息会被发送到默认的目标
"""
self._default_send_method(stream)
return
def send(self, msg_content):
"""
只是WebqqClient.write()的别名
"""
self._default_send_method(msg_content)
return
def __init__(self, server, token="", target=None, default_target_type='discuss', port=DEFAULT_PORT):
self.server = server
self.token = token
self.target = target
self.port = port
if default_target_type == 'discuss':
self._default_send_method = self.send_to_discuss
elif default_target_type == 'qq':
self._default_send_method = self.send_to_qq
if self.target is None:
print('[TIP] In personal use, you can give a target=YOUR_QQ param.')
if not self.token:
print('[WARN] maybe you forget your token')
if not self.handshake():
print('[ERR] handshake error')
if __name__ == '__main__':
    # A small demo follows
from time import time
server = None
target = None
token = None
port = None
    target_type = None  # 'qq' -> QQ friend, 'discuss' -> discussion group
print('Version: ', __VERSION__)
    print('Heh, you are running this file directly, so it enters demo mode.\n'
          'After a few parameters are given, the program will use the webqq message server you specify\n'
          '(you have to host it yourself, see https://github.com/Aploium/WebQQ_API )\n'
          'to send the current unix timestamp to your QQ (or to the discussion group you specify).\n'
          'Note: as of 2016-03-07 the WebQQ servers are having problems (not the fault of this program),\n'
          'so direct messages to QQ are temporarily broken; please create a discussion group\n'
          'with an alternate account to receive the messages instead.\n\n')
if server is None:
        server = input('Enter the webqq message server (e.g. 127.0.0.1): ')
    if port is None:
        port = input('Enter the port, or press Enter to use the default port: ')
        port = int(port) if port else DEFAULT_PORT
if target is None:
        buff = input('Enter the target QQ number or discussion group name (pure digits are treated as a QQ number, anything else as a group name): ')
try:
target = int(buff)
except:
target = buff
target_type = 'discuss'
else:
            target_type = 'qq'
if token is None:
        token = input('Enter the token: ')
q_client = WebqqClient(
        server,  # server address
        token=token,
        target=target,  # default target (usually yourself)
        default_target_type=target_type,  # type of the target above: 'qq' -> QQ friend, 'discuss' -> discussion group
        port=port  # port; the default value is usually fine
)
    # If token, target and default_target_type were given at init time, the client can be used
    # as a file-like object; messages go to the default target, which is quite convenient.
q_client.send('Hello world! Send by method .send() at unix time:' + str(time()))
    # target_type and target can also be specified explicitly
if target_type == 'discuss':
q_client.send_to_discuss('Hello world! Send by method .send_to_discuss() at unix time:' + str(time()))
    elif target_type == 'qq':
q_client.send_to_qq('Hello world! Send by method .send_to_qq() at unix time:' + str(time()))
|
keep_alive.py
|
from flask import Flask
from threading import Thread
app = Flask('')
@app.route('/')
def home():
return "Running"
def run():
app.run(host='0.0.0.0', port=8000)
def keep_alive():
t = Thread(target=run)
t.start()
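# Example usage (a minimal sketch; run_bot() is hypothetical and stands for whatever
# long-running program this ping endpoint is meant to keep alive):
#
#   from keep_alive import keep_alive
#   keep_alive()   # start the Flask "Running" endpoint in a background thread
#   run_bot()      # the long-running work of the importing script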
|
test_poplib.py
|
"""Test script for poplib module."""
# Modified by Giampaolo Rodola' to give poplib.POP3 and poplib.POP3_SSL
# a real test suite
import poplib
import asyncore
import asynchat
import socket
import os
import time
import errno
from unittest import TestCase, skipUnless
from test import support as test_support
threading = test_support.import_module('threading')
HOST = test_support.HOST
PORT = 0
SUPPORTS_SSL = False
if hasattr(poplib, 'POP3_SSL'):
import ssl
SUPPORTS_SSL = True
CERTFILE = os.path.join(os.path.dirname(__file__) or os.curdir, "keycert3.pem")
CAFILE = os.path.join(os.path.dirname(__file__) or os.curdir, "pycacert.pem")
requires_ssl = skipUnless(SUPPORTS_SSL, 'SSL not supported')
# the dummy data returned by server when LIST and RETR commands are issued
LIST_RESP = b'1 1\r\n2 2\r\n3 3\r\n4 4\r\n5 5\r\n.\r\n'
RETR_RESP = b"""From: postmaster@python.org\
\r\nContent-Type: text/plain\r\n\
MIME-Version: 1.0\r\n\
Subject: Dummy\r\n\
\r\n\
line1\r\n\
line2\r\n\
line3\r\n\
.\r\n"""
class DummyPOP3Handler(asynchat.async_chat):
CAPAS = {'UIDL': [], 'IMPLEMENTATION': ['python-testlib-pop-server']}
enable_UTF8 = False
def __init__(self, conn):
asynchat.async_chat.__init__(self, conn)
self.set_terminator(b"\r\n")
self.in_buffer = []
self.push('+OK dummy pop3 server ready. <timestamp>')
self.tls_active = False
self.tls_starting = False
def collect_incoming_data(self, data):
self.in_buffer.append(data)
def found_terminator(self):
line = b''.join(self.in_buffer)
line = str(line, 'ISO-8859-1')
self.in_buffer = []
cmd = line.split(' ')[0].lower()
space = line.find(' ')
if space != -1:
arg = line[space + 1:]
else:
arg = ""
if hasattr(self, 'cmd_' + cmd):
method = getattr(self, 'cmd_' + cmd)
method(arg)
else:
self.push('-ERR unrecognized POP3 command "%s".' %cmd)
def handle_error(self):
raise
def push(self, data):
asynchat.async_chat.push(self, data.encode("ISO-8859-1") + b'\r\n')
def cmd_echo(self, arg):
# sends back the received string (used by the test suite)
self.push(arg)
def cmd_user(self, arg):
if arg != "guido":
self.push("-ERR no such user")
self.push('+OK password required')
def cmd_pass(self, arg):
if arg != "python":
self.push("-ERR wrong password")
self.push('+OK 10 messages')
def cmd_stat(self, arg):
self.push('+OK 10 100')
def cmd_list(self, arg):
if arg:
self.push('+OK %s %s' % (arg, arg))
else:
self.push('+OK')
asynchat.async_chat.push(self, LIST_RESP)
cmd_uidl = cmd_list
def cmd_retr(self, arg):
self.push('+OK %s bytes' %len(RETR_RESP))
asynchat.async_chat.push(self, RETR_RESP)
cmd_top = cmd_retr
def cmd_dele(self, arg):
self.push('+OK message marked for deletion.')
def cmd_noop(self, arg):
self.push('+OK done nothing.')
def cmd_rpop(self, arg):
self.push('+OK done nothing.')
def cmd_apop(self, arg):
self.push('+OK done nothing.')
def cmd_quit(self, arg):
self.push('+OK closing.')
self.close_when_done()
def _get_capas(self):
_capas = dict(self.CAPAS)
if not self.tls_active and SUPPORTS_SSL:
_capas['STLS'] = []
return _capas
def cmd_capa(self, arg):
self.push('+OK Capability list follows')
if self._get_capas():
for cap, params in self._get_capas().items():
_ln = [cap]
if params:
_ln.extend(params)
self.push(' '.join(_ln))
self.push('.')
def cmd_utf8(self, arg):
self.push('+OK I know RFC6856'
if self.enable_UTF8
else '-ERR What is UTF8?!')
if SUPPORTS_SSL:
def cmd_stls(self, arg):
if self.tls_active is False:
self.push('+OK Begin TLS negotiation')
tls_sock = ssl.wrap_socket(self.socket, certfile=CERTFILE,
server_side=True,
do_handshake_on_connect=False,
suppress_ragged_eofs=False)
self.del_channel()
self.set_socket(tls_sock)
self.tls_active = True
self.tls_starting = True
self.in_buffer = []
self._do_tls_handshake()
else:
self.push('-ERR Command not permitted when TLS active')
def _do_tls_handshake(self):
try:
self.socket.do_handshake()
except ssl.SSLError as err:
if err.args[0] in (ssl.SSL_ERROR_WANT_READ,
ssl.SSL_ERROR_WANT_WRITE):
return
elif err.args[0] == ssl.SSL_ERROR_EOF:
return self.handle_close()
raise
except OSError as err:
if err.args[0] == errno.ECONNABORTED:
return self.handle_close()
else:
self.tls_active = True
self.tls_starting = False
def handle_read(self):
if self.tls_starting:
self._do_tls_handshake()
else:
try:
asynchat.async_chat.handle_read(self)
except ssl.SSLEOFError:
self.handle_close()
class DummyPOP3Server(asyncore.dispatcher, threading.Thread):
handler = DummyPOP3Handler
def __init__(self, address, af=socket.AF_INET):
threading.Thread.__init__(self)
asyncore.dispatcher.__init__(self)
self.create_socket(af, socket.SOCK_STREAM)
self.bind(address)
self.listen(5)
self.active = False
self.active_lock = threading.Lock()
self.host, self.port = self.socket.getsockname()[:2]
self.handler_instance = None
def start(self):
assert not self.active
self.__flag = threading.Event()
threading.Thread.start(self)
self.__flag.wait()
def run(self):
self.active = True
self.__flag.set()
while self.active and asyncore.socket_map:
self.active_lock.acquire()
asyncore.loop(timeout=0.1, count=1)
self.active_lock.release()
asyncore.close_all(ignore_all=True)
def stop(self):
assert self.active
self.active = False
self.join()
def handle_accepted(self, conn, addr):
self.handler_instance = self.handler(conn)
def handle_connect(self):
self.close()
handle_read = handle_connect
def writable(self):
return 0
def handle_error(self):
raise
class TestPOP3Class(TestCase):
def assertOK(self, resp):
self.assertTrue(resp.startswith(b"+OK"))
def setUp(self):
self.server = DummyPOP3Server((HOST, PORT))
self.server.start()
self.client = poplib.POP3(self.server.host, self.server.port, timeout=3)
def tearDown(self):
self.client.close()
self.server.stop()
def test_getwelcome(self):
self.assertEqual(self.client.getwelcome(),
b'+OK dummy pop3 server ready. <timestamp>')
def test_exceptions(self):
self.assertRaises(poplib.error_proto, self.client._shortcmd, 'echo -err')
def test_user(self):
self.assertOK(self.client.user('guido'))
self.assertRaises(poplib.error_proto, self.client.user, 'invalid')
def test_pass_(self):
self.assertOK(self.client.pass_('python'))
self.assertRaises(poplib.error_proto, self.client.user, 'invalid')
def test_stat(self):
self.assertEqual(self.client.stat(), (10, 100))
def test_list(self):
self.assertEqual(self.client.list()[1:],
([b'1 1', b'2 2', b'3 3', b'4 4', b'5 5'],
25))
self.assertTrue(self.client.list('1').endswith(b"OK 1 1"))
def test_retr(self):
expected = (b'+OK 116 bytes',
[b'From: postmaster@python.org', b'Content-Type: text/plain',
b'MIME-Version: 1.0', b'Subject: Dummy',
b'', b'line1', b'line2', b'line3'],
113)
foo = self.client.retr('foo')
self.assertEqual(foo, expected)
def test_too_long_lines(self):
self.assertRaises(poplib.error_proto, self.client._shortcmd,
'echo +%s' % ((poplib._MAXLINE + 10) * 'a'))
def test_dele(self):
self.assertOK(self.client.dele('foo'))
def test_noop(self):
self.assertOK(self.client.noop())
def test_rpop(self):
self.assertOK(self.client.rpop('foo'))
def test_apop_normal(self):
self.assertOK(self.client.apop('foo', 'dummypassword'))
def test_apop_REDOS(self):
# Replace welcome with very long evil welcome.
# NB The upper bound on welcome length is currently 2048.
# At this length, evil input makes each apop call take
# on the order of milliseconds instead of microseconds.
evil_welcome = b'+OK' + (b'<' * 1000000)
with test_support.swap_attr(self.client, 'welcome', evil_welcome):
# The evil welcome is invalid, so apop should throw.
self.assertRaises(poplib.error_proto, self.client.apop, 'a', 'kb')
def test_top(self):
expected = (b'+OK 116 bytes',
[b'From: postmaster@python.org', b'Content-Type: text/plain',
b'MIME-Version: 1.0', b'Subject: Dummy', b'',
b'line1', b'line2', b'line3'],
113)
self.assertEqual(self.client.top(1, 1), expected)
def test_uidl(self):
self.client.uidl()
self.client.uidl('foo')
def test_utf8_raises_if_unsupported(self):
self.server.handler.enable_UTF8 = False
self.assertRaises(poplib.error_proto, self.client.utf8)
def test_utf8(self):
self.server.handler.enable_UTF8 = True
expected = b'+OK I know RFC6856'
result = self.client.utf8()
self.assertEqual(result, expected)
def test_capa(self):
capa = self.client.capa()
self.assertTrue('IMPLEMENTATION' in capa.keys())
def test_quit(self):
resp = self.client.quit()
self.assertTrue(resp)
self.assertIsNone(self.client.sock)
self.assertIsNone(self.client.file)
@requires_ssl
def test_stls_capa(self):
capa = self.client.capa()
self.assertTrue('STLS' in capa.keys())
@requires_ssl
def test_stls(self):
expected = b'+OK Begin TLS negotiation'
resp = self.client.stls()
self.assertEqual(resp, expected)
@requires_ssl
def test_stls_context(self):
expected = b'+OK Begin TLS negotiation'
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
ctx.load_verify_locations(CAFILE)
ctx.verify_mode = ssl.CERT_REQUIRED
ctx.check_hostname = True
with self.assertRaises(ssl.CertificateError):
resp = self.client.stls(context=ctx)
self.client = poplib.POP3("localhost", self.server.port, timeout=3)
resp = self.client.stls(context=ctx)
self.assertEqual(resp, expected)
if SUPPORTS_SSL:
from test.test_ftplib import SSLConnection
class DummyPOP3_SSLHandler(SSLConnection, DummyPOP3Handler):
def __init__(self, conn):
asynchat.async_chat.__init__(self, conn)
self.secure_connection()
self.set_terminator(b"\r\n")
self.in_buffer = []
self.push('+OK dummy pop3 server ready. <timestamp>')
self.tls_active = True
self.tls_starting = False
@requires_ssl
class TestPOP3_SSLClass(TestPOP3Class):
# repeat previous tests by using poplib.POP3_SSL
def setUp(self):
self.server = DummyPOP3Server((HOST, PORT))
self.server.handler = DummyPOP3_SSLHandler
self.server.start()
self.client = poplib.POP3_SSL(self.server.host, self.server.port)
def test__all__(self):
self.assertIn('POP3_SSL', poplib.__all__)
def test_context(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
self.assertRaises(ValueError, poplib.POP3_SSL, self.server.host,
self.server.port, keyfile=CERTFILE, context=ctx)
self.assertRaises(ValueError, poplib.POP3_SSL, self.server.host,
self.server.port, certfile=CERTFILE, context=ctx)
self.assertRaises(ValueError, poplib.POP3_SSL, self.server.host,
self.server.port, keyfile=CERTFILE,
certfile=CERTFILE, context=ctx)
self.client.quit()
self.client = poplib.POP3_SSL(self.server.host, self.server.port,
context=ctx)
self.assertIsInstance(self.client.sock, ssl.SSLSocket)
self.assertIs(self.client.sock.context, ctx)
self.assertTrue(self.client.noop().startswith(b'+OK'))
def test_stls(self):
self.assertRaises(poplib.error_proto, self.client.stls)
test_stls_context = test_stls
def test_stls_capa(self):
capa = self.client.capa()
self.assertFalse('STLS' in capa.keys())
@requires_ssl
class TestPOP3_TLSClass(TestPOP3Class):
# repeat previous tests by using poplib.POP3.stls()
def setUp(self):
self.server = DummyPOP3Server((HOST, PORT))
self.server.start()
self.client = poplib.POP3(self.server.host, self.server.port, timeout=3)
self.client.stls()
def tearDown(self):
if self.client.file is not None and self.client.sock is not None:
try:
self.client.quit()
except poplib.error_proto:
# happens in the test_too_long_lines case; the overlong
# response will be treated as response to QUIT and raise
# this exception
self.client.close()
self.server.stop()
def test_stls(self):
self.assertRaises(poplib.error_proto, self.client.stls)
test_stls_context = test_stls
def test_stls_capa(self):
capa = self.client.capa()
self.assertFalse(b'STLS' in capa.keys())
class TestTimeouts(TestCase):
def setUp(self):
self.evt = threading.Event()
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.sock.settimeout(60) # Safety net. Look issue 11812
self.port = test_support.bind_port(self.sock)
self.thread = threading.Thread(target=self.server, args=(self.evt,self.sock))
self.thread.setDaemon(True)
self.thread.start()
self.evt.wait()
def tearDown(self):
self.thread.join()
del self.thread # Clear out any dangling Thread objects.
def server(self, evt, serv):
serv.listen()
evt.set()
try:
conn, addr = serv.accept()
conn.send(b"+ Hola mundo\n")
conn.close()
except socket.timeout:
pass
finally:
serv.close()
def testTimeoutDefault(self):
self.assertIsNone(socket.getdefaulttimeout())
socket.setdefaulttimeout(30)
try:
pop = poplib.POP3(HOST, self.port)
finally:
socket.setdefaulttimeout(None)
self.assertEqual(pop.sock.gettimeout(), 30)
pop.sock.close()
def testTimeoutNone(self):
self.assertIsNone(socket.getdefaulttimeout())
socket.setdefaulttimeout(30)
try:
pop = poplib.POP3(HOST, self.port, timeout=None)
finally:
socket.setdefaulttimeout(None)
self.assertIsNone(pop.sock.gettimeout())
pop.sock.close()
def testTimeoutValue(self):
pop = poplib.POP3(HOST, self.port, timeout=30)
self.assertEqual(pop.sock.gettimeout(), 30)
pop.sock.close()
def test_main():
tests = [TestPOP3Class, TestTimeouts,
TestPOP3_SSLClass, TestPOP3_TLSClass]
thread_info = test_support.threading_setup()
try:
test_support.run_unittest(*tests)
finally:
test_support.threading_cleanup(*thread_info)
if __name__ == '__main__':
test_main()
|
BotClass.py
|
import datetime
import tweepy
import json
import os
import threading
import time
from time import sleep
print_lock = threading.Lock()
class authorization:
def __init__(self, Consumer_key, Consumer_secret, Access_token, Access_secret):
self.consumer_key = Consumer_key
self.consumer_secret = Consumer_secret
self.access_token = Access_token
self.access_secret = Access_secret
class TwitterBot:
def __init__(self, Searchdate, Search_for_hashtag, Authobj, Retweet_and_follow = 0, Favorite_tweet = 0):
self.authobj = Authobj
self.search_date = Searchdate
self.search_for_hashtag = Search_for_hashtag
self.auth = tweepy.OAuthHandler(self.authobj.consumer_key, self.authobj.consumer_secret)
self.auth.set_access_token(self.authobj.access_token, self.authobj.access_secret)
self.auth.secure = True
self.api = tweepy.API(self.auth)
        self.myBot = self.api.get_user(screen_name="YOUR_ACCOUNT_NAME")  # placeholder: the bot account's screen name, e.g. "YourName" (without the "@")
self.since_id_var = 901728140011163652
self.retweet_and_follow = Retweet_and_follow
self.favorite_tweet = Favorite_tweet
self.time_to_sleep = 10
    def set_date(self, x):
        if isinstance(x, datetime.datetime):
            self.search_date = x
        else:
            print("Variable " + str(x) + " is not a datetime object")
def set_hashtag(self, tag):
self.search_for_hashtag = tag
def set_time_to_sleep(self, ze_time):
self.time_to_sleep = ze_time
def start_retweeting(self):
while 1:
start = time.time()
for tweet in tweepy.Cursor(self.api.search, q=self.search_for_hashtag, since_id=self.since_id_var).items():
try:
                    if tweet.user.id == self.myBot.id:  # skip tweets posted by the bot's own account
continue
with print_lock:
print("\n\nFound tweet by: @" + tweet.user.screen_name)
if (tweet.retweeted == False) or (tweet.favorited == False):
tweet.retweet()
with print_lock:
print("Retweeted tweet from @" + tweet.user.screen_name)
print(tweet.id)
if tweet.id > self.since_id_var:
self.since_id_var = tweet.id
if self.favorite_tweet:
tweet.favorite()
if self.retweet_and_follow:
if(tweet.user.following == False):
tweet.user.follow()
with print_lock:
print("Followed the user")
except tweepy.TweepError as e:
if tweet.id > self.since_id_var:
self.since_id_var = tweet.id
with print_lock:
print(self.since_id_var)
with print_lock:
print(tweet.id)
print(e.reason)
if "429" in e.reason or "185" in e.reason:
sleep(1800)
continue
except StopIteration:
break
with print_lock:
print("Pausar botten i " + str(self.time_to_sleep / 60) + " minuter")
print("Loopen kördes på tiden: ", time.time() - start)
print(threading.current_thread().name)
sleep(self.time_to_sleep)
def retweeting_thread(self):
t1 = threading.Thread(target=self.start_retweeting)
t1.daemon = True
t1.start()
def write_to_file(self, filename, data):
a = []
        # TODO: add a check so the same tweets are not appended multiple times
for tweet in data:
a.append(tweet.text)
a.append(str(tweet.created_at))
if not os.path.isfile(filename):
with open(filename, "w") as f:
for item in a:
f.write(item + '\n')
else:
with open(filename, "a") as f:
for item in a:
f.write(item + '\n')
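# A minimal usage sketch (not part of the original class; every credential string below is a
# placeholder and "#python" is only an example hashtag):
if __name__ == '__main__':
    auth = authorization("CONSUMER_KEY", "CONSUMER_SECRET",
                         "ACCESS_TOKEN", "ACCESS_SECRET")
    bot = TwitterBot(datetime.datetime.now(), "#python", auth,
                     Retweet_and_follow=0, Favorite_tweet=1)
    bot.set_time_to_sleep(600)       # pause ten minutes between search loops
    bot.retweeting_thread()          # run the retweet loop in a daemon thread
    while True:
        sleep(60)                    # keep the main thread alive for the daemon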
|
mumbleBot.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import threading
import time
import sys
import math
import signal
import configparser
import audioop
import subprocess as sp
import argparse
import os
import os.path
import pymumble_py3 as pymumble
import pymumble_py3.constants
import variables as var
import logging
import logging.handlers
import traceback
import struct
from packaging import version
import util
import command
import constants
from constants import tr_cli as tr
from database import SettingsDatabase, MusicDatabase, DatabaseMigration
import media.system
from media.item import ValidationFailedError, PreparationFailedError
from media.playlist import BasePlaylist
from media.cache import MusicCache
class MumbleBot:
version = 'git'
def __init__(self, args):
self.log = logging.getLogger("bot")
self.log.info(f"bot: botamusique version {self.get_version()}, starting...")
signal.signal(signal.SIGINT, self.ctrl_caught)
self.cmd_handle = {}
self.stereo = var.config.getboolean('bot', 'stereo', fallback=True)
if args.channel:
self.channel = args.channel
else:
self.channel = var.config.get("server", "channel", fallback=None)
var.user = args.user
var.is_proxified = var.config.getboolean(
"webinterface", "is_web_proxified")
# Flags to indicate the bot is exiting (Ctrl-C, or !kill)
self.exit = False
self.nb_exit = 0
# Related to ffmpeg thread
self.thread = None
self.thread_stderr = None
self.read_pcm_size = 0
self.pcm_buffer_size = 0
self.last_ffmpeg_err = ""
# Play/pause status
self.is_pause = False
self.pause_at_id = ""
self.playhead = -1 # current position in a song.
self.song_start_at = -1
        self.wait_for_ready = False  # flag indicating the loop is waiting for a download to finish in another thread
#
self.on_interrupting = False
if args.host:
host = args.host
else:
host = var.config.get("server", "host")
if args.port:
port = args.port
else:
port = var.config.getint("server", "port")
if args.password:
password = args.password
else:
password = var.config.get("server", "password")
if args.channel:
self.channel = args.channel
else:
self.channel = var.config.get("server", "channel")
if args.certificate:
certificate = args.certificate
else:
certificate = util.solve_filepath(var.config.get("server", "certificate"))
if args.tokens:
tokens = args.tokens
else:
tokens = var.config.get("server", "tokens")
tokens = tokens.split(',')
if args.user:
self.username = args.user
else:
self.username = var.config.get("bot", "username")
self.mumble = pymumble.Mumble(host, user=self.username, port=port, password=password, tokens=tokens,
stereo=self.stereo,
debug=var.config.getboolean('debug', 'mumbleConnection'),
certfile=certificate)
self.mumble.callbacks.set_callback(pymumble.constants.PYMUMBLE_CLBK_TEXTMESSAGERECEIVED, self.message_received)
self.mumble.set_codec_profile("audio")
self.mumble.start() # start the mumble thread
self.mumble.is_ready() # wait for the connection
if self.mumble.connected >= pymumble.constants.PYMUMBLE_CONN_STATE_FAILED:
exit()
self.set_comment()
        self.mumble.users.myself.unmute()  # make sure the bot is not muted
self.join_channel()
self.mumble.set_bandwidth(200000)
# ====== Volume ======
self.volume_helper = util.VolumeHelper()
_volume = var.config.getfloat('bot', 'volume', fallback=0.1)
if var.db.has_option('bot', 'volume'):
_volume = var.db.getfloat('bot', 'volume')
self.volume_helper.set_volume(_volume)
self.is_ducking = False
self.on_ducking = False
self.ducking_release = time.time()
self.last_volume_cycle_time = time.time()
self._ducking_volume = 0
_ducking_volume = var.config.getfloat("bot", "ducking_volume", fallback=0.05)
_ducking_volume = var.db.getfloat("bot", "ducking_volume", fallback=_ducking_volume)
self.volume_helper.set_ducking_volume(_ducking_volume)
self.ducking_threshold = var.config.getfloat("bot", "ducking_threshold", fallback=5000)
self.ducking_threshold = var.db.getfloat("bot", "ducking_threshold", fallback=self.ducking_threshold)
if not var.db.has_option("bot", "ducking") and var.config.getboolean("bot", "ducking", fallback=False) \
or var.config.getboolean("bot", "ducking"):
self.is_ducking = True
self.mumble.callbacks.set_callback(pymumble.constants.PYMUMBLE_CLBK_SOUNDRECEIVED,
self.ducking_sound_received)
self.mumble.set_receive_sound(True)
assert var.config.get("bot", "when_nobody_in_channel") in ['pause', 'pause_resume', 'stop', 'nothing', ''], \
"Unknown action for when_nobody_in_channel"
if var.config.get("bot", "when_nobody_in_channel", fallback='') in ['pause', 'pause_resume', 'stop']:
user_change_callback = \
lambda user, action: threading.Thread(target=self.users_changed,
args=(user, action), daemon=True).start()
self.mumble.callbacks.set_callback(pymumble.constants.PYMUMBLE_CLBK_USERREMOVED, user_change_callback)
self.mumble.callbacks.set_callback(pymumble.constants.PYMUMBLE_CLBK_USERUPDATED, user_change_callback)
# Debug use
self._loop_status = 'Idle'
self._display_rms = False
self._max_rms = 0
self.redirect_ffmpeg_log = var.config.getboolean('debug', 'redirect_ffmpeg_log', fallback=True)
if var.config.getboolean("bot", "auto_check_update"):
def check_update():
nonlocal self
new_version, changelog = util.check_update(self.get_version())
if new_version:
self.send_channel_msg(tr('new_version_found', new_version=new_version, changelog=changelog))
th = threading.Thread(target=check_update, name="UpdateThread")
th.daemon = True
th.start()
last_startup_version = var.db.get("bot", "version", fallback=None)
if not last_startup_version or version.parse(last_startup_version) < version.parse(self.version):
var.db.set("bot", "version", self.version)
changelog = util.fetch_changelog()
self.send_channel_msg(tr("update_successful", version=self.version, changelog=changelog))
# Set the CTRL+C shortcut
def ctrl_caught(self, signal, frame):
self.log.info(
"\nSIGINT caught, quitting, {} more to kill".format(2 - self.nb_exit))
if var.config.getboolean('bot', 'save_playlist', fallback=True) \
and var.config.get("bot", "save_music_library", fallback=True):
self.log.info("bot: save playlist into database")
var.playlist.save()
if self.nb_exit > 1:
self.log.info("Forced Quit")
sys.exit(0)
self.nb_exit += 1
self.exit = True
def get_version(self):
if self.version != "git":
return self.version
else:
return util.get_snapshot_version()
def register_command(self, cmd, handle, no_partial_match=False, access_outside_channel=False, admin=False):
cmds = cmd.split(",")
for command in cmds:
command = command.strip()
if command:
self.cmd_handle[command] = {'handle': handle,
'partial_match': not no_partial_match,
'access_outside_channel': access_outside_channel,
'admin': admin}
self.log.debug("bot: command added: " + command)
def set_comment(self):
self.mumble.users.myself.comment(var.config.get('bot', 'comment'))
def join_channel(self):
if self.channel:
if '/' in self.channel:
self.mumble.channels.find_by_tree(self.channel.split('/')).move_in()
else:
self.mumble.channels.find_by_name(self.channel).move_in()
# =======================
# Message
# =======================
    # All text sent to the chat is analysed by this function
def message_received(self, text):
message = text.message.strip()
user = self.mumble.users[text.actor]['name']
if var.config.getboolean('commands', 'split_username_at_space'):
            # in case you use https://github.com/Natenom/mumblemoderator-module-collection/tree/master/os-suffixes ,
            # you may want to split the username
user = user.split()[0]
if message[0] in var.config.get('commands', 'command_symbol'):
# remove the symbol from the message
message = message[1:].split(' ', 1)
            # use the first word as the command and the rest as the parameter
if len(message) > 0:
command = message[0].lower()
parameter = ''
if len(message) > 1:
parameter = message[1].rstrip()
else:
return
self.log.info('bot: received command ' + command + ' - ' + parameter + ' by ' + user)
# Anti stupid guy function
if not self.is_admin(user) and not var.config.getboolean('bot', 'allow_private_message') and text.session:
self.mumble.users[text.actor].send_text_message(
tr('pm_not_allowed'))
return
for i in var.db.items("user_ban"):
if user.lower() == i[0]:
self.mumble.users[text.actor].send_text_message(
tr('user_ban'))
return
if not self.is_admin(user) and parameter:
input_url = util.get_url_from_input(parameter)
if input_url:
for i in var.db.items("url_ban"):
if input_url == i[0]:
self.mumble.users[text.actor].send_text_message(
tr('url_ban'))
return
command_exc = ""
try:
if command in self.cmd_handle:
command_exc = command
else:
# try partial match
cmds = self.cmd_handle.keys()
matches = []
for cmd in cmds:
if cmd.startswith(command) and self.cmd_handle[cmd]['partial_match']:
matches.append(cmd)
if len(matches) == 1:
self.log.info("bot: {:s} matches {:s}".format(command, matches[0]))
command_exc = matches[0]
elif len(matches) > 1:
self.mumble.users[text.actor].send_text_message(
tr('which_command', commands="<br>".join(matches)))
return
else:
self.mumble.users[text.actor].send_text_message(
tr('bad_command', command=command))
return
if self.cmd_handle[command_exc]['admin'] and not self.is_admin(user):
self.mumble.users[text.actor].send_text_message(tr('not_admin'))
return
if not self.cmd_handle[command_exc]['access_outside_channel'] \
and not self.is_admin(user) \
and not var.config.getboolean('bot', 'allow_other_channel_message') \
and self.mumble.users[text.actor]['channel_id'] != self.mumble.users.myself['channel_id']:
self.mumble.users[text.actor].send_text_message(
tr('not_in_my_channel'))
return
self.cmd_handle[command_exc]['handle'](self, user, text, command_exc, parameter)
except:
error_traceback = traceback.format_exc()
error = error_traceback.rstrip().split("\n")[-1]
self.log.error(f"bot: command {command_exc} failed with error: {error_traceback}\n")
self.send_msg(tr('error_executing_command', command=command_exc, error=error), text)
def send_msg(self, msg, text):
msg = msg.encode('utf-8', 'ignore').decode('utf-8')
        # 'text' is the message object; it tells whether this is a direct or a channel message
self.mumble.users[text.actor].send_text_message(msg)
def send_channel_msg(self, msg):
msg = msg.encode('utf-8', 'ignore').decode('utf-8')
own_channel = self.mumble.channels[self.mumble.users.myself['channel_id']]
own_channel.send_text_message(msg)
@staticmethod
def is_admin(user):
list_admin = var.config.get('bot', 'admin').rstrip().split(';')
if user in list_admin:
return True
else:
return False
# =======================
# Users changed
# =======================
def users_changed(self, user, message):
own_channel = self.mumble.channels[self.mumble.users.myself['channel_id']]
        # only resume when exactly one other user is currently in the channel;
        # otherwise, when the music is paused and somebody joins, it would start playing again
if len(own_channel.get_users()) == 2:
if var.config.get("bot", "when_nobody_in_channel") == "pause_resume":
self.resume()
elif var.config.get("bot", "when_nobody_in_channel") == "pause":
self.send_channel_msg(tr("auto_paused"))
elif len(own_channel.get_users()) == 1:
# if the bot is the only user left in the channel
self.log.info('bot: Other users in the channel left. Stopping music now.')
if var.config.get("bot", "when_nobody_in_channel") == "stop":
self.clear()
else:
self.pause()
# =======================
# Launch and Download
# =======================
def launch_music(self, music_wrapper, start_from=0):
assert music_wrapper.is_ready()
uri = music_wrapper.uri()
self.log.info("bot: play music " + music_wrapper.format_debug_string())
if var.config.getboolean('bot', 'announce_current_music'):
self.send_channel_msg(music_wrapper.format_current_playing())
if var.config.getboolean('debug', 'ffmpeg'):
ffmpeg_debug = "debug"
else:
ffmpeg_debug = "warning"
channels = 2 if self.stereo else 1
self.pcm_buffer_size = 960*channels
command = ("ffmpeg", '-v', ffmpeg_debug, '-nostdin', '-i',
uri, '-ss', f"{start_from:f}", '-ac', str(channels), '-f', 's16le', '-ar', '48000', '-')
self.log.debug("bot: execute ffmpeg command: " + " ".join(command))
        # The ffmpeg process runs as a subprocess, referenced by self.thread
        # prepare a pipe for catching ffmpeg's stderr
if self.redirect_ffmpeg_log:
pipe_rd, pipe_wd = util.pipe_no_wait() # Let the pipe work in non-blocking mode
self.thread_stderr = os.fdopen(pipe_rd)
else:
pipe_rd, pipe_wd = None, None
self.thread = sp.Popen(command, stdout=sp.PIPE, stderr=pipe_wd, bufsize=self.pcm_buffer_size)
def async_download_next(self):
        # Started when the next music item isn't ready yet;
        # does nothing if the next item is already downloaded
self.log.debug("bot: Async download next asked ")
while var.playlist.next_item() and var.playlist.next_item().type in ['url', 'url_from_playlist']:
            # usually all validation is done when an item is added to the list;
            # however, for performance reasons, items from a youtube playlist are not
            # validated when added, so the validation has to be done here.
next = var.playlist.next_item()
try:
next.validate()
if not next.is_ready():
self.async_download(next)
break
except ValidationFailedError as e:
self.send_channel_msg(e.msg)
var.playlist.remove_by_id(next.id)
var.cache.free_and_delete(next.id)
def async_download(self, item):
th = threading.Thread(
target=self._download, name="Prepare-" + item.id[:7], args=(item,))
self.log.info(f"bot: start preparing item in thread: {item.format_debug_string()}")
th.daemon = True
th.start()
return th
def validate_and_start_download(self, item):
item.validate()
if not item.is_ready():
self.log.info("bot: current music isn't ready, start downloading.")
self.async_download(item)
self.send_channel_msg(
tr('download_in_progress', item=item.format_title()))
def _download(self, item):
ver = item.version
try:
item.prepare()
except PreparationFailedError as e:
self.send_channel_msg(e.msg)
return False
if item.version > ver:
var.playlist.version += 1
# =======================
# Loop
# =======================
# Main loop of the Bot
def loop(self):
while not self.exit and self.mumble.is_alive():
while self.thread and self.mumble.sound_output.get_buffer_size() > 0.5 and not self.exit:
                # If the buffer isn't empty, no new chunk can be pushed yet, so wait
self._loop_status = f'Wait for buffer {self.mumble.sound_output.get_buffer_size():.3f}'
time.sleep(0.01)
if self.thread:
                # read raw PCM from the ffmpeg process
                # and move the playhead forward
self._loop_status = 'Reading raw'
if self.song_start_at == -1:
self.song_start_at = time.time() - self.playhead
self.playhead = time.time() - self.song_start_at
raw_music = self.thread.stdout.read(self.pcm_buffer_size)
self.read_pcm_size += self.pcm_buffer_size
if self.redirect_ffmpeg_log:
try:
self.last_ffmpeg_err = self.thread_stderr.readline()
if self.last_ffmpeg_err:
self.log.debug("ffmpeg: " + self.last_ffmpeg_err.strip("\n"))
except:
pass
if raw_music:
# Adjust the volume and send it to mumble
self.volume_cycle()
if not self.on_interrupting and len(raw_music) == self.pcm_buffer_size:
self.mumble.sound_output.add_sound(
audioop.mul(raw_music, 2, self.volume_helper.real_volume))
elif self.read_pcm_size == 0:
self.mumble.sound_output.add_sound(
audioop.mul(self._fadeout(raw_music, self.stereo, fadein=True), 2, self.volume_helper.real_volume))
elif self.on_interrupting or len(raw_music) < self.pcm_buffer_size:
self.mumble.sound_output.add_sound(
audioop.mul(self._fadeout(raw_music, self.stereo, fadein=False), 2, self.volume_helper.real_volume))
self.thread.kill()
self.thread = None
time.sleep(0.1)
self.on_interrupting = False
else:
time.sleep(0.1)
else:
time.sleep(0.1)
if not self.is_pause and self.thread is None:
                # The bot is not paused, but the ffmpeg process has gone:
                # either the last song finished, the bot just resumed from pause, or something went wrong.
if self.read_pcm_size < self.pcm_buffer_size and len(var.playlist) > 0 \
and var.playlist.current_index != -1 \
and self.last_ffmpeg_err:
current = var.playlist.current_item()
self.log.error("bot: cannot play music %s", current.format_debug_string())
self.log.error("bot: with ffmpeg error: %s", self.last_ffmpeg_err)
self.last_ffmpeg_err = ""
self.send_channel_msg(tr('unable_play', item=current.format_title()))
var.playlist.remove_by_id(current.id)
var.cache.free_and_delete(current.id)
# move to the next song.
if not self.wait_for_ready: # if wait_for_ready flag is not true, move to the next song.
if var.playlist.next():
current = var.playlist.current_item()
self.log.debug(f"bot: next into the song: {current.format_debug_string()}")
try:
self.validate_and_start_download(current)
self.wait_for_ready = True
self.song_start_at = -1
self.playhead = 0
except ValidationFailedError as e:
self.send_channel_msg(e.msg)
var.playlist.remove_by_id(current.id)
var.cache.free_and_delete(current.id)
else:
self._loop_status = 'Empty queue'
else:
# if wait_for_ready flag is true, means the pointer is already
# pointing to target song. start playing
current = var.playlist.current_item()
if current:
if current.is_ready():
self.wait_for_ready = False
self.read_pcm_size = 0
self.launch_music(current, self.playhead)
self.last_volume_cycle_time = time.time()
self.async_download_next()
elif current.is_failed():
var.playlist.remove_by_id(current.id)
self.wait_for_ready = False
else:
self._loop_status = 'Wait for the next item to be ready'
else:
self.wait_for_ready = False
while self.mumble.sound_output.get_buffer_size() > 0:
# Empty the buffer before exit
time.sleep(0.01)
time.sleep(0.5)
if self.exit:
self._loop_status = "exited"
if var.config.getboolean('bot', 'save_playlist', fallback=True) \
and var.config.get("bot", "save_music_library", fallback=True):
self.log.info("bot: save playlist into database")
var.playlist.save()
def volume_cycle(self):
delta = time.time() - self.last_volume_cycle_time
if self.on_ducking and self.ducking_release < time.time():
self.on_ducking = False
self._max_rms = 0
if delta > 0.001:
if self.is_ducking and self.on_ducking:
self.volume_helper.real_volume = \
(self.volume_helper.real_volume - self.volume_helper.ducking_volume_set) * math.exp(- delta / 0.2) \
+ self.volume_helper.ducking_volume_set
else:
self.volume_helper.real_volume = self.volume_helper.volume_set - \
(self.volume_helper.volume_set - self.volume_helper.real_volume) * math.exp(- delta / 0.5)
self.last_volume_cycle_time = time.time()
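    # The update above is a first-order exponential approach toward the target volume:
    #   v(t + dt) = target + (v(t) - target) * exp(-dt / tau)
    # with tau = 0.2 s while ducking and tau = 0.5 s when returning to the normal volume,
    # so a smaller tau means the volume reaches its target faster.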
def ducking_sound_received(self, user, sound):
rms = audioop.rms(sound.pcm, 2)
self._max_rms = max(rms, self._max_rms)
if self._display_rms:
if rms < self.ducking_threshold:
print('%6d/%6d ' % (rms, self._max_rms) + '-' * int(rms / 200), end='\r')
else:
print('%6d/%6d ' % (rms, self._max_rms) + '-' * int(self.ducking_threshold / 200)
+ '+' * int((rms - self.ducking_threshold) / 200), end='\r')
if rms > self.ducking_threshold:
if self.on_ducking is False:
self.log.debug("bot: ducking triggered")
self.on_ducking = True
self.ducking_release = time.time() + 1 # ducking release after 1s
def _fadeout(self, _pcm_data, stereo=False, fadein=False):
pcm_data = bytearray(_pcm_data)
if stereo:
if not fadein:
mask = [math.exp(-x/60) for x in range(0, int(len(pcm_data) / 4))]
else:
mask = [math.exp(-x / 60) for x in reversed(range(0, int(len(pcm_data) / 4)))]
for i in range(int(len(pcm_data) / 4)):
pcm_data[4 * i:4 * i + 2] = struct.pack("<h",
round(struct.unpack("<h", pcm_data[4 * i:4 * i + 2])[0] * mask[i]))
pcm_data[4 * i + 2:4 * i + 4] = struct.pack("<h", round(
struct.unpack("<h", pcm_data[4 * i + 2:4 * i + 4])[0] * mask[i]))
else:
mask = [math.exp(-x/60) for x in range(0, int(len(pcm_data) / 2))]
for i in range(int(len(pcm_data) / 2)):
pcm_data[2 * i:2 * i + 2] = struct.pack("<h",
round(struct.unpack("<h", pcm_data[2 * i:2 * i + 2])[0] * mask[i]))
        return bytes(pcm_data) + bytes(len(pcm_data))  # pad with an equal length of silence after the fade
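    # The fade envelope above is mask[i] = exp(-i / 60) per sample frame (reversed for a
    # fade-in); at the 48 kHz output rate that is a time constant of roughly 1.25 ms, so a
    # faded chunk decays essentially to silence before it ends.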
# =======================
# Play Control
# =======================
def play(self, index=-1, start_at=0):
if not self.is_pause:
self.interrupt()
if index != -1:
var.playlist.point_to(index)
current = var.playlist.current_item()
self.validate_and_start_download(current)
self.is_pause = False
self.wait_for_ready = True
self.song_start_at = -1
self.playhead = start_at
def clear(self):
# Kill the ffmpeg thread and empty the playlist
self.interrupt()
var.playlist.clear()
self.wait_for_ready = False
self.log.info("bot: music stopped. playlist trashed.")
def stop(self):
self.interrupt()
self.is_pause = True
if len(var.playlist) > 0:
self.wait_for_ready = True
else:
self.wait_for_ready = False
self.log.info("bot: music stopped.")
def interrupt(self):
# Kill the ffmpeg thread
if self.thread:
self.on_interrupting = True
time.sleep(0.1)
self.song_start_at = -1
self.read_pcm_size = 0
def pause(self):
# Kill the ffmpeg thread
self.interrupt()
self.is_pause = True
self.song_start_at = -1
if len(var.playlist) > 0:
self.pause_at_id = var.playlist.current_item().id
self.log.info(f"bot: music paused at {self.playhead:.2f} seconds.")
def resume(self):
self.is_pause = False
if var.playlist.current_index == -1:
var.playlist.next()
self.playhead = 0
return
music_wrapper = var.playlist.current_item()
if not music_wrapper or not music_wrapper.id == self.pause_at_id or not music_wrapper.is_ready():
self.playhead = 0
return
self.wait_for_ready = True
self.pause_at_id = ""
def start_web_interface(addr, port):
global formatter
import interface
# setup logger
werkzeug_logger = logging.getLogger('werkzeug')
logfile = util.solve_filepath(var.config.get('webinterface', 'web_logfile'))
if logfile:
handler = logging.handlers.RotatingFileHandler(logfile, mode='a', maxBytes=10240) # Rotate after 10KB
else:
handler = logging.StreamHandler()
werkzeug_logger.addHandler(handler)
interface.init_proxy()
interface.web.env = 'development'
interface.web.secret_key = var.config.get('webinterface', 'flask_secret')
interface.web.run(port=port, host=addr)
if __name__ == '__main__':
supported_languages = util.get_supported_language()
parser = argparse.ArgumentParser(
description='Bot for playing music on Mumble')
# General arguments
parser.add_argument("--config", dest='config', type=str, default='configuration.ini',
help='Load configuration from this file. Default: configuration.ini')
parser.add_argument("--db", dest='db', type=str,
default=None, help='Settings database file')
parser.add_argument("--music-db", dest='music_db', type=str,
default=None, help='Music library database file')
parser.add_argument("--lang", dest='lang', type=str, default=None,
help='Preferred language. Support ' + ", ".join(supported_languages))
parser.add_argument("-q", "--quiet", dest="quiet",
action="store_true", help="Only Error logs")
parser.add_argument("-v", "--verbose", dest="verbose",
action="store_true", help="Show debug log")
# Mumble arguments
parser.add_argument("-s", "--server", dest="host",
type=str, help="Hostname of the Mumble server")
parser.add_argument("-u", "--user", dest="user",
type=str, help="Username for the bot")
parser.add_argument("-P", "--password", dest="password",
type=str, help="Server password, if required")
parser.add_argument("-T", "--tokens", dest="tokens",
type=str, help="Server tokens, if required")
parser.add_argument("-p", "--port", dest="port",
type=int, help="Port for the Mumble server")
parser.add_argument("-c", "--channel", dest="channel",
type=str, help="Default channel for the bot")
parser.add_argument("-C", "--cert", dest="certificate",
type=str, default=None, help="Certificate file")
args = parser.parse_args()
# ======================
# Load Config
# ======================
config = configparser.ConfigParser(interpolation=None, allow_no_value=True)
var.config = config
parsed_configs = config.read([util.solve_filepath('configuration.default.ini'), util.solve_filepath(args.config)],
encoding='utf-8')
if len(parsed_configs) == 0:
logging.error('Could not read configuration from file \"{}\"'.format(args.config))
sys.exit()
# ======================
# Setup Logger
# ======================
bot_logger = logging.getLogger("bot")
bot_logger.setLevel(logging.INFO)
if args.verbose:
bot_logger.setLevel(logging.DEBUG)
bot_logger.debug("Starting in DEBUG loglevel")
elif args.quiet:
bot_logger.setLevel(logging.ERROR)
bot_logger.error("Starting in ERROR loglevel")
logfile = util.solve_filepath(var.config.get('bot', 'logfile').strip())
handler = None
if logfile:
print(f"Redirecting stdout and stderr to log file: {logfile}")
handler = logging.handlers.RotatingFileHandler(logfile, mode='a', maxBytes=10240) # Rotate after 10KB
sys.stdout = util.LoggerIOWrapper(bot_logger, logging.INFO, fallback_io_buffer=sys.stdout.buffer)
sys.stderr = util.LoggerIOWrapper(bot_logger, logging.INFO, fallback_io_buffer=sys.stderr.buffer)
else:
handler = logging.StreamHandler()
util.set_logging_formatter(handler, bot_logger.level)
bot_logger.addHandler(handler)
logging.getLogger("root").addHandler(handler)
var.bot_logger = bot_logger
# ======================
# Load Database
# ======================
if args.user:
username = args.user
else:
username = var.config.get("bot", "username")
sanitized_username = "".join([x if x.isalnum() else "_" for x in username])
var.settings_db_path = args.db if args.db is not None else util.solve_filepath(
config.get("bot", "database_path", fallback=f"settings-{sanitized_username}.db"))
var.music_db_path = args.music_db if args.music_db is not None else util.solve_filepath(
config.get("bot", "music_database_path", fallback="music.db"))
var.db = SettingsDatabase(var.settings_db_path)
if var.config.get("bot", "save_music_library", fallback=True):
var.music_db = MusicDatabase(var.music_db_path)
else:
var.music_db = MusicDatabase(":memory:")
DatabaseMigration(var.db, var.music_db).migrate()
var.music_folder = util.solve_filepath(var.config.get('bot', 'music_folder'))
var.tmp_folder = util.solve_filepath(var.config.get('bot', 'tmp_folder'))
# ======================
# Translation
# ======================
lang = ""
if args.lang:
lang = args.lang
else:
lang = var.config.get('bot', 'language', fallback='en_US')
if lang not in supported_languages:
raise KeyError(f"Unsupported language {lang}")
var.language = lang
constants.load_lang(lang)
# ======================
# Prepare Cache
# ======================
var.cache = MusicCache(var.music_db)
if var.config.getboolean("bot", "refresh_cache_on_startup", fallback=True):
var.cache.build_dir_cache()
# ======================
# Load playback mode
# ======================
playback_mode = None
if var.db.has_option("playlist", "playback_mode"):
playback_mode = var.db.get('playlist', 'playback_mode')
else:
playback_mode = var.config.get('bot', 'playback_mode', fallback="one-shot")
if playback_mode in ["one-shot", "repeat", "random", "autoplay"]:
var.playlist = media.playlist.get_playlist(playback_mode)
else:
raise KeyError(f"Unknown playback mode '{playback_mode}'")
# ======================
# Create bot instance
# ======================
var.bot = MumbleBot(args)
command.register_all_commands(var.bot)
# load playlist
if var.config.getboolean('bot', 'save_playlist', fallback=True):
var.bot_logger.info("bot: load playlist from previous session")
var.playlist.load()
# ============================
# Start the web interface
# ============================
if var.config.getboolean("webinterface", "enabled"):
wi_addr = var.config.get("webinterface", "listening_addr")
wi_port = var.config.getint("webinterface", "listening_port")
tt = threading.Thread(
target=start_web_interface, name="WebThread", args=(wi_addr, wi_port))
tt.daemon = True
bot_logger.info('Starting web interface on {}:{}'.format(wi_addr, wi_port))
tt.start()
# Start the main loop.
var.bot.loop()
|
beakerx_server.py
|
# Copyright 2018 TWO SIGMA OPEN SOURCE, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import socket
import threading
import zmq
class BeakerxZMQServer:
def __init__(self, beakerXQueue):
self.queue = beakerXQueue
self.url = "tcp://127.0.0.1:" + BeakerxZMQServer.get_free_tcp_port()
thread = threading.Thread(target=self.threaded_function, daemon=True)
thread.start()
def threaded_function(self):
context = zmq.Context()
socket = context.socket(zmq.REP)
socket.bind(self.url)
while True:
message = socket.recv()
self.queue.put(message)
socket.send_string("Ok")
@staticmethod
def get_free_tcp_port():
tcp = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
tcp.bind(('localhost', 0))
addr, port = tcp.getsockname()
tcp.close()
return str(port)
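# A minimal round-trip sketch (illustrative only, not part of the original module):
# a plain queue stands in for the BeakerX queue, and a REQ socket plays the client.
if __name__ == '__main__':
    import queue

    q = queue.Queue()
    server = BeakerxZMQServer(q)

    ctx = zmq.Context()
    client = ctx.socket(zmq.REQ)
    client.connect(server.url)
    client.send_string("hello")
    print(client.recv())   # b'Ok' sent back by the REP socket
    print(q.get())         # b'hello' delivered through the queue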
|
test_basic.py
|
# -*- coding: utf-8 -*-
"""
tests.basic
~~~~~~~~~~~~~~~~~~~~~
The basic functionality.
:copyright: © 2010 by the Pallets team.
:license: BSD, see LICENSE for more details.
"""
import re
import time
import uuid
from datetime import datetime
from threading import Thread
import pytest
import werkzeug.serving
from werkzeug.exceptions import BadRequest, Forbidden, NotFound
from werkzeug.http import parse_date
from werkzeug.routing import BuildError
import flask
from flask._compat import text_type
def test_options_work(app, client):
@app.route('/', methods=['GET', 'POST'])
def index():
return 'Hello World'
rv = client.open('/', method='OPTIONS')
assert sorted(rv.allow) == ['GET', 'HEAD', 'OPTIONS', 'POST']
assert rv.data == b''
def test_options_on_multiple_rules(app, client):
@app.route('/', methods=['GET', 'POST'])
def index():
return 'Hello World'
@app.route('/', methods=['PUT'])
def index_put():
return 'Aha!'
rv = client.open('/', method='OPTIONS')
assert sorted(rv.allow) == ['GET', 'HEAD', 'OPTIONS', 'POST', 'PUT']
def test_provide_automatic_options_attr():
app = flask.Flask(__name__)
def index():
return 'Hello World!'
index.provide_automatic_options = False
app.route('/')(index)
rv = app.test_client().open('/', method='OPTIONS')
assert rv.status_code == 405
app = flask.Flask(__name__)
def index2():
return 'Hello World!'
index2.provide_automatic_options = True
app.route('/', methods=['OPTIONS'])(index2)
rv = app.test_client().open('/', method='OPTIONS')
assert sorted(rv.allow) == ['OPTIONS']
def test_provide_automatic_options_kwarg(app, client):
def index():
return flask.request.method
def more():
return flask.request.method
app.add_url_rule('/', view_func=index, provide_automatic_options=False)
app.add_url_rule(
'/more', view_func=more, methods=['GET', 'POST'],
provide_automatic_options=False
)
assert client.get('/').data == b'GET'
rv = client.post('/')
assert rv.status_code == 405
assert sorted(rv.allow) == ['GET', 'HEAD']
# Older versions of Werkzeug.test.Client don't have an options method
if hasattr(client, 'options'):
rv = client.options('/')
else:
rv = client.open('/', method='OPTIONS')
assert rv.status_code == 405
rv = client.head('/')
assert rv.status_code == 200
assert not rv.data # head truncates
assert client.post('/more').data == b'POST'
assert client.get('/more').data == b'GET'
rv = client.delete('/more')
assert rv.status_code == 405
assert sorted(rv.allow) == ['GET', 'HEAD', 'POST']
if hasattr(client, 'options'):
rv = client.options('/more')
else:
rv = client.open('/more', method='OPTIONS')
assert rv.status_code == 405
def test_request_dispatching(app, client):
@app.route('/')
def index():
return flask.request.method
@app.route('/more', methods=['GET', 'POST'])
def more():
return flask.request.method
assert client.get('/').data == b'GET'
rv = client.post('/')
assert rv.status_code == 405
assert sorted(rv.allow) == ['GET', 'HEAD', 'OPTIONS']
rv = client.head('/')
assert rv.status_code == 200
assert not rv.data # head truncates
assert client.post('/more').data == b'POST'
assert client.get('/more').data == b'GET'
rv = client.delete('/more')
assert rv.status_code == 405
assert sorted(rv.allow) == ['GET', 'HEAD', 'OPTIONS', 'POST']
def test_disallow_string_for_allowed_methods(app):
with pytest.raises(TypeError):
@app.route('/', methods='GET POST')
def index():
return "Hey"
def test_url_mapping(app, client):
random_uuid4 = "7eb41166-9ebf-4d26-b771-ea3f54f8b383"
def index():
return flask.request.method
def more():
return flask.request.method
def options():
return random_uuid4
app.add_url_rule('/', 'index', index)
app.add_url_rule('/more', 'more', more, methods=['GET', 'POST'])
# Issue 1288: Test that automatic options are not added when non-uppercase 'options' in methods
app.add_url_rule('/options', 'options', options, methods=['options'])
assert client.get('/').data == b'GET'
rv = client.post('/')
assert rv.status_code == 405
assert sorted(rv.allow) == ['GET', 'HEAD', 'OPTIONS']
rv = client.head('/')
assert rv.status_code == 200
assert not rv.data # head truncates
assert client.post('/more').data == b'POST'
assert client.get('/more').data == b'GET'
rv = client.delete('/more')
assert rv.status_code == 405
assert sorted(rv.allow) == ['GET', 'HEAD', 'OPTIONS', 'POST']
rv = client.open('/options', method='OPTIONS')
assert rv.status_code == 200
assert random_uuid4 in rv.data.decode("utf-8")
def test_werkzeug_routing(app, client):
from werkzeug.routing import Submount, Rule
app.url_map.add(Submount('/foo', [
Rule('/bar', endpoint='bar'),
Rule('/', endpoint='index')
]))
def bar():
return 'bar'
def index():
return 'index'
app.view_functions['bar'] = bar
app.view_functions['index'] = index
assert client.get('/foo/').data == b'index'
assert client.get('/foo/bar').data == b'bar'
def test_endpoint_decorator(app, client):
from werkzeug.routing import Submount, Rule
app.url_map.add(Submount('/foo', [
Rule('/bar', endpoint='bar'),
Rule('/', endpoint='index')
]))
@app.endpoint('bar')
def bar():
return 'bar'
@app.endpoint('index')
def index():
return 'index'
assert client.get('/foo/').data == b'index'
assert client.get('/foo/bar').data == b'bar'
def test_session(app, client):
@app.route('/set', methods=['POST'])
def set():
assert not flask.session.accessed
assert not flask.session.modified
flask.session['value'] = flask.request.form['value']
assert flask.session.accessed
assert flask.session.modified
return 'value set'
@app.route('/get')
def get():
assert not flask.session.accessed
assert not flask.session.modified
v = flask.session.get('value', 'None')
assert flask.session.accessed
assert not flask.session.modified
return v
assert client.post('/set', data={'value': '42'}).data == b'value set'
assert client.get('/get').data == b'42'
def test_session_using_server_name(app, client):
app.config.update(
SERVER_NAME='example.com'
)
@app.route('/')
def index():
flask.session['testing'] = 42
return 'Hello World'
rv = client.get('/', 'http://example.com/')
assert 'domain=.example.com' in rv.headers['set-cookie'].lower()
assert 'httponly' in rv.headers['set-cookie'].lower()
def test_session_using_server_name_and_port(app, client):
app.config.update(
SERVER_NAME='example.com:8080'
)
@app.route('/')
def index():
flask.session['testing'] = 42
return 'Hello World'
rv = client.get('/', 'http://example.com:8080/')
assert 'domain=.example.com' in rv.headers['set-cookie'].lower()
assert 'httponly' in rv.headers['set-cookie'].lower()
def test_session_using_server_name_port_and_path(app, client):
app.config.update(
SERVER_NAME='example.com:8080',
APPLICATION_ROOT='/foo'
)
@app.route('/')
def index():
flask.session['testing'] = 42
return 'Hello World'
rv = client.get('/', 'http://example.com:8080/foo')
assert 'domain=example.com' in rv.headers['set-cookie'].lower()
assert 'path=/foo' in rv.headers['set-cookie'].lower()
assert 'httponly' in rv.headers['set-cookie'].lower()
def test_session_using_application_root(app, client):
class PrefixPathMiddleware(object):
def __init__(self, app, prefix):
self.app = app
self.prefix = prefix
def __call__(self, environ, start_response):
environ['SCRIPT_NAME'] = self.prefix
return self.app(environ, start_response)
app.wsgi_app = PrefixPathMiddleware(app.wsgi_app, '/bar')
app.config.update(
APPLICATION_ROOT='/bar'
)
@app.route('/')
def index():
flask.session['testing'] = 42
return 'Hello World'
rv = client.get('/', 'http://example.com:8080/')
assert 'path=/bar' in rv.headers['set-cookie'].lower()
def test_session_using_session_settings(app, client):
app.config.update(
SERVER_NAME='www.example.com:8080',
APPLICATION_ROOT='/test',
SESSION_COOKIE_DOMAIN='.example.com',
SESSION_COOKIE_HTTPONLY=False,
SESSION_COOKIE_SECURE=True,
SESSION_COOKIE_SAMESITE='Lax',
SESSION_COOKIE_PATH='/'
)
@app.route('/')
def index():
flask.session['testing'] = 42
return 'Hello World'
rv = client.get('/', 'http://www.example.com:8080/test/')
cookie = rv.headers['set-cookie'].lower()
assert 'domain=.example.com' in cookie
assert 'path=/' in cookie
assert 'secure' in cookie
assert 'httponly' not in cookie
assert 'samesite' in cookie
def test_session_using_samesite_attribute(app, client):
@app.route('/')
def index():
flask.session['testing'] = 42
return 'Hello World'
app.config.update(SESSION_COOKIE_SAMESITE='invalid')
with pytest.raises(ValueError):
client.get('/')
app.config.update(SESSION_COOKIE_SAMESITE=None)
rv = client.get('/')
cookie = rv.headers['set-cookie'].lower()
assert 'samesite' not in cookie
app.config.update(SESSION_COOKIE_SAMESITE='Strict')
rv = client.get('/')
cookie = rv.headers['set-cookie'].lower()
assert 'samesite=strict' in cookie
app.config.update(SESSION_COOKIE_SAMESITE='Lax')
rv = client.get('/')
cookie = rv.headers['set-cookie'].lower()
assert 'samesite=lax' in cookie
def test_session_localhost_warning(recwarn, app, client):
app.config.update(
SERVER_NAME='localhost:5000',
)
@app.route('/')
def index():
flask.session['testing'] = 42
return 'testing'
rv = client.get('/', 'http://localhost:5000/')
assert 'domain' not in rv.headers['set-cookie'].lower()
w = recwarn.pop(UserWarning)
assert '"localhost" is not a valid cookie domain' in str(w.message)
def test_session_ip_warning(recwarn, app, client):
app.config.update(
SERVER_NAME='127.0.0.1:5000',
)
@app.route('/')
def index():
flask.session['testing'] = 42
return 'testing'
rv = client.get('/', 'http://127.0.0.1:5000/')
assert 'domain=127.0.0.1' in rv.headers['set-cookie'].lower()
w = recwarn.pop(UserWarning)
assert 'cookie domain is an IP' in str(w.message)
def test_missing_session(app):
app.secret_key = None
def expect_exception(f, *args, **kwargs):
e = pytest.raises(RuntimeError, f, *args, **kwargs)
assert e.value.args and 'session is unavailable' in e.value.args[0]
with app.test_request_context():
assert flask.session.get('missing_key') is None
expect_exception(flask.session.__setitem__, 'foo', 42)
expect_exception(flask.session.pop, 'foo')
def test_session_expiration(app, client):
permanent = True
@app.route('/')
def index():
flask.session['test'] = 42
flask.session.permanent = permanent
return ''
@app.route('/test')
def test():
return text_type(flask.session.permanent)
rv = client.get('/')
assert 'set-cookie' in rv.headers
match = re.search(r'(?i)\bexpires=([^;]+)', rv.headers['set-cookie'])
expires = parse_date(match.group())
expected = datetime.utcnow() + app.permanent_session_lifetime
assert expires.year == expected.year
assert expires.month == expected.month
assert expires.day == expected.day
rv = client.get('/test')
assert rv.data == b'True'
permanent = False
rv = client.get('/')
assert 'set-cookie' in rv.headers
match = re.search(r'\bexpires=([^;]+)', rv.headers['set-cookie'])
assert match is None
def test_session_stored_last(app, client):
@app.after_request
def modify_session(response):
flask.session['foo'] = 42
return response
@app.route('/')
def dump_session_contents():
return repr(flask.session.get('foo'))
assert client.get('/').data == b'None'
assert client.get('/').data == b'42'
def test_session_special_types(app, client):
now = datetime.utcnow().replace(microsecond=0)
the_uuid = uuid.uuid4()
@app.route('/')
def dump_session_contents():
flask.session['t'] = (1, 2, 3)
flask.session['b'] = b'\xff'
flask.session['m'] = flask.Markup('<html>')
flask.session['u'] = the_uuid
flask.session['d'] = now
flask.session['t_tag'] = {' t': 'not-a-tuple'}
flask.session['di_t_tag'] = {' t__': 'not-a-tuple'}
flask.session['di_tag'] = {' di': 'not-a-dict'}
return '', 204
with client:
client.get('/')
s = flask.session
assert s['t'] == (1, 2, 3)
assert type(s['b']) == bytes
assert s['b'] == b'\xff'
assert type(s['m']) == flask.Markup
assert s['m'] == flask.Markup('<html>')
assert s['u'] == the_uuid
assert s['d'] == now
assert s['t_tag'] == {' t': 'not-a-tuple'}
assert s['di_t_tag'] == {' t__': 'not-a-tuple'}
assert s['di_tag'] == {' di': 'not-a-dict'}
def test_session_cookie_setting(app):
is_permanent = True
@app.route('/bump')
def bump():
rv = flask.session['foo'] = flask.session.get('foo', 0) + 1
flask.session.permanent = is_permanent
return str(rv)
@app.route('/read')
def read():
return str(flask.session.get('foo', 0))
def run_test(expect_header):
with app.test_client() as c:
assert c.get('/bump').data == b'1'
assert c.get('/bump').data == b'2'
assert c.get('/bump').data == b'3'
rv = c.get('/read')
set_cookie = rv.headers.get('set-cookie')
assert (set_cookie is not None) == expect_header
assert rv.data == b'3'
is_permanent = True
app.config['SESSION_REFRESH_EACH_REQUEST'] = True
run_test(expect_header=True)
is_permanent = True
app.config['SESSION_REFRESH_EACH_REQUEST'] = False
run_test(expect_header=False)
is_permanent = False
app.config['SESSION_REFRESH_EACH_REQUEST'] = True
run_test(expect_header=False)
is_permanent = False
app.config['SESSION_REFRESH_EACH_REQUEST'] = False
run_test(expect_header=False)
def test_session_vary_cookie(app, client):
@app.route('/set')
def set_session():
flask.session['test'] = 'test'
return ''
@app.route('/get')
def get():
return flask.session.get('test')
@app.route('/getitem')
def getitem():
return flask.session['test']
@app.route('/setdefault')
def setdefault():
return flask.session.setdefault('test', 'default')
@app.route('/vary-cookie-header-set')
def vary_cookie_header_set():
response = flask.Response()
response.vary.add('Cookie')
flask.session['test'] = 'test'
return response
@app.route('/vary-header-set')
def vary_header_set():
response = flask.Response()
response.vary.update(('Accept-Encoding', 'Accept-Language'))
flask.session['test'] = 'test'
return response
@app.route('/no-vary-header')
def no_vary_header():
return ''
def expect(path, header_value='Cookie'):
rv = client.get(path)
if header_value:
# The 'Vary' key should exist in the headers only once.
assert len(rv.headers.get_all('Vary')) == 1
assert rv.headers['Vary'] == header_value
else:
assert 'Vary' not in rv.headers
expect('/set')
expect('/get')
expect('/getitem')
expect('/setdefault')
expect('/vary-cookie-header-set')
expect('/vary-header-set', 'Accept-Encoding, Accept-Language, Cookie')
expect('/no-vary-header', None)
def test_flashes(app, req_ctx):
assert not flask.session.modified
flask.flash('Zap')
flask.session.modified = False
flask.flash('Zip')
assert flask.session.modified
assert list(flask.get_flashed_messages()) == ['Zap', 'Zip']
def test_extended_flashing(app):
# Be sure app.testing=True below, else tests can fail silently.
#
# Specifically, if app.testing is not set to True, the AssertionErrors
# in the view functions will cause a 500 response to the test client
# instead of propagating exceptions.
@app.route('/')
def index():
flask.flash(u'Hello World')
flask.flash(u'Hello World', 'error')
flask.flash(flask.Markup(u'<em>Testing</em>'), 'warning')
return ''
@app.route('/test/')
def test():
messages = flask.get_flashed_messages()
assert list(messages) == [
u'Hello World',
u'Hello World',
flask.Markup(u'<em>Testing</em>')
]
return ''
@app.route('/test_with_categories/')
def test_with_categories():
messages = flask.get_flashed_messages(with_categories=True)
assert len(messages) == 3
assert list(messages) == [
('message', u'Hello World'),
('error', u'Hello World'),
('warning', flask.Markup(u'<em>Testing</em>'))
]
return ''
@app.route('/test_filter/')
def test_filter():
messages = flask.get_flashed_messages(
category_filter=['message'], with_categories=True)
assert list(messages) == [('message', u'Hello World')]
return ''
@app.route('/test_filters/')
def test_filters():
messages = flask.get_flashed_messages(
category_filter=['message', 'warning'], with_categories=True)
assert list(messages) == [
('message', u'Hello World'),
('warning', flask.Markup(u'<em>Testing</em>'))
]
return ''
@app.route('/test_filters_without_returning_categories/')
def test_filters2():
messages = flask.get_flashed_messages(
category_filter=['message', 'warning'])
assert len(messages) == 2
assert messages[0] == u'Hello World'
assert messages[1] == flask.Markup(u'<em>Testing</em>')
return ''
# Create new test client on each test to clean flashed messages.
client = app.test_client()
client.get('/')
client.get('/test_with_categories/')
client = app.test_client()
client.get('/')
client.get('/test_filter/')
client = app.test_client()
client.get('/')
client.get('/test_filters/')
client = app.test_client()
client.get('/')
client.get('/test_filters_without_returning_categories/')
def test_request_processing(app, client):
evts = []
@app.before_request
def before_request():
evts.append('before')
@app.after_request
def after_request(response):
response.data += b'|after'
evts.append('after')
return response
@app.route('/')
def index():
assert 'before' in evts
assert 'after' not in evts
return 'request'
assert 'after' not in evts
rv = client.get('/').data
assert 'after' in evts
assert rv == b'request|after'
def test_request_preprocessing_early_return(app, client):
evts = []
@app.before_request
def before_request1():
evts.append(1)
@app.before_request
def before_request2():
evts.append(2)
return "hello"
@app.before_request
def before_request3():
evts.append(3)
return "bye"
@app.route('/')
def index():
evts.append('index')
return "damnit"
rv = client.get('/').data.strip()
assert rv == b'hello'
assert evts == [1, 2]
def test_after_request_processing(app, client):
@app.route('/')
def index():
@flask.after_this_request
def foo(response):
response.headers['X-Foo'] = 'a header'
return response
return 'Test'
resp = client.get('/')
assert resp.status_code == 200
assert resp.headers['X-Foo'] == 'a header'
def test_teardown_request_handler(app, client):
called = []
@app.teardown_request
def teardown_request(exc):
called.append(True)
return "Ignored"
@app.route('/')
def root():
return "Response"
rv = client.get('/')
assert rv.status_code == 200
assert b'Response' in rv.data
assert len(called) == 1
def test_teardown_request_handler_debug_mode(app, client):
called = []
@app.teardown_request
def teardown_request(exc):
called.append(True)
return "Ignored"
@app.route('/')
def root():
return "Response"
rv = client.get('/')
assert rv.status_code == 200
assert b'Response' in rv.data
assert len(called) == 1
def test_teardown_request_handler_error(app, client):
called = []
app.testing = False
@app.teardown_request
def teardown_request1(exc):
assert type(exc) == ZeroDivisionError
called.append(True)
# This raises a new error and blows away sys.exc_info(), so we can
# test that all teardown_requests get passed the same original
# exception.
try:
raise TypeError()
except:
pass
@app.teardown_request
def teardown_request2(exc):
assert type(exc) == ZeroDivisionError
called.append(True)
# This raises a new error and blows away sys.exc_info(), so we can
# test that all teardown_requests get passed the same original
# exception.
try:
raise TypeError()
except:
pass
@app.route('/')
def fails():
1 // 0
rv = client.get('/')
assert rv.status_code == 500
assert b'Internal Server Error' in rv.data
assert len(called) == 2
def test_before_after_request_order(app, client):
called = []
@app.before_request
def before1():
called.append(1)
@app.before_request
def before2():
called.append(2)
@app.after_request
def after1(response):
called.append(4)
return response
@app.after_request
def after2(response):
called.append(3)
return response
@app.teardown_request
def finish1(exc):
called.append(6)
@app.teardown_request
def finish2(exc):
called.append(5)
@app.route('/')
def index():
return '42'
rv = client.get('/')
assert rv.data == b'42'
assert called == [1, 2, 3, 4, 5, 6]
def test_error_handling(app, client):
app.testing = False
@app.errorhandler(404)
def not_found(e):
return 'not found', 404
@app.errorhandler(500)
def internal_server_error(e):
return 'internal server error', 500
@app.errorhandler(Forbidden)
def forbidden(e):
return 'forbidden', 403
@app.route('/')
def index():
flask.abort(404)
@app.route('/error')
def error():
1 // 0
@app.route('/forbidden')
def error2():
flask.abort(403)
rv = client.get('/')
assert rv.status_code == 404
assert rv.data == b'not found'
rv = client.get('/error')
assert rv.status_code == 500
assert b'internal server error' == rv.data
rv = client.get('/forbidden')
assert rv.status_code == 403
assert b'forbidden' == rv.data
def test_error_handler_unknown_code(app):
with pytest.raises(KeyError) as exc_info:
app.register_error_handler(999, lambda e: ('999', 999))
assert 'Use a subclass' in exc_info.value.args[0]
def test_error_handling_processing(app, client):
app.testing = False
@app.errorhandler(500)
def internal_server_error(e):
return 'internal server error', 500
@app.route('/')
def broken_func():
1 // 0
@app.after_request
def after_request(resp):
resp.mimetype = 'text/x-special'
return resp
resp = client.get('/')
assert resp.mimetype == 'text/x-special'
assert resp.data == b'internal server error'
def test_baseexception_error_handling(app, client):
app.testing = False
@app.route('/')
def broken_func():
raise KeyboardInterrupt()
with pytest.raises(KeyboardInterrupt):
client.get('/')
ctx = flask._request_ctx_stack.top
assert ctx.preserved
assert type(ctx._preserved_exc) is KeyboardInterrupt
def test_before_request_and_routing_errors(app, client):
@app.before_request
def attach_something():
flask.g.something = 'value'
@app.errorhandler(404)
def return_something(error):
return flask.g.something, 404
rv = client.get('/')
assert rv.status_code == 404
assert rv.data == b'value'
def test_user_error_handling(app, client):
class MyException(Exception):
pass
@app.errorhandler(MyException)
def handle_my_exception(e):
assert isinstance(e, MyException)
return '42'
@app.route('/')
def index():
raise MyException()
assert client.get('/').data == b'42'
def test_http_error_subclass_handling(app, client):
class ForbiddenSubclass(Forbidden):
pass
@app.errorhandler(ForbiddenSubclass)
def handle_forbidden_subclass(e):
assert isinstance(e, ForbiddenSubclass)
return 'banana'
@app.errorhandler(403)
def handle_forbidden_subclass(e):
assert not isinstance(e, ForbiddenSubclass)
assert isinstance(e, Forbidden)
return 'apple'
@app.route('/1')
def index1():
raise ForbiddenSubclass()
@app.route('/2')
def index2():
flask.abort(403)
@app.route('/3')
def index3():
raise Forbidden()
assert client.get('/1').data == b'banana'
assert client.get('/2').data == b'apple'
assert client.get('/3').data == b'apple'
def test_errorhandler_precedence(app, client):
class E1(Exception):
pass
class E2(Exception):
pass
class E3(E1, E2):
pass
@app.errorhandler(E2)
def handle_e2(e):
return 'E2'
@app.errorhandler(Exception)
def handle_exception(e):
return 'Exception'
@app.route('/E1')
def raise_e1():
raise E1
@app.route('/E3')
def raise_e3():
raise E3
rv = client.get('/E1')
assert rv.data == b'Exception'
rv = client.get('/E3')
assert rv.data == b'E2'
def test_trapping_of_bad_request_key_errors(app, client):
@app.route('/key')
def fail():
flask.request.form['missing_key']
@app.route('/abort')
def allow_abort():
flask.abort(400)
rv = client.get('/key')
assert rv.status_code == 400
assert b'missing_key' not in rv.data
rv = client.get('/abort')
assert rv.status_code == 400
app.debug = True
with pytest.raises(KeyError) as e:
client.get("/key")
assert e.errisinstance(BadRequest)
assert 'missing_key' in e.value.get_description()
rv = client.get('/abort')
assert rv.status_code == 400
app.debug = False
app.config['TRAP_BAD_REQUEST_ERRORS'] = True
with pytest.raises(KeyError):
client.get('/key')
with pytest.raises(BadRequest):
client.get('/abort')
def test_trapping_of_all_http_exceptions(app, client):
app.config['TRAP_HTTP_EXCEPTIONS'] = True
@app.route('/fail')
def fail():
flask.abort(404)
with pytest.raises(NotFound):
client.get('/fail')
def test_error_handler_after_processor_error(app, client):
app.testing = False
@app.before_request
def before_request():
if trigger == 'before':
1 // 0
@app.after_request
def after_request(response):
if trigger == 'after':
1 // 0
return response
@app.route('/')
def index():
return 'Foo'
@app.errorhandler(500)
def internal_server_error(e):
return 'Hello Server Error', 500
for trigger in 'before', 'after':
rv = client.get('/')
assert rv.status_code == 500
assert rv.data == b'Hello Server Error'
def test_enctype_debug_helper(app, client):
from flask.debughelpers import DebugFilesKeyError
app.debug = True
@app.route('/fail', methods=['POST'])
def index():
return flask.request.files['foo'].filename
# with statement is important because we leave an exception on the
# stack otherwise and we want to ensure that this is not the case
# to not negatively affect other tests.
with client:
with pytest.raises(DebugFilesKeyError) as e:
client.post('/fail', data={'foo': 'index.txt'})
assert 'no file contents were transmitted' in str(e.value)
assert 'This was submitted: "index.txt"' in str(e.value)
def test_response_types(app, client):
@app.route('/text')
def from_text():
return u'Hällo Wörld'
@app.route('/bytes')
def from_bytes():
return u'Hällo Wörld'.encode('utf-8')
@app.route('/full_tuple')
def from_full_tuple():
return 'Meh', 400, {
'X-Foo': 'Testing',
'Content-Type': 'text/plain; charset=utf-8'
}
@app.route('/text_headers')
def from_text_headers():
return 'Hello', {
'X-Foo': 'Test',
'Content-Type': 'text/plain; charset=utf-8'
}
@app.route('/text_status')
def from_text_status():
return 'Hi, status!', 400
@app.route('/response_headers')
def from_response_headers():
return flask.Response('Hello world', 404, {'X-Foo': 'Baz'}), {
"X-Foo": "Bar",
"X-Bar": "Foo"
}
@app.route('/response_status')
def from_response_status():
return app.response_class('Hello world', 400), 500
@app.route('/wsgi')
def from_wsgi():
return NotFound()
assert client.get('/text').data == u'Hällo Wörld'.encode('utf-8')
assert client.get('/bytes').data == u'Hällo Wörld'.encode('utf-8')
rv = client.get('/full_tuple')
assert rv.data == b'Meh'
assert rv.headers['X-Foo'] == 'Testing'
assert rv.status_code == 400
assert rv.mimetype == 'text/plain'
rv = client.get('/text_headers')
assert rv.data == b'Hello'
assert rv.headers['X-Foo'] == 'Test'
assert rv.status_code == 200
assert rv.mimetype == 'text/plain'
rv = client.get('/text_status')
assert rv.data == b'Hi, status!'
assert rv.status_code == 400
assert rv.mimetype == 'text/html'
rv = client.get('/response_headers')
assert rv.data == b'Hello world'
assert rv.headers.getlist('X-Foo') == ['Baz', 'Bar']
assert rv.headers['X-Bar'] == 'Foo'
assert rv.status_code == 404
rv = client.get('/response_status')
assert rv.data == b'Hello world'
assert rv.status_code == 500
rv = client.get('/wsgi')
assert b'Not Found' in rv.data
assert rv.status_code == 404
def test_response_type_errors():
app = flask.Flask(__name__)
app.testing = True
@app.route('/none')
def from_none():
pass
@app.route('/small_tuple')
def from_small_tuple():
return 'Hello',
@app.route('/large_tuple')
def from_large_tuple():
return 'Hello', 234, {'X-Foo': 'Bar'}, '???'
@app.route('/bad_type')
def from_bad_type():
return True
@app.route('/bad_wsgi')
def from_bad_wsgi():
return lambda: None
c = app.test_client()
with pytest.raises(TypeError) as e:
c.get('/none')
assert 'returned None' in str(e)
with pytest.raises(TypeError) as e:
c.get('/small_tuple')
assert 'tuple must have the form' in str(e)
pytest.raises(TypeError, c.get, '/large_tuple')
with pytest.raises(TypeError) as e:
c.get('/bad_type')
assert 'it was a bool' in str(e)
pytest.raises(TypeError, c.get, '/bad_wsgi')
def test_make_response(app, req_ctx):
rv = flask.make_response()
assert rv.status_code == 200
assert rv.data == b''
assert rv.mimetype == 'text/html'
rv = flask.make_response('Awesome')
assert rv.status_code == 200
assert rv.data == b'Awesome'
assert rv.mimetype == 'text/html'
rv = flask.make_response('W00t', 404)
assert rv.status_code == 404
assert rv.data == b'W00t'
assert rv.mimetype == 'text/html'
def test_make_response_with_response_instance(app, req_ctx):
rv = flask.make_response(
flask.jsonify({'msg': 'W00t'}), 400)
assert rv.status_code == 400
assert rv.data == b'{"msg":"W00t"}\n'
assert rv.mimetype == 'application/json'
rv = flask.make_response(
flask.Response(''), 400)
assert rv.status_code == 400
assert rv.data == b''
assert rv.mimetype == 'text/html'
rv = flask.make_response(
flask.Response('', headers={'Content-Type': 'text/html'}),
400, [('X-Foo', 'bar')])
assert rv.status_code == 400
assert rv.headers['Content-Type'] == 'text/html'
assert rv.headers['X-Foo'] == 'bar'
def test_jsonify_no_prettyprint(app, req_ctx):
app.config.update({"JSONIFY_PRETTYPRINT_REGULAR": False})
compressed_msg = b'{"msg":{"submsg":"W00t"},"msg2":"foobar"}\n'
uncompressed_msg = {
"msg": {
"submsg": "W00t"
},
"msg2": "foobar"
}
rv = flask.make_response(
flask.jsonify(uncompressed_msg), 200)
assert rv.data == compressed_msg
def test_jsonify_prettyprint(app, req_ctx):
app.config.update({"JSONIFY_PRETTYPRINT_REGULAR": True})
compressed_msg = {"msg": {"submsg": "W00t"}, "msg2": "foobar"}
pretty_response = \
b'{\n "msg": {\n "submsg": "W00t"\n }, \n "msg2": "foobar"\n}\n'
rv = flask.make_response(
flask.jsonify(compressed_msg), 200)
assert rv.data == pretty_response
def test_jsonify_mimetype(app, req_ctx):
app.config.update({"JSONIFY_MIMETYPE": 'application/vnd.api+json'})
msg = {
"msg": {"submsg": "W00t"},
}
rv = flask.make_response(
flask.jsonify(msg), 200)
assert rv.mimetype == 'application/vnd.api+json'
def test_jsonify_args_and_kwargs_check(app, req_ctx):
with pytest.raises(TypeError) as e:
flask.jsonify('fake args', kwargs='fake')
assert 'behavior undefined' in str(e.value)
def test_url_generation(app, req_ctx):
@app.route('/hello/<name>', methods=['POST'])
def hello():
pass
assert flask.url_for('hello', name='test x') == '/hello/test%20x'
assert flask.url_for('hello', name='test x', _external=True) == \
'http://localhost/hello/test%20x'
def test_build_error_handler(app):
# Test base case, a URL which results in a BuildError.
with app.test_request_context():
pytest.raises(BuildError, flask.url_for, 'spam')
# Verify the error is re-raised if not the current exception.
try:
with app.test_request_context():
flask.url_for('spam')
except BuildError as err:
error = err
try:
raise RuntimeError('Test case where BuildError is not current.')
except RuntimeError:
pytest.raises(
BuildError, app.handle_url_build_error, error, 'spam', {})
# Test a custom handler.
def handler(error, endpoint, values):
# Just a test.
return '/test_handler/'
app.url_build_error_handlers.append(handler)
with app.test_request_context():
assert flask.url_for('spam') == '/test_handler/'
def test_build_error_handler_reraise(app):
# Test a custom handler which reraises the BuildError
def handler_raises_build_error(error, endpoint, values):
raise error
app.url_build_error_handlers.append(handler_raises_build_error)
with app.test_request_context():
pytest.raises(BuildError, flask.url_for, 'not.existing')
def test_url_for_passes_special_values_to_build_error_handler(app):
@app.url_build_error_handlers.append
def handler(error, endpoint, values):
assert values == {
'_external': False,
'_anchor': None,
'_method': None,
'_scheme': None,
}
return 'handled'
with app.test_request_context():
flask.url_for('/')
def test_custom_converters(app, client):
from werkzeug.routing import BaseConverter
class ListConverter(BaseConverter):
def to_python(self, value):
return value.split(',')
def to_url(self, value):
base_to_url = super(ListConverter, self).to_url
return ','.join(base_to_url(x) for x in value)
app.url_map.converters['list'] = ListConverter
@app.route('/<list:args>')
def index(args):
return '|'.join(args)
assert client.get('/1,2,3').data == b'1|2|3'
def test_static_files(app, client):
rv = client.get('/static/index.html')
assert rv.status_code == 200
assert rv.data.strip() == b'<h1>Hello World!</h1>'
with app.test_request_context():
assert flask.url_for('static', filename='index.html') == \
'/static/index.html'
rv.close()
def test_static_url_path():
app = flask.Flask(__name__, static_url_path='/foo')
app.testing = True
rv = app.test_client().get('/foo/index.html')
assert rv.status_code == 200
rv.close()
with app.test_request_context():
assert flask.url_for('static', filename='index.html') == '/foo/index.html'
def test_static_route_with_host_matching():
app = flask.Flask(__name__, host_matching=True, static_host='example.com')
c = app.test_client()
rv = c.get('http://example.com/static/index.html')
assert rv.status_code == 200
rv.close()
with app.test_request_context():
rv = flask.url_for('static', filename='index.html', _external=True)
assert rv == 'http://example.com/static/index.html'
# Providing static_host without host_matching=True should error.
with pytest.raises(Exception):
flask.Flask(__name__, static_host='example.com')
# Providing host_matching=True with static_folder but without static_host should error.
with pytest.raises(Exception):
flask.Flask(__name__, host_matching=True)
# Providing host_matching=True without static_host but with static_folder=None should not error.
flask.Flask(__name__, host_matching=True, static_folder=None)
def test_request_locals():
assert repr(flask.g) == '<LocalProxy unbound>'
assert not flask.g
def test_test_app_proper_environ():
app = flask.Flask(__name__, subdomain_matching=True)
app.config.update(
SERVER_NAME='localhost.localdomain:5000'
)
client = app.test_client()
@app.route('/')
def index():
return 'Foo'
@app.route('/', subdomain='foo')
def subdomain():
return 'Foo SubDomain'
rv = client.get('/')
assert rv.data == b'Foo'
rv = client.get('/', 'http://localhost.localdomain:5000')
assert rv.data == b'Foo'
rv = client.get('/', 'https://localhost.localdomain:5000')
assert rv.data == b'Foo'
app.config.update(SERVER_NAME='localhost.localdomain')
rv = client.get('/', 'https://localhost.localdomain')
assert rv.data == b'Foo'
try:
app.config.update(SERVER_NAME='localhost.localdomain:443')
rv = client.get('/', 'https://localhost.localdomain')
# Werkzeug 0.8
assert rv.status_code == 404
except ValueError as e:
# Werkzeug 0.7
assert str(e) == (
"the server name provided "
"('localhost.localdomain:443') does not match the "
"server name from the WSGI environment ('localhost.localdomain')"
)
try:
app.config.update(SERVER_NAME='localhost.localdomain')
rv = client.get('/', 'http://foo.localhost')
# Werkzeug 0.8
assert rv.status_code == 404
except ValueError as e:
# Werkzeug 0.7
assert str(e) == (
"the server name provided "
"('localhost.localdomain') does not match the "
"server name from the WSGI environment ('foo.localhost')"
)
rv = client.get('/', 'http://foo.localhost.localdomain')
assert rv.data == b'Foo SubDomain'
def test_exception_propagation(app, client):
def apprunner(config_key):
@app.route('/')
def index():
1 // 0
if config_key is not None:
app.config[config_key] = True
with pytest.raises(Exception):
client.get('/')
else:
assert client.get('/').status_code == 500
# we have to run this test in an isolated thread because if the
# debug flag is set to true and an exception happens the context is
# not torn down. This causes other tests that run after this fail
# when they expect no exception on the stack.
for config_key in 'TESTING', 'PROPAGATE_EXCEPTIONS', 'DEBUG', None:
t = Thread(target=apprunner, args=(config_key,))
t.start()
t.join()
@pytest.mark.parametrize('debug', [True, False])
@pytest.mark.parametrize('use_debugger', [True, False])
@pytest.mark.parametrize('use_reloader', [True, False])
@pytest.mark.parametrize('propagate_exceptions', [None, True, False])
def test_werkzeug_passthrough_errors(monkeypatch, debug, use_debugger,
use_reloader, propagate_exceptions, app):
rv = {}
# Mocks werkzeug.serving.run_simple method
def run_simple_mock(*args, **kwargs):
rv['passthrough_errors'] = kwargs.get('passthrough_errors')
monkeypatch.setattr(werkzeug.serving, 'run_simple', run_simple_mock)
app.config['PROPAGATE_EXCEPTIONS'] = propagate_exceptions
app.run(debug=debug, use_debugger=use_debugger, use_reloader=use_reloader)
def test_max_content_length(app, client):
app.config['MAX_CONTENT_LENGTH'] = 64
@app.before_request
def always_first():
flask.request.form['myfile']
assert False
@app.route('/accept', methods=['POST'])
def accept_file():
flask.request.form['myfile']
assert False
@app.errorhandler(413)
def catcher(error):
return '42'
rv = client.post('/accept', data={'myfile': 'foo' * 100})
assert rv.data == b'42'
def test_url_processors(app, client):
@app.url_defaults
def add_language_code(endpoint, values):
if flask.g.lang_code is not None and \
app.url_map.is_endpoint_expecting(endpoint, 'lang_code'):
values.setdefault('lang_code', flask.g.lang_code)
@app.url_value_preprocessor
def pull_lang_code(endpoint, values):
flask.g.lang_code = values.pop('lang_code', None)
@app.route('/<lang_code>/')
def index():
return flask.url_for('about')
@app.route('/<lang_code>/about')
def about():
return flask.url_for('something_else')
@app.route('/foo')
def something_else():
return flask.url_for('about', lang_code='en')
assert client.get('/de/').data == b'/de/about'
assert client.get('/de/about').data == b'/foo'
assert client.get('/foo').data == b'/en/about'
def test_inject_blueprint_url_defaults(app):
bp = flask.Blueprint('foo.bar.baz', __name__,
template_folder='template')
@bp.url_defaults
def bp_defaults(endpoint, values):
values['page'] = 'login'
@bp.route('/<page>')
def view(page):
pass
app.register_blueprint(bp)
values = dict()
app.inject_url_defaults('foo.bar.baz.view', values)
expected = dict(page='login')
assert values == expected
with app.test_request_context('/somepage'):
url = flask.url_for('foo.bar.baz.view')
expected = '/login'
assert url == expected
def test_nonascii_pathinfo(app, client):
@app.route(u'/киртест')
def index():
return 'Hello World!'
rv = client.get(u'/киртест')
assert rv.data == b'Hello World!'
def test_debug_mode_complains_after_first_request(app, client):
app.debug = True
@app.route('/')
def index():
return 'Awesome'
assert not app.got_first_request
assert client.get('/').data == b'Awesome'
with pytest.raises(AssertionError) as e:
@app.route('/foo')
def broken():
return 'Meh'
assert 'A setup function was called' in str(e)
app.debug = False
@app.route('/foo')
def working():
return 'Meh'
assert client.get('/foo').data == b'Meh'
assert app.got_first_request
def test_before_first_request_functions(app, client):
got = []
@app.before_first_request
def foo():
got.append(42)
client.get('/')
assert got == [42]
client.get('/')
assert got == [42]
assert app.got_first_request
def test_before_first_request_functions_concurrent(app, client):
got = []
@app.before_first_request
def foo():
time.sleep(0.2)
got.append(42)
def get_and_assert():
client.get("/")
assert got == [42]
t = Thread(target=get_and_assert)
t.start()
get_and_assert()
t.join()
assert app.got_first_request
def test_routing_redirect_debugging(app, client):
app.debug = True
@app.route('/foo/', methods=['GET', 'POST'])
def foo():
return 'success'
with client:
with pytest.raises(AssertionError) as e:
client.post('/foo', data={})
assert 'http://localhost/foo/' in str(e)
assert ('Make sure to directly send '
'your POST-request to this URL') in str(e)
rv = client.get('/foo', data={}, follow_redirects=True)
assert rv.data == b'success'
app.debug = False
with client:
rv = client.post('/foo', data={}, follow_redirects=True)
assert rv.data == b'success'
def test_route_decorator_custom_endpoint(app, client):
app.debug = True
@app.route('/foo/')
def foo():
return flask.request.endpoint
@app.route('/bar/', endpoint='bar')
def for_bar():
return flask.request.endpoint
@app.route('/bar/123', endpoint='123')
def for_bar_foo():
return flask.request.endpoint
with app.test_request_context():
assert flask.url_for('foo') == '/foo/'
assert flask.url_for('bar') == '/bar/'
assert flask.url_for('123') == '/bar/123'
assert client.get('/foo/').data == b'foo'
assert client.get('/bar/').data == b'bar'
assert client.get('/bar/123').data == b'123'
def test_preserve_only_once(app, client):
app.debug = True
@app.route('/fail')
def fail_func():
1 // 0
for x in range(3):
with pytest.raises(ZeroDivisionError):
client.get('/fail')
assert flask._request_ctx_stack.top is not None
assert flask._app_ctx_stack.top is not None
# implicit appctx disappears too
flask._request_ctx_stack.top.pop()
assert flask._request_ctx_stack.top is None
assert flask._app_ctx_stack.top is None
def test_preserve_remembers_exception(app, client):
app.debug = True
errors = []
@app.route('/fail')
def fail_func():
1 // 0
@app.route('/success')
def success_func():
return 'Okay'
@app.teardown_request
def teardown_handler(exc):
errors.append(exc)
# After this failure we did not yet call the teardown handler
with pytest.raises(ZeroDivisionError):
client.get('/fail')
assert errors == []
# But this request triggers it, and it's an error
client.get('/success')
assert len(errors) == 2
assert isinstance(errors[0], ZeroDivisionError)
# At this point another request does nothing.
client.get('/success')
assert len(errors) == 3
assert errors[1] is None
def test_get_method_on_g(app_ctx):
assert flask.g.get('x') is None
assert flask.g.get('x', 11) == 11
flask.g.x = 42
assert flask.g.get('x') == 42
assert flask.g.x == 42
def test_g_iteration_protocol(app_ctx):
flask.g.foo = 23
flask.g.bar = 42
assert 'foo' in flask.g
assert 'foos' not in flask.g
assert sorted(flask.g) == ['bar', 'foo']
def test_subdomain_basic_support():
app = flask.Flask(__name__, subdomain_matching=True)
app.config['SERVER_NAME'] = 'localhost.localdomain'
client = app.test_client()
@app.route('/')
def normal_index():
return 'normal index'
@app.route('/', subdomain='test')
def test_index():
return 'test index'
rv = client.get('/', 'http://localhost.localdomain/')
assert rv.data == b'normal index'
rv = client.get('/', 'http://test.localhost.localdomain/')
assert rv.data == b'test index'
def test_subdomain_matching():
app = flask.Flask(__name__, subdomain_matching=True)
client = app.test_client()
app.config['SERVER_NAME'] = 'localhost.localdomain'
@app.route('/', subdomain='<user>')
def index(user):
return 'index for %s' % user
rv = client.get('/', 'http://mitsuhiko.localhost.localdomain/')
assert rv.data == b'index for mitsuhiko'
def test_subdomain_matching_with_ports():
app = flask.Flask(__name__, subdomain_matching=True)
app.config['SERVER_NAME'] = 'localhost.localdomain:3000'
client = app.test_client()
@app.route('/', subdomain='<user>')
def index(user):
return 'index for %s' % user
rv = client.get('/', 'http://mitsuhiko.localhost.localdomain:3000/')
assert rv.data == b'index for mitsuhiko'
@pytest.mark.parametrize('matching', (False, True))
def test_subdomain_matching_other_name(matching):
app = flask.Flask(__name__, subdomain_matching=matching)
app.config['SERVER_NAME'] = 'localhost.localdomain:3000'
client = app.test_client()
@app.route('/')
def index():
return '', 204
# ip address can't match name
rv = client.get('/', 'http://127.0.0.1:3000/')
assert rv.status_code == 404 if matching else 204
# allow all subdomains if matching is disabled
rv = client.get('/', 'http://www.localhost.localdomain:3000/')
assert rv.status_code == 404 if matching else 204
def test_multi_route_rules(app, client):
@app.route('/')
@app.route('/<test>/')
def index(test='a'):
return test
rv = client.open('/')
assert rv.data == b'a'
rv = client.open('/b/')
assert rv.data == b'b'
def test_multi_route_class_views(app, client):
class View(object):
def __init__(self, app):
app.add_url_rule('/', 'index', self.index)
app.add_url_rule('/<test>/', 'index', self.index)
def index(self, test='a'):
return test
_ = View(app)
rv = client.open('/')
assert rv.data == b'a'
rv = client.open('/b/')
assert rv.data == b'b'
def test_run_defaults(monkeypatch, app):
rv = {}
# Mocks werkzeug.serving.run_simple method
def run_simple_mock(*args, **kwargs):
rv['result'] = 'running...'
monkeypatch.setattr(werkzeug.serving, 'run_simple', run_simple_mock)
app.run()
assert rv['result'] == 'running...'
def test_run_server_port(monkeypatch, app):
rv = {}
# Mocks werkzeug.serving.run_simple method
def run_simple_mock(hostname, port, application, *args, **kwargs):
rv['result'] = 'running on %s:%s ...' % (hostname, port)
monkeypatch.setattr(werkzeug.serving, 'run_simple', run_simple_mock)
hostname, port = 'localhost', 8000
app.run(hostname, port, debug=True)
assert rv['result'] == 'running on %s:%s ...' % (hostname, port)
@pytest.mark.parametrize('host,port,expect_host,expect_port', (
(None, None, 'pocoo.org', 8080),
('localhost', None, 'localhost', 8080),
(None, 80, 'pocoo.org', 80),
('localhost', 80, 'localhost', 80),
))
def test_run_from_config(monkeypatch, host, port, expect_host, expect_port, app):
def run_simple_mock(hostname, port, *args, **kwargs):
assert hostname == expect_host
assert port == expect_port
monkeypatch.setattr(werkzeug.serving, 'run_simple', run_simple_mock)
app.config['SERVER_NAME'] = 'pocoo.org:8080'
app.run(host, port)
def test_max_cookie_size(app, client, recwarn):
app.config['MAX_COOKIE_SIZE'] = 100
# outside app context, default to Werkzeug static value,
# which is also the default config
response = flask.Response()
default = flask.Flask.default_config['MAX_COOKIE_SIZE']
assert response.max_cookie_size == default
# inside app context, use app config
with app.app_context():
assert flask.Response().max_cookie_size == 100
@app.route('/')
def index():
r = flask.Response('', status=204)
r.set_cookie('foo', 'bar' * 100)
return r
client.get('/')
assert len(recwarn) == 1
w = recwarn.pop()
assert 'cookie is too large' in str(w.message)
app.config['MAX_COOKIE_SIZE'] = 0
client.get('/')
assert len(recwarn) == 0
|
play.py
|
import os
import time
import sys
from multiprocessing import Process
import subprocess
from flask import Flask, jsonify
from flask_restful import Resource, Api, reqparse
app = Flask(__name__)
api = Api(app)
class VideoStream(Resource):
'''
REST API class for creation/deletion of video stream on a (part of the) screen of the raspberry.
Create is done one by one
Delete removes ALL video streams at once
'''
def __init__(self):
self.reqparse = reqparse.RequestParser()
self.reqparse.add_argument('url', type = str, required = True, location = 'json')
self.reqparse.add_argument('x1', type = int, required = True, location = 'json')
self.reqparse.add_argument('y1', type = int, required = True, location = 'json')
self.reqparse.add_argument('x2', type = int, required = True, location = 'json')
self.reqparse.add_argument('y2', type = int, required = True, location = 'json')
self.reqparse.add_argument('sound', type = bool, required = False, default=False, location = 'json')
self.reqparse.add_argument('hdmi', type = int, required = False, default=0, location = 'json')
super(VideoStream, self).__init__()
def post(self):
'''
Creates a video stream on a part of the screen
'''
args = self.reqparse.parse_args()
p = Process(target=play_stream, args=(args['url'], args['x1'], args['y1'], args['x2'], args['y2'], args['sound'], args['hdmi']))
p.start()
return {}, 201
def delete(self):
'''
Removes all video streams from all screens
'''
os.system('pkill -9 omxplayer')
os.system('reset')
return {}, 201
class ScreenProperties(Resource):
'''
REST API class to retrieve the screen characteristics
'''
def get(self):
res = get_screen_resolution()
return {"screen_resolution": res}, 200
# bind resource for REST API service
api.add_resource(VideoStream, '/stream/api/v1.0/tasks', endpoint = 'tasks')
api.add_resource(ScreenProperties, '/screen/api/v1.0/resolution', endpoint = 'resolution')
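
# --- Illustrative sketch (not part of the original file) ---
# How a client could drive the REST API above, assuming the service runs with
# Flask's defaults on port 5000 and that the 'requests' package is installed.
# The stream URL below is a placeholder; x1,y1,x2,y2 are pixel coordinates of
# the screen region the stream should occupy.
def example_client_usage(base="http://localhost:5000"):
    import requests  # client-side dependency, not used by the server itself
    # create one muted stream in the top-left 960x540 quadrant on HDMI0
    requests.post(base + "/stream/api/v1.0/tasks", json={
        "url": "rtsp://camera.example/stream1",
        "x1": 0, "y1": 0, "x2": 960, "y2": 540,
        "sound": False, "hdmi": 0,
    })
    # query the screen resolution ("HD" when 1920x1080 is detected)
    print(requests.get(base + "/screen/api/v1.0/resolution").json())
    # remove all running streams
    requests.delete(base + "/stream/api/v1.0/tasks")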
def get_screen_resolution():
p = subprocess.Popen(['fbset', '-s'], stdout=subprocess.PIPE)
for line in p.stdout:
line = str(line)
if '1920x1080' in line:
return "HD"
return ""
def play_stream(url, x1, y1, x2, y2, sound, hdmi):
win = str(x1) + ',' + str(y1) + ',' + str(x2) + ',' + str(y2)
cmd_list = ['omxplayer', '--blank']
    if not sound:
        # '-n -1' selects audio stream index -1, i.e. the video plays muted
        cmd_list.append('-n')
        cmd_list.append('-1')
    # omxplayer display ids: 2 and 7 correspond to the two HDMI outputs
    # (HDMI0 / HDMI1) on a Raspberry Pi 4
    display = 2
    if hdmi == 1:
        display = 7
    cmd_list.append("--display")
    cmd_list.append(str(display))
cmd_list.append('--win')
cmd_list.append(win)
cmd_list.append(url)
print (cmd_list)
# p = subprocess.Popen(['omxplayer', '-n', '-1', '--blank', '--win', win, url], stdout=subprocess.PIPE)
p = subprocess.Popen(cmd_list, stdout=subprocess.PIPE)
out = p.communicate()
print(out)
if __name__ == '__main__':
app.run(host="0.0.0.0")
|
web_server.py
|
import Utils, time, cgi, web_pages, json, threading, hashlib
u = Utils.Utils()
seconds_between_updates = "2"
if u.dev_env: seconds_between_updates = "20"
def application(env, start_response):
    ########################## Preparations ##########################
if not u.init_successful:
start_response('200 OK', [('Content-Type','text/html')])
return "<h1>Failed in initiing class Utils - Please correct and restart server</h1>"
# parse params
path = env['PATH_INFO'][1:].split("?")[0]
u.toLog("<-- "+str(path))
params = { }
p = cgi.parse_qs(env['QUERY_STRING'], True)
if p.keys():
u.toLog("<--(params) "+str(p))
for key in p.keys():
params[key] = p[key][0]
postData = { }
if env['REQUEST_METHOD'] == 'POST':
post_env = env.copy()
post_env['QUERY_STRING'] = ''
postData = cgi.FieldStorage( fp=env['wsgi.input'], environ=post_env).value
if len(postData):
u.toLog("<--(post data) "+str(postData))
########################## handle user authentication ##########################
raw_cookie = ""
if "HTTP_COOKIE" in env.keys():
raw_cookie = env["HTTP_COOKIE"]
user_from_cookie, hashed_pwd_from_cookie = u.parse_cookie(raw_cookie)
client_ip = str(env["REMOTE_ADDR"])
u.status["user_authenticated"] = False
# Try to authenticate by login
if path=="signin.html":
client_ip = str(env["REMOTE_ADDR"])
u.toLog("Attempt to sign in from IP '"+ client_ip +"'")
user = None
if "user" in params.keys():
user = params["user"]
u.toLog("User '"+ user +"' attmpted login")
hashed_pwd = None
if "pwd" in params.keys():
pwd = params["pwd"]
hashed_pwd = hashlib.sha256(pwd).hexdigest()
u.toLog("Hashed-pwd from user is '"+ hashed_pwd +"'")
if user in u.users.keys():
u.toLog("User '"+ user +"' exists in users file.")
if hashed_pwd == u.users[user]["hashed_pwd"]:
u.toLog("User '"+ user +"' is authonticated by submiting matching user and pwd.")
u.status["user_authenticated"] = True
if u.status["user_authenticated"]:
if client_ip != u.users[user]["ip"]:
u.toLog(" Modifient user database to reflect current IP")
u.users[user]["ip"] = client_ip
u.save_users_database()
u.toLog(" Sending cookie to user and redirecting")
        u.set_status_starting("Login") # To clear 'not authenticated' from the status
u.set_status_done(True, "")
cookie = u.genrate_cookie(user, hashed_pwd)
headers = [ ]
headers.append( ('Set-Cookie', cookie ) )
headers.append( ('Location','EXAMPLE_PAGE1.html') )
start_response('302 Moved Temporarily', headers)
return ""
# Try to authenticate by cookie
if not u.status["user_authenticated"]:
if user_from_cookie in u.users.keys():
u.toLog("Cookie: User '"+ user_from_cookie +"' exists in users file.")
if hashed_pwd_from_cookie == u.users[user_from_cookie]["hashed_pwd"]:
u.toLog(" Cookie: pwds match.")
if client_ip == u.users[user_from_cookie]["ip"]:
u.toLog("User '"+ user_from_cookie +"' is authonticated from client-cookie.")
u.status["user_authenticated"] = True
else:
u.toLog("User '"+ user_from_cookie +"' is NOT authonticated, because stored IP and actual IP do not match")
u.toLog(" Cookie: pwd in file :"+ u.users[user_from_cookie]["hashed_pwd"])
u.toLog(" Cookie: pwd in cookie:"+ hashed_pwd_from_cookie)
# "getStatus" are honored even if we are not authenticated, so we can communicated to the user the need to auththenticate
if path=="getStatus":
start_response('200 OK', [('Content-Type','application/json')])
return json.dumps( u.get_status() )
# If we are here, we are not authenticated (=not by cookie and not by submiting user/pwd)
if not u.status["user_authenticated"]:
u.toLog("User is not authenticated - will get the login page")
u.set_status_to_failed("You are not authenticated - please login")
start_response('200 OK', [('Content-Type','text/html')])
page = web_pages.gen_page_start("EXAMPLE TITLE")
page += web_pages.gen_signin_page()
page += web_pages.gen_page_end()
return str(page)
########################## Information requests ##########################
if path=="getEXAMPLEVAR":
start_response('200 OK', [('Content-Type','application/json')])
return json.dumps( u.EXAMPLE_VAR )
########################## Actions ##########################
if path=="setEXAMPLEVAR":
start_response('200 OK', [('Content-Type','application/json')])
if u.status["currently_executing"]:
return json.dumps( {"success": False, "message": "System is busy - please try later"} )
u.set_status_starting("set EXAMPLE VAR")
val1 = params["val1"]
try:
val = int(val)
except:
return json.dumps( {"success": False, "message": "Unexpected value at the val1 param"} )
val2 = params["val2"]
try:
val = int(val)
except:
return json.dumps( {"success": False, "message": "Unexpected value at the val2 param"} )
#Long operation, so we span a new thread
thread = threading.Thread( target=u.set_EXAMPLE_VAR, args=(val1, val2) )
thread.start()
return json.dumps( {"success": True, "message": ""} )
if path=="zero_EXAMPLE_VAR":
start_response('200 OK', [('Content-Type','application/json')])
u.zero_EXAMPLE_VAR()
return json.dumps( {"success": True, "message": ""} )
if path=="abort":
u.abort_pending = True
u.toLog("Abort requested by client")
return json.dumps( {"success": True, "message": ""} )
########################## User Interface ##########################
start_response('200 OK', [('Content-Type','text/html')])
page = web_pages.gen_page_start("EXAMPLE TITLE")
if path=="EXAMPLE_PAGE1.html":
page += web_pages.gen_EXAMPLE_PAGE1_page(seconds_between_updates)
elif path=="EXAMPLE_PAGE2.html":
page += web_pages.gen_EXAMPLE_PAGE2_page(seconds_between_updates)
else:
page += web_pages.gen_EXAMPLE_DEFAULT_PAGE_page(seconds_between_updates)
page += web_pages.gen_page_end()
return str(page)
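
# --- Illustrative sketch (not part of the original file) ---
# How a browser-less client could authenticate against the application above,
# assuming it is served at http://localhost:8080 behind a WSGI server and that
# 'alice'/'secret' exist in the users file (both are placeholders). signin.html
# takes user/pwd as query parameters; the 302 response carries the session
# cookie, which the session object replays on later calls such as getStatus.
def example_signin(base="http://localhost:8080", user="alice", pwd="secret"):
    import requests  # client-side dependency
    s = requests.Session()
    s.get(base + "/signin.html", params={"user": user, "pwd": pwd})
    return s.get(base + "/getStatus").json()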
|
urls.py
|
# *****************************************************************************
#
# Copyright (c) 2020, the pyEX authors.
#
# This file is part of the pyEX library, distributed under the terms of
# the Apache License 2.0. The full license can be found in the LICENSE file.
#
from __future__ import print_function
import aiohttp
import asyncio
import json
import os
import os.path
import time
from random import random
from threading import Event, Thread
from urllib.parse import urlparse
import requests
from socketIO_client_nexus import BaseNamespace, SocketIO
from sseclient import SSEClient
from .exception import PyEXception, PyEXStopSSE
_URL_PREFIX = "https://api.iextrading.com/1.0/"
_URL_PREFIX_CLOUD = "https://cloud.iexapis.com/{version}/"
_URL_PREFIX_CLOUD_ORIG = _URL_PREFIX_CLOUD
_URL_PREFIX_CLOUD_SANDBOX = "https://sandbox.iexapis.com/stable/"
_URL_PREFIX_CLOUD_SANDBOX_ORIG = _URL_PREFIX_CLOUD_SANDBOX
_SIO_URL_PREFIX = "https://ws-api.iextrading.com"
_SIO_PORT = 443
_SSE_URL_PREFIX = (
"https://cloud-sse.iexapis.com/{version}/{channel}?symbols={symbols}&token={token}"
)
_SSE_URL_PREFIX_ORIG = _SSE_URL_PREFIX
_SSE_URL_PREFIX_ALL = "https://cloud-sse.iexapis.com/{version}/{channel}?token={token}"
_SSE_URL_PREFIX_ALL_ORIG = _SSE_URL_PREFIX_ALL
_SSE_DEEP_URL_PREFIX = "https://cloud-sse.iexapis.com/{version}/deep?symbols={symbols}&channels={channels}&token={token}"
_SSE_DEEP_URL_PREFIX_ORIG = _SSE_DEEP_URL_PREFIX
_SSE_URL_PREFIX_SANDBOX = (
"https://sandbox-sse.iexapis.com/stable/{channel}?symbols={symbols}&token={token}"
)
_SSE_URL_PREFIX_SANDBOX_ORIG = _SSE_URL_PREFIX_SANDBOX
_SSE_URL_PREFIX_ALL_SANDBOX = (
"https://sandbox-sse.iexapis.com/stable/{channel}?token={token}"
)
_SSE_URL_PREFIX_ALL_SANDBOX_ORIG = _SSE_URL_PREFIX_ALL_SANDBOX
_SSE_DEEP_URL_PREFIX_SANDBOX = "https://sandbox-sse.iexapis.com/stable/deep?symbols={symbols}&channels={channels}&token={token}"
_SSE_DEEP_URL_PREFIX_SANDBOX_ORIG = _SSE_DEEP_URL_PREFIX_SANDBOX
_PYEX_PROXIES = None
_PYEX_DEBUG = os.environ.get("PYEX_DEBUG", "")
def _get(url, token="", version="stable", filter="", format="json"):
"""for backwards compat, accepting token and version but ignoring"""
token = token or os.environ.get("IEX_TOKEN")
if token:
if version == "sandbox":
return _getIEXCloudSandbox(
url=url, token=token, version=version, filter=filter, format=format
)
return _getIEXCloud(
url=url, token=token, version=version, filter=filter, format=format
)
return _getOrig(url=url)
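
# Illustrative sketch (not part of the original module): how the dispatch above
# behaves. With a token (argument or IEX_TOKEN environment variable) requests go
# to the cloud host, version="sandbox" switches to the sandbox host, and with no
# token at all _getOrig raises. "stock/aapl/quote" is only an example endpoint.
def _example_get_usage():
    live = _get("stock/aapl/quote")                        # cloud host, token from env
    sandbox = _get("stock/aapl/quote", version="sandbox")  # sandbox host
    return live, sandbox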
async def _getAsync(url, token="", version="stable", filter="", format="json"):
"""for backwards compat, accepting token and version but ignoring"""
token = token or os.environ.get("IEX_TOKEN")
if token:
if version == "sandbox":
return await _getIEXCloudSandboxAsync(
url=url, token=token, version=version, filter=filter, format=format
)
return await _getIEXCloudAsync(
url=url, token=token, version=version, filter=filter, format=format
)
return _getOrig(url=url)
def _post(
url,
data=None,
json=None,
headers=None,
token="",
version="stable",
token_in_params=True,
filter="",
format="json",
):
token = token or os.environ.get("IEX_TOKEN")
if version == "sandbox":
return _postIEXCloudSandbox(
url=url,
data=data,
json=json,
headers=headers,
token=token,
version=version,
token_in_params=token_in_params,
format=format,
filter=filter,
)
return _postIEXCloud(
url=url,
data=data,
json=json,
headers=headers,
token=token,
version=version,
token_in_params=token_in_params,
format=format,
filter=filter,
)
def _put(
url,
data=None,
json=None,
headers=None,
token="",
version="stable",
token_in_params=True,
filter="",
format="json",
):
token = token or os.environ.get("IEX_TOKEN")
if version == "sandbox":
return _putIEXCloudSandbox(
url=url,
data=data,
json=json,
headers=headers,
token=token,
version=version,
token_in_params=token_in_params,
format=format,
filter=filter,
)
return _putIEXCloud(
url=url,
data=data,
json=json,
headers=headers,
token=token,
version=version,
token_in_params=token_in_params,
format=format,
filter=filter,
)
def _patch(
url,
data=None,
json=None,
headers=None,
token="",
version="stable",
token_in_params=True,
filter="",
format="json",
):
token = token or os.environ.get("IEX_TOKEN")
if version == "sandbox":
return _patchIEXCloudSandbox(
url=url,
data=data,
json=json,
headers=headers,
token=token,
version=version,
token_in_params=token_in_params,
format=format,
filter=filter,
)
return _patchIEXCloud(
url=url,
data=data,
json=json,
headers=headers,
token=token,
version=version,
token_in_params=token_in_params,
format=format,
filter=filter,
)
def _postAsync(
url,
data=None,
json=None,
headers=None,
token="",
version="stable",
token_in_params=True,
filter="",
format="json",
):
token = token or os.environ.get("IEX_TOKEN")
if version == "sandbox":
return _postIEXCloudSandboxAsync(
url=url,
data=data,
json=json,
headers=headers,
token=token,
version=version,
token_in_params=token_in_params,
format=format,
filter=filter,
)
return _postIEXCloudAsync(
url=url,
data=data,
json=json,
headers=headers,
token=token,
version=version,
token_in_params=token_in_params,
format=format,
filter=filter,
)
def _putAsync(
url,
data=None,
json=None,
headers=None,
token="",
version="stable",
token_in_params=True,
filter="",
format="json",
):
token = token or os.environ.get("IEX_TOKEN")
if version == "sandbox":
return _putIEXCloudSandboxAsync(
url=url,
data=data,
json=json,
headers=headers,
token=token,
version=version,
token_in_params=token_in_params,
format=format,
filter=filter,
)
return _putIEXCloudAsync(
url=url,
data=data,
json=json,
headers=headers,
token=token,
version=version,
token_in_params=token_in_params,
format=format,
filter=filter,
)
def _patchAsync(
url,
data=None,
json=None,
headers=None,
token="",
version="stable",
token_in_params=True,
filter="",
format="json",
):
token = token or os.environ.get("IEX_TOKEN")
if version == "sandbox":
return _patchIEXCloudSandboxAsync(
url=url,
data=data,
json=json,
headers=headers,
token=token,
version=version,
token_in_params=token_in_params,
format=format,
filter=filter,
)
return _patchIEXCloudAsync(
url=url,
data=data,
json=json,
headers=headers,
token=token,
version=version,
token_in_params=token_in_params,
format=format,
filter=filter,
)
def _delete(url, token="", version="stable", filter="", format="json"):
token = token or os.environ.get("IEX_TOKEN")
if version == "sandbox":
return _deleteIEXCloudSandbox(
url=url, token=token, version=version, filter=filter, format=format
)
return _deleteIEXCloud(
url=url, token=token, version=version, filter=filter, format=format
)
def _deleteAsync(url, token="", version="stable", filter="", format="json"):
token = token or os.environ.get("IEX_TOKEN")
if version == "sandbox":
return _deleteIEXCloudSandboxAsync(
url=url, token=token, version=version, filter=filter, format=format
)
return _deleteIEXCloudAsync(
url=url, token=token, version=version, filter=filter, format=format
)
def _getOrig(url):
raise PyEXception(
"Old IEX API is deprecated. For a free API token, sign up at https://iexcloud.io"
)
def _getIEXCloudBase(
base_url, url, token="", version="stable", filter="", format="json"
):
"""for iex cloud"""
url = base_url.format(version=version) + url
params = {"token": token}
if filter:
params["filter"] = filter
if format not in ("json", "binary", "schema") and isinstance(format, str):
params["format"] = format
elif format == "schema":
# add schema param
params["schema"] = True
if _PYEX_DEBUG:
print(urlparse(url).geturl())
tries = 1
resp = requests.get(urlparse(url).geturl(), proxies=_PYEX_PROXIES, params=params)
    while resp.status_code == 429:
        # back off before retrying when rate-limited
        time.sleep(random() * 0.5 * tries)
        tries += 1
        resp = requests.get(
            urlparse(url).geturl(), proxies=_PYEX_PROXIES, params=params
        )
if resp.status_code == 200:
if format == "json":
return resp.json()
elif format == "binary":
return resp.content
elif format == "schema":
return _parseSchema(resp.json())
return resp.text
raise PyEXception("Response %d - " % resp.status_code, resp.text)
def _getIEXCloud(url, token="", version="stable", filter="", format="json"):
"""for iex cloud"""
return _getIEXCloudBase(
base_url=_URL_PREFIX_CLOUD,
url=url,
token=token,
version=version,
filter=filter,
format=format,
)
async def _getIEXCloudBaseAsync(
base_url, url, token="", version="stable", filter="", format="json"
):
"""for iex cloud"""
url = base_url.format(version=version) + url
params = {"token": token}
if filter:
params["filter"] = filter
if format not in ("json", "binary"):
params["format"] = format
if _PYEX_DEBUG:
print(urlparse(url).geturl())
tries = 1
    while tries < 5:
        async with aiohttp.ClientSession() as session:
            async with session.get(
                urlparse(url).geturl(), proxy=_PYEX_PROXIES, params=params
            ) as resp:
                if resp.status == 429:
                    # rate-limited: back off and retry
                    tries += 1
                    await asyncio.sleep(random() * 0.5 * tries)
                elif resp.status == 200:
                    if format == "json":
                        return await resp.json()
                    elif format == "binary":
                        return await resp.read()
                    return await resp.text()
                else:
                    # read the error body while the response is still open,
                    # then raise the exception case
                    raise PyEXception(
                        "Response %d - " % resp.status, await resp.text()
                    )
    raise PyEXception("Response 429 - ", "rate limit retries exhausted")
async def _getIEXCloudAsync(url, token="", version="stable", filter="", format="json"):
"""for iex cloud"""
return await _getIEXCloudBaseAsync(
base_url=_URL_PREFIX_CLOUD,
url=url,
token=token,
version=version,
filter=filter,
format=format,
)
def _getIEXCloudSandbox(url, token="", version="stable", filter="", format="json"):
"""for iex cloud"""
return _getIEXCloudBase(
base_url=_URL_PREFIX_CLOUD_SANDBOX,
url=url,
token=token,
version="stable",
filter=filter,
format=format,
)
async def _getIEXCloudSandboxAsync(
url, token="", version="stable", filter="", format="json"
):
"""for iex cloud"""
return await _getIEXCloudBaseAsync(
base_url=_URL_PREFIX_CLOUD_SANDBOX,
url=url,
token=token,
version="stable",
filter=filter,
format=format,
)
def _pppIEXCloudBase(
base_url,
url,
data=None,
json=None,
headers=None,
token="",
version="stable",
filter=None,
format="json",
token_in_params=True,
verb="post",
):
"""for iex cloud"""
url = base_url.format(version=version) + url
if token_in_params:
params = {"token": token}
else:
params = {}
if format != "json":
params["format"] = format
if filter:
params["filter"] = filter
if _PYEX_DEBUG:
print(urlparse(url).geturl())
resp = getattr(requests, verb)(
urlparse(url).geturl(),
data=data,
json=json,
headers=headers,
proxies=_PYEX_PROXIES,
params=params,
)
if resp.status_code == 200:
if format == "json":
return resp.json()
return resp.text
raise PyEXception("Response %d - " % resp.status_code, resp.text)
def _postIEXCloud(
url,
data=None,
json=None,
headers=None,
token="",
version="stable",
filter="",
format="json",
token_in_params=True,
):
return _pppIEXCloudBase(
base_url=_URL_PREFIX_CLOUD,
url=url,
data=data,
json=json,
headers=headers,
token=token,
version=version,
filter=filter,
format=format,
token_in_params=token_in_params,
verb="post",
)
def _putIEXCloud(
url,
data=None,
json=None,
headers=None,
token="",
version="stable",
filter="",
format="json",
token_in_params=True,
):
return _pppIEXCloudBase(
base_url=_URL_PREFIX_CLOUD,
url=url,
data=data,
headers=headers,
json=json,
token=token,
version=version,
filter=filter,
format=format,
token_in_params=token_in_params,
verb="put",
)
def _patchIEXCloud(
url,
data=None,
json=None,
headers=None,
token="",
version="stable",
filter="",
format="json",
token_in_params=True,
):
return _pppIEXCloudBase(
base_url=_URL_PREFIX_CLOUD,
url=url,
data=data,
json=json,
headers=headers,
token=token,
version=version,
filter=filter,
format=format,
token_in_params=token_in_params,
verb="patch",
)
async def _pppIEXCloudBaseAsync(
base_url,
url,
data=None,
json=None,
headers=None,
token="",
version="stable",
filter="",
format="json",
token_in_params=True,
verb="post",
):
"""for iex cloud"""
import aiohttp
url = base_url.format(version=version) + url
if token_in_params:
params = {"token": token}
else:
params = {}
if format != "json":
params["format"] = format
if filter:
params["filter"] = filter
if _PYEX_DEBUG:
print(urlparse(url).geturl())
async with aiohttp.ClientSession(headers=headers) as session:
async with getattr(session, verb)(
urlparse(url).geturl(),
data=data,
json=json,
proxy=_PYEX_PROXIES,
params=params,
) as resp:
if resp.status == 200:
if format == "json":
return await resp.json()
                return await resp.text()
raise PyEXception("Response %d - " % resp.status, await resp.text())
async def _postIEXCloudAsync(
url,
data=None,
json=None,
headers=None,
token="",
version="stable",
filter="",
format="json",
token_in_params=True,
):
return await _pppIEXCloudBaseAsync(
base_url=_URL_PREFIX_CLOUD,
url=url,
data=data,
json=json,
headers=headers,
token=token,
version=version,
filter=filter,
format=format,
token_in_params=token_in_params,
verb="post",
)
async def _putIEXCloudAsync(
url,
data=None,
json=None,
headers=None,
token="",
version="stable",
filter="",
format="json",
token_in_params=True,
):
return await _pppIEXCloudBaseAsync(
base_url=_URL_PREFIX_CLOUD,
url=url,
data=data,
json=json,
headers=headers,
token=token,
version=version,
filter=filter,
format=format,
token_in_params=token_in_params,
verb="put",
)
async def _patchIEXCloudAsync(
url,
data=None,
json=None,
headers=None,
token="",
version="stable",
filter="",
format="json",
token_in_params=True,
):
return await _pppIEXCloudBaseAsync(
base_url=_URL_PREFIX_CLOUD,
url=url,
data=data,
json=json,
headers=headers,
token=token,
version=version,
filter=filter,
format=format,
token_in_params=token_in_params,
verb="patch",
)
def _postIEXCloudSandbox(
url,
data=None,
json=None,
headers=None,
token="",
version="stable",
filter="",
format="json",
token_in_params=True,
):
return _pppIEXCloudBase(
base_url=_URL_PREFIX_CLOUD_SANDBOX,
url=url,
data=data,
json=json,
headers=headers,
token=token,
version=version,
filter=filter,
format=format,
token_in_params=token_in_params,
verb="post",
)
def _putIEXCloudSandbox(
url,
data=None,
json=None,
headers=None,
token="",
version="stable",
filter="",
format="json",
token_in_params=True,
):
return _pppIEXCloudBase(
base_url=_URL_PREFIX_CLOUD_SANDBOX,
url=url,
data=data,
json=json,
headers=headers,
token=token,
version=version,
filter=filter,
format=format,
token_in_params=token_in_params,
verb="put",
)
def _patchIEXCloudSandbox(
url,
data=None,
json=None,
headers=None,
token="",
version="stable",
filter="",
format="json",
token_in_params=True,
):
return _pppIEXCloudBase(
base_url=_URL_PREFIX_CLOUD_SANDBOX,
url=url,
data=data,
json=json,
headers=headers,
token=token,
version=version,
filter=filter,
format=format,
token_in_params=token_in_params,
verb="patch",
)
async def _postIEXCloudSandboxAsync(
url,
data=None,
json=None,
headers=None,
token="",
version="stable",
filter="",
format="json",
token_in_params=True,
):
return await _pppIEXCloudBaseAsync(
base_url=_URL_PREFIX_CLOUD_SANDBOX,
url=url,
data=data,
json=json,
headers=headers,
token=token,
version=version,
filter=filter,
token_in_params=token_in_params,
format=format,
verb="post",
)
async def _putIEXCloudSandboxAsync(
url,
data=None,
json=None,
headers=None,
token="",
version="stable",
filter="",
format="json",
token_in_params=True,
):
return await _pppIEXCloudBaseAsync(
base_url=_URL_PREFIX_CLOUD_SANDBOX,
url=url,
data=data,
json=json,
headers=headers,
token=token,
version=version,
filter=filter,
token_in_params=token_in_params,
format=format,
verb="put",
)
async def _patchIEXCloudSandboxAsync(
url,
data=None,
json=None,
headers=None,
token="",
version="stable",
filter="",
format="json",
token_in_params=True,
):
return await _pppIEXCloudBaseAsync(
base_url=_URL_PREFIX_CLOUD_SANDBOX,
url=url,
data=data,
json=json,
headers=headers,
token=token,
version=version,
filter=filter,
token_in_params=token_in_params,
format=format,
verb="patch",
)
def _deleteIEXCloudBase(
base_url, url, token="", version="stable", filter="", format="json"
):
"""for iex cloud"""
url = base_url.format(version=version) + url
params = {"token": token}
if format != "json":
params["format"] = format
if _PYEX_DEBUG:
print(urlparse(url).geturl())
resp = requests.delete(urlparse(url).geturl(), proxies=_PYEX_PROXIES, params=params)
if resp.status_code == 200:
if format == "json":
return resp.json()
return resp.text
raise PyEXception("Response %d - " % resp.status_code, resp.text)
async def _deleteIEXCloudBaseAsync(
base_url, url, token="", version="stable", filter="", format="json"
):
"""for iex cloud"""
import aiohttp
url = base_url.format(version=version) + url
params = {"token": token}
if format != "json":
params["format"] = format
async with aiohttp.ClientSession() as session:
async with session.delete(
urlparse(url).geturl(),
proxy=_PYEX_PROXIES,
params=params,
) as resp:
if resp.status == 200:
if format == "json":
return await resp.json()
                return await resp.text()
raise PyEXception("Response %d - " % resp.status, await resp.text())
def _deleteIEXCloud(url, token="", version="stable", filter="", format="json"):
"""for iex cloud"""
return _deleteIEXCloudBase(
base_url=_URL_PREFIX_CLOUD, url=url, token=token, version=version, format=format
)
async def _deleteIEXCloudAsync(
url, token="", version="stable", filter="", format="json"
):
"""for iex cloud"""
return await _deleteIEXCloudBaseAsync(
base_url=_URL_PREFIX_CLOUD, url=url, token=token, version=version, format=format
)
def _deleteIEXCloudSandbox(url, token="", version="stable", filter="", format="json"):
"""for iex cloud"""
return _deleteIEXCloudBase(
base_url=_URL_PREFIX_CLOUD_SANDBOX,
url=url,
token=token,
version="stable",
filter=filter,
format=format,
)
async def _deleteIEXCloudSandboxAsync(
url, token="", version="stable", filte="", format="json"
):
"""for iex cloud"""
return await _deleteIEXCloudBaseAsync(
base_url=_URL_PREFIX_CLOUD_SANDBOX,
url=url,
token=token,
version=version,
format=format,
)
def _wsURL(url):
"""internal"""
return "/1.0/" + url
def _tryJson(data, raw=True):
"""internal"""
if raw:
return data
try:
return json.loads(data)
except ValueError:
return data
class WSClient(object):
def __init__(
self, addr, sendinit=None, on_data=None, on_open=None, on_close=None, raw=True
):
"""
addr: path to sio
sendinit: tuple to emit
on_data, on_open, on_close: functions to call
"""
self.addr = addr
self.sendinit = sendinit
on_data = on_data or print
class Namespace(BaseNamespace):
def on_connect(self, *data):
if on_open:
on_open(_tryJson(data, raw))
def on_disconnect(self, *data):
if on_close:
on_close(_tryJson(data, raw))
def on_message(self, data):
on_data(_tryJson(data, raw))
self._Namespace = Namespace
def run(self):
self.socketIO = SocketIO(_SIO_URL_PREFIX, _SIO_PORT)
self.namespace = self.socketIO.define(self._Namespace, self.addr)
if self.sendinit:
self.namespace.emit(*self.sendinit)
self.socketIO.wait()
def _stream(url, sendinit=None, on_data=print):
"""internal"""
cl = WSClient(url, sendinit=sendinit, on_data=on_data)
return cl
def _streamSSE(url, on_data=print, exit=None, nosnapshot=False):
"""internal"""
if nosnapshot:
url += "&nosnapshot=true"
messages = SSEClient(url, proxies=_PYEX_PROXIES, headers={"keep_alive": "false"})
def _runner(messages=messages, on_data=on_data):
for msg in messages:
data = msg.data
try:
on_data(json.loads(data))
except PyEXStopSSE:
# stop listening and return
return
except (json.JSONDecodeError,):
continue
except (KeyboardInterrupt,):
raise
except Exception:
raise
def _exit(messages=messages, exit=exit):
# run runner in wrapper
runthread = Thread(target=_runner)
# die with parent
runthread.daemon = True
# start the runner
runthread.start()
# wait for exit event
exit.wait()
# kill
killerthread = Thread(target=lambda: messages.resp.close())
# die with parent
killerthread.daemon = True
# start the killer
killerthread.start()
return
if isinstance(exit, Event):
# run on thread, stop when exit set
exitthread = Thread(target=_exit)
# start the threads
exitthread.start()
# return the threads
return exitthread
else:
# just call and return the function
return _runner()
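# Illustrative sketch (not part of the original module): stopping an SSE stream
# from the caller by using the `exit` Event path implemented above. The url is
# assumed to be a fully formatted SSE URL (e.g. built from _SSE_URL_PREFIX), and
# the module's existing Event/time imports are reused.
def _exampleStreamSSEWithTimeout(url, seconds=5, on_data=print):
    """Hedged example only: stream for a few seconds, then signal the exit Event."""
    stop = Event()
    runner = _streamSSE(url, on_data=on_data, exit=stop)
    time.sleep(seconds)
    stop.set()  # the killer thread closes the underlying response
    runner.join()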
async def _streamSSEAsync(url, exit=None):
"""internal"""
from asyncio import Event
from aiohttp_sse_client import client as sse_client
from aiostream.stream import merge
async with sse_client.EventSource(url) as event_source:
if isinstance(exit, Event):
async def _waitExit():
yield await exit.wait()
waits = (_waitExit(), event_source)
else:
waits = (event_source,)
try:
async with merge(*waits).stream() as stream:
try:
async for event in stream:
if event == True: # noqa: E712
return
yield json.loads(event.data)
except ConnectionError:
raise PyEXception("Could not connect to SSE Stream")
except PyEXStopSSE:
return
except BaseException:
raise
except (json.JSONDecodeError, KeyboardInterrupt):
raise
def setProxy(proxies=None):
"""Set proxies argument for requests
Args:
proxies (dict): Proxies to set
"""
global _PYEX_PROXIES
_PYEX_PROXIES = proxies
def overrideUrl(url="", env=""):
"""Override the default IEX Cloud url"""
global _URL_PREFIX_CLOUD, _URL_PREFIX_CLOUD_SANDBOX, _SSE_URL_PREFIX, _SSE_URL_PREFIX_ALL, _SSE_DEEP_URL_PREFIX, _SSE_URL_PREFIX_SANDBOX, _SSE_URL_PREFIX_ALL_SANDBOX, _SSE_DEEP_URL_PREFIX_SANDBOX
if env:
_URL_PREFIX_CLOUD = "https://cloud.{env}.iexapis.com/{{version}}/".format(
env=env
)
_URL_PREFIX_CLOUD_SANDBOX = "https://sandbox.{env}.iexapis.com/stable/".format(
env=env
)
_SSE_URL_PREFIX = "https://cloud-sse.{env}.iexapis.com/{{version}}/{{channel}}?symbols={{symbols}}&token={{token}}".format(
env=env
)
_SSE_URL_PREFIX_ALL = "https://cloud-sse.{env}.iexapis.com/{{version}}/{{channel}}?token={{token}}".format(
env=env
)
_SSE_DEEP_URL_PREFIX = "https://cloud-sse.{env}.iexapis.com/{{version}}/deep?symbols={{symbols}}&channels={{channels}}&token={{token}}".format(
env=env
)
_SSE_URL_PREFIX_SANDBOX = "https://sandbox-sse.{env}.iexapis.com/stable/{{channel}}?symbols={{symbols}}&token={{token}}".format(
env=env
)
_SSE_URL_PREFIX_ALL_SANDBOX = "https://sandbox-sse.{env}.iexapis.com/stable/{{channel}}?token={{token}}".format(
env=env
)
_SSE_DEEP_URL_PREFIX_SANDBOX = "https://sandbox-sse.{env}.iexapis.com/stable/deep?symbols={{symbols}}&channels={{channels}}&token={{token}}".format(
env=env
)
elif url:
_URL_PREFIX_CLOUD = url
_URL_PREFIX_CLOUD_SANDBOX = url
_SSE_URL_PREFIX = "{}{{channel}}?symbols={{symbols}}&token={{token}}".format(
url
)
_SSE_URL_PREFIX_ALL = "{}{{channel}}?token={{token}}".format(url)
_SSE_DEEP_URL_PREFIX = (
"{}deep?symbols={{symbols}}&channels={{channels}}&token={{token}}".format(
url
)
)
_SSE_URL_PREFIX_SANDBOX = (
"{}{{channel}}?symbols={{symbols}}&token={{token}}".format(url)
)
_SSE_URL_PREFIX_ALL_SANDBOX = "{}{{channel}}?token={{token}}".format(url)
_SSE_DEEP_URL_PREFIX_SANDBOX = (
"{}deep?symbols={{symbols}}&channels={{channels}}&token={{token}}".format(
url
)
)
else:
# reset
_URL_PREFIX_CLOUD = _URL_PREFIX_CLOUD_ORIG
_URL_PREFIX_CLOUD_SANDBOX = _URL_PREFIX_CLOUD_SANDBOX_ORIG
_SSE_URL_PREFIX = _SSE_URL_PREFIX_ORIG
_SSE_URL_PREFIX_ALL = _SSE_URL_PREFIX_ALL_ORIG
_SSE_DEEP_URL_PREFIX = _SSE_DEEP_URL_PREFIX_ORIG
_SSE_URL_PREFIX_SANDBOX = _SSE_URL_PREFIX_SANDBOX_ORIG
_SSE_URL_PREFIX_ALL_SANDBOX = _SSE_URL_PREFIX_ALL_SANDBOX_ORIG
_SSE_DEEP_URL_PREFIX_SANDBOX = _SSE_DEEP_URL_PREFIX_SANDBOX_ORIG
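# Illustrative sketch (not part of the original module): combining setProxy and
# overrideUrl defined above. The proxy address and gateway URL are placeholders.
def _exampleConfigureEndpoints():
    """Hedged example only: route requests through a proxy and a custom gateway."""
    setProxy({"https": "http://proxy.local:8080"})  # hypothetical proxy
    overrideUrl(url="https://iex-gateway.internal/")  # hypothetical self-hosted gateway
    # ... issue requests here ...
    overrideUrl()  # calling with no arguments restores the original IEX Cloud URLs
    setProxy(None)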
def _parseSchema(data):
if isinstance(data, list) and len(data) > 0:
# take first value
data = data[0]
if data:
return data
return {}
|
train_multi.py
|
#!/usr/bin/env python
"""
Multi-GPU training
"""
import argparse
import os
import signal
import torch
import onmt.opts as opts
import onmt.utils.distributed
from onmt.utils.logging import logger
from onmt.train_single import main as single_main
def main(opt, train_type=None):
""" Spawns 1 process per GPU """
nb_gpu = len(opt.gpuid)
mp = torch.multiprocessing.get_context('spawn')
# Create a thread to listen for errors in the child processes.
error_queue = mp.SimpleQueue()
error_handler = ErrorHandler(error_queue)
# Train with multiprocessing.
procs = []
for i in range(nb_gpu):
opt.gpu_rank = i
opt.device_id = i
procs.append(mp.Process(target=run, args=(
opt, error_queue, ), daemon=True))
procs[i].start()
logger.info(" Starting process pid: %d " % procs[i].pid)
error_handler.add_child(procs[i].pid)
for p in procs:
p.join()
def run(opt, error_queue):
""" run process """
try:
opt.gpu_rank = onmt.utils.distributed.multi_init(opt)
single_main(opt)
except KeyboardInterrupt:
pass # killed by parent, do nothing
except Exception:
# propagate exception to parent process, keeping original traceback
import traceback
error_queue.put((opt.gpu_rank, traceback.format_exc()))
class ErrorHandler(object):
"""A class that listens for exceptions in children processes and propagates
the tracebacks to the parent process."""
def __init__(self, error_queue):
""" init error handler """
import signal
import threading
self.error_queue = error_queue
self.children_pids = []
self.error_thread = threading.Thread(
target=self.error_listener, daemon=True)
self.error_thread.start()
signal.signal(signal.SIGUSR1, self.signal_handler)
def add_child(self, pid):
""" error handler """
self.children_pids.append(pid)
def error_listener(self):
""" error listener """
(rank, original_trace) = self.error_queue.get()
self.error_queue.put((rank, original_trace))
os.kill(os.getpid(), signal.SIGUSR1)
def signal_handler(self, signalnum, stackframe):
""" signal handler """
for pid in self.children_pids:
os.kill(pid, signal.SIGINT) # kill children processes
(rank, original_trace) = self.error_queue.get()
msg = """\n\n-- Tracebacks above this line can probably
be ignored --\n\n"""
msg += original_trace
raise Exception(msg)
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description='train.py',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
opts.add_md_help_argument(parser)
opts.model_opts(parser)
opts.train_opts(parser)
opt = parser.parse_args()
main(opt)
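# Illustrative usage (not part of the original script): the options are defined by
# onmt.opts, so exact flag names depend on the OpenNMT-py version in use; the paths
# below are placeholders.
#
#   python train_multi.py -data data/demo -save_model demo-model -gpuid 0 1 2 3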
|
util.py
|
import math
import cv2
import numpy as np
import tensorflow as tf
import os
import sys
'''
output states:
0: has rewards?
1: stopped?
2: num steps
3:
'''
STATE_REWARD_DIM = 0
STATE_STOPPED_DIM = 1
STATE_STEP_DIM = 2
STATE_DROPOUT_BEGIN = 3
def get_expert_file_path(expert):
expert_path = 'data/artists/fk_%s/' % expert
return expert_path
# From github.com/OlavHN/fast-neural-style
def instance_norm(x):
epsilon = 1e-9
mean, var = tf.nn.moments(x=x, axes=[1, 2], keepdims=True)
return (x - mean) / tf.sqrt(var + epsilon)
def enrich_image_input(cfg, net, states):
if cfg.img_include_states:
print(("states for enriching", states.shape))
states = states[:, None, None, :] + (net[:, :, :, 0:1] * 0)
net = tf.concat([net, states], axis=3)
return net
# based on https://stackoverflow.com/questions/2352181/how-to-use-a-dot-to-access-members-of-dictionary
class Dict(dict):
"""
Example:
m = Dict({'first_name': 'Eduardo'}, last_name='Pool', age=24, sports=['Soccer'])
"""
def __init__(self, *args, **kwargs):
super(Dict, self).__init__(*args, **kwargs)
for arg in args:
if isinstance(arg, dict):
for k, v in arg.items():
self[k] = v
if kwargs:
for k, v in kwargs.items():
self[k] = v
def __getattr__(self, attr):
return self[attr]
def __setattr__(self, key, value):
self.__setitem__(key, value)
def __setitem__(self, key, value):
super(Dict, self).__setitem__(key, value)
self.__dict__.update({key: value})
def __delattr__(self, item):
self.__delitem__(item)
def __delitem__(self, key):
super(Dict, self).__delitem__(key)
del self.__dict__[key]
def make_image_grid(images, per_row=8, padding=2):
npad = ((0, 0), (padding, padding), (padding, padding), (0, 0))
images = np.pad(images, pad_width=npad, mode='constant', constant_values=1.0)
assert images.shape[0] % per_row == 0
num_rows = images.shape[0] // per_row
image_rows = []
for i in range(num_rows):
image_rows.append(np.hstack(images[i * per_row:(i + 1) * per_row]))
return np.vstack(image_rows)
def get_image_center(image):
if image.shape[0] > image.shape[1]:
start = (image.shape[0] - image.shape[1]) // 2
image = image[start:start + image.shape[1], :]
if image.shape[1] > image.shape[0]:
start = (image.shape[1] - image.shape[0]) // 2
image = image[:, start:start + image.shape[0]]
return image
def rotate_image(image, angle):
"""
    Rotates an OpenCV 2 / NumPy image about its centre by the given angle
    (in degrees). The returned image will be large enough to hold the entire
    new image, with a black background.
    """
    # Get the image size
    # No, that's not an error - NumPy stores image matrices backwards
image_size = (image.shape[1], image.shape[0])
image_center = tuple(np.array(image_size) // 2)
# Convert the OpenCV 3x2 rotation matrix to 3x3
rot_mat = np.vstack(
[cv2.getRotationMatrix2D(image_center, angle, 1.0), [0, 0, 1]])
rot_mat_notranslate = np.matrix(rot_mat[0:2, 0:2])
# Shorthand for below calcs
image_w2 = image_size[0] * 0.5
image_h2 = image_size[1] * 0.5
# Obtain the rotated coordinates of the image corners
rotated_coords = [
(np.array([-image_w2, image_h2]) * rot_mat_notranslate).A[0],
(np.array([image_w2, image_h2]) * rot_mat_notranslate).A[0],
(np.array([-image_w2, -image_h2]) * rot_mat_notranslate).A[0],
(np.array([image_w2, -image_h2]) * rot_mat_notranslate).A[0]
]
# Find the size of the new image
x_coords = [pt[0] for pt in rotated_coords]
x_pos = [x for x in x_coords if x > 0]
x_neg = [x for x in x_coords if x < 0]
y_coords = [pt[1] for pt in rotated_coords]
y_pos = [y for y in y_coords if y > 0]
y_neg = [y for y in y_coords if y < 0]
right_bound = max(x_pos)
left_bound = min(x_neg)
top_bound = max(y_pos)
bot_bound = min(y_neg)
new_w = int(abs(right_bound - left_bound))
new_h = int(abs(top_bound - bot_bound))
# We require a translation matrix to keep the image centred
trans_mat = np.matrix([[1, 0, int(new_w * 0.5 - image_w2)],
[0, 1, int(new_h * 0.5 - image_h2)], [0, 0, 1]])
# Compute the tranform for the combined rotation and translation
affine_mat = (np.matrix(trans_mat) * np.matrix(rot_mat))[0:2, :]
# Apply the transform
result = cv2.warpAffine(
image, affine_mat, (new_w, new_h), flags=cv2.INTER_LINEAR)
return result
def largest_rotated_rect(w, h, angle):
"""
Given a rectangle of size wxh that has been rotated by 'angle' (in
radians), computes the width and height of the largest possible
axis-aligned rectangle within the rotated rectangle.
Original JS code by 'Andri' and Magnus Hoff from Stack Overflow
Converted to Python by Aaron Snoswell
"""
quadrant = int(math.floor(angle / (math.pi / 2))) & 3
sign_alpha = angle if ((quadrant & 1) == 0) else math.pi - angle
alpha = (sign_alpha % math.pi + math.pi) % math.pi
bb_w = w * math.cos(alpha) + h * math.sin(alpha)
bb_h = w * math.sin(alpha) + h * math.cos(alpha)
gamma = math.atan2(bb_w, bb_w) if (w < h) else math.atan2(bb_w, bb_w)
delta = math.pi - alpha - gamma
length = h if (w < h) else w
d = length * math.cos(alpha)
a = d * math.sin(alpha) / math.sin(delta)
y = a * math.cos(gamma)
x = y * math.tan(gamma)
return (bb_w - 2 * x, bb_h - 2 * y)
def crop_around_center(image, width, height):
"""
Given a NumPy / OpenCV 2 image, crops it to the given width and height,
    around its centre point
"""
image_size = (image.shape[1], image.shape[0])
image_center = (int(image_size[0] * 0.5), int(image_size[1] * 0.5))
if (width > image_size[0]):
width = image_size[0]
if (height > image_size[1]):
height = image_size[1]
x1 = int(image_center[0] - width * 0.5)
x2 = int(image_center[0] + width * 0.5)
y1 = int(image_center[1] - height * 0.5)
y2 = int(image_center[1] + height * 0.5)
return image[y1:y2, x1:x2]
# angle: degrees
def rotate_and_crop(image, angle):
    image_height, image_width = image.shape[:2]
image_rotated = rotate_image(image, angle)
image_rotated_cropped = crop_around_center(image_rotated,
*largest_rotated_rect(
image_width, image_height,
math.radians(angle)))
return image_rotated_cropped
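# Illustrative sketch: typical use of the rotation helpers above. The file path
# is a placeholder; any image readable by cv2 works.
def _example_rotate_and_crop(path='example.jpg', angle=30):
    """Hedged example only: rotate by `angle` degrees and keep the largest valid crop."""
    img = cv2.imread(path)
    out = rotate_and_crop(img, angle)
    cv2.imwrite('rotated_%d.jpg' % angle, out)
    return out.shape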
def lrelu(x, leak=0.2, name="lrelu"):
with tf.compat.v1.variable_scope(name):
f1 = 0.5 * (1 + leak)
f2 = 0.5 * (1 - leak)
return f1 * x + f2 * abs(x)
# clamps to 0, 1 with leak
def double_lrelu(x, leak=0.1, name="double_lrelu"):
with tf.compat.v1.variable_scope(name):
return tf.minimum(tf.maximum(leak * x, x), leak * x - (leak - 1))
# clamp to lower, upper; leak is RELATIVE
def leaky_clamp(x, lower, upper, leak=0.1, name="leaky_clamp"):
with tf.compat.v1.variable_scope(name):
x = (x - lower) / (upper - lower)
return tf.minimum(tf.maximum(leak * x, x), leak * x -
(leak - 1)) * (upper - lower) + lower
class Tee(object):
def __init__(self, name):
self.file = open(name, 'w')
self.stdout = sys.stdout
self.stderr = sys.stderr
sys.stdout = self
sys.stderr = self
def __del__(self):
self.file.close()
def write(self, data):
self.file.write(data)
self.stdout.write(data)
self.file.flush()
self.stdout.flush()
def write_to_file(self, data):
self.file.write(data)
def flush(self):
self.file.flush()
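# Illustrative usage of Tee above (the file name is a placeholder): once constructed,
# it duplicates everything written to stdout/stderr into the log file.
#
#   tee = Tee('train_log.txt')
#   print('this line goes to the console and to train_log.txt')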
def rgb2lum(image):
image = 0.27 * image[:, :, :, 0] + 0.67 * image[:, :, :,
1] + 0.06 * image[:, :, :, 2]
return image[:, :, :, None]
def tanh01(x):
return tf.tanh(x) * 0.5 + 0.5
def tanh_range(l, r, initial=None):
def get_activation(left, right, initial):
def activation(x):
if initial is not None:
bias = math.atanh(2 * (initial - left) / (right - left) - 1)
else:
bias = 0
return tanh01(x + bias) * (right - left) + left
return activation
return get_activation(l, r, initial)
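# Illustrative sketch: tanh_range builds a squashing activation that maps any real
# input into (l, r); when `initial` is given, a zero input lands on that value.
# The ranges below are arbitrary.
#
#   gamma_act = tanh_range(0.5, 2.0, initial=1.0)
#   y = gamma_act(tf.zeros([1]))   # tensor close to 1.0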
def merge_dict(a, b):
ret = a.copy()
for key, val in list(b.items()):
if key in ret:
            assert False, 'Item ' + key + ' already exists'
else:
ret[key] = val
return ret
def lerp(a, b, l):
return (1 - l) * a + l * b
def read_tiff16(fn):
import tifffile
import numpy as np
img = tifffile.imread(fn)
if img.dtype == np.uint8:
depth = 8
elif img.dtype == np.uint16:
depth = 16
else:
print("Warning: unsupported data type {}. Assuming 16-bit.", img.dtype)
depth = 16
return (img * (1.0 / (2**depth - 1))).astype(np.float32)
def load_config(config_name):
scope = {}
    exec('from config_%s import cfg' % config_name, scope)
return scope['cfg']
# ======================================================================================================================
# added by Hao He
# ======================================================================================================================
def get_artist_batch(folder, size=128, num=64):
import os
js = os.listdir(folder)
np.random.shuffle(js)
imgs = np.zeros((num, size, size, 3))
for i, jpg in enumerate(js[:num]):
img = cv2.imread(folder + '/' + jpg)
img = get_image_center(img) / 255.
imgs[i] = cv2.resize(img, dsize=(size, size))
return imgs
def show_artist_subnails(folder, size=128, num_row=8, num_column=8):
imgs = get_artist_batch(folder, size, num_row * num_column)
return make_image_grid(imgs, per_row=num_row)
def np_tanh_range(l, r):
def get_activation(left, right):
def activation(x):
return np.tanh(x) * (right - left) + left
return activation
return get_activation(l, r)
class WB2:
def filter_param_regressor(self, features):
log_wb_range = np.log(5)
color_scaling = np.exp(
np_tanh_range(-log_wb_range, log_wb_range)(features[:, :3]))
# There will be no division by zero here unless the WB range lower bound is 0
return color_scaling
def process(self, img, param):
lum = (img[:, :, :, 0] * 0.27 + img[:, :, :, 1] * 0.67 +
img[:, :, :, 2] * 0.06 + 1e-5)[:, :, :, None]
tmp = img * param[:, None, None, :]
tmp = tmp / (tmp[:, :, :, 0] * 0.27 + tmp[:, :, :, 1] * 0.67 +
tmp[:, :, :, 2] * 0.06 + 1e-5)[:, :, :, None] * lum
return tmp
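# Illustrative sketch: applying the WB2 filter above to a batch of images in [0, 1].
# The shapes and the random features are arbitrary.
def _example_white_balance(images):
    """Hedged example only: `images` is an (N, H, W, 3) float array in [0, 1]."""
    wb = WB2()
    features = np.abs(np.random.rand(images.shape[0], 3))
    return wb.process(images, wb.filter_param_regressor(features))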
def degrade_images_in_folder(
folder,
dst_folder_suffix,
LIGHTDOWN=True,
UNBALANCECOLOR=True,):
import os
js = os.listdir(folder)
dst_folder = folder + '-' + dst_folder_suffix
try:
os.mkdir(dst_folder)
except:
print('dir exist!')
print('in ' + dst_folder)
num = 3
for j in js:
img = cv2.imread(folder + '/' + j) / 255.
if LIGHTDOWN:
for _ in range(num - 1):
out = pow(img, np.random.uniform(0.4, 0.6)) * np.random.uniform(
0.25, 0.5)
cv2.imwrite(dst_folder + '/' + ('L%d-' % _) + j, out * 255.)
out = img * img
out = out * (1.0 / out.max())
cv2.imwrite(dst_folder + '/' + ('L%d-' % num) + j, out * 255.)
if UNBALANCECOLOR:
filter = WB2()
outs = np.array([img] * num)
features = np.abs(np.random.rand(num, 3))
for _, out in enumerate(
filter.process(outs, filter.filter_param_regressor(features))):
# print out.max()
out /= out.max()
out *= np.random.uniform(0.7, 1)
cv2.imwrite(dst_folder + '/' + ('C%d-' % _) + j, out * 255.)
def vis_images_and_indexs(images, features, dir, name):
# indexs = np.reshape(indexs, (len(indexs),))
# print('visualizing images and indexs: ', images.shape, indexs.shape)
id_imgs = []
for feature in features:
img = np.ones((64, 64, 3))
cv2.putText(img,
str(feature), (4, 33), cv2.FONT_HERSHEY_SIMPLEX, 0.25,
(1.0, 0.0, 0.0))
id_imgs.append(img)
id_imgs = np.stack(id_imgs, axis=0)
# print('id imgs: ', id_imgs.shape)
vis_imgs = np.vstack([images, id_imgs])
image = make_image_grid(vis_imgs, per_row=images.shape[0])
vis_dir = dir
try:
os.mkdir(vis_dir)
except:
pass
cv2.imwrite(os.path.join(vis_dir, name + '.png'), image[:, :, ::-1] * 255.0)
def read_set(name):
if name == 'u_test':
fn = 'data/folds/FiveK_test.txt'
need_reverse = False
elif name == 'u_amt':
fn = 'data/folds/FiveK_test_AMT.txt'
need_reverse = False
elif name == '5k': # add by hao
return list(range(1, 5001))
elif name == '2k_train':
fn = 'data/folds/FiveK_train_first2k.txt'
need_reverse = False
elif name == '2k_target':
fn = 'data/folds/FiveK_train_second2k.txt'
need_reverse = False
else:
assert False, name + ' not found'
l = []
ln = 0
with open(fn, 'r') as f:
for i in f:
if i[0] != '#':
try:
i = int(i)
ln += 1
l.append(i)
except Exception as e:
print(e)
pass
if need_reverse:
l = list(set(range(1, 5001)) - set(l))
return l
'''
util_image.py
Copyright (c) 2014 Zhicheng Yan (zhicheng.yan@live.com)
modified 2017 by Yuanming Hu (yuanmhu@gmail.com)
note that some of the color space conversions are NOT exact, like gamma 1.8 or 2.2
'''
import numpy as np
from skimage import color
import tifffile as tiff
class UtilImageError(Exception):
pass
''' undo gamma correction '''
def linearize_ProPhotoRGB(pp_rgb, reverse=False):
if not reverse:
gamma = 1.8
else:
gamma = 1.0 / 1.8
pp_rgb = np.power(pp_rgb, gamma)
return pp_rgb
def XYZ_chromatic_adapt(xyz, src_white='D65', dest_white='D50'):
if src_white == 'D65' and dest_white == 'D50':
M = [[1.0478112, 0.0228866, -0.0501270], \
[0.0295424, 0.9904844, -0.0170491], \
[-0.0092345, 0.0150436, 0.7521316]]
elif src_white == 'D50' and dest_white == 'D65':
M = [[0.9555766, -0.0230393, 0.0631636], \
[-0.0282895, 1.0099416, 0.0210077], \
[0.0122982, -0.0204830, 1.3299098]]
else:
        raise UtilImageError(
            'invalid pair of source and destination white reference %s,%s'
            % (src_white, dest_white))
M = np.array(M)
sp = xyz.shape
assert sp[2] == 3
xyz = np.transpose(np.dot(M, np.transpose(xyz.reshape((sp[0] * sp[1], 3)))))
return xyz.reshape((sp[0], sp[1], 3))
# pp_rgb float in range [0,1], linear ProPhotoRGB
# reference white is D50
def ProPhotoRGB2XYZ(pp_rgb, reverse=False):
if not reverse:
M = [[0.7976749, 0.1351917, 0.0313534], \
[0.2880402, 0.7118741, 0.0000857], \
[0.0000000, 0.0000000, 0.8252100]]
else:
M = [[1.34594337, -0.25560752, -0.05111183], \
[-0.54459882, 1.5081673, 0.02053511], \
[0, 0, 1.21181275]]
M = np.array(M)
sp = pp_rgb.shape
xyz = np.transpose(
np.dot(M, np.transpose(pp_rgb.reshape((sp[0] * sp[1], sp[2])))))
return xyz.reshape((sp[0], sp[1], 3))
''' normalize L channel so that minimum of L is 0 and maximum of L is 100 '''
def normalize_Lab_image(lab_image):
h, w, ch = lab_image.shape[0], lab_image.shape[1], lab_image.shape[2]
assert ch == 3
lab_image = lab_image.reshape((h * w, ch))
L_ch = lab_image[:, 0]
L_min, L_max = np.min(L_ch), np.max(L_ch)
# print 'before normalization L min %f,Lmax %f' % (L_min,L_max)
scale = 100.0 / (L_max - L_min)
lab_image[:, 0] = (lab_image[:, 0] - L_min) * scale
    # print('after normalization L min %f, Lmax %f' %
    #       (np.min(lab_image[:, 0]), np.max(lab_image[:, 0])))
return lab_image.reshape((h, w, ch))
''' white reference 'D65' '''
def read_tiff_16bit_img_into_XYZ(tiff_fn, exposure=0):
pp_rgb = tiff.imread(tiff_fn)
pp_rgb = np.float64(pp_rgb) / (2**16 - 1.0)
if not pp_rgb.shape[2] == 3:
print('pp_rgb shape', pp_rgb.shape)
raise UtilImageError('image channel number is not 3')
pp_rgb = linearize_ProPhotoRGB(pp_rgb)
pp_rgb *= np.power(2, exposure)
xyz = ProPhotoRGB2XYZ(pp_rgb)
xyz = XYZ_chromatic_adapt(xyz, src_white='D50', dest_white='D65')
return xyz
def ProPhotoRGB2Lab(img):
if not img.shape[2] == 3:
print('pp_rgb shape', img.shape)
raise UtilImageError('image channel number is not 3')
img = linearize_ProPhotoRGB(img)
xyz = ProPhotoRGB2XYZ(img)
lab = color.xyz2lab(xyz)
return lab
def linearProPhotoRGB2Lab(img):
if not img.shape[2] == 3:
print('pp_rgb shape', img.shape)
raise UtilImageError('image channel number is not 3')
xyz = ProPhotoRGB2XYZ(img)
lab = color.xyz2lab(xyz)
return lab
import threading
import time
class AsyncTaskManager:
def __init__(self, target, args=(), kwargs={}):
self.target = target
self.args = args
self.kwargs = kwargs
self.condition = threading.Condition()
self.result = None
self.thread = threading.Thread(target=self.worker)
self.stopped = False
self.thread.daemon = True
self.thread.start()
def worker(self):
while True:
self.condition.acquire()
while self.result is not None:
if self.stopped:
self.condition.release()
return
self.condition.notify()
self.condition.wait()
self.condition.notify()
self.condition.release()
result = (self.target(*self.args, **self.kwargs),)
self.condition.acquire()
self.result = result
self.condition.notify()
self.condition.release()
def get_next(self):
self.condition.acquire()
while self.result is None:
self.condition.notify()
self.condition.wait()
result = self.result[0]
self.result = None
self.condition.notify()
self.condition.release()
return result
def stop(self):
while self.thread.is_alive():
self.condition.acquire()
self.stopped = True
self.condition.notify()
self.condition.release()
def test_async_task_manager():
def task():
print('begin sleeping...')
time.sleep(1)
print('end sleeping.')
task.i += 1
print('returns', task.i)
return task.i
task.i = 0
Async = AsyncTaskManager(task)
t = time.time()
for i in range(5):
ret = Async.get_next()
# ret = task()
print('got', ret)
time.sleep(1)
Async.stop()
print(time.time() - t)
|
lepton.py
|
# lepton 3.5 purethermal camera dll imports
# the proper folder and file are defined by the __init__ file
from Lepton import CCI
from IR16Filters import IR16Capture, NewBytesFrameEvent
# python useful packages
from datetime import datetime
from typing import Tuple
from scipy import ndimage
import PySide2.QtWidgets as qtw
import PySide2.QtCore as qtc
import PySide2.QtGui as qtg
import qimage2ndarray
import numpy as np
import threading
import time
import h5py
import json
import sys
import cv2
import os
def read_file(filename: str) -> dict:
"""
read the recorded data from file.
Parameters
----------
filename: a valid filename path
Returns
-------
obj: dict
a dict where each key is a timestamp which contains the
corresponding image frame
Notes
-----
    the file extension is used to infer which file format is used.
Available formats are:
- ".h5" (gzip format with compression 9)
- ".npz" (compressed numpy format)
- ".json"
"""
# check filename and retrieve the file extension
assert isinstance(filename, str), "'filename' must be a str object."
extension = filename.split(".")[-1].lower()
# check the extension
valid_extensions = np.array(["npz", "json", "h5"])
txt = "file extension must be any of " + str(valid_extensions)
assert extension in valid_extensions, txt
# check if the file exists
assert os.path.exists(filename), "{} does not exists.".format(filename)
# timestamps parsing method
def to_datetime(txt):
return datetime.strptime(txt, LeptonCamera.date_format())
    # read the stored objects
if extension == "json": # json format
with open(filename, "r") as buf:
obj = json.load(buf)
timestamps = map(to_datetime, list(obj.keys()))
samples = np.array(list(obj.values())).astype(np.float16)
elif extension == "npz": # npz format
with np.load(filename, allow_pickle=True) as obj:
timestamps = obj["timestamps"]
samples = obj["samples"]
elif extension == "h5": # h5 format
with h5py.File(filename, "r") as obj:
timestamps = list(obj["timestamps"][:].astype(str))
timestamps = map(to_datetime, timestamps)
samples = obj["samples"][:].astype(np.float16)
return dict(zip(timestamps, samples))
def to_heatmap(img, colormap=cv2.COLORMAP_JET):
"""
    convert a sampled frame to an OpenCV color-scaled map.
Parameters
----------
img: 2D numpy.ndarray
the matrix containing the temperatures collected on one sample.
colormap: OpenCV colormap
the colormap to be used.
Returns
-------
    heatmap: 3D numpy.ndarray
        the (H, W, 3) matrix containing a heatmap representation of the
        provided sample.
"""
# convert to bone scale (flip the values)
gry = (1 - (img - np.min(img)) / (np.max(img) - np.min(img))) * 255
gry = np.expand_dims(gry, 2).astype(np.uint8)
gry = cv2.merge([gry, gry, gry])
    # convert to heatmap
return cv2.applyColorMap(gry, colormap)
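# Illustrative sketch (not part of the original module): turn a recording saved by
# LeptonCamera.save back into one heatmap image per frame. Paths are placeholders.
def _example_export_heatmaps(filename="recording.h5", out_dir="heatmaps"):
    """Hedged example only: requires a file previously written by LeptonCamera.save."""
    frames = read_file(filename)
    os.makedirs(out_dir, exist_ok=True)
    for stamp, sample in frames.items():
        name = stamp.strftime("%Y%m%d-%H%M%S-%f") + ".png"
        heat = to_heatmap(np.asarray(sample, dtype=float))
        cv2.imwrite(os.path.join(out_dir, name), heat)
    return len(frames)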
class LeptonCamera:
"""
    Initialize a Lepton camera object capable of communicating with
    a PureThermal device equipped with a Lepton 3.5 sensor.
Parameters
----------
sampling_frequency: float, int
the sampling frequency in Hz for the camera readings.
It must be <= 8.5 Hz.
"""
# class variables
_device = None
_reader = None
_data = {}
_first = None
_last = None
_dt = 200
_angle = 0
_sampling_frequency = 5
_time_format = "%H:%M:%S.%f"
_date_format = "%Y-%b-%d " + _time_format
@classmethod
def date_format(cls):
return cls._date_format
@classmethod
def time_format(cls):
return cls._time_format
def __init__(self, sampling_frequency: float = 5) -> None:
"""
constructor
"""
super(LeptonCamera, self).__init__()
# find a valid device
devices = []
for i in CCI.GetDevices():
if i.Name.startswith("PureThermal"):
devices += [i]
# if multiple devices are found,
# allow the user to select the preferred one
if len(devices) > 1:
print("Multiple Pure Thermal devices have been found.\n")
for i, d in enumerate(devices):
print("{}. {}".format(i, d))
while True:
idx = input("Select the index of the required device.")
if isinstance(idx, int) and idx in range(len(devices)):
self._device = devices[idx]
break
else:
print("Unrecognized input value.\n")
# if just one device is found, select it
elif len(devices) == 1:
self._device = devices[0]
# tell the user that no valid devices have been found.
else:
self._device = None
# open the found device
txt = "No devices called 'PureThermal' have been found."
assert self._device is not None, txt
self._device = self._device.Open()
self._device.sys.RunFFCNormalization()
# set the gain mode
self._device.sys.SetGainMode(CCI.Sys.GainMode.HIGH)
# set radiometric
try:
self._device.rad.SetTLinearEnableStateChecked(True)
except:
print("this lepton does not support tlinear")
# setup the buffer
self._reader = IR16Capture()
callback = NewBytesFrameEvent(self._add_frame)
self._reader.SetupGraphWithBytesCallback(callback)
# path init
self._path = os.path.sep.join(__file__.split(os.path.sep)[:-4])
# set the sampling frequency
self.set_sampling_frequency(sampling_frequency)
# set the rotation angle
self.set_angle(0)
def _add_frame(self, array: bytearray, width: int, height: int) -> None:
"""
        add a new frame to the buffer of read data.
"""
dt = datetime.now() # time data
img = np.fromiter(array, dtype="uint16").reshape(height, width) # parse
img = (img - 27315.0) / 100.0 # centikelvin --> celsius conversion
img = ndimage.rotate(img, angle=self.angle, reshape=True) # rotation
self._last = [dt, img.astype(np.float16)] # update the last reading
def capture(
self,
save: bool = True,
n_frames: int = None,
seconds: Tuple[float, int] = None,
) -> None:
"""
record a series of frames from the camera.
Parameters
----------
save: bool
if true the data are stored, otherwise nothing except
"last" is updated.
n_frames: None / int
if a positive int is provided, n_frames are captured.
Otherwise, all the frames collected are saved until the
stop command is given.
seconds: None / int
if a positive int is provided, data is sampled for the indicated
amount of seconds.
"""
# check input
assert isinstance(save, bool), "save must be a bool."
if seconds is not None:
txt = "'seconds' must be a float or int."
assert isinstance(seconds, (float, int)), txt
if n_frames is not None:
txt = "'n_frames' must be an int."
assert isinstance(n_frames, int), txt
# start reading data
self._reader.RunGraph()
while self._last is None:
pass
# store the last data according to the given sampling
# frequency
if save:
self._first = self._last
def store():
while self._first is not None:
self._data[self._last[0]] = self._last[1]
time.sleep(self._dt)
t = threading.Thread(target=store)
t.start()
# continue reading until a stopping criterion is met
if seconds is not None:
            def stop_reading(seconds):
                while self._first is None:
                    pass
                dt = 0
                while dt < seconds:
                    dt = (self._last[0] - self._first[0]).total_seconds()
                self.interrupt()
            # watch the elapsed time on a background thread
            t = threading.Thread(target=stop_reading, args=[seconds])
            t.start()
elif n_frames is not None:
def stop_reading(n_frames):
while len(self._data) < n_frames:
pass
self.interrupt()
            t = threading.Thread(target=stop_reading, args=[n_frames])
            t.start()
def interrupt(self) -> None:
"""
stop reading from camera.
"""
self._reader.StopGraph()
self._first = None
@property
def sampling_frequency(self) -> float:
"""
return the actual sampling frequency
"""
return float(self._sampling_frequency)
def set_sampling_frequency(self, sampling_frequency: float) -> None:
"""
set the sampling frequency value and update the _dt argument.
Parameters
----------
sampling_frequency: float, int
the new sampling frequency
"""
# check the input
txt = "'sampling frequency' must be a value in the (0, 8.5] range."
assert isinstance(sampling_frequency, (int, float)), txt
assert 0 < sampling_frequency <= 8.5, txt
self._sampling_frequency = np.round(sampling_frequency, 1)
self._dt = 1.0 / self.sampling_frequency
def set_angle(self, angle: float) -> None:
"""
set the rotation angle in degrees.
Parameters
----------
angle: float
the rotation angle in degrees.
"""
assert isinstance(angle, (int, float)), "'angle' must be a float."
self._angle = angle
@property
def angle(self) -> float:
"""
return the rotation angle
"""
return self._angle
def is_recording(self) -> bool:
return self._first is not None
def clear(self) -> None:
"""
clear the current object memory and buffer
"""
self._data = {}
self._last = None
self._first = None
def to_dict(self) -> dict:
"""
return the sampled data as dict with
timestamps as keys and the sampled data as values.
Returns
-------
d: dict
the dict containing the sampled data.
Timestamps are provided as keys and the sampled data as values.
"""
timestamps = list(self._data.keys())
samples = [i.tolist() for i in self._data.values()]
return dict(zip(timestamps, samples))
def to_numpy(self) -> Tuple[np.ndarray, np.ndarray]:
"""
return the sampled data as numpy arrays.
Returns
-------
t: 1D numpy array
a numpy array containing the datetime object
x: 3D numpy array
a 3D array where each the first dimension correspond to each
sample.
"""
t = np.array(list(self._data.keys()))
x = np.atleast_3d(list(self._data.values()))
return t, x
def save(self, filename: str) -> None:
"""
store the recorded data to file.
Parameters
----------
filename: a valid filename path
Notes
-----
        the file extension is used to infer which file format is required.
Available formats are:
- ".h5" (gzip format with compression 9)
- ".npz" (compressed numpy format)
- ".json"
If an invalid file format is found a TypeError is raised.
"""
# check filename and retrieve the file extension
assert isinstance(filename, str), "'filename' must be a str object."
extension = filename.split(".")[-1].lower()
# ensure the folders exist
        root = os.path.sep.join(filename.split(os.path.sep)[:-1])
        if root and not os.path.exists(root):
            os.makedirs(root, exist_ok=True)
if extension == "json": # json format
times, samples = self.to_numpy()
times = [i.strftime(self._date_format) for i in times]
samples = samples.tolist()
with open(filename, "w") as buf:
json.dump(dict(zip(times, samples)), buf)
elif extension == "npz": # npz format
timestamps, samples = self.to_numpy()
np.savez(filename, timestamps=timestamps, samples=samples)
elif extension == "h5": # h5 format
hf = h5py.File(filename, "w")
times, samples = self.to_numpy()
times = [i.strftime(self._date_format) for i in times]
hf.create_dataset(
"timestamps",
data=times,
compression="gzip",
compression_opts=9,
)
hf.create_dataset(
"samples",
data=samples,
compression="gzip",
compression_opts=9,
)
hf.close()
else: # unsupported formats
txt = "{} format not supported".format(extension)
raise TypeError(txt)
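# Illustrative sketch (not part of the original module): a minimal headless recording
# session with the LeptonCamera class above. Requires an attached PureThermal device;
# the duration and output path are placeholders.
def _example_record(seconds=5, filename="recording.h5"):
    """Hedged example only: capture for `seconds`, wait for completion, then save."""
    cam = LeptonCamera(sampling_frequency=5)
    cam.capture(save=True, seconds=seconds)
    while cam.is_recording():
        time.sleep(0.1)  # the watcher thread calls interrupt() once `seconds` elapse
    cam.save(filename)
    cam.clear()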
class LeptonCameraWidget(qtw.QWidget):
"""
    Initialize a PySide2 widget capable of communicating with
    a PureThermal device equipped with a Lepton 3.5 sensor.
Parameters
----------
sampling_frequency: float, int
the sampling frequency in Hz for the camera readings.
        It must be <= 8.5 Hz.
"""
# available colormaps
_colormaps = {
"JET": cv2.COLORMAP_JET,
"HSV": cv2.COLORMAP_HSV,
"AUTUMN": cv2.COLORMAP_AUTUMN,
"BONE": cv2.COLORMAP_BONE,
"WINTER": cv2.COLORMAP_WINTER,
"RAINBOW": cv2.COLORMAP_RAINBOW,
"OCEAN": cv2.COLORMAP_OCEAN,
"SUMMER": cv2.COLORMAP_SUMMER,
"SPRING": cv2.COLORMAP_SPRING,
"COOL": cv2.COLORMAP_COOL,
"PINK": cv2.COLORMAP_PINK,
"HOT": cv2.COLORMAP_HOT,
"PARULA": cv2.COLORMAP_PARULA,
"MAGMA": cv2.COLORMAP_MAGMA,
"INFERNO": cv2.COLORMAP_INFERNO,
"PLASMA": cv2.COLORMAP_PLASMA,
"VIRIDIS": cv2.COLORMAP_VIRIDIS,
"CIVIDIS": cv2.COLORMAP_CIVIDIS,
"TWILIGHT": cv2.COLORMAP_TWILIGHT,
"TWILIGHT_SHIFTED": cv2.COLORMAP_TWILIGHT_SHIFTED,
"TURBO": cv2.COLORMAP_TURBO,
"DEEPGREEN": cv2.COLORMAP_DEEPGREEN,
}
# private variables
_font_size = 10
_size = 35
_path = ""
_zoom = 1
_timer = None
_dt = None
_view = None
_colormap = list(_colormaps.values())[0]
# device
_camera = None
# widgets
frequencyBox = None
zoomBox = None
cameraLabel = None
rotationButton = None
colorBox = None
quitButton = None
recButton = None
optionsPane = None
savePopup = None
pointerLabel = None
fpsLabel = None
recordLabel = None
def getDevice(self):
"""
return the actual device.
"""
return self._camera
def start(self):
"""
start the timer.
"""
try:
self._timer.stop()
self._timer.start(int(round(1000.0 / self.getFrequency())))
except Exception:
pass
def show(self):
"""
make the widget visible.
"""
self.getDevice().capture(save=False)
self.start()
super(LeptonCameraWidget, self).show()
def close(self) -> None:
"""
terminate the app.
"""
sys.exit()
def isRecording(self):
"""
check if the camera is recording data.
"""
return self.getDevice().is_recording()
def getFrequency(self) -> float:
"""
return the actual sampling frequency
"""
return self.getDevice().sampling_frequency
def setFrequency(self) -> None:
"""
set the sampling frequency.
"""
# update the timer time
self._timer.stop()
freq = self.frequencyBox.value()
self.getDevice().set_sampling_frequency(freq)
self._timer.start(int(round(1000.0 / freq)))
def rotate(self) -> None:
"""
set the rotation angle.
"""
angle = self.getDevice().angle
self.getDevice().set_angle(angle + 90)
def setZoom(self) -> None:
"""
set the actual zoom.
"""
self._zoom = self.zoomBox.value()
def setColor(self, text) -> None:
"""
set the actual colormap
"""
self._colormap = self._colormaps[text]
def getFrame(self):
"""
return the last frame view.
It returns None if no value has been sampled.
"""
return self._dt, self._view
def __init__(self, device: LeptonCamera = None, parent=None) -> None:
"""
constructor
"""
super(LeptonCameraWidget, self).__init__(parent=parent)
self.font = qtg.QFont("Arial", self._font_size)
self._path = os.path.sep.join(__file__.split(os.path.sep)[:-1])
# sampling frequency
self.frequencyBox = qtw.QDoubleSpinBox()
self.frequencyBox.setFont(self.font)
self.frequencyBox.setDecimals(1)
self.frequencyBox.setMinimum(1.0)
self.frequencyBox.setSingleStep(0.1)
        self.frequencyBox.setMaximum(8.5)
        self.frequencyBox.setValue(5.0)
self.frequencyBox.valueChanged.connect(self.setFrequency)
frequencyLayout = qtw.QHBoxLayout()
frequencyLayout.setSpacing(0)
frequencyLayout.setContentsMargins(0, 0, 0, 0)
frequencyLayout.addWidget(self.frequencyBox)
frequencyPane = qtw.QGroupBox("Sampling frequency (Hz)")
frequencyPane.setLayout(frequencyLayout)
# zoom
self.zoomBox = qtw.QDoubleSpinBox()
self.zoomBox.setFont(self.font)
self.zoomBox.setDecimals(1)
self.zoomBox.setMinimum(1)
self.zoomBox.setMaximum(5)
self.zoomBox.setSingleStep(0.1)
self.zoomBox.setValue(3)
self.zoomBox.valueChanged.connect(self.setZoom)
zoomLayout = qtw.QHBoxLayout()
zoomLayout.setSpacing(0)
zoomLayout.setContentsMargins(0, 0, 0, 0)
zoomLayout.addWidget(self.zoomBox)
zoomPane = qtw.QGroupBox("Zoom (X times)")
zoomPane.setLayout(zoomLayout)
# colormaps
self.colorBox = qtw.QComboBox()
self.colorBox.addItems(list(self._colormaps.keys()))
self.colorBox.setFont(self.font)
self.colorBox.currentTextChanged.connect(self.setColor)
colorLayout = qtw.QHBoxLayout()
colorLayout.setSpacing(0)
colorLayout.setContentsMargins(0, 0, 0, 0)
colorLayout.addWidget(self.colorBox)
colorPane = qtw.QGroupBox("Colormap")
colorPane.setLayout(colorLayout)
# options pane
optLine = qtw.QWidget()
optLayout = qtw.QHBoxLayout()
optLayout.addWidget(frequencyPane)
optLayout.addWidget(zoomPane)
optLayout.setSpacing(0)
optLayout.setContentsMargins(0, 0, 0, 0)
optLine.setLayout(optLayout)
optionsLayout = qtw.QVBoxLayout()
optionsLayout.addWidget(optLine)
optionsLayout.addWidget(colorPane)
optionsLayout.setSpacing(0)
optionsLayout.setContentsMargins(0, 0, 0, 0)
self.optionsPane = qtw.QWidget()
self.optionsPane.setLayout(optionsLayout)
# pointer label
self.pointerLabel = qtw.QLabel("")
self.pointerLabel.setFont(self.font)
self.pointerLabel.setAlignment(qtc.Qt.AlignCenter | qtc.Qt.AlignVCenter)
pointerLayout = qtw.QHBoxLayout()
pointerLayout.setSpacing(0)
pointerLayout.setContentsMargins(0, 0, 0, 0)
pointerLayout.addWidget(self.pointerLabel)
pointerPane = qtw.QGroupBox("Pointer temp. (°C)")
pointerPane.setLayout(pointerLayout)
# fps label
self.fpsLabel = qtw.QLabel("")
self.fpsLabel.setFont(self.font)
self.fpsLabel.setAlignment(qtc.Qt.AlignCenter | qtc.Qt.AlignVCenter)
fpsLayout = qtw.QHBoxLayout()
fpsLayout.setSpacing(0)
fpsLayout.setContentsMargins(0, 0, 0, 0)
fpsLayout.addWidget(self.fpsLabel)
fpsPane = qtw.QGroupBox("FPS")
fpsPane.setLayout(fpsLayout)
# camera rotation
self.rotationButton = qtw.QPushButton()
icon = os.path.sep.join([self._path, "_contents", "rotation.png"])
icon = qtg.QIcon(qtg.QPixmap(icon).scaled(self._size, self._size))
self.rotationButton.setIcon(icon)
self.rotationButton.setFlat(True)
self.rotationButton.setFixedHeight(self._size)
self.rotationButton.setFixedWidth(self._size)
self.rotationButton.clicked.connect(self.rotate)
rotationLayout = qtw.QHBoxLayout()
rotationLayout.setSpacing(0)
rotationLayout.setContentsMargins(0, 0, 0, 0)
rotationLayout.addWidget(self.rotationButton)
rotationPane = qtw.QGroupBox("Rotate 90°")
rotationPane.setLayout(rotationLayout)
# data pane
dataLayout = qtw.QHBoxLayout()
dataLayout.setContentsMargins(0, 0, 0, 0)
dataLayout.setSpacing(10)
dataLayout.addWidget(pointerPane)
dataLayout.addWidget(fpsPane)
dataLayout.addWidget(rotationPane)
dataPane = qtw.QWidget()
dataPane.setLayout(dataLayout)
# record pane
self.recordLabel = qtw.QLabel("")
self.recordLabel.setFont(self.font)
self.recordLabel.setAlignment(qtc.Qt.AlignCenter | qtc.Qt.AlignVCenter)
recordLayout = qtw.QHBoxLayout()
recordLayout.setSpacing(0)
recordLayout.setContentsMargins(0, 0, 0, 0)
recordLayout.addWidget(self.recordLabel)
recordPane = qtw.QGroupBox("Recording time")
recordPane.setLayout(recordLayout)
# quit button
self.quitButton = qtw.QPushButton("QUIT")
self.quitButton.setFixedHeight(self._size)
self.quitButton.clicked.connect(self.close)
# rec button
self.recButton = qtw.QPushButton("● START RECORDING")
self.recButton.setFixedHeight(self._size)
self.recButton.clicked.connect(self.record)
self.recButton.setCheckable(True)
# buttons pane
buttonLayout = qtw.QHBoxLayout()
buttonLayout.addWidget(self.recButton)
buttonLayout.addWidget(self.quitButton)
buttonLayout.setSpacing(10)
buttonLayout.setContentsMargins(2, 2, 2, 2)
buttonPane = qtw.QWidget()
buttonPane.setLayout(buttonLayout)
# set the lepton camera object
if device is None:
self._camera = LeptonCamera(sampling_frequency=5)
else:
txt = "device must be a LeptonCamera instance."
assert isinstance(device, LeptonCamera), txt
self._camera = device
# camera widget
self.cameraLabel = qtw.QLabel()
self.cameraLabel.setMouseTracking(True)
self.cameraLabel.installEventFilter(self)
# main layout
layoutLeft = qtw.QVBoxLayout()
layoutLeft.addWidget(self.cameraLabel)
layoutLeft.addWidget(dataPane)
layoutLeft.addWidget(recordPane)
layoutLeft.addWidget(buttonPane)
layoutLeft.setSpacing(10)
layoutLeft.setContentsMargins(0, 0, 0, 0)
leftPane = qtw.QWidget()
leftPane.setLayout(layoutLeft)
layout = qtw.QHBoxLayout()
layout.addWidget(leftPane)
layout.addWidget(self.optionsPane)
layout.setSpacing(10)
layout.setContentsMargins(2, 2, 2, 2)
self.setLayout(layout)
icon = os.path.sep.join([self._path, "_contents", "main.png"])
icon = qtg.QIcon(qtg.QPixmap(icon).scaled(self._size, self._size))
self.setWindowIcon(icon)
self.setWindowTitle("LeptonCameraWidget")
# data saving popup
save_gif = os.path.sep.join([self._path, "_contents", "save.gif"])
movie = qtg.QMovie(save_gif)
animation = qtw.QLabel()
animation.setFixedSize(256, 256)
animation.setMovie(movie)
movie.start()
message = qtw.QLabel("SAVING COLLECTED DATA")
message.setAlignment(qtc.Qt.AlignCenter)
message.setFont(self.font)
diagLayout = qtw.QVBoxLayout()
diagLayout.addWidget(animation)
diagLayout.addWidget(message)
popup = qtw.QDialog()
popup.setWindowFlags(qtc.Qt.FramelessWindowHint)
popup.setModal(True)
popup.setLayout(diagLayout)
popup.setWindowTitle("Saving data")
popup.show()
popup.hide()
self.savePopup = popup
# stream handlers
self._timer = qtc.QTimer()
self._timer.timeout.connect(self.updateView)
# setup the pointer temperature
self._pointer_temp = "°C"
# initialize the parameters
self.setFrequency()
self.setZoom()
self.setColor(list(self._colormaps.keys())[0])
    def eventFilter(self, source: qtw.QWidget, event: qtc.QEvent) -> bool:
        """
        track the mouse pointer over the camera image and update the
        displayed pointer temperature.
        """
# check if the pointer is on the image
# and update pointer temperature
if source == self.cameraLabel:
if event.type() == qtc.QEvent.MouseMove:
if self.getDevice()._last is not None:
view = self.getFrame()[1]
try:
temp = view[event.y(), event.x()]
self.pointerLabel.setText("{:0.1f}".format(temp))
except Exception:
self.pointerLabel.setText("")
# the pointer leaves the image,
# thus no temperature has to be shown
elif event.type() == qtc.QEvent.Leave:
self.pointerLabel.setText("")
return False
def record(self) -> None:
"""
start and stop the recording of the data.
"""
if self.recButton.isChecked():
self.getDevice().interrupt()
self.getDevice().capture(save=True)
self.recButton.setText("■ STOP RECORDING")
else:
self.getDevice().interrupt()
self.recButton.setText("● START RECORDING")
if len(self.getDevice()._data) > 0:
# let the user decide where to save the data
file_filters = "H5 (*.h5)"
file_filters += ";;NPZ (*.npz)"
file_filters += ";;JSON (*.json)"
options = qtw.QFileDialog.Options()
options |= qtw.QFileDialog.DontUseNativeDialog
path, ext = qtw.QFileDialog.getSaveFileName(
parent=self,
filter=file_filters,
dir=self._path,
options=options,
)
# prepare the data
if len(path) > 0:
path = path.replace("/", os.path.sep)
ext = ext.split(" ")[0].lower()
if not path.endswith(ext):
path += "." + ext
# save data
try:
self.savePopup.show()
self.getDevice().save(path)
self._path = ".".join(path.split(".")[:-1])
except TypeError as err:
msgBox = qtw.QMessageBox()
msgBox.setIcon(qtw.QMessageBox.Warning)
                        msgBox.setText(str(err))
msgBox.setFont(qtg.QFont("Arial", self._font_size))
msgBox.setWindowTitle("ERROR")
msgBox.setStandardButtons(qtw.QMessageBox.Ok)
msgBox.exec()
finally:
self.savePopup.hide()
# reset the camera buffer and restart the data streaming
self.getDevice().clear()
self.getDevice().capture(save=False)
def updateView(self) -> None:
"""
update the last frame and display it.
"""
# no view is available
if self.getDevice()._last is None:
self._view = None
self._dt = None
else:
dt, img = self.getDevice()._last
# update the last datetime if required
if self._dt is None:
self._dt = dt
# update the view
self._view = ndimage.zoom(input=img.astype(float), zoom=self._zoom)
# convert to heatmap
heat = to_heatmap(self._view, self._colormap)
# set the recording time if required
if self.isRecording():
tt = dt - list(self.getDevice()._data.keys())[0]
tt = tt.total_seconds()
h, remainder = divmod(tt, 3600)
m, remainder = divmod(remainder, 60)
s, f = divmod(remainder, 1)
h = int(h)
m = int(m)
s = int(s)
f = int(f * 1000)
lbl = "{:02d}:{:02d}:{:02d}.{:03d}".format(h, m, s, f)
self.recordLabel.setText(lbl)
else:
self.recordLabel.setText("")
# update the view
qimage = qimage2ndarray.array2qimage(heat)
self.cameraLabel.setPixmap(qtg.QPixmap.fromImage(qimage))
# update the fps
den = (dt - self._dt).total_seconds()
fps = 0.0 if den == 0.0 else (1.0 / den)
self.fpsLabel.setText("{:0.2f}".format(fps))
# update the datetime
self._dt = dt
# adjust the size
self.adjustSize()
|
app.py
|
from tkinter import *
from tkinter.ttk import Combobox
import tkinter.messagebox
import whois
import subprocess
import threading
import socket
class Googles:
def __init__(self,root):
self.root=root
self.root.title("Domain Search")
self.root.geometry("500x400")
self.root.iconbitmap("logo220.ico")
self.root.resizable(0,0)
search_domain=StringVar()
domain_category=StringVar()
#=================================================================================#
def clear():
search_domain.set("")
domain_category.set("Select Categories")
text.delete("1.0","end")
def searchs():
try:
with open("C:/TEMP/domain.txt","w") as f:
if search_domain.get()!="":
if domain_category.get()!="Select Categories":
if domain_category.get()=="Name":
domain= whois.whois(search_domain.get())
f.write(domain.domain_name+"\n")
if domain_category.get()=="Expiration_Date":
domain=whois.whois(search_domain.get())
x=domain.expiration_date
f.write(str(x)+"\n")
if domain_category.get()=="Last_Updated":
domain=whois.whois(search_domain.get())
x=domain.updated_date
f.write(str(x)+"\n")
if domain_category.get()=="Registrar":
domain=whois.whois(search_domain.get())
f.write(domain.registrar+"\n")
if domain_category.get()=="Creation_Date":
domain=whois.whois(search_domain.get())
x=domain.creation_date
f.write(str(x)+"\n")
if domain_category.get()=="All":
domain=whois.whois(search_domain.get())
f.write(str(domain))
if domain_category.get()=="IP Address":
ipaddress=socket.gethostbyname(search_domain.get())
f.write(str(ipaddress))
else:
tkinter.messagebox.showerror("Error","Please Select Categories")
else:
tkinter.messagebox.showerror("Error","Please Enter Domain Name for search")
with open("C:/TEMP/domain.txt","r") as f:
text.insert("end",f.read())
except Exception as e:
#print(e)
tkinter.messagebox.showerror("Error","Please Enter only Domain Name")
def thread_search():
t1=threading.Thread(target=searchs)
t1.start()
#==================================================================================#
def on_enter1(e):
but_search['background']="black"
but_search['foreground']="cyan"
def on_leave1(e):
but_search['background']="SystemButtonFace"
but_search['foreground']="SystemButtonText"
def on_enter2(e):
but_clear['background']="black"
but_clear['foreground']="cyan"
def on_leave2(e):
but_clear['background']="SystemButtonFace"
but_clear['foreground']="SystemButtonText"
#==================================================================================#
mainframe=Frame(self.root,width=500,height=400,bd=3,relief="ridge")
mainframe.place(x=0,y=0)
firstframe=Frame(mainframe,width=494,height=150,bd=3,relief="ridge")
firstframe.place(x=0,y=0)
secondframe=Frame(mainframe,width=494,height=243,bd=3,relief="ridge")
secondframe.place(x=0,y=150)
#================================firstframe===================================================#
lab_frame=LabelFrame(firstframe,width=488,height=145,text="Domain Search",bg="#89b0ae",fg="white")
lab_frame.place(x=0,y=0)
#==============================================================================================#
lab=Label(lab_frame,text="Search Domain",font=('times new roman',12),bg="#89b0ae")
lab.place(x=0,y=5)
ent_search=Entry(lab_frame,width=37,font=('times new roman',12),bd=3,relief="ridge",textvariable=search_domain)
ent_search.place(x=170,y=5)
lab_results=Label(lab_frame,text="Domain Categories:",font=('times new roman',12),bg="#89b0ae")
lab_results.place(x=0,y=50)
but_search=Button(lab_frame,width=13,text="Search",font=('times new roman',12),cursor="hand2",command=thread_search)
but_search.place(x=50,y=90)
but_search.bind("<Enter>",on_enter1)
but_search.bind("<Leave>",on_leave1)
fileselect=["Name","IP Address","Expiration_Date","Last_Updated","Registrar","Creation_Date","All"]
fileselect_combo=Combobox(firstframe,values=fileselect,font=('arial',12),width=20,state="readonly",textvariable=domain_category)
fileselect_combo.set("Select Categories")
fileselect_combo.place(x=200,y=60)
but_clear=Button(lab_frame,width=13,text="Clear",font=('times new roman',12),cursor="hand2",command=clear)
but_clear.place(x=300,y=90)
but_clear.bind("<Enter>",on_enter2)
but_clear.bind("<Leave>",on_leave2)
#=============================================================================================================#
scol=Scrollbar(secondframe,orient="vertical")
scol.place(relx=1, rely=0, relheight=1, anchor='ne')
text=Text(secondframe,height=12,width=58,font=('times new roman',12),yscrollcommand=scol.set,relief="sunken",bd=3,fg="black")
text.place(x=0,y=0)
scol.config(command=text.yview)
if __name__ == "__main__":
root=Tk()
app=Googles(root)
root.mainloop()
|
utils.py
|
from collections import namedtuple
import asyncio
import os
import sys
import signal
import operator
import uuid
from functools import reduce
from weakref import ref, WeakKeyDictionary
import types
import inspect
from inspect import Parameter, Signature
import itertools
import abc
from collections.abc import Iterable
import numpy as np
from cycler import cycler
import datetime
from functools import wraps, partial
import threading
import time
from tqdm import tqdm
from tqdm.utils import _screen_shape_wrapper, _term_move_up, _unicode
import warnings
import msgpack
import msgpack_numpy
import zict
try:
# cytools is a drop-in replacement for toolz, implemented in Cython
from cytools import groupby
except ImportError:
from toolz import groupby
class Msg(namedtuple("Msg_base", ["command", "obj", "args", "kwargs", "run"])):
"""Namedtuple sub-class to encapsulate a message from the plan to the RE.
This class provides 3 key features:
1. dot access to the contents
2. default values and a variadic signature for args / kwargs
3. a nice repr
"""
__slots__ = ()
def __new__(cls, command, obj=None, *args, run=None, **kwargs):
return super(Msg, cls).__new__(cls, command, obj, args, kwargs, run)
def __repr__(self):
return (f"Msg({self.command!r}, obj={self.obj!r}, "
f"args={self.args}, kwargs={self.kwargs}, run={self.run!r})")
class RunEngineControlException(Exception):
"""Exception for signaling within the RunEngine."""
class RequestAbort(RunEngineControlException):
"""Request that the current run be aborted."""
exit_status = 'abort'
class RequestStop(RunEngineControlException):
"""Request that the current run be stopped and marked successful."""
exit_status = 'success'
class RunEngineInterrupted(Exception):
pass
class NoReplayAllowed(Exception):
pass
class IllegalMessageSequence(Exception):
pass
class FailedPause(Exception):
pass
class FailedStatus(Exception):
"""Exception to be raised if a SatusBase object reports done but failed"""
class InvalidCommand(KeyError):
pass
class PlanHalt(GeneratorExit):
pass
class RampFail(RuntimeError):
...
PLAN_TYPES = (types.GeneratorType,)
try:
from types import CoroutineType
except ImportError:
# < py35
pass
else:
PLAN_TYPES = PLAN_TYPES + (CoroutineType, )
del CoroutineType
def ensure_generator(plan):
"""
Ensure that the input is a generator.
Parameters
----------
plan : iterable or iterator
Returns
-------
gen : coroutine
"""
if isinstance(plan, Msg):
return single_gen(plan)
gen = iter(plan) # no-op on generators; needed for classes
if not isinstance(gen, PLAN_TYPES):
# If plan does not support .send, we must wrap it in a generator.
gen = (msg for msg in gen)
return gen
def single_gen(msg):
'''Turn a single message into a plan
If ``lambda x: yield x`` were valid Python, this would be equivalent.
In Python 3.6 or 3.7 we might get lambda generators.
Parameters
----------
msg : Msg
a single message
Yields
------
msg : Msg
the input message
'''
return (yield msg)
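# Illustrative sketch (not part of the original module): both a bare Msg and
# a plain list of Msgs are normalized into generators that support .send().
# >>> list(ensure_generator(Msg('null')))            # via single_gen
# [Msg('null', obj=None, args=(), kwargs={}, run=None)]
# >>> gen = ensure_generator([Msg('open_run'), Msg('close_run')])
# >>> next(gen).command
# 'open_run'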
class SignalHandler:
"""Context manager for signal handing
If multiple signals come in quickly, they may not all be seen, quoting
the libc manual:
Remember that if there is a particular signal pending for your
process, additional signals of that same type that arrive in the
meantime might be discarded. For example, if a SIGINT signal is
pending when another SIGINT signal arrives, your program will
probably only see one of them when you unblock this signal.
https://www.gnu.org/software/libc/manual/html_node/Checking-for-Pending-Signals.html
"""
def __init__(self, sig, log=None):
self.sig = sig
self.interrupted = False
self.count = 0
self.log = log
def __enter__(self):
self.interrupted = False
self.released = False
self.count = 0
self.original_handler = signal.getsignal(self.sig)
def handler(signum, frame):
self.interrupted = True
self.count += 1
if self.log is not None:
self.log.debug('SignalHandler caught SIGINT; count is %r',
self.count)
if self.count > 10:
orig_func = self.original_handler
self.release()
orig_func(signum, frame)
self.handle_signals()
signal.signal(self.sig, handler)
return self
def __exit__(self, type, value, tb):
self.release()
def release(self):
if self.released:
return False
signal.signal(self.sig, self.original_handler)
self.released = True
return True
def handle_signals(self):
...
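# Illustrative usage sketch (not part of the original module); ``long_step``
# is a hypothetical blocking call.
# >>> with SignalHandler(signal.SIGINT) as sh:
# ...     long_step()
# ...     if sh.interrupted:      # True if a SIGINT arrived in the meantime
# ...         print('caught %d SIGINT(s)' % sh.count)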
class SigintHandler(SignalHandler):
def __init__(self, RE):
super().__init__(signal.SIGINT, log=RE.log)
self.RE = RE
self.last_sigint_time = None # time most recent SIGINT was processed
self.num_sigints_processed = 0 # count SIGINTs processed
def __enter__(self):
return super().__enter__()
def handle_signals(self):
# Check for pause requests from keyboard.
# TODO, there is a possible race condition between the two
# pauses here
if self.RE.state.is_running and (not self.RE._interrupted):
if (self.last_sigint_time is None or
time.time() - self.last_sigint_time > 10):
# reset the counter to 1
# It's been 10 seconds since the last SIGINT. Reset.
self.count = 1
if self.last_sigint_time is not None:
self.log.debug("It has been 10 seconds since the "
"last SIGINT. Resetting SIGINT "
"handler.")
# weeee push these to threads to not block the main thread
threading.Thread(target=self.RE.request_pause,
args=(True,)).start()
print("A 'deferred pause' has been requested. The "
"RunEngine will pause at the next checkpoint. "
"To pause immediately, hit Ctrl+C again in the "
"next 10 seconds.")
self.last_sigint_time = time.time()
elif self.count == 2:
print('trying a second time')
# - Ctrl-C twice within 10 seconds -> hard pause
self.log.debug("RunEngine detected two SIGINTs. "
"A hard pause will be requested.")
threading.Thread(target=self.RE.request_pause,
args=(False,)).start()
self.last_sigint_time = time.time()
class CallbackRegistry:
"""
    See matplotlib.cbook.CallbackRegistry. This is a simplified version,
    since ``bluesky`` is Python 3.4+ only.
"""
def __init__(self, ignore_exceptions=False, allowed_sigs=None):
self.ignore_exceptions = ignore_exceptions
self.allowed_sigs = allowed_sigs
self.callbacks = dict()
self._cid = 0
self._func_cid_map = {}
def __getstate__(self):
# We cannot currently pickle the callables in the registry, so
# return an empty dictionary.
return {}
def __setstate__(self, state):
# re-initialise an empty callback registry
self.__init__()
def connect(self, sig, func):
"""Register ``func`` to be called when ``sig`` is generated
Parameters
----------
sig
func
Returns
-------
cid : int
The callback index. To be used with ``disconnect`` to deregister
``func`` so that it will no longer be called when ``sig`` is
generated
"""
if self.allowed_sigs is not None:
if sig not in self.allowed_sigs:
raise ValueError("Allowed signals are {0}".format(
self.allowed_sigs))
self._func_cid_map.setdefault(sig, WeakKeyDictionary())
# Note proxy not needed in python 3.
# TODO rewrite this when support for python2.x gets dropped.
# Following discussion with TC: weakref.WeakMethod can not be used to
# replace the custom 'BoundMethodProxy', because it does not accept
# the 'destroy callback' as a parameter. The 'destroy callback' is
# necessary to automatically unsubscribe CB registry from the callback
# when the class object is destroyed and this is the main purpose of
# BoundMethodProxy.
proxy = _BoundMethodProxy(func)
if proxy in self._func_cid_map[sig]:
return self._func_cid_map[sig][proxy]
proxy.add_destroy_callback(self._remove_proxy)
self._cid += 1
cid = self._cid
self._func_cid_map[sig][proxy] = cid
self.callbacks.setdefault(sig, dict())
self.callbacks[sig][cid] = proxy
return cid
def _remove_proxy(self, proxy):
# need the list because `del self._func_cid_map[sig]` mutates the dict
for sig, proxies in list(self._func_cid_map.items()):
try:
# Here we need to delete the last reference to proxy (in 'self.callbacks[sig]')
# The respective entries in 'self._func_cid_map' are deleted automatically,
# since 'self._func_cid_map[sig]' entries are WeakKeyDictionary objects.
del self.callbacks[sig][proxies[proxy]]
except KeyError:
pass
# Remove dictionary items for signals with no assigned callbacks
if len(self.callbacks[sig]) == 0:
del self.callbacks[sig]
del self._func_cid_map[sig]
def disconnect(self, cid):
"""Disconnect the callback registered with callback id *cid*
Parameters
----------
cid : int
The callback index and return value from ``connect``
"""
for eventname, callbackd in self.callbacks.items():
try:
# This may or may not remove entries in 'self._func_cid_map'.
del callbackd[cid]
except KeyError:
continue
else:
# Look for cid in 'self._func_cid_map' as well. It may still be there.
for sig, functions in self._func_cid_map.items():
for function, value in list(functions.items()):
if value == cid:
del functions[function]
return
def process(self, sig, *args, **kwargs):
"""Process ``sig``
All of the functions registered to receive callbacks on ``sig``
will be called with ``args`` and ``kwargs``
Parameters
----------
sig
args
kwargs
"""
if self.allowed_sigs is not None:
if sig not in self.allowed_sigs:
raise ValueError("Allowed signals are {0}".format(
self.allowed_sigs))
exceptions = []
if sig in self.callbacks:
for cid, func in list(self.callbacks[sig].items()):
try:
func(*args, **kwargs)
except ReferenceError:
self._remove_proxy(func)
except Exception as e:
if self.ignore_exceptions:
exceptions.append((e, sys.exc_info()[2]))
else:
raise
return exceptions
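# Illustrative sketch (not part of the original module): registering, firing
# and removing a callback; the 'start' signal name is just an example and
# must be in allowed_sigs if that restriction is used.
# >>> def on_start(doc):
# ...     print('run started:', doc['uid'])
# >>> registry = CallbackRegistry()
# >>> cid = registry.connect('start', on_start)
# >>> _ = registry.process('start', {'uid': 'abc'})   # calls on_start(...)
# >>> registry.disconnect(cid)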
class _BoundMethodProxy:
'''
Our own proxy object which enables weak references to bound and unbound
methods and arbitrary callables. Pulls information about the function,
class, and instance out of a bound method. Stores a weak reference to the
instance to support garbage collection.
@organization: IBM Corporation
@copyright: Copyright (c) 2005, 2006 IBM Corporation
@license: The BSD License
Minor bugfixes by Michael Droettboom
'''
def __init__(self, cb):
self._hash = hash(cb)
self._destroy_callbacks = []
try:
# This branch is successful if 'cb' bound method and class method,
# but destroy_callback mechanism works only for bound methods,
# since cb.__self__ points to class instance only for
# bound methods, not for class methods. Therefore destroy_callback
# will not be called for class methods.
try:
self.inst = ref(cb.__self__, self._destroy)
except TypeError:
self.inst = None
self.func = cb.__func__
self.klass = cb.__self__.__class__
except AttributeError:
# 'cb' is a function, callable object or static method.
# No weak reference is created, strong reference is stored instead.
self.inst = None
self.func = cb
self.klass = None
def add_destroy_callback(self, callback):
self._destroy_callbacks.append(_BoundMethodProxy(callback))
def _destroy(self, wk):
for callback in self._destroy_callbacks:
try:
callback(self)
except ReferenceError:
pass
def __getstate__(self):
d = self.__dict__.copy()
# de-weak reference inst
inst = d['inst']
if inst is not None:
d['inst'] = inst()
return d
def __setstate__(self, statedict):
self.__dict__ = statedict
inst = statedict['inst']
# turn inst back into a weakref
if inst is not None:
self.inst = ref(inst)
def __call__(self, *args, **kwargs):
'''
Proxy for a call to the weak referenced object. Take
arbitrary params to pass to the callable.
Raises `ReferenceError`: When the weak reference refers to
a dead object
'''
if self.inst is not None and self.inst() is None:
raise ReferenceError
elif self.inst is not None:
# build a new instance method with a strong reference to the
# instance
mtd = types.MethodType(self.func, self.inst())
else:
# not a bound method, just return the func
mtd = self.func
# invoke the callable and return the result
return mtd(*args, **kwargs)
def __eq__(self, other):
'''
Compare the held function and instance with that held by
another proxy.
'''
try:
if self.inst is None:
return self.func == other.func and other.inst is None
else:
return self.func == other.func and self.inst() == other.inst()
except Exception:
return False
def __ne__(self, other):
'''
Inverse of __eq__.
'''
return not self.__eq__(other)
def __hash__(self):
return self._hash
# The following two code blocks are adapted from David Beazley's
# 'Python 3 Metaprogramming' https://www.youtube.com/watch?v=sPiWg5jSoZI
class StructMeta(type):
def __new__(cls, name, bases, clsdict):
clsobj = super().__new__(cls, name, bases, clsdict)
args_params = [Parameter(name, Parameter.POSITIONAL_OR_KEYWORD)
for name in clsobj._fields]
kwargs_params = [Parameter(name, Parameter.KEYWORD_ONLY, default=None)
for name in ['md']]
sig = Signature(args_params + kwargs_params)
setattr(clsobj, '__signature__', sig)
return clsobj
class Struct(metaclass=StructMeta):
"The _fields of any subclass become its attritubes and __init__ args."
_fields = []
def __init__(self, *args, **kwargs):
# Now bind default values of optional arguments.
# If it seems like there should be a cleaner way to do this, see
# http://bugs.python.org/msg221104
bound = self.__signature__.bind(*args, **kwargs)
for name, param in self.__signature__.parameters.items():
if (name not in bound.arguments and
param.default is not inspect._empty):
bound.arguments[name] = param.default
for name, val in bound.arguments.items():
setattr(self, name, val)
self.flyers = []
def set(self, **kwargs):
"Update attributes as keyword arguments."
for attr, val in kwargs.items():
setattr(self, attr, val)
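# Illustrative sketch (not part of the original module): StructMeta turns
# ``_fields`` into positional-or-keyword __init__ parameters plus an optional
# ``md`` keyword; ``detector`` below is a hypothetical object.
# >>> class CountArgs(Struct):
# ...     _fields = ['detectors', 'num']
# >>> c = CountArgs([detector], 3, md={'purpose': 'demo'})
# >>> c.num, c.md, c.flyers
# (3, {'purpose': 'demo'}, [])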
SUBS_NAMES = ['all', 'start', 'stop', 'event', 'descriptor']
def normalize_subs_input(subs):
"Accept a callable, a list, or a dict. Normalize to a dict of lists."
normalized = {name: [] for name in SUBS_NAMES}
if subs is None:
pass
elif callable(subs):
normalized['all'].append(subs)
elif hasattr(subs, 'items'):
for key, funcs in list(subs.items()):
if key not in SUBS_NAMES:
raise KeyError("Keys must be one of {!r:0}".format(SUBS_NAMES))
if callable(funcs):
normalized[key].append(funcs)
else:
normalized[key].extend(funcs)
elif isinstance(subs, Iterable):
normalized['all'].extend(subs)
else:
raise ValueError("Subscriptions should be a callable, a list of "
"callables, or a dictionary mapping subscription "
"names to lists of callables.")
# Validates that all entries are callables.
for name, funcs in normalized.items():
for func in funcs:
if not callable(func):
raise ValueError("subs values must be functions or lists "
"of functions. The offending entry is\n "
"{0}".format(func))
return normalized
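# Illustrative sketch (not part of the original module): a bare callable is
# attached to the 'all' key; a dict maps named subscriptions to lists.
# >>> cb = lambda name, doc: None
# >>> normalize_subs_input(cb)['all'] == [cb]
# True
# >>> normalize_subs_input({'start': cb})['start'] == [cb]
# True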
class DefaultSubs:
"""a class-level descriptor"""
def __init__(self, default=None):
self._value = normalize_subs_input(default)
def __get__(self, instance, owner):
return self._value
def __set__(self, instance, value):
self._value = normalize_subs_input(value)
class Subs:
"""a 'reusable' property"""
def __init__(self, default=None):
self.default = normalize_subs_input(default)
self.data = WeakKeyDictionary()
def __get__(self, instance, owner):
return self.data.get(instance, self.default)
def __set__(self, instance, value):
self.data[instance] = normalize_subs_input(value)
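# Illustrative sketch (not part of the original module): Subs is meant to be
# used as a class-level descriptor so each instance can override the default
# subscriptions; assigned values pass through normalize_subs_input.
# >>> class MyPlanRunner:
# ...     subs = Subs()
# >>> runner = MyPlanRunner()
# >>> runner.subs = lambda name, doc: None   # stored as {'all': [...], ...}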
def snake_cyclers(cyclers, snake_booleans):
"""
Combine cyclers with a 'snaking' back-and-forth order.
Parameters
----------
cyclers : cycler.Cycler
or any iterable that yields dictionaries of lists
snake_booleans : list
a list of the same length as cyclers indicating whether each cycler
should 'snake' (True) or not (False). Note that the first boolean
does not make a difference because the first (slowest) dimension
does not repeat.
Returns
-------
result : cycler
"""
if len(cyclers) != len(snake_booleans):
raise ValueError("number of cyclers does not match number of booleans")
lengths = []
new_cyclers = []
for c in cyclers:
lengths.append(len(c))
total_length = np.product(lengths)
for i, (c, snake) in enumerate(zip(cyclers, snake_booleans)):
num_tiles = np.product(lengths[:i])
num_repeats = np.product(lengths[i+1:])
for k, v in c._transpose().items():
if snake:
v = v + v[::-1]
v2 = np.tile(np.repeat(v, num_repeats), num_tiles)
expanded = v2[:total_length]
new_cyclers.append(cycler(k, expanded))
return reduce(operator.add, new_cyclers)
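# Illustrative sketch (not part of the original module): with the second
# (faster) dimension snaked, the inner axis reverses direction on every other
# pass of the outer axis. Conceptually,
#     snake_cyclers([cycler('x', [1, 2]), cycler('y', [10, 20, 30])],
#                   [False, True])
# visits the points
#     (1, 10), (1, 20), (1, 30), (2, 30), (2, 20), (2, 10)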
def first_key_heuristic(device):
"""
Get the fully-qualified data key for the first entry in describe().
    This will raise if that entry's `describe()` method does not return a
dictionary with exactly one key.
"""
return next(iter(device.describe()))
def ancestry(obj):
"""
List self, parent, grandparent, ... back to ultimate ancestor.
Parameters
----------
obj : object
must have a `parent` attribute
Returns
-------
ancestry : list
list of objects, starting with obj and tracing parents recursively
"""
ancestry = []
ancestor = obj
while True:
ancestry.append(ancestor)
if ancestor.parent is None:
return ancestry
ancestor = ancestor.parent
def root_ancestor(obj):
"""
Traverse ancestry to obtain root ancestor.
Parameters
----------
obj : object
must have a `parent` attribute
Returns
-------
root : object
"""
return ancestry(obj)[-1]
def share_ancestor(obj1, obj2):
"""
Check whether obj1 and obj2 have a common ancestor.
Parameters
----------
obj1 : object
must have a `parent` attribute
obj2 : object
must have a `parent` attribute
Returns
-------
result : boolean
"""
return ancestry(obj1)[-1] is ancestry(obj2)[-1]
def separate_devices(devices):
"""
Filter out elements that have other elements as their ancestors.
If A is an ancestor of B, [A, B, C] -> [A, C].
    Parameters
----------
devices : list
All elements must have a `parent` attribute.
Returns
-------
result : list
subset of input, with order retained
"""
result = []
for det in devices:
for existing_det in result[:]:
if existing_det in ancestry(det):
# known issue: here we assume that det is in the read_attrs
# of existing_det -- to be addressed after plans.py refactor
break
elif det in ancestry(existing_det):
# existing_det is redundant; use det in its place
result.remove(existing_det)
else:
result.append(det)
return result
def all_safe_rewind(devices):
    '''Check whether all devices can have their trigger method re-run on resume.
Parameters
----------
devices : list
List of devices
Returns
-------
safe_rewind : bool
        True if all the devices can safely be re-triggered
'''
for d in devices:
if hasattr(d, 'rewindable'):
rewindable = d.rewindable.get()
if not rewindable:
return False
return True
class PersistentDict(zict.Func):
"""
    A MutableMapping which syncs its contents to disk.
The contents are stored as msgpack-serialized files, with one file per item
in the mapping.
Note that when an item is *mutated* it is not immediately synced:
>>> d['sample'] = {"color": "red"} # immediately synced
>>> d['sample']['shape'] = 'bar' # not immediately synced
but that the full contents are synced to disk when the PersistentDict
instance is garbage collected.
"""
def __init__(self, directory):
self._directory = directory
self._file = zict.File(directory)
self._cache = {}
super().__init__(self._dump, self._load, self._file)
self.reload()
# Similar to flush() or _do_update(), but without reference to self
# to avoid circular reference preventing collection.
# NOTE: This still doesn't guarantee call on delete or gc.collect()!
# Explicitly call flush() if immediate write to disk required.
def finalize(zfile, cache, dump):
zfile.update((k, dump(v)) for k, v in cache.items())
import weakref
self._finalizer = weakref.finalize(
self, finalize, self._file, self._cache, PersistentDict._dump)
@property
def directory(self):
return self._directory
def __setitem__(self, key, value):
self._cache[key] = value
super().__setitem__(key, value)
def __getitem__(self, key):
return self._cache[key]
def __delitem__(self, key):
del self._cache[key]
super().__delitem__(key)
def __repr__(self):
return f"<{self.__class__.__name__} {dict(self)!r}>"
@staticmethod
def _dump(obj):
"Encode as msgpack using numpy-aware encoder."
# See https://github.com/msgpack/msgpack-python#string-and-binary-type
# for more on use_bin_type.
return msgpack.packb(
obj,
default=msgpack_numpy.encode,
use_bin_type=True)
@staticmethod
def _load(file):
return msgpack.unpackb(
file,
object_hook=msgpack_numpy.decode,
raw=False)
def flush(self):
"""Force a write of the current state to disk"""
for k, v in self.items():
super().__setitem__(k, v)
def reload(self):
"""Force a reload from disk, overwriting current cache"""
self._cache = dict(super().items())
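# Illustrative sketch (not part of the original module); the directory path is
# hypothetical. Mutations of stored values are only synced on flush() (or when
# the instance is finalized), as described in the class docstring.
# >>> d = PersistentDict('/tmp/bluesky_metadata')
# >>> d['sample'] = {'color': 'red'}      # written to disk immediately
# >>> d['sample']['shape'] = 'bar'        # in-memory only until...
# >>> d.flush()                           # ...an explicit sync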
SEARCH_PATH = []
ENV_VAR = 'BLUESKY_HISTORY_PATH'
if ENV_VAR in os.environ:
SEARCH_PATH.append(os.environ[ENV_VAR])
SEARCH_PATH.extend([os.path.expanduser('~/.config/bluesky/bluesky_history.db'),
'/etc/bluesky/bluesky_history.db'])
def get_history():
"""
DEPRECATED: Return a dict-like object for stashing metadata.
If historydict is not installed, return a dict.
If historydict is installed, look for a sqlite file in:
- $BLUESKY_HISTORY_PATH, if defined
- ~/.config/bluesky/bluesky_history.db
- /etc/bluesky/bluesky_history.db
If no existing file is found, create a new sqlite file in:
- $BLUESKY_HISTORY_PATH, if defined
- ~/.config/bluesky/bluesky_history.db, otherwise
"""
try:
import historydict
except ImportError:
print("You do not have historydict installed, your metadata "
"will not be persistent or have any history of the "
"values.")
return dict()
else:
for path in SEARCH_PATH:
if os.path.isfile(path):
print("Loading metadata history from %s" % path)
return historydict.HistoryDict(path)
# No existing file was found. Try creating one.
path = SEARCH_PATH[0]
try:
os.makedirs(os.path.dirname(path), exist_ok=True)
print("Storing metadata history in a new file at %s." % path)
return historydict.HistoryDict(path)
except IOError as exc:
print(exc)
print("Failed to create metadata history file at %s" % path)
print("Storing HistoryDict in memory; it will not persist "
"when session is ended.")
return historydict.HistoryDict(':memory:')
_QT_KICKER_INSTALLED = {}
_NB_KICKER_INSTALLED = {}
def install_kicker(loop=None, update_rate=0.03):
"""
Install a periodic callback to integrate drawing and asyncio event loops.
This dispatches to :func:`install_qt_kicker` or :func:`install_nb_kicker`
depending on the current matplotlib backend.
Parameters
----------
loop : event loop, optional
update_rate : number
Seconds between periodic updates. Default is 0.03.
"""
import matplotlib
backend = matplotlib.get_backend()
if backend == 'nbAgg':
install_nb_kicker(loop=loop, update_rate=update_rate)
elif backend in ('Qt4Agg', 'Qt5Agg'):
install_qt_kicker(loop=loop, update_rate=update_rate)
else:
raise NotImplementedError("The matplotlib backend {} is not yet "
"supported.".format(backend))
def install_qt_kicker(loop=None, update_rate=0.03):
"""Install a periodic callback to integrate Qt and asyncio event loops.
DEPRECATED: This functionality is now handled automatically by default and
is configurable via the RunEngine's new ``during_task`` parameter. Calling
this function now has no effect. It will be removed in a future release of
bluesky.
Parameters
----------
loop : event loop, optional
update_rate : number
Seconds between periodic updates. Default is 0.03.
"""
warnings.warn("bluesky.utils.install_qt_kicker is no longer necessary and "
"has no effect. Please remove your use of it. It may be "
"removed in a future release of bluesky.")
def install_nb_kicker(loop=None, update_rate=0.03):
"""
Install a periodic callback to integrate ipykernel and asyncio event loops.
It is safe to call this function multiple times.
Parameters
----------
loop : event loop, optional
update_rate : number
Seconds between periodic updates. Default is 0.03.
"""
import matplotlib
if loop is None:
loop = asyncio.get_event_loop()
global _NB_KICKER_INSTALLED
if loop in _NB_KICKER_INSTALLED:
return
def _nbagg_kicker():
        # This is a more brute-force variant of the _qt_kicker function used
# inside install_qt_kicker.
for f_mgr in matplotlib._pylab_helpers.Gcf.get_all_fig_managers():
if f_mgr.canvas.figure.stale:
f_mgr.canvas.draw()
loop.call_later(update_rate, _nbagg_kicker)
_NB_KICKER_INSTALLED[loop] = loop.call_soon(_nbagg_kicker)
def apply_sub_factories(factories, plan):
'''Run sub factory functions for a plan
Factory functions should return lists, which will be added onto the
subscription key (e.g., 'all' or 'start') specified in the factory
definition.
If the factory function returns None, the list will not be modified.
'''
factories = normalize_subs_input(factories)
out = {k: list(itertools.filterfalse(lambda x: x is None,
(sf(plan) for sf in v)))
for k, v in factories.items()}
return out
def update_sub_lists(out, inp):
"""Extends dictionary `out` lists with those in `inp`
Assumes dictionaries where all values are lists
"""
for k, v in inp.items():
try:
out[k].extend(v)
except KeyError:
out[k] = list(v)
def register_transform(RE, *, prefix='<', ip=None):
'''Register RunEngine IPython magic convenience transform
Assuming the default parameters
This maps `< stuff(*args, **kwargs)` -> `RE(stuff(*args, **kwargs))`
RE is assumed to be available in the global namespace
Parameters
----------
RE : str
The name of a valid RunEngine instance in the global IPython namespace
prefix : str, optional
The prefix to trigger this transform on. If this collides with
valid python syntax or an existing transform you are on your own.
ip : IPython shell, optional
If not passed, uses `IPython.get_ipython()` to get the current shell
'''
import IPython
if ip is None:
ip = IPython.get_ipython()
    if int(IPython.__version__.split('.')[0]) >= 7:
def tr_re(lines):
if len(lines) != 1:
return lines
line, = lines
head, split, tail = line.partition(prefix)
if split == prefix and head.strip() == '':
line = f'{RE}({tail.strip()})\n'
return [line]
ip.input_transformers_post.append(tr_re)
else:
from IPython.core.inputtransformer import StatelessInputTransformer
@StatelessInputTransformer.wrap
def tr_re(line):
if line.startswith(prefix):
line = line[len(prefix):].strip()
return '{}({})'.format(RE, line)
return line
ip.input_splitter.logical_line_transforms.append(tr_re())
ip.input_transformer_manager.logical_line_transforms.append(tr_re())
class AsyncInput:
"""a input prompt that allows event loop to run in the background
adapted from http://stackoverflow.com/a/35514777/1221924
"""
def __init__(self, loop=None):
self.loop = loop or asyncio.get_event_loop()
self.q = asyncio.Queue(loop=self.loop)
self.loop.add_reader(sys.stdin, self.got_input)
def got_input(self):
asyncio.ensure_future(self.q.put(sys.stdin.readline()), loop=self.loop)
async def __call__(self, prompt, end='\n', flush=False):
print(prompt, end=end, flush=flush)
return (await self.q.get()).rstrip('\n')
def new_uid():
return str(uuid.uuid4())
def sanitize_np(val):
"Convert any numpy objects into built-in Python types."
if isinstance(val, (np.generic, np.ndarray)):
if np.isscalar(val):
return val.item()
return val.tolist()
return val
def expiring_function(func, loop, *args, **kwargs):
"""
If timeout has not occurred, call func(*args, **kwargs).
    This is meant to be used with the event loop's run_in_executor
method. Outside that context, it doesn't make any sense.
"""
def dummy(start_time, timeout):
if loop.time() > start_time + timeout:
return
func(*args, **kwargs)
return
return dummy
def short_uid(label=None, truncate=6):
"Return a readable but unique id like 'label-fjfi5a'"
if label:
return '-'.join([label, new_uid()[:truncate]])
else:
return new_uid()[:truncate]
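# Illustrative sketch (not part of the original module): the suffix comes from
# a fresh uuid4, so the exact value below is only an example.
# >>> short_uid('scan')
# 'scan-a1b2c3'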
def ensure_uid(doc_or_uid):
"""
Accept a uid or a dict with a 'uid' key. Return the uid.
"""
try:
return doc_or_uid['uid']
except TypeError:
return doc_or_uid
def ts_msg_hook(msg, file=sys.stdout):
t = '{:%H:%M:%S.%f}'.format(datetime.datetime.now())
msg_fmt = "{: <17s} -> {!s: <15s} args: {}, kwargs: {}, run: {}".format(
msg.command,
msg.obj.name if hasattr(msg.obj, 'name') else msg.obj,
msg.args,
msg.kwargs,
"'{}'".format(msg.run) if isinstance(msg.run, str) else msg.run)
print('{} {}'.format(t, msg_fmt), file=file)
def make_decorator(wrapper):
"""
Turn a generator instance wrapper into a generator function decorator.
The functions named <something>_wrapper accept a generator instance and
return a mutated generator instance.
Example of a 'wrapper':
>>> plan = count([det]) # returns a generator instance
>>> revised_plan = some_wrapper(plan) # returns a new instance
Example of a decorator:
>>> some_decorator = make_decorator(some_wrapper) # returns decorator
>>> customized_count = some_decorator(count) # returns generator func
>>> plan = customized_count([det]) # returns a generator instance
This turns a 'wrapper' into a decorator, which accepts a generator
function and returns a generator function.
"""
@wraps(wrapper)
def dec_outer(*args, **kwargs):
def dec(gen_func):
@wraps(gen_func)
def dec_inner(*inner_args, **inner_kwargs):
plan = gen_func(*inner_args, **inner_kwargs)
plan = wrapper(plan, *args, **kwargs)
return (yield from plan)
return dec_inner
return dec
return dec_outer
def apply_to_dict_recursively(d, f):
"""Recursively apply function to a document
This modifies the dict in place and returns it.
Parameters
----------
d: dict
e.g. event_model Document
f: function
any func to be performed on d recursively
"""
for key, val in d.items():
if hasattr(val, 'items'):
d[key] = apply_to_dict_recursively(d=val, f=f)
d[key] = f(val)
return d
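# Illustrative sketch (not part of the original module): combined with
# sanitize_np (defined above), this strips numpy scalars from a nested,
# event-model-style document in place.
# >>> doc = {'data': {'det': np.float64(1.5)}}
# >>> apply_to_dict_recursively(doc, sanitize_np)
# {'data': {'det': 1.5}}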
class ProgressBar:
def __init__(self, status_objs, delay_draw=0.2):
"""
        Represent status objects with progress bars.
Parameters
----------
status_objs : list
Status objects
delay_draw : float, optional
To avoid flashing progress bars that will complete quickly after
they are displayed, delay drawing until the progress bar has been
            around for a while. Default is 0.2 seconds.
"""
self.meters = []
self.status_objs = []
# Determine terminal width.
self.ncols = _screen_shape_wrapper()(sys.stdout)[0] or 79
self.fp = sys.stdout
self.creation_time = time.time()
self.delay_draw = delay_draw
self.drawn = False
self.done = False
self.lock = threading.RLock()
# If the ProgressBar is not finished before the delay_draw time but
# never again updated after the delay_draw time, we need to draw it
# once.
if delay_draw:
threading.Thread(target=self._ensure_draw, daemon=True).start()
# Create a closure over self.update for each status object that
        # implements the 'watch' method.
for st in status_objs:
with self.lock:
if hasattr(st, 'watch') and not st.done:
pos = len(self.meters)
self.meters.append('')
self.status_objs.append(st)
st.watch(partial(self.update, pos))
def update(self, pos, *,
name=None,
current=None, initial=None, target=None,
unit='units', precision=None,
fraction=None,
time_elapsed=None, time_remaining=None):
if all(x is not None for x in (current, initial, target)):
# Display a proper progress bar.
total = round(_L2norm(target, initial), precision or 3)
# make sure we ignore overshoot to prevent tqdm from exploding.
n = np.clip(round(_L2norm(current, initial), precision or 3), 0, total)
# Compute this only if the status object did not provide it.
if time_elapsed is None:
time_elapsed = time.time() - self.creation_time
# TODO Account for 'fraction', which might in some special cases
# differ from the naive computation above.
# TODO Account for 'time_remaining' which might in some special
            # cases differ from the naive computation performed by
# format_meter.
meter = tqdm.format_meter(n=n, total=total, elapsed=time_elapsed,
unit=unit,
prefix=name,
ncols=self.ncols)
else:
# Simply display completeness.
if name is None:
name = ''
if self.status_objs[pos].done:
meter = name + ' [Complete.]'
else:
meter = name + ' [In progress. No progress bar available.]'
meter += ' ' * (self.ncols - len(meter))
meter = meter[:self.ncols]
self.meters[pos] = meter
self.draw()
def draw(self):
with self.lock:
if (time.time() - self.creation_time) < self.delay_draw:
return
if self.done:
return
for meter in self.meters:
tqdm.status_printer(self.fp)(meter)
self.fp.write('\n')
self.fp.write(_unicode(_term_move_up() * len(self.meters)))
self.drawn = True
def _ensure_draw(self):
# Ensure that the progress bar is drawn at least once after the delay.
time.sleep(self.delay_draw)
with self.lock:
if (not self.done) and (not self.drawn):
self.draw()
def clear(self):
with self.lock:
self.done = True
if self.drawn:
for meter in self.meters:
self.fp.write('\r')
self.fp.write(' ' * self.ncols)
self.fp.write('\r')
self.fp.write('\n')
self.fp.write(_unicode(_term_move_up() * len(self.meters)))
class ProgressBarManager:
def __init__(self, delay_draw=0.2):
self.delay_draw = delay_draw
self.pbar = None
def __call__(self, status_objs_or_none):
if status_objs_or_none is not None:
# Start a new ProgressBar.
if self.pbar is not None:
warnings.warn("Previous ProgressBar never competed.")
self.pbar.clear()
self.pbar = ProgressBar(status_objs_or_none,
delay_draw=self.delay_draw)
else:
# Clean up an old one.
if self.pbar is None:
warnings.warn("There is no Progress bar to clean up.")
else:
self.pbar.clear()
self.pbar = None
def _L2norm(x, y):
"works on (3, 5) and ((0, 3), (4, 0))"
return np.sqrt(np.sum((np.asarray(x) - np.asarray(y))**2))
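# Illustrative check (not part of the original module), matching the docstring
# above: _L2norm(3, 5) == 2.0 and _L2norm((0, 3), (4, 0)) == 5.0.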
def merge_axis(objs):
    '''Merge possibly related axes
    This function will take a list of objects and separate it into
    - list of completely independent objects (most settable things and
      detectors) that do not have coupled motion.
    - list of devices that have children which are coupled (PseudoPositioner
      duck-typed by looking for 'RealPosition' as an attribute)
    Both of these lists will only contain objects directly passed in
    via objs
    - map between parents and objects passed in. Each value
      of the map is a map between the strings
      {'real', 'pseudo', 'unrelated'} and a list of objects. All
      of the objects in the (doubly nested) map are in the input.
Parameters
----------
objs : Iterable[OphydObj]
The input devices
Returns
-------
independent_objs : List[OphydObj]
        Independent 'simple' axes
complex_objs : List[PseudoPositioner]
Independent objects which have interdependent children
coupled : Dict[PseudoPositioner, Dict[str, List[OphydObj]]]
        Mapping of interdependent axes passed in.
'''
def get_parent(o):
return getattr(o, 'parent')
independent_objs = set()
maybe_coupled = set()
complex_objs = set()
for o in objs:
parent = o.parent
if hasattr(o, 'RealPosition'):
complex_objs.add(o)
elif (parent is not None and hasattr(parent, 'RealPosition')):
maybe_coupled.add(o)
else:
independent_objs.add(o)
coupled = {}
for parent, children in groupby(get_parent, maybe_coupled).items():
real_p = set(parent.real_positioners)
pseudo_p = set(parent.pseudo_positioners)
type_map = {'real': [], 'pseudo': [], 'unrelated': []}
for c in children:
if c in real_p:
type_map['real'].append(c)
elif c in pseudo_p:
type_map['pseudo'].append(c)
else:
type_map['unrelated'].append(c)
coupled[parent] = type_map
return (independent_objs, complex_objs, coupled)
def merge_cycler(cyc):
"""Specify movements of sets of interdependent axes atomically.
    Inspect the keys of ``cyc`` (which are Devices) to identify those
which are interdependent (part of the same
PseudoPositioner) and merge those independent entries into
a single entry.
This also validates that the user has not passed conflicting
    interdependent axes (such as a real and a pseudo axis from the same
    PseudoPositioner).
Parameters
----------
cyc : Cycler[OphydObj, Sequence]
A cycler as would be passed to :func:`scan_nd`
Returns
-------
Cycler[OphydObj, Sequence]
A cycler as would be passed to :func:`scan_nd` with the same
or fewer keys than the input.
"""
def my_name(obj):
"""Get the attribute name of this device on its parent Device
"""
parent = obj.parent
return next(iter([nm for nm in parent.component_names
if getattr(parent, nm) is obj]))
io, co, gb = merge_axis(cyc.keys)
# only simple non-coupled objects, declare victory and bail!
if len(co) == len(gb) == 0:
return cyc
input_data = cyc.by_key()
output_data = [cycler(i, input_data[i]) for i in io | co]
for parent, type_map in gb.items():
if parent in co and (type_map['pseudo'] or type_map['real']):
raise ValueError("A PseudoPostiioner and its children were both "
"passed in. We do not yet know how to merge "
"these inputs, failing.")
if type_map['real'] and type_map['pseudo']:
raise ValueError("Passed in a mix of real and pseudo axis. "
"Can not cope, failing")
pseudo_axes = type_map['pseudo']
if len(pseudo_axes) > 1:
p_cyc = reduce(operator.add,
(cycler(my_name(c), input_data[c])
for c in type_map['pseudo']))
output_data.append(cycler(parent, list(p_cyc)))
elif len(pseudo_axes) == 1:
c, = pseudo_axes
output_data.append(cycler(c, input_data[c]))
for c in type_map['real'] + type_map['unrelated']:
output_data.append(cycler(c, input_data[c]))
return reduce(operator.add, output_data)
_qapp = None
class DuringTask:
"""This class waits on the event (which fully blocks the thread)."""
def __init__(self):
pass
def block(self, blocking_event):
"""
        Wait for the plan to finish.
Parameters
----------
blocking_event : threading.Event
"""
blocking_event.wait()
class DefaultDuringTask(DuringTask):
"""This class run the Qt main loop while waiting for the plan to finish.
The default setting for the RunEngine's during_task parameter.
This makes it possible for plots that use Matplotlib's Qt backend to update
live during data acquisition.
It solves the problem that Qt must be run from the main thread.
If Matplotlib and a known Qt binding are already imported, run
Matplotlib qApp until the task completes. If not, there is no need to
handle qApp: just wait on the task.
"""
def __init__(self):
"""
Initialize backend.
        Currently only the Qt backend is supported. The function
        initializes the 'teleporter' if the Qt backend is used.
"""
if 'matplotlib' in sys.modules:
import matplotlib
backend = matplotlib.get_backend().lower()
if 'qt' in backend:
from .callbacks.mpl_plotting import initialize_qt_teleporter
initialize_qt_teleporter()
def block(self, blocking_event):
# docstring inherited
global _qapp
if 'matplotlib' not in sys.modules:
# We are not using matplotlib + Qt. Just wait on the Event.
blocking_event.wait()
# Figure out if we are using matplotlib with which backend
# without importing anything that is not already imported.
else:
import matplotlib
backend = matplotlib.get_backend().lower()
# if with a Qt backend, do the scary thing
if 'qt' in backend:
from matplotlib.backends.qt_compat import QtCore, QtWidgets
app = QtWidgets.QApplication.instance()
if app is None:
_qapp = app = QtWidgets.QApplication([b'bluesky'])
assert app is not None
event_loop = QtCore.QEventLoop()
def start_killer_thread():
def exit_loop():
blocking_event.wait()
# If the above wait ends quickly, we need to avoid the race
# condition where this thread might try to exit the qApp
# before it even starts. Therefore, we use QTimer, below,
# which will not start running until the qApp event loop is
# running.
event_loop.exit()
threading.Thread(target=exit_loop).start()
# https://www.riverbankcomputing.com/pipermail/pyqt/2015-March/035674.html
# adapted from code at
# https://bitbucket.org/tortoisehg/thg/commits/550e1df5fbad
if os.name == 'posix' and hasattr(signal, 'set_wakeup_fd'):
# Wake up Python interpreter via pipe so that SIGINT
# can be handled immediately.
# (http://qt-project.org/doc/qt-4.8/unix-signals.html)
# Updated docs:
# https://doc.qt.io/qt-5/unix-signals.html
import fcntl
rfd, wfd = os.pipe()
for fd in (rfd, wfd):
flags = fcntl.fcntl(fd, fcntl.F_GETFL)
fcntl.fcntl(fd, fcntl.F_SETFL, flags | os.O_NONBLOCK)
wakeupsn = QtCore.QSocketNotifier(rfd,
QtCore.QSocketNotifier.Read)
origwakeupfd = signal.set_wakeup_fd(wfd)
def cleanup():
wakeupsn.setEnabled(False)
rfd = wakeupsn.socket()
wfd = signal.set_wakeup_fd(origwakeupfd)
os.close(int(rfd))
os.close(wfd)
def handleWakeup(inp):
# here Python signal handler will be invoked
# this book-keeping is to drain the pipe
wakeupsn.setEnabled(False)
rfd = wakeupsn.socket()
try:
os.read(int(rfd), 4096)
except OSError as inst:
print('failed to read wakeup fd: %s\n' % inst)
wakeupsn.setEnabled(True)
wakeupsn.activated.connect(handleWakeup)
else:
# On Windows, non-blocking anonymous pipe or socket is
# not available.
def null():
...
# we need to 'kick' the python interpreter so it sees
# system signals
# https://stackoverflow.com/a/4939113/380231
kick_timer = QtCore.QTimer()
kick_timer.timeout.connect(null)
kick_timer.start(50)
cleanup = kick_timer.stop
                # we also need to make sure that the qApp never sees
                # exceptions raised by python inside of a c++ callback (it
                # would segfault because, due to the way the code is called,
                # there is no clear way to propagate the exception back to
                # the python code).
vals = (None, None, None)
old_sys_handler = sys.excepthook
def my_exception_hook(exctype, value, traceback):
nonlocal vals
vals = (exctype, value, traceback)
event_loop.exit()
old_sys_handler(exctype, value, traceback)
                # this kills the Qt event loop when the plan is finished
killer_timer = QtCore.QTimer()
killer_timer.setSingleShot(True)
killer_timer.timeout.connect(start_killer_thread)
killer_timer.start(0)
try:
sys.excepthook = my_exception_hook
event_loop.exec_()
# make sure any pending signals are processed
event_loop.processEvents()
if vals[1] is not None:
raise vals[1]
finally:
try:
cleanup()
finally:
sys.excepthook = old_sys_handler
elif 'ipympl' in backend or 'nbagg' in backend:
Gcf = matplotlib._pylab_helpers.Gcf
while True:
done = blocking_event.wait(.1)
for f_mgr in Gcf.get_all_fig_managers():
if f_mgr.canvas.figure.stale:
f_mgr.canvas.draw()
if done:
return
else:
# We are not using matplotlib + Qt. Just wait on the Event.
blocking_event.wait()
def _rearrange_into_parallel_dicts(readings):
data = {}
timestamps = {}
for key, payload in readings.items():
data[key] = payload['value']
timestamps[key] = payload['timestamp']
return data, timestamps
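# Illustrative sketch (not part of the original module): split ophyd-style
# readings into parallel data / timestamp dicts.
# >>> _rearrange_into_parallel_dicts(
# ...     {'det': {'value': 1.2, 'timestamp': 1.6e9}})
# ({'det': 1.2}, {'det': 1600000000.0})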
def is_movable(obj):
"""Check if object satisfies bluesky 'movable' interface.
Parameters
----------
obj : Object
Object to test.
Returns
-------
boolean
True if movable, False otherwise.
"""
EXPECTED_ATTRS = (
'name',
'parent',
'read',
'describe',
'read_configuration',
'describe_configuration',
'set',
)
return all(hasattr(obj, attr) for attr in EXPECTED_ATTRS)
class Movable(metaclass=abc.ABCMeta):
"""
Abstract base class for objects that satisfy the bluesky 'movable' interface.
Examples
--------
.. code-block:: python
m = hw.motor
# We need to detect if 'm' is a motor
if isinstance(m, Movable):
print(f"The object {m.name} is a motor")
"""
@classmethod
def __subclasshook__(cls, C):
# If the following condition is True, the object C is recognized
# to have Movable interface (e.g. a motor)
msg = """The Movable abstract base class is deprecated and will be removed in a future
version of bluesky. Please use bluesky.utils.is_movable(obj) to test if an object
satisfies the movable interface."""
warnings.warn(msg, DeprecationWarning)
EXPECTED_ATTRS = (
'name',
'parent',
'read',
'describe',
'read_configuration',
'describe_configuration',
'set',
'stop',
)
return all(hasattr(C, attr) for attr in EXPECTED_ATTRS)
|
test_memory.py
|
import ctypes
import gc
import pickle
import threading
import unittest
import fastrlock
import pytest
import cupy.cuda
from cupy.cuda import device
from cupy.cuda import memory
from cupy.cuda import stream as stream_module
from cupy import testing
class MockMemory(memory.Memory):
cur_ptr = 1
def __init__(self, size):
self.ptr = MockMemory.cur_ptr
MockMemory.cur_ptr += size
self.size = size
self.device_id = 0
    def __del__(self):
        self.ptr = 0
def mock_alloc(size):
mem = MockMemory(size)
return memory.MemoryPointer(mem, 0)
class TestUnownedMemoryClass(unittest.TestCase):
def test_inherits_base_memory(self):
assert issubclass(memory.UnownedMemory, memory.BaseMemory)
@testing.parameterize(*testing.product({
'allocator': [memory._malloc, memory.malloc_managed, memory.malloc_async],
'specify_device_id': [True, False],
}))
@testing.gpu
class TestUnownedMemory(unittest.TestCase):
def check(self, device_id):
if cupy.cuda.runtime.is_hip:
if self.allocator is memory.malloc_managed:
raise unittest.SkipTest('HIP does not support managed memory')
if self.allocator is memory.malloc_async:
raise unittest.SkipTest('HIP does not support async mempool')
        elif self.allocator is memory.malloc_async:
            if cupy.cuda.driver.get_build_version() < 11020:
                raise unittest.SkipTest('malloc_async is supported since '
                                        'CUDA 11.2')
size = 24
shape = (2, 3)
dtype = cupy.float32
with device.Device(device_id):
src_mem_ptr = self.allocator(size)
src_ptr = src_mem_ptr.ptr
args = (src_ptr, size, src_mem_ptr)
kwargs = {}
if self.specify_device_id:
kwargs = {'device_id': device_id}
unowned_mem = memory.UnownedMemory(*args, **kwargs)
assert unowned_mem.size == size
assert unowned_mem.ptr == src_ptr
assert unowned_mem.device_id == device_id
arr = cupy.ndarray(shape, dtype, memory.MemoryPointer(unowned_mem, 0))
# Delete the source object
del src_mem_ptr
with device.Device(device_id):
arr[:] = 2
assert (arr == 2).all()
def test_device0(self):
self.check(0)
@testing.multi_gpu(2)
def test_device1(self):
self.check(1)
@testing.gpu
class TestMemoryPointer(unittest.TestCase):
def test_int(self):
pval = MockMemory.cur_ptr
memptr = mock_alloc(1)
assert pval == int(memptr)
def test_add(self):
pval = MockMemory.cur_ptr
memptr = mock_alloc(8)
memptr2 = memptr + 4
assert isinstance(memptr2, memory.MemoryPointer)
assert pval + 4 == int(memptr2)
memptr3 = 4 + memptr
assert isinstance(memptr3, memory.MemoryPointer)
assert pval + 4 == int(memptr3)
memptr += 4
assert isinstance(memptr, memory.MemoryPointer)
assert pval + 4 == int(memptr)
def test_sub(self):
pval = MockMemory.cur_ptr
memptr = mock_alloc(8) + 4
memptr2 = memptr - 4
assert isinstance(memptr2, memory.MemoryPointer)
assert pval == int(memptr2)
memptr -= 4
assert isinstance(memptr, memory.MemoryPointer)
assert pval == int(memptr)
def test_copy_to_and_from_host(self):
a_gpu = memory.alloc(4)
a_cpu = ctypes.c_int(100)
a_gpu.copy_from(ctypes.cast(ctypes.byref(a_cpu), ctypes.c_void_p), 4)
b_cpu = ctypes.c_int()
a_gpu.copy_to_host(
ctypes.cast(ctypes.byref(b_cpu), ctypes.c_void_p), 4)
assert b_cpu.value == a_cpu.value
def test_copy_from_device(self):
a_gpu = memory.alloc(4)
a_cpu = ctypes.c_int(100)
a_gpu.copy_from(ctypes.cast(ctypes.byref(a_cpu), ctypes.c_void_p), 4)
b_gpu = memory.alloc(4)
b_gpu.copy_from(a_gpu, 4)
b_cpu = ctypes.c_int()
b_gpu.copy_to_host(
ctypes.cast(ctypes.byref(b_cpu), ctypes.c_void_p), 4)
assert b_cpu.value == a_cpu.value
def test_copy_to_and_from_host_using_raw_ptr(self):
a_gpu = memory.alloc(4)
a_cpu = ctypes.c_int(100)
a_cpu_ptr = ctypes.cast(ctypes.byref(a_cpu), ctypes.c_void_p)
a_gpu.copy_from(a_cpu_ptr.value, 4)
b_cpu = ctypes.c_int()
b_cpu_ptr = ctypes.cast(ctypes.byref(b_cpu), ctypes.c_void_p)
a_gpu.copy_to_host(b_cpu_ptr.value, 4)
assert b_cpu.value == a_cpu.value
def test_copy_from_device_using_raw_ptr(self):
a_gpu = memory.alloc(4)
a_cpu = ctypes.c_int(100)
a_cpu_ptr = ctypes.cast(ctypes.byref(a_cpu), ctypes.c_void_p)
a_gpu.copy_from(a_cpu_ptr.value, 4)
b_gpu = memory.alloc(4)
b_gpu.copy_from(a_gpu, 4)
b_cpu = ctypes.c_int()
b_cpu_ptr = ctypes.cast(ctypes.byref(b_cpu), ctypes.c_void_p)
b_gpu.copy_to_host(b_cpu_ptr.value, 4)
assert b_cpu.value == a_cpu.value
def test_memset(self):
a_gpu = memory.alloc(4)
a_gpu.memset(1, 4)
a_cpu = ctypes.c_ubyte()
for i in range(4):
a_gpu.copy_to_host(
ctypes.cast(ctypes.byref(a_cpu), ctypes.c_void_p), 1)
assert a_cpu.value == 1
a_gpu += 1
# -----------------------------------------------------------------------------
# Memory pool
@testing.gpu
class TestSingleDeviceMemoryPool(unittest.TestCase):
def setUp(self):
self.pool = memory.SingleDeviceMemoryPool(allocator=mock_alloc)
self.unit = memory._allocation_unit_size
self.stream = stream_module.Stream()
self.stream_ptr = self.stream.ptr
def test_round_size(self):
assert memory._round_size(self.unit - 1) == self.unit
assert memory._round_size(self.unit) == self.unit
assert memory._round_size(self.unit + 1) == self.unit * 2
def test_bin_index_from_size(self):
assert memory._bin_index_from_size(self.unit - 1) == 0
assert memory._bin_index_from_size(self.unit) == 0
assert memory._bin_index_from_size(self.unit + 1) == 1
def test_split(self):
mem = MockMemory(self.unit * 4)
chunk = memory._Chunk(mem, 0, mem.size, self.stream_ptr)
tail = chunk.split(self.unit * 2)
assert chunk.ptr() == mem.ptr
assert chunk.offset == 0
assert chunk.size == self.unit * 2
assert chunk.prev is None
assert chunk.next.ptr() == tail.ptr()
assert chunk.stream_ptr == self.stream_ptr
assert tail.ptr() == mem.ptr + self.unit * 2
assert tail.offset == self.unit * 2
assert tail.size == self.unit * 2
assert tail.prev.ptr() == chunk.ptr()
assert tail.next is None
assert tail.stream_ptr == self.stream_ptr
tail_of_head = chunk.split(self.unit)
assert chunk.ptr() == mem.ptr
assert chunk.offset == 0
assert chunk.size == self.unit
assert chunk.prev is None
assert chunk.next.ptr() == tail_of_head.ptr()
assert chunk.stream_ptr == self.stream_ptr
assert tail_of_head.ptr() == mem.ptr + self.unit
assert tail_of_head.offset == self.unit
assert tail_of_head.size == self.unit
assert tail_of_head.prev.ptr() == chunk.ptr()
assert tail_of_head.next.ptr() == tail.ptr()
assert tail_of_head.stream_ptr == self.stream_ptr
tail_of_tail = tail.split(self.unit)
assert tail.ptr() == chunk.ptr() + self.unit * 2
assert tail.offset == self.unit * 2
assert tail.size == self.unit
assert tail.prev.ptr() == tail_of_head.ptr()
assert tail.next.ptr() == tail_of_tail.ptr()
assert tail.stream_ptr == self.stream_ptr
assert tail_of_tail.ptr() == mem.ptr + self.unit * 3
assert tail_of_tail.offset == self.unit * 3
assert tail_of_tail.size == self.unit
assert tail_of_tail.prev.ptr() == tail.ptr()
assert tail_of_tail.next is None
assert tail_of_tail.stream_ptr == self.stream_ptr
def test_merge(self):
mem = MockMemory(self.unit * 4)
chunk = memory._Chunk(mem, 0, mem.size, self.stream_ptr)
chunk_ptr = chunk.ptr()
chunk_offset = chunk.offset
chunk_size = chunk.size
tail = chunk.split(self.unit * 2)
head = chunk
head_ptr = head.ptr()
head_offset = head.offset
head_size = head.size
tail_ptr = tail.ptr()
tail_offset = tail.offset
tail_size = tail.size
tail_of_head = head.split(self.unit)
tail_of_tail = tail.split(self.unit)
head.merge(tail_of_head)
assert head.ptr() == head_ptr
assert head.offset == head_offset
assert head.size == head_size
assert head.prev is None
assert head.next.ptr() == tail_ptr
assert head.stream_ptr == self.stream_ptr
tail.merge(tail_of_tail)
assert tail.ptr() == tail_ptr
assert tail.offset == tail_offset
assert tail.size == tail_size
assert tail.prev.ptr() == head_ptr
assert tail.next is None
assert tail.stream_ptr == self.stream_ptr
head.merge(tail)
assert head.ptr() == chunk_ptr
assert head.offset == chunk_offset
assert head.size == chunk_size
assert head.prev is None
assert head.next is None
assert head.stream_ptr == self.stream_ptr
def test_alloc(self):
p1 = self.pool.malloc(self.unit * 4)
p2 = self.pool.malloc(self.unit * 4)
p3 = self.pool.malloc(self.unit * 8)
assert p1.ptr != p2.ptr
assert p1.ptr != p3.ptr
assert p2.ptr != p3.ptr
def test_alloc_split(self):
p = self.pool.malloc(self.unit * 4)
ptr = p.ptr
del p
head = self.pool.malloc(self.unit * 2)
tail = self.pool.malloc(self.unit * 2)
assert ptr == head.ptr
assert ptr + self.unit * 2 == tail.ptr
def test_alloc_limit(self):
self.pool.set_limit(size=(self.unit * 6))
p1 = self.pool.malloc(self.unit * 5)
p2 = self.pool.malloc(self.unit * 1)
with self.assertRaises(memory.OutOfMemoryError):
self.pool.malloc(self.unit)
self.pool.set_limit(size=(self.unit * 7))
p3 = self.pool.malloc(self.unit)
del p1, p2, p3
def test_free(self):
p1 = self.pool.malloc(self.unit * 4)
ptr1 = p1.ptr
del p1
p2 = self.pool.malloc(self.unit * 4)
assert ptr1 == p2.ptr
def test_free_stream(self):
p1 = self.pool.malloc(self.unit * 4)
ptr1 = p1.ptr
del p1
with self.stream:
p2 = self.pool.malloc(self.unit * 4)
assert ptr1 != p2.ptr
def test_free_merge(self):
p = self.pool.malloc(self.unit * 4)
ptr = p.ptr
del p
# merge head into tail
head = self.pool.malloc(self.unit * 2)
tail = self.pool.malloc(self.unit * 2)
assert ptr == head.ptr
del tail
del head
p = self.pool.malloc(self.unit * 4)
assert ptr == p.ptr
del p
# merge tail into head
head = self.pool.malloc(self.unit * 2)
tail = self.pool.malloc(self.unit * 2)
assert ptr == head.ptr
del head
del tail
p = self.pool.malloc(self.unit * 4)
assert ptr == p.ptr
del p
def test_free_different_size(self):
p1 = self.pool.malloc(self.unit * 4)
ptr1 = p1.ptr
del p1
p2 = self.pool.malloc(self.unit * 8)
assert ptr1 != p2.ptr
def test_free_all_blocks(self):
p1 = self.pool.malloc(self.unit * 4)
ptr1 = p1.ptr
del p1
self.pool.free_all_blocks()
p2 = self.pool.malloc(self.unit * 4)
assert ptr1 != p2.ptr
del p2
def test_free_all_blocks_split(self):
        # do not free split blocks
p = self.pool.malloc(self.unit * 4)
del p
head = self.pool.malloc(self.unit * 2)
tail = self.pool.malloc(self.unit * 2)
tailptr = tail.ptr
del tail
self.pool.free_all_blocks()
p = self.pool.malloc(self.unit * 2)
assert tailptr == p.ptr
del head
def test_free_all_blocks_stream(self):
p1 = self.pool.malloc(self.unit * 4)
ptr1 = p1.ptr
del p1
with self.stream:
p2 = self.pool.malloc(self.unit * 4)
ptr2 = p2.ptr
del p2
self.pool.free_all_blocks(stream=stream_module.Stream.null)
p3 = self.pool.malloc(self.unit * 4)
assert ptr1 != p3.ptr
assert ptr2 != p3.ptr
with self.stream:
p4 = self.pool.malloc(self.unit * 4)
assert ptr1 != p4.ptr
assert ptr2 == p4.ptr
def test_free_all_blocks_all_streams(self):
p1 = self.pool.malloc(self.unit * 4)
ptr1 = p1.ptr
del p1
with self.stream:
p2 = self.pool.malloc(self.unit * 4)
ptr2 = p2.ptr
del p2
self.pool.free_all_blocks()
p3 = self.pool.malloc(self.unit * 4)
assert ptr1 != p3.ptr
assert ptr2 != p3.ptr
with self.stream:
p4 = self.pool.malloc(self.unit * 4)
assert ptr1 != p4.ptr
assert ptr2 != p4.ptr
def test_free_all_free(self):
p1 = self.pool.malloc(self.unit * 4)
ptr1 = p1.ptr
del p1
with testing.assert_warns(DeprecationWarning):
self.pool.free_all_free()
p2 = self.pool.malloc(self.unit * 4)
assert ptr1 != p2.ptr
def test_used_bytes(self):
p1 = self.pool.malloc(self.unit * 2)
assert self.unit * 2 == self.pool.used_bytes()
p2 = self.pool.malloc(self.unit * 4)
assert self.unit * 6 == self.pool.used_bytes()
del p2
assert self.unit * 2 == self.pool.used_bytes()
del p1
assert self.unit * 0 == self.pool.used_bytes()
p3 = self.pool.malloc(self.unit * 1)
assert self.unit * 1 == self.pool.used_bytes()
del p3
def test_used_bytes_stream(self):
p1 = self.pool.malloc(self.unit * 4)
del p1
with self.stream:
p2 = self.pool.malloc(self.unit * 2)
assert self.unit * 2 == self.pool.used_bytes()
del p2
def test_free_bytes(self):
p1 = self.pool.malloc(self.unit * 2)
assert self.unit * 0 == self.pool.free_bytes()
p2 = self.pool.malloc(self.unit * 4)
assert self.unit * 0 == self.pool.free_bytes()
del p2
assert self.unit * 4 == self.pool.free_bytes()
del p1
assert self.unit * 6 == self.pool.free_bytes()
p3 = self.pool.malloc(self.unit * 1)
assert self.unit * 5 == self.pool.free_bytes()
del p3
def test_free_bytes_stream(self):
p1 = self.pool.malloc(self.unit * 4)
del p1
with self.stream:
p2 = self.pool.malloc(self.unit * 2)
assert self.unit * 4 == self.pool.free_bytes()
del p2
def test_total_bytes(self):
p1 = self.pool.malloc(self.unit * 2)
assert self.unit * 2 == self.pool.total_bytes()
p2 = self.pool.malloc(self.unit * 4)
assert self.unit * 6 == self.pool.total_bytes()
del p1
assert self.unit * 6 == self.pool.total_bytes()
del p2
assert self.unit * 6 == self.pool.total_bytes()
p3 = self.pool.malloc(self.unit * 1)
assert self.unit * 6 == self.pool.total_bytes()
assert (self.pool.used_bytes() + self.pool.free_bytes()
== self.pool.total_bytes())
del p3
self.pool.free_all_blocks()
assert 0 == self.pool.total_bytes()
def test_total_bytes_stream(self):
p1 = self.pool.malloc(self.unit * 4)
del p1
with self.stream:
p2 = self.pool.malloc(self.unit * 2)
assert self.unit * 6 == self.pool.total_bytes()
del p2
def test_get_limit(self):
# limit is disabled by default
assert 0 == self.pool.get_limit()
def test_set_limit_size(self):
self.pool.set_limit(size=1024)
assert 1024 == self.pool.get_limit()
self.pool.set_limit(size=2**33)
assert 2**33 == self.pool.get_limit()
self.pool.set_limit(size=0)
assert 0 == self.pool.get_limit()
with self.assertRaises(ValueError):
self.pool.set_limit(size=-1)
def test_set_limit_fraction(self):
_, total = cupy.cuda.runtime.memGetInfo()
self.pool.set_limit(fraction=0)
assert 0 == self.pool.get_limit()
self.pool.set_limit(fraction=0.5)
assert total * 0.5 == self.pool.get_limit()
self.pool.set_limit(fraction=1.0)
assert total == self.pool.get_limit()
with self.assertRaises(ValueError):
self.pool.set_limit(fraction=-1)
with self.assertRaises(ValueError):
self.pool.set_limit(fraction=1.1)
def test_parse_limit_string(self):
parse_limit_string = self.pool._parse_limit_string
# size
param = parse_limit_string('0')
assert 0 == param['size']
assert None is param['fraction']
param = parse_limit_string('1073741824')
assert 1073741824 == param['size']
assert None is param['fraction']
# fraction
param = parse_limit_string('0%')
assert None is param['size']
assert 0.0 == param['fraction']
param = parse_limit_string('40%')
assert None is param['size']
assert 0.4 == param['fraction']
param = parse_limit_string('70.5%')
assert None is param['size']
assert 0.705 == param['fraction']
param = parse_limit_string('100%')
assert None is param['size']
assert 1.0 == param['fraction']
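# A hedged usage sketch (not part of the test suite): the two limit forms
# parsed above correspond to the keyword arguments of set_limit(), either an
# absolute byte count or a fraction of total device memory, as exercised by
# test_set_limit_size and test_set_limit_fraction. The helper below is only
# defined, never called here.
def _limit_usage_sketch():
    pool = memory.MemoryPool()
    pool.set_limit(size=1 * 1024 ** 3)  # cap the pool at 1 GiB
    pool.set_limit(fraction=0.5)        # or at half of total device memory
    return pool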
@testing.parameterize(*testing.product({
'allocator': [memory._malloc, memory.malloc_managed],
}))
@testing.gpu
class TestMemoryPool(unittest.TestCase):
def setUp(self):
self.pool = memory.MemoryPool(self.allocator)
if (cupy.cuda.runtime.is_hip
and self.allocator is memory.malloc_managed):
raise unittest.SkipTest('HIP does not support managed memory')
def test_zero_size_alloc(self):
with cupy.cuda.Device(0):
mem = self.pool.malloc(0).mem
assert isinstance(mem, memory.Memory)
assert not isinstance(mem, memory.PooledMemory)
def test_double_free(self):
with cupy.cuda.Device(0):
mem = self.pool.malloc(1).mem
mem.free()
mem.free()
def test_free_all_blocks(self):
with cupy.cuda.Device(0):
mem = self.pool.malloc(1).mem
assert isinstance(mem, memory.BaseMemory)
assert isinstance(mem, memory.PooledMemory)
assert self.pool.n_free_blocks() == 0
mem.free()
assert self.pool.n_free_blocks() == 1
self.pool.free_all_blocks()
assert self.pool.n_free_blocks() == 0
def test_free_all_blocks_without_malloc(self):
with cupy.cuda.Device(0):
# call directly without malloc.
self.pool.free_all_blocks()
assert self.pool.n_free_blocks() == 0
def test_free_all_free(self):
with cupy.cuda.Device(0):
mem = self.pool.malloc(1).mem
assert isinstance(mem, memory.BaseMemory)
assert isinstance(mem, memory.PooledMemory)
assert self.pool.n_free_blocks() == 0
mem.free()
assert self.pool.n_free_blocks() == 1
with testing.assert_warns(DeprecationWarning):
self.pool.free_all_free()
assert self.pool.n_free_blocks() == 0
def test_free_all_free_without_malloc(self):
with cupy.cuda.Device(0):
# call directly without malloc.
with testing.assert_warns(DeprecationWarning):
self.pool.free_all_free()
assert self.pool.n_free_blocks() == 0
def test_n_free_blocks_without_malloc(self):
with cupy.cuda.Device(0):
# call directly without malloc/free_all_free.
assert self.pool.n_free_blocks() == 0
def test_used_bytes(self):
with cupy.cuda.Device(0):
assert 0 == self.pool.used_bytes()
def test_free_bytes(self):
with cupy.cuda.Device(0):
assert 0 == self.pool.free_bytes()
def test_total_bytes(self):
with cupy.cuda.Device(0):
assert 0 == self.pool.total_bytes()
@testing.gpu
class TestAllocator(unittest.TestCase):
def setUp(self):
self.old_pool = cupy.get_default_memory_pool()
self.pool = memory.MemoryPool()
memory.set_allocator(self.pool.malloc)
def tearDown(self):
self.pool.free_all_blocks()
memory.set_allocator(self.old_pool.malloc)
def test_set_allocator(self):
with cupy.cuda.Device(0):
assert 0 == self.pool.used_bytes()
arr = cupy.arange(128, dtype=cupy.int64)
assert 1024 == arr.data.mem.size
assert 1024 == self.pool.used_bytes()
def test_get_allocator(self):
assert memory.get_allocator() == self.pool.malloc
def test_allocator_context_manager(self):
new_pool = memory.MemoryPool()
with cupy.cuda.using_allocator(new_pool.malloc):
assert memory.get_allocator() == new_pool.malloc
assert memory.get_allocator() == self.pool.malloc
def test_set_allocator_cm(self):
new_pool = memory.MemoryPool()
new_pool2 = memory.MemoryPool()
with cupy.cuda.using_allocator(new_pool.malloc):
with self.assertRaises(ValueError):
memory.set_allocator(new_pool2.malloc)
def test_allocator_nested_context_manager(self):
new_pool = memory.MemoryPool()
with cupy.cuda.using_allocator(new_pool.malloc):
new_pool2 = memory.MemoryPool()
assert memory.get_allocator() == new_pool.malloc
with cupy.cuda.using_allocator(new_pool2.malloc):
assert memory.get_allocator() == new_pool2.malloc
assert memory.get_allocator() == new_pool.malloc
assert memory.get_allocator() == self.pool.malloc
    def test_allocator_thread_local(self):
        # A shared barrier keeps the two threads in lockstep; constructing an
        # unreferenced threading.Barrier(2) without waiting on it would not
        # synchronize anything.
        barrier = threading.Barrier(2)
        def thread_body(self):
            new_pool = memory.MemoryPool()
            with cupy.cuda.using_allocator(new_pool.malloc):
                assert memory.get_allocator() == new_pool.malloc
                barrier.wait()
                arr = cupy.zeros(128, dtype=cupy.int64)
                barrier.wait()
                assert arr.data.mem.size == new_pool.used_bytes()
                barrier.wait()
            assert memory.get_allocator() == self.pool.malloc
        with cupy.cuda.Device(0):
            t = threading.Thread(target=thread_body, args=(self,))
            t.daemon = True
            t.start()
            barrier.wait()
            assert memory.get_allocator() == self.pool.malloc
            arr = cupy.ones(256, dtype=cupy.int64)
            barrier.wait()
            assert arr.data.mem.size == self.pool.used_bytes()
            barrier.wait()
            t.join()
def test_thread_local_valid(self):
new_pool = memory.MemoryPool()
arr = None
with cupy.cuda.using_allocator(new_pool.malloc):
arr = cupy.zeros(128, dtype=cupy.int64)
arr += 1
        # Check that arr and the pool have not been released
assert arr.data.mem.size == new_pool.used_bytes()
assert arr.sum() == 128
def test_reuse_between_thread(self):
def job(self):
cupy.arange(16)
self._error = False
# Run in main thread.
self._error = True
job(self)
assert not self._error
# Run in sub thread.
self._error = True
with cupy.cuda.Device(0):
t = threading.Thread(target=job, args=(self,))
t.daemon = True
t.start()
t.join()
assert not self._error
@testing.gpu
class TestAllocatorDisabled(unittest.TestCase):
def setUp(self):
self.pool = cupy.get_default_memory_pool()
def tearDown(self):
memory.set_allocator(self.pool.malloc)
def _check_pool_not_used(self):
used_bytes = self.pool.used_bytes()
with cupy.cuda.Device(0):
arr = cupy.arange(128, dtype=cupy.int64)
assert 0 == self.pool.used_bytes() - used_bytes
del arr
def test(self):
memory.set_allocator()
self._check_pool_not_used()
def test_none(self):
memory.set_allocator(None)
self._check_pool_not_used()
class PythonAllocator(object):
def __init__(self):
self.malloc_called = False
self.free_called = False
def malloc(self, size, device_id):
self.malloc_called = True
return cupy.cuda.runtime.malloc(size)
    def free(self, ptr, device_id):
        # The second callback receives the pointer returned by malloc above.
        self.free_called = True
        cupy.cuda.runtime.free(ptr)
@testing.gpu
class TestPythonFunctionAllocator(unittest.TestCase):
def setUp(self):
self.old_pool = cupy.get_default_memory_pool()
self.alloc = PythonAllocator()
python_alloc = memory.PythonFunctionAllocator(
self.alloc.malloc, self.alloc.free)
memory.set_allocator(python_alloc.malloc)
def tearDown(self):
memory.set_allocator(self.old_pool.malloc)
def test_allocator(self):
assert not self.alloc.malloc_called and not self.alloc.free_called
cupy.zeros(10)
assert self.alloc.malloc_called and self.alloc.free_called
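# A hedged sketch (not part of the tests): PythonFunctionAllocator lets user
# code observe every raw device allocation. The callback signatures mirror
# PythonAllocator above: malloc(size, device_id) returns a pointer and
# free(ptr, device_id) releases it. Defined only, never called here.
def _counting_allocator_sketch():
    counts = {'malloc': 0, 'free': 0}
    def _malloc(size, device_id):
        counts['malloc'] += 1
        return cupy.cuda.runtime.malloc(size)
    def _free(ptr, device_id):
        counts['free'] += 1
        cupy.cuda.runtime.free(ptr)
    return memory.PythonFunctionAllocator(_malloc, _free), counts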
@testing.gpu
class TestMemInfo(unittest.TestCase):
def test_mem_info(self):
d = cupy.cuda.Device()
mem_info = d.mem_info
assert isinstance(mem_info, tuple)
assert len(mem_info) == 2
assert all(isinstance(m, int) for m in mem_info)
assert all(m > 0 for m in mem_info)
@testing.gpu
class TestLockAndNoGc(unittest.TestCase):
def test(self):
lock = fastrlock.rlock.FastRLock()
ctx = memory.LockAndNoGc(lock)
assert gc.isenabled()
self.assertRaises(Exception, lock.release)
with ctx:
assert not gc.isenabled()
lock.release()
lock.acquire()
assert gc.isenabled()
self.assertRaises(Exception, lock.release)
class TestExceptionPicklable(unittest.TestCase):
def test(self):
e1 = memory.OutOfMemoryError(124, 1024, 1024)
e2 = pickle.loads(pickle.dumps(e1))
assert e1.args == e2.args
assert str(e1) == str(e2)
@testing.gpu
@pytest.mark.skipif(cupy.cuda.runtime.is_hip,
reason='HIP does not support async allocator')
@pytest.mark.skipif(cupy.cuda.driver.get_build_version() < 11020,
reason='malloc_async is supported since CUDA 11.2')
class TestMallocAsync(unittest.TestCase):
def setUp(self):
self.old_pool = cupy.get_default_memory_pool()
memory.set_allocator(memory.malloc_async)
def tearDown(self):
memory.set_allocator(self.old_pool.malloc)
def _check_pool_not_used(self):
used_bytes = self.old_pool.used_bytes()
with cupy.cuda.Device(0):
arr = cupy.arange(128, dtype=cupy.int64)
assert 0 == self.old_pool.used_bytes() - used_bytes
del arr
def test(self):
self._check_pool_not_used()
def test_stream1(self):
# Check: pool is not used when on a stream
s = cupy.cuda.Stream()
with s:
self._check_pool_not_used()
def test_stream2(self):
# Check: the memory was allocated on the right stream
s = cupy.cuda.Stream()
with s:
memptr = memory.alloc(100)
assert memptr.mem.stream == s.ptr
def test_stream3(self):
        # Check: destroying the stream does not affect memory deallocation
s = cupy.cuda.Stream()
with s:
memptr = memory.alloc(100)
del s
gc.collect()
del memptr
def test_stream4(self):
# Check: free on the same stream
s = cupy.cuda.Stream()
with s:
memptr = memory.alloc(100)
del memptr
def test_stream5(self):
# Check: free on another stream
s1 = cupy.cuda.Stream()
with s1:
memptr = memory.alloc(100)
del s1
s2 = cupy.cuda.Stream()
with s2:
del memptr
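# Sketch only (assumes CUDA >= 11.2, per the skip conditions above): the
# stream-ordered allocator exercised by TestMallocAsync is enabled
# process-wide by swapping the default allocator; allocations are then tied
# to the current stream and bypass the ordinary memory pool. Defined only,
# never called here.
def _malloc_async_usage_sketch():
    pool = cupy.get_default_memory_pool()
    memory.set_allocator(memory.malloc_async)  # stream-ordered allocations
    memptr = memory.alloc(100)                 # bound to the current stream
    memory.set_allocator(pool.malloc)          # switch back to the pool
    return memptr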
|
server.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import logging
import tornado.web
import tornado.websocket
from tornado.ioloop import IOLoop, PeriodicCallback
import Queue
import sys
import threading
import time
import json
import argparse
#from types import *
global args
global q
q_channels = {}
wsMapingChannel = []
def PublishManager():
global q_channels
while True:
for c in q_channels:
if q_channels[c].empty():
                logging.debug("PublishManager channel {} has no messages waiting to send".format(c))
else:
size = q_channels[c].qsize()
                logging.debug("PublishManager polling channel {} [{} messages waiting to send]".format(c, size))
PublishByChannel(c)
time.sleep(0.5)
def PublishByChannel(channel):
q = q_channels[channel]
size = q_channels[channel].qsize()
for i in range(size):
msg = q.get()
logging.debug("send {} to channel {}".format(msg, channel))
users = ChatManager.channels[channel]
if len(msg) > 0:
for user in users:
try:
if hasattr(user['ws'], 'write_message'):
user['ws'].write_message(msg, binary=False)
except Exception:
pass
#user.write_message(u"{}.You said: " + message)
class ChatManager(tornado.websocket.WebSocketHandler):
channels = {}
@classmethod
def add_user(cls, user):
c = user['channel']
new_user = {'username': user['username'], 'ws' : user['ws']}
cls.channels[c].append(new_user)
logging.debug("add new user {} to channel {}".format(user['username'], c))
msg = "<<---------- {} enter ---------->>".format(user['username'])
q_channels[c].put(msg)
@classmethod
def remove_user(cls, websocket):
global wsMapingChannel
global q_channels
channel = ""
for d in wsMapingChannel:
if (d['ws'] == websocket):
channel = d['channel']
wsMapingChannel.remove(d)
break
if channel:
for user in cls.channels[channel]:
if (user['ws'] == websocket):
cls.channels[channel].remove(user)
                    msg = "<<---------- {} leave ---------->>".format(user['username'])
q_channels[channel].put(msg)
class Application(tornado.web.Application):
def __init__(self):
handlers = [(r"/", MainHandler)]
settings = dict(debug=False)
tornado.web.Application.__init__(self, handlers, **settings)
class MainHandler(tornado.websocket.WebSocketHandler):
def check_origin(self, origin):
return True
def open(self):
logging.debug("A client connected.")
def on_close(self):
logging.debug("A client disconnected")
ChatManager.remove_user(self)
def on_message(self, message):
msg = json.loads(message)
if msg['type'] == "hello":
#logging.debug("its welcome package to {:s}".format(msg['channel']))
self.handler_hello(msg)
elif msg['type'] == "message":
#logging.debug("its message package to {:s}".format(msg['message']))
self.handler_message(msg)
else:
logging.warning("{} => unknown package type".format(self.__class__.__name__))
def handler_hello(self, msg):
global q_channels
c = str(msg['channel'])
name = msg['username']
        if c not in q_channels:
            q_channels[c] = Queue.Queue()
            ChatManager.channels[c] = []
            logging.debug("Create new channel {}".format(c))
user = {'username' : name, 'channel': c, 'ws' : self}
ChatManager.add_user(user)
wsMapingChannel.append({'channel': c, 'ws': self})
def handler_message(self, msg):
c = msg['channel']
m = msg['message']
if c in q_channels:
q_channels[c].put(m)
else:
logging.warning("{} => Can't find channel {}".format(self.__class__.__name__, c))
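# Wire-format sketch (illustrative only; field names taken from the handlers
# above). A client first sends a "hello" package to join a channel, then
# "message" packages to publish to it; anything else is logged as an unknown
# package type. The helper below is only defined, never called by the server.
def _example_packets():
    hello = {'type': 'hello', 'channel': 'lobby', 'username': 'alice'}
    chat = {'type': 'message', 'channel': 'lobby', 'message': 'hi there'}
    return json.dumps(hello), json.dumps(chat)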
def main():
global q
global q_channels
global args
parser = argparse.ArgumentParser()
parser.add_argument("--bind-ip", help="bind ip", required=True, type=str)
parser.add_argument("--bind-port", help="bind port", required=True, type=int)
parser.add_argument("--debug", help="debug mode", required=False, type=int)
args = parser.parse_args()
if args.debug:
logging.basicConfig(stream=sys.stdout, level=logging.DEBUG, format='%(asctime)s - %(levelname)s : %(message)s')
else:
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s : %(message)s', filename="server.log", filemode='w')
q_channels["server"] = Queue.Queue()
q=Queue.Queue()
t = threading.Thread(target = PublishManager)
t.setDaemon(True)
t.start()
app = Application()
app.listen(args.bind_port,address=args.bind_ip)
IOLoop.instance().start()
if __name__ == "__main__":
main()
|
test_multiprocessing.py
|
#
# Unit tests for the multiprocessing package
#
import unittest
import Queue
import time
import sys
import os
import gc
import signal
import array
import socket
import random
import logging
import errno
import weakref
import test.script_helper
from test import test_support
from StringIO import StringIO
_multiprocessing = test_support.import_module('_multiprocessing')
# import threading after _multiprocessing to raise a more relevant error
# message: "No module named _multiprocessing". _multiprocessing is not compiled
# without thread support.
import threading
# Work around broken sem_open implementations
test_support.import_module('multiprocessing.synchronize')
import multiprocessing.dummy
import multiprocessing.connection
import multiprocessing.managers
import multiprocessing.heap
import multiprocessing.pool
from multiprocessing import util
try:
from multiprocessing import reduction
HAS_REDUCTION = True
except ImportError:
HAS_REDUCTION = False
try:
from multiprocessing.sharedctypes import Value, copy
HAS_SHAREDCTYPES = True
except ImportError:
HAS_SHAREDCTYPES = False
try:
import msvcrt
except ImportError:
msvcrt = None
#
#
#
latin = str
#
# Constants
#
LOG_LEVEL = util.SUBWARNING
#LOG_LEVEL = logging.DEBUG
DELTA = 0.1
CHECK_TIMINGS = False # making true makes tests take a lot longer
# and can sometimes cause some non-serious
# failures because some calls block a bit
# longer than expected
if CHECK_TIMINGS:
TIMEOUT1, TIMEOUT2, TIMEOUT3 = 0.82, 0.35, 1.4
else:
TIMEOUT1, TIMEOUT2, TIMEOUT3 = 0.1, 0.1, 0.1
HAVE_GETVALUE = not getattr(_multiprocessing,
'HAVE_BROKEN_SEM_GETVALUE', False)
WIN32 = (sys.platform == "win32")
try:
MAXFD = os.sysconf("SC_OPEN_MAX")
except:
MAXFD = 256
#
# Some tests require ctypes
#
try:
from ctypes import Structure, c_int, c_double
except ImportError:
Structure = object
c_int = c_double = None
def check_enough_semaphores():
"""Check that the system supports enough semaphores to run the test."""
# minimum number of semaphores available according to POSIX
nsems_min = 256
try:
nsems = os.sysconf("SC_SEM_NSEMS_MAX")
except (AttributeError, ValueError):
# sysconf not available or setting not available
return
if nsems == -1 or nsems >= nsems_min:
return
raise unittest.SkipTest("The OS doesn't support enough semaphores "
"to run the test (required: %d)." % nsems_min)
#
# Creates a wrapper for a function which records the time it takes to finish
#
class TimingWrapper(object):
def __init__(self, func):
self.func = func
self.elapsed = None
def __call__(self, *args, **kwds):
t = time.time()
try:
return self.func(*args, **kwds)
finally:
self.elapsed = time.time() - t
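# Usage sketch (mirrors how the tests below use TimingWrapper): wrap a
# blocking callable, invoke it, then read .elapsed to see how long the call
# actually blocked. `some_queue` is a stand-in for any Queue-like object; the
# helper is only defined, never called here.
def _timing_wrapper_sketch(some_queue):
    get = TimingWrapper(some_queue.get)
    try:
        get(True, TIMEOUT1)        # block for at most TIMEOUT1 seconds
    except Queue.Empty:
        pass
    return get.elapsed             # roughly TIMEOUT1 if nothing was queued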
#
# Base class for test cases
#
class BaseTestCase(object):
ALLOWED_TYPES = ('processes', 'manager', 'threads')
def assertTimingAlmostEqual(self, a, b):
if CHECK_TIMINGS:
self.assertAlmostEqual(a, b, 1)
def assertReturnsIfImplemented(self, value, func, *args):
try:
res = func(*args)
except NotImplementedError:
pass
else:
return self.assertEqual(value, res)
# For the sanity of Windows users, rather than crashing or freezing in
# multiple ways.
def __reduce__(self, *args):
raise NotImplementedError("shouldn't try to pickle a test case")
__reduce_ex__ = __reduce__
#
# Return the value of a semaphore
#
def get_value(self):
try:
return self.get_value()
except AttributeError:
try:
return self._Semaphore__value
except AttributeError:
try:
return self._value
except AttributeError:
raise NotImplementedError
#
# Testcases
#
class DummyCallable(object):
def __call__(self, q, c):
assert isinstance(c, DummyCallable)
q.put(5)
class _TestProcess(BaseTestCase):
ALLOWED_TYPES = ('processes', 'threads')
def test_current(self):
if self.TYPE == 'threads':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
current = self.current_process()
authkey = current.authkey
self.assertTrue(current.is_alive())
self.assertTrue(not current.daemon)
self.assertIsInstance(authkey, bytes)
self.assertTrue(len(authkey) > 0)
self.assertEqual(current.ident, os.getpid())
self.assertEqual(current.exitcode, None)
@classmethod
def _test(cls, q, *args, **kwds):
current = cls.current_process()
q.put(args)
q.put(kwds)
q.put(current.name)
if cls.TYPE != 'threads':
q.put(bytes(current.authkey))
q.put(current.pid)
def test_process(self):
q = self.Queue(1)
e = self.Event()
args = (q, 1, 2)
kwargs = {'hello':23, 'bye':2.54}
name = 'SomeProcess'
p = self.Process(
target=self._test, args=args, kwargs=kwargs, name=name
)
p.daemon = True
current = self.current_process()
if self.TYPE != 'threads':
self.assertEqual(p.authkey, current.authkey)
self.assertEqual(p.is_alive(), False)
self.assertEqual(p.daemon, True)
self.assertNotIn(p, self.active_children())
self.assertTrue(type(self.active_children()) is list)
self.assertEqual(p.exitcode, None)
p.start()
self.assertEqual(p.exitcode, None)
self.assertEqual(p.is_alive(), True)
self.assertIn(p, self.active_children())
self.assertEqual(q.get(), args[1:])
self.assertEqual(q.get(), kwargs)
self.assertEqual(q.get(), p.name)
if self.TYPE != 'threads':
self.assertEqual(q.get(), current.authkey)
self.assertEqual(q.get(), p.pid)
p.join()
self.assertEqual(p.exitcode, 0)
self.assertEqual(p.is_alive(), False)
self.assertNotIn(p, self.active_children())
@classmethod
def _test_terminate(cls):
time.sleep(1000)
def test_terminate(self):
if self.TYPE == 'threads':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
p = self.Process(target=self._test_terminate)
p.daemon = True
p.start()
self.assertEqual(p.is_alive(), True)
self.assertIn(p, self.active_children())
self.assertEqual(p.exitcode, None)
p.terminate()
join = TimingWrapper(p.join)
self.assertEqual(join(), None)
self.assertTimingAlmostEqual(join.elapsed, 0.0)
self.assertEqual(p.is_alive(), False)
self.assertNotIn(p, self.active_children())
p.join()
# XXX sometimes get p.exitcode == 0 on Windows ...
#self.assertEqual(p.exitcode, -signal.SIGTERM)
def test_cpu_count(self):
try:
cpus = multiprocessing.cpu_count()
except NotImplementedError:
cpus = 1
self.assertTrue(type(cpus) is int)
self.assertTrue(cpus >= 1)
def test_active_children(self):
self.assertEqual(type(self.active_children()), list)
p = self.Process(target=time.sleep, args=(DELTA,))
self.assertNotIn(p, self.active_children())
p.daemon = True
p.start()
self.assertIn(p, self.active_children())
p.join()
self.assertNotIn(p, self.active_children())
@classmethod
def _test_recursion(cls, wconn, id):
from multiprocessing import forking
wconn.send(id)
if len(id) < 2:
for i in range(2):
p = cls.Process(
target=cls._test_recursion, args=(wconn, id+[i])
)
p.start()
p.join()
def test_recursion(self):
rconn, wconn = self.Pipe(duplex=False)
self._test_recursion(wconn, [])
time.sleep(DELTA)
result = []
while rconn.poll():
result.append(rconn.recv())
expected = [
[],
[0],
[0, 0],
[0, 1],
[1],
[1, 0],
[1, 1]
]
self.assertEqual(result, expected)
@classmethod
def _test_sys_exit(cls, reason, testfn):
sys.stderr = open(testfn, 'w')
sys.exit(reason)
def test_sys_exit(self):
# See Issue 13854
if self.TYPE == 'threads':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
testfn = test_support.TESTFN
self.addCleanup(test_support.unlink, testfn)
for reason, code in (([1, 2, 3], 1), ('ignore this', 1)):
p = self.Process(target=self._test_sys_exit, args=(reason, testfn))
p.daemon = True
p.start()
p.join(5)
self.assertEqual(p.exitcode, code)
with open(testfn, 'r') as f:
self.assertEqual(f.read().rstrip(), str(reason))
for reason in (True, False, 8):
p = self.Process(target=sys.exit, args=(reason,))
p.daemon = True
p.start()
p.join(5)
self.assertEqual(p.exitcode, reason)
def test_lose_target_ref(self):
c = DummyCallable()
wr = weakref.ref(c)
q = self.Queue()
p = self.Process(target=c, args=(q, c))
del c
p.start()
p.join()
self.assertIs(wr(), None)
self.assertEqual(q.get(), 5)
#
#
#
class _UpperCaser(multiprocessing.Process):
def __init__(self):
multiprocessing.Process.__init__(self)
self.child_conn, self.parent_conn = multiprocessing.Pipe()
def run(self):
self.parent_conn.close()
for s in iter(self.child_conn.recv, None):
self.child_conn.send(s.upper())
self.child_conn.close()
def submit(self, s):
assert type(s) is str
self.parent_conn.send(s)
return self.parent_conn.recv()
def stop(self):
self.parent_conn.send(None)
self.parent_conn.close()
self.child_conn.close()
class _TestSubclassingProcess(BaseTestCase):
ALLOWED_TYPES = ('processes',)
def test_subclassing(self):
uppercaser = _UpperCaser()
uppercaser.daemon = True
uppercaser.start()
self.assertEqual(uppercaser.submit('hello'), 'HELLO')
self.assertEqual(uppercaser.submit('world'), 'WORLD')
uppercaser.stop()
uppercaser.join()
#
#
#
def queue_empty(q):
if hasattr(q, 'empty'):
return q.empty()
else:
return q.qsize() == 0
def queue_full(q, maxsize):
if hasattr(q, 'full'):
return q.full()
else:
return q.qsize() == maxsize
class _TestQueue(BaseTestCase):
@classmethod
def _test_put(cls, queue, child_can_start, parent_can_continue):
child_can_start.wait()
for i in range(6):
queue.get()
parent_can_continue.set()
def test_put(self):
MAXSIZE = 6
queue = self.Queue(maxsize=MAXSIZE)
child_can_start = self.Event()
parent_can_continue = self.Event()
proc = self.Process(
target=self._test_put,
args=(queue, child_can_start, parent_can_continue)
)
proc.daemon = True
proc.start()
self.assertEqual(queue_empty(queue), True)
self.assertEqual(queue_full(queue, MAXSIZE), False)
queue.put(1)
queue.put(2, True)
queue.put(3, True, None)
queue.put(4, False)
queue.put(5, False, None)
queue.put_nowait(6)
# the values may be in buffer but not yet in pipe so sleep a bit
time.sleep(DELTA)
self.assertEqual(queue_empty(queue), False)
self.assertEqual(queue_full(queue, MAXSIZE), True)
put = TimingWrapper(queue.put)
put_nowait = TimingWrapper(queue.put_nowait)
self.assertRaises(Queue.Full, put, 7, False)
self.assertTimingAlmostEqual(put.elapsed, 0)
self.assertRaises(Queue.Full, put, 7, False, None)
self.assertTimingAlmostEqual(put.elapsed, 0)
self.assertRaises(Queue.Full, put_nowait, 7)
self.assertTimingAlmostEqual(put_nowait.elapsed, 0)
self.assertRaises(Queue.Full, put, 7, True, TIMEOUT1)
self.assertTimingAlmostEqual(put.elapsed, TIMEOUT1)
self.assertRaises(Queue.Full, put, 7, False, TIMEOUT2)
self.assertTimingAlmostEqual(put.elapsed, 0)
self.assertRaises(Queue.Full, put, 7, True, timeout=TIMEOUT3)
self.assertTimingAlmostEqual(put.elapsed, TIMEOUT3)
child_can_start.set()
parent_can_continue.wait()
self.assertEqual(queue_empty(queue), True)
self.assertEqual(queue_full(queue, MAXSIZE), False)
proc.join()
@classmethod
def _test_get(cls, queue, child_can_start, parent_can_continue):
child_can_start.wait()
#queue.put(1)
queue.put(2)
queue.put(3)
queue.put(4)
queue.put(5)
parent_can_continue.set()
def test_get(self):
queue = self.Queue()
child_can_start = self.Event()
parent_can_continue = self.Event()
proc = self.Process(
target=self._test_get,
args=(queue, child_can_start, parent_can_continue)
)
proc.daemon = True
proc.start()
self.assertEqual(queue_empty(queue), True)
child_can_start.set()
parent_can_continue.wait()
time.sleep(DELTA)
self.assertEqual(queue_empty(queue), False)
# Hangs unexpectedly, remove for now
#self.assertEqual(queue.get(), 1)
self.assertEqual(queue.get(True, None), 2)
self.assertEqual(queue.get(True), 3)
self.assertEqual(queue.get(timeout=1), 4)
self.assertEqual(queue.get_nowait(), 5)
self.assertEqual(queue_empty(queue), True)
get = TimingWrapper(queue.get)
get_nowait = TimingWrapper(queue.get_nowait)
self.assertRaises(Queue.Empty, get, False)
self.assertTimingAlmostEqual(get.elapsed, 0)
self.assertRaises(Queue.Empty, get, False, None)
self.assertTimingAlmostEqual(get.elapsed, 0)
self.assertRaises(Queue.Empty, get_nowait)
self.assertTimingAlmostEqual(get_nowait.elapsed, 0)
self.assertRaises(Queue.Empty, get, True, TIMEOUT1)
self.assertTimingAlmostEqual(get.elapsed, TIMEOUT1)
self.assertRaises(Queue.Empty, get, False, TIMEOUT2)
self.assertTimingAlmostEqual(get.elapsed, 0)
self.assertRaises(Queue.Empty, get, timeout=TIMEOUT3)
self.assertTimingAlmostEqual(get.elapsed, TIMEOUT3)
proc.join()
@classmethod
def _test_fork(cls, queue):
for i in range(10, 20):
queue.put(i)
# note that at this point the items may only be buffered, so the
# process cannot shutdown until the feeder thread has finished
# pushing items onto the pipe.
def test_fork(self):
# Old versions of Queue would fail to create a new feeder
# thread for a forked process if the original process had its
# own feeder thread. This test checks that this no longer
# happens.
queue = self.Queue()
# put items on queue so that main process starts a feeder thread
for i in range(10):
queue.put(i)
# wait to make sure thread starts before we fork a new process
time.sleep(DELTA)
# fork process
p = self.Process(target=self._test_fork, args=(queue,))
p.daemon = True
p.start()
# check that all expected items are in the queue
for i in range(20):
self.assertEqual(queue.get(), i)
self.assertRaises(Queue.Empty, queue.get, False)
p.join()
def test_qsize(self):
q = self.Queue()
try:
self.assertEqual(q.qsize(), 0)
except NotImplementedError:
self.skipTest('qsize method not implemented')
q.put(1)
self.assertEqual(q.qsize(), 1)
q.put(5)
self.assertEqual(q.qsize(), 2)
q.get()
self.assertEqual(q.qsize(), 1)
q.get()
self.assertEqual(q.qsize(), 0)
@classmethod
def _test_task_done(cls, q):
for obj in iter(q.get, None):
time.sleep(DELTA)
q.task_done()
def test_task_done(self):
queue = self.JoinableQueue()
if sys.version_info < (2, 5) and not hasattr(queue, 'task_done'):
self.skipTest("requires 'queue.task_done()' method")
workers = [self.Process(target=self._test_task_done, args=(queue,))
for i in xrange(4)]
for p in workers:
p.daemon = True
p.start()
for i in xrange(10):
queue.put(i)
queue.join()
for p in workers:
queue.put(None)
for p in workers:
p.join()
def test_no_import_lock_contention(self):
with test_support.temp_cwd():
module_name = 'imported_by_an_imported_module'
with open(module_name + '.py', 'w') as f:
f.write("""if 1:
import multiprocessing
q = multiprocessing.Queue()
q.put('knock knock')
q.get(timeout=3)
q.close()
""")
with test_support.DirsOnSysPath(os.getcwd()):
try:
__import__(module_name)
except Queue.Empty:
self.fail("Probable regression on import lock contention;"
" see Issue #22853")
def test_queue_feeder_donot_stop_onexc(self):
# bpo-30414: verify feeder handles exceptions correctly
if self.TYPE != 'processes':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
class NotSerializable(object):
def __reduce__(self):
raise AttributeError
        with test_support.captured_stderr():
q = self.Queue()
q.put(NotSerializable())
q.put(True)
# bpo-30595: use a timeout of 1 second for slow buildbots
self.assertTrue(q.get(timeout=1.0))
#
#
#
class _TestLock(BaseTestCase):
def test_lock(self):
lock = self.Lock()
self.assertEqual(lock.acquire(), True)
self.assertEqual(lock.acquire(False), False)
self.assertEqual(lock.release(), None)
self.assertRaises((ValueError, threading.ThreadError), lock.release)
def test_rlock(self):
lock = self.RLock()
self.assertEqual(lock.acquire(), True)
self.assertEqual(lock.acquire(), True)
self.assertEqual(lock.acquire(), True)
self.assertEqual(lock.release(), None)
self.assertEqual(lock.release(), None)
self.assertEqual(lock.release(), None)
self.assertRaises((AssertionError, RuntimeError), lock.release)
def test_lock_context(self):
with self.Lock():
pass
class _TestSemaphore(BaseTestCase):
def _test_semaphore(self, sem):
self.assertReturnsIfImplemented(2, get_value, sem)
self.assertEqual(sem.acquire(), True)
self.assertReturnsIfImplemented(1, get_value, sem)
self.assertEqual(sem.acquire(), True)
self.assertReturnsIfImplemented(0, get_value, sem)
self.assertEqual(sem.acquire(False), False)
self.assertReturnsIfImplemented(0, get_value, sem)
self.assertEqual(sem.release(), None)
self.assertReturnsIfImplemented(1, get_value, sem)
self.assertEqual(sem.release(), None)
self.assertReturnsIfImplemented(2, get_value, sem)
def test_semaphore(self):
sem = self.Semaphore(2)
self._test_semaphore(sem)
self.assertEqual(sem.release(), None)
self.assertReturnsIfImplemented(3, get_value, sem)
self.assertEqual(sem.release(), None)
self.assertReturnsIfImplemented(4, get_value, sem)
def test_bounded_semaphore(self):
sem = self.BoundedSemaphore(2)
self._test_semaphore(sem)
# Currently fails on OS/X
#if HAVE_GETVALUE:
# self.assertRaises(ValueError, sem.release)
# self.assertReturnsIfImplemented(2, get_value, sem)
def test_timeout(self):
if self.TYPE != 'processes':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
sem = self.Semaphore(0)
acquire = TimingWrapper(sem.acquire)
self.assertEqual(acquire(False), False)
self.assertTimingAlmostEqual(acquire.elapsed, 0.0)
self.assertEqual(acquire(False, None), False)
self.assertTimingAlmostEqual(acquire.elapsed, 0.0)
self.assertEqual(acquire(False, TIMEOUT1), False)
self.assertTimingAlmostEqual(acquire.elapsed, 0)
self.assertEqual(acquire(True, TIMEOUT2), False)
self.assertTimingAlmostEqual(acquire.elapsed, TIMEOUT2)
self.assertEqual(acquire(timeout=TIMEOUT3), False)
self.assertTimingAlmostEqual(acquire.elapsed, TIMEOUT3)
class _TestCondition(BaseTestCase):
@classmethod
def f(cls, cond, sleeping, woken, timeout=None):
cond.acquire()
sleeping.release()
cond.wait(timeout)
woken.release()
cond.release()
def check_invariant(self, cond):
# this is only supposed to succeed when there are no sleepers
if self.TYPE == 'processes':
try:
sleepers = (cond._sleeping_count.get_value() -
cond._woken_count.get_value())
self.assertEqual(sleepers, 0)
self.assertEqual(cond._wait_semaphore.get_value(), 0)
except NotImplementedError:
pass
def test_notify(self):
cond = self.Condition()
sleeping = self.Semaphore(0)
woken = self.Semaphore(0)
p = self.Process(target=self.f, args=(cond, sleeping, woken))
p.daemon = True
p.start()
p = threading.Thread(target=self.f, args=(cond, sleeping, woken))
p.daemon = True
p.start()
# wait for both children to start sleeping
sleeping.acquire()
sleeping.acquire()
# check no process/thread has woken up
time.sleep(DELTA)
self.assertReturnsIfImplemented(0, get_value, woken)
# wake up one process/thread
cond.acquire()
cond.notify()
cond.release()
# check one process/thread has woken up
time.sleep(DELTA)
self.assertReturnsIfImplemented(1, get_value, woken)
# wake up another
cond.acquire()
cond.notify()
cond.release()
# check other has woken up
time.sleep(DELTA)
self.assertReturnsIfImplemented(2, get_value, woken)
# check state is not mucked up
self.check_invariant(cond)
p.join()
def test_notify_all(self):
cond = self.Condition()
sleeping = self.Semaphore(0)
woken = self.Semaphore(0)
# start some threads/processes which will timeout
for i in range(3):
p = self.Process(target=self.f,
args=(cond, sleeping, woken, TIMEOUT1))
p.daemon = True
p.start()
t = threading.Thread(target=self.f,
args=(cond, sleeping, woken, TIMEOUT1))
t.daemon = True
t.start()
# wait for them all to sleep
for i in xrange(6):
sleeping.acquire()
# check they have all timed out
for i in xrange(6):
woken.acquire()
self.assertReturnsIfImplemented(0, get_value, woken)
# check state is not mucked up
self.check_invariant(cond)
# start some more threads/processes
for i in range(3):
p = self.Process(target=self.f, args=(cond, sleeping, woken))
p.daemon = True
p.start()
t = threading.Thread(target=self.f, args=(cond, sleeping, woken))
t.daemon = True
t.start()
# wait for them to all sleep
for i in xrange(6):
sleeping.acquire()
# check no process/thread has woken up
time.sleep(DELTA)
self.assertReturnsIfImplemented(0, get_value, woken)
# wake them all up
cond.acquire()
cond.notify_all()
cond.release()
# check they have all woken
for i in range(10):
try:
if get_value(woken) == 6:
break
except NotImplementedError:
break
time.sleep(DELTA)
self.assertReturnsIfImplemented(6, get_value, woken)
# check state is not mucked up
self.check_invariant(cond)
def test_timeout(self):
cond = self.Condition()
wait = TimingWrapper(cond.wait)
cond.acquire()
res = wait(TIMEOUT1)
cond.release()
self.assertEqual(res, None)
self.assertTimingAlmostEqual(wait.elapsed, TIMEOUT1)
class _TestEvent(BaseTestCase):
@classmethod
def _test_event(cls, event):
time.sleep(TIMEOUT2)
event.set()
def test_event(self):
event = self.Event()
wait = TimingWrapper(event.wait)
# Removed temporarily, due to API shear, this does not
# work with threading._Event objects. is_set == isSet
self.assertEqual(event.is_set(), False)
# Removed, threading.Event.wait() will return the value of the __flag
# instead of None. API Shear with the semaphore backed mp.Event
self.assertEqual(wait(0.0), False)
self.assertTimingAlmostEqual(wait.elapsed, 0.0)
self.assertEqual(wait(TIMEOUT1), False)
self.assertTimingAlmostEqual(wait.elapsed, TIMEOUT1)
event.set()
# See note above on the API differences
self.assertEqual(event.is_set(), True)
self.assertEqual(wait(), True)
self.assertTimingAlmostEqual(wait.elapsed, 0.0)
self.assertEqual(wait(TIMEOUT1), True)
self.assertTimingAlmostEqual(wait.elapsed, 0.0)
# self.assertEqual(event.is_set(), True)
event.clear()
#self.assertEqual(event.is_set(), False)
p = self.Process(target=self._test_event, args=(event,))
p.daemon = True
p.start()
self.assertEqual(wait(), True)
#
#
#
class _TestValue(BaseTestCase):
ALLOWED_TYPES = ('processes',)
codes_values = [
('i', 4343, 24234),
('d', 3.625, -4.25),
('h', -232, 234),
('c', latin('x'), latin('y'))
]
def setUp(self):
if not HAS_SHAREDCTYPES:
self.skipTest("requires multiprocessing.sharedctypes")
@classmethod
def _test(cls, values):
for sv, cv in zip(values, cls.codes_values):
sv.value = cv[2]
def test_value(self, raw=False):
if raw:
values = [self.RawValue(code, value)
for code, value, _ in self.codes_values]
else:
values = [self.Value(code, value)
for code, value, _ in self.codes_values]
for sv, cv in zip(values, self.codes_values):
self.assertEqual(sv.value, cv[1])
proc = self.Process(target=self._test, args=(values,))
proc.daemon = True
proc.start()
proc.join()
for sv, cv in zip(values, self.codes_values):
self.assertEqual(sv.value, cv[2])
def test_rawvalue(self):
self.test_value(raw=True)
def test_getobj_getlock(self):
val1 = self.Value('i', 5)
lock1 = val1.get_lock()
obj1 = val1.get_obj()
val2 = self.Value('i', 5, lock=None)
lock2 = val2.get_lock()
obj2 = val2.get_obj()
lock = self.Lock()
val3 = self.Value('i', 5, lock=lock)
lock3 = val3.get_lock()
obj3 = val3.get_obj()
self.assertEqual(lock, lock3)
arr4 = self.Value('i', 5, lock=False)
self.assertFalse(hasattr(arr4, 'get_lock'))
self.assertFalse(hasattr(arr4, 'get_obj'))
self.assertRaises(AttributeError, self.Value, 'i', 5, lock='navalue')
arr5 = self.RawValue('i', 5)
self.assertFalse(hasattr(arr5, 'get_lock'))
self.assertFalse(hasattr(arr5, 'get_obj'))
class _TestArray(BaseTestCase):
ALLOWED_TYPES = ('processes',)
@classmethod
def f(cls, seq):
for i in range(1, len(seq)):
seq[i] += seq[i-1]
@unittest.skipIf(c_int is None, "requires _ctypes")
def test_array(self, raw=False):
seq = [680, 626, 934, 821, 150, 233, 548, 982, 714, 831]
if raw:
arr = self.RawArray('i', seq)
else:
arr = self.Array('i', seq)
self.assertEqual(len(arr), len(seq))
self.assertEqual(arr[3], seq[3])
self.assertEqual(list(arr[2:7]), list(seq[2:7]))
arr[4:8] = seq[4:8] = array.array('i', [1, 2, 3, 4])
self.assertEqual(list(arr[:]), seq)
self.f(seq)
p = self.Process(target=self.f, args=(arr,))
p.daemon = True
p.start()
p.join()
self.assertEqual(list(arr[:]), seq)
@unittest.skipIf(c_int is None, "requires _ctypes")
def test_array_from_size(self):
size = 10
# Test for zeroing (see issue #11675).
# The repetition below strengthens the test by increasing the chances
# of previously allocated non-zero memory being used for the new array
# on the 2nd and 3rd loops.
for _ in range(3):
arr = self.Array('i', size)
self.assertEqual(len(arr), size)
self.assertEqual(list(arr), [0] * size)
arr[:] = range(10)
self.assertEqual(list(arr), range(10))
del arr
@unittest.skipIf(c_int is None, "requires _ctypes")
def test_rawarray(self):
self.test_array(raw=True)
@unittest.skipIf(c_int is None, "requires _ctypes")
def test_array_accepts_long(self):
arr = self.Array('i', 10L)
self.assertEqual(len(arr), 10)
raw_arr = self.RawArray('i', 10L)
self.assertEqual(len(raw_arr), 10)
@unittest.skipIf(c_int is None, "requires _ctypes")
def test_getobj_getlock_obj(self):
arr1 = self.Array('i', range(10))
lock1 = arr1.get_lock()
obj1 = arr1.get_obj()
arr2 = self.Array('i', range(10), lock=None)
lock2 = arr2.get_lock()
obj2 = arr2.get_obj()
lock = self.Lock()
arr3 = self.Array('i', range(10), lock=lock)
lock3 = arr3.get_lock()
obj3 = arr3.get_obj()
self.assertEqual(lock, lock3)
arr4 = self.Array('i', range(10), lock=False)
self.assertFalse(hasattr(arr4, 'get_lock'))
self.assertFalse(hasattr(arr4, 'get_obj'))
self.assertRaises(AttributeError,
self.Array, 'i', range(10), lock='notalock')
arr5 = self.RawArray('i', range(10))
self.assertFalse(hasattr(arr5, 'get_lock'))
self.assertFalse(hasattr(arr5, 'get_obj'))
#
#
#
class _TestContainers(BaseTestCase):
ALLOWED_TYPES = ('manager',)
def test_list(self):
a = self.list(range(10))
self.assertEqual(a[:], range(10))
b = self.list()
self.assertEqual(b[:], [])
b.extend(range(5))
self.assertEqual(b[:], range(5))
self.assertEqual(b[2], 2)
self.assertEqual(b[2:10], [2,3,4])
b *= 2
self.assertEqual(b[:], [0, 1, 2, 3, 4, 0, 1, 2, 3, 4])
self.assertEqual(b + [5, 6], [0, 1, 2, 3, 4, 0, 1, 2, 3, 4, 5, 6])
self.assertEqual(a[:], range(10))
d = [a, b]
e = self.list(d)
self.assertEqual(
e[:],
[[0, 1, 2, 3, 4, 5, 6, 7, 8, 9], [0, 1, 2, 3, 4, 0, 1, 2, 3, 4]]
)
f = self.list([a])
a.append('hello')
self.assertEqual(f[:], [[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 'hello']])
def test_dict(self):
d = self.dict()
indices = range(65, 70)
for i in indices:
d[i] = chr(i)
self.assertEqual(d.copy(), dict((i, chr(i)) for i in indices))
self.assertEqual(sorted(d.keys()), indices)
self.assertEqual(sorted(d.values()), [chr(i) for i in indices])
self.assertEqual(sorted(d.items()), [(i, chr(i)) for i in indices])
def test_namespace(self):
n = self.Namespace()
n.name = 'Bob'
n.job = 'Builder'
n._hidden = 'hidden'
self.assertEqual((n.name, n.job), ('Bob', 'Builder'))
del n.job
self.assertEqual(str(n), "Namespace(name='Bob')")
self.assertTrue(hasattr(n, 'name'))
self.assertTrue(not hasattr(n, 'job'))
#
#
#
def sqr(x, wait=0.0):
time.sleep(wait)
return x*x
def identity(x):
return x
class CountedObject(object):
n_instances = 0
def __new__(cls):
cls.n_instances += 1
return object.__new__(cls)
def __del__(self):
type(self).n_instances -= 1
class SayWhenError(ValueError): pass
def exception_throwing_generator(total, when):
for i in range(total):
if i == when:
raise SayWhenError("Somebody said when")
yield i
class _TestPool(BaseTestCase):
def test_apply(self):
papply = self.pool.apply
self.assertEqual(papply(sqr, (5,)), sqr(5))
self.assertEqual(papply(sqr, (), {'x':3}), sqr(x=3))
def test_map(self):
pmap = self.pool.map
self.assertEqual(pmap(sqr, range(10)), map(sqr, range(10)))
self.assertEqual(pmap(sqr, range(100), chunksize=20),
map(sqr, range(100)))
    def test_map_unpicklable(self):
# Issue #19425 -- failure to pickle should not cause a hang
if self.TYPE == 'threads':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
class A(object):
def __reduce__(self):
raise RuntimeError('cannot pickle')
with self.assertRaises(RuntimeError):
self.pool.map(sqr, [A()]*10)
def test_map_chunksize(self):
try:
self.pool.map_async(sqr, [], chunksize=1).get(timeout=TIMEOUT1)
except multiprocessing.TimeoutError:
self.fail("pool.map_async with chunksize stalled on null list")
def test_async(self):
res = self.pool.apply_async(sqr, (7, TIMEOUT1,))
get = TimingWrapper(res.get)
self.assertEqual(get(), 49)
self.assertTimingAlmostEqual(get.elapsed, TIMEOUT1)
def test_async_timeout(self):
res = self.pool.apply_async(sqr, (6, TIMEOUT2 + 1.0))
get = TimingWrapper(res.get)
self.assertRaises(multiprocessing.TimeoutError, get, timeout=TIMEOUT2)
self.assertTimingAlmostEqual(get.elapsed, TIMEOUT2)
def test_imap(self):
it = self.pool.imap(sqr, range(10))
self.assertEqual(list(it), map(sqr, range(10)))
it = self.pool.imap(sqr, range(10))
for i in range(10):
self.assertEqual(it.next(), i*i)
self.assertRaises(StopIteration, it.next)
it = self.pool.imap(sqr, range(1000), chunksize=100)
for i in range(1000):
self.assertEqual(it.next(), i*i)
self.assertRaises(StopIteration, it.next)
def test_imap_handle_iterable_exception(self):
if self.TYPE == 'manager':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
it = self.pool.imap(sqr, exception_throwing_generator(10, 3), 1)
for i in range(3):
self.assertEqual(next(it), i*i)
self.assertRaises(SayWhenError, it.next)
# SayWhenError seen at start of problematic chunk's results
it = self.pool.imap(sqr, exception_throwing_generator(20, 7), 2)
for i in range(6):
self.assertEqual(next(it), i*i)
self.assertRaises(SayWhenError, it.next)
it = self.pool.imap(sqr, exception_throwing_generator(20, 7), 4)
for i in range(4):
self.assertEqual(next(it), i*i)
self.assertRaises(SayWhenError, it.next)
def test_imap_unordered(self):
it = self.pool.imap_unordered(sqr, range(1000))
self.assertEqual(sorted(it), map(sqr, range(1000)))
it = self.pool.imap_unordered(sqr, range(1000), chunksize=53)
self.assertEqual(sorted(it), map(sqr, range(1000)))
def test_imap_unordered_handle_iterable_exception(self):
if self.TYPE == 'manager':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
it = self.pool.imap_unordered(sqr,
exception_throwing_generator(10, 3),
1)
expected_values = map(sqr, range(10))
with self.assertRaises(SayWhenError):
# imap_unordered makes it difficult to anticipate the SayWhenError
for i in range(10):
value = next(it)
self.assertIn(value, expected_values)
expected_values.remove(value)
it = self.pool.imap_unordered(sqr,
exception_throwing_generator(20, 7),
2)
expected_values = map(sqr, range(20))
with self.assertRaises(SayWhenError):
for i in range(20):
value = next(it)
self.assertIn(value, expected_values)
expected_values.remove(value)
def test_make_pool(self):
self.assertRaises(ValueError, multiprocessing.Pool, -1)
self.assertRaises(ValueError, multiprocessing.Pool, 0)
p = multiprocessing.Pool(3)
self.assertEqual(3, len(p._pool))
p.close()
p.join()
def test_terminate(self):
p = self.Pool(4)
result = p.map_async(
time.sleep, [0.1 for i in range(10000)], chunksize=1
)
p.terminate()
join = TimingWrapper(p.join)
join()
self.assertTrue(join.elapsed < 0.2)
def test_empty_iterable(self):
# See Issue 12157
p = self.Pool(1)
self.assertEqual(p.map(sqr, []), [])
self.assertEqual(list(p.imap(sqr, [])), [])
self.assertEqual(list(p.imap_unordered(sqr, [])), [])
self.assertEqual(p.map_async(sqr, []).get(), [])
p.close()
p.join()
def test_release_task_refs(self):
# Issue #29861: task arguments and results should not be kept
# alive after we are done with them.
objs = list(CountedObject() for i in range(10))
refs = list(weakref.ref(o) for o in objs)
self.pool.map(identity, objs)
del objs
time.sleep(DELTA) # let threaded cleanup code run
self.assertEqual(set(wr() for wr in refs), {None})
# With a process pool, copies of the objects are returned, check
# they were released too.
self.assertEqual(CountedObject.n_instances, 0)
def unpickleable_result():
return lambda: 42
class _TestPoolWorkerErrors(BaseTestCase):
ALLOWED_TYPES = ('processes', )
def test_unpickleable_result(self):
from multiprocessing.pool import MaybeEncodingError
p = multiprocessing.Pool(2)
# Make sure we don't lose pool processes because of encoding errors.
for iteration in range(20):
res = p.apply_async(unpickleable_result)
self.assertRaises(MaybeEncodingError, res.get)
p.close()
p.join()
class _TestPoolWorkerLifetime(BaseTestCase):
ALLOWED_TYPES = ('processes', )
def test_pool_worker_lifetime(self):
p = multiprocessing.Pool(3, maxtasksperchild=10)
self.assertEqual(3, len(p._pool))
origworkerpids = [w.pid for w in p._pool]
# Run many tasks so each worker gets replaced (hopefully)
results = []
for i in range(100):
results.append(p.apply_async(sqr, (i, )))
# Fetch the results and verify we got the right answers,
# also ensuring all the tasks have completed.
for (j, res) in enumerate(results):
self.assertEqual(res.get(), sqr(j))
# Refill the pool
p._repopulate_pool()
# Wait until all workers are alive
# (countdown * DELTA = 5 seconds max startup process time)
countdown = 50
while countdown and not all(w.is_alive() for w in p._pool):
countdown -= 1
time.sleep(DELTA)
finalworkerpids = [w.pid for w in p._pool]
# All pids should be assigned. See issue #7805.
self.assertNotIn(None, origworkerpids)
self.assertNotIn(None, finalworkerpids)
# Finally, check that the worker pids have changed
self.assertNotEqual(sorted(origworkerpids), sorted(finalworkerpids))
p.close()
p.join()
def test_pool_worker_lifetime_early_close(self):
# Issue #10332: closing a pool whose workers have limited lifetimes
# before all the tasks completed would make join() hang.
p = multiprocessing.Pool(3, maxtasksperchild=1)
results = []
for i in range(6):
results.append(p.apply_async(sqr, (i, 0.3)))
p.close()
p.join()
# check the results
for (j, res) in enumerate(results):
self.assertEqual(res.get(), sqr(j))
#
# Test that manager has expected number of shared objects left
#
class _TestZZZNumberOfObjects(BaseTestCase):
# Because test cases are sorted alphabetically, this one will get
# run after all the other tests for the manager. It tests that
# there have been no "reference leaks" for the manager's shared
# objects. Note the comment in _TestPool.test_terminate().
ALLOWED_TYPES = ('manager',)
def test_number_of_objects(self):
EXPECTED_NUMBER = 1 # the pool object is still alive
multiprocessing.active_children() # discard dead process objs
gc.collect() # do garbage collection
refs = self.manager._number_of_objects()
debug_info = self.manager._debug_info()
if refs != EXPECTED_NUMBER:
print self.manager._debug_info()
print debug_info
self.assertEqual(refs, EXPECTED_NUMBER)
#
# Test of creating a customized manager class
#
from multiprocessing.managers import BaseManager, BaseProxy, RemoteError
class FooBar(object):
def f(self):
return 'f()'
def g(self):
raise ValueError
def _h(self):
return '_h()'
def baz():
for i in xrange(10):
yield i*i
class IteratorProxy(BaseProxy):
_exposed_ = ('next', '__next__')
def __iter__(self):
return self
def next(self):
return self._callmethod('next')
def __next__(self):
return self._callmethod('__next__')
class MyManager(BaseManager):
pass
MyManager.register('Foo', callable=FooBar)
MyManager.register('Bar', callable=FooBar, exposed=('f', '_h'))
MyManager.register('baz', callable=baz, proxytype=IteratorProxy)
class _TestMyManager(BaseTestCase):
ALLOWED_TYPES = ('manager',)
def test_mymanager(self):
manager = MyManager()
manager.start()
foo = manager.Foo()
bar = manager.Bar()
baz = manager.baz()
foo_methods = [name for name in ('f', 'g', '_h') if hasattr(foo, name)]
bar_methods = [name for name in ('f', 'g', '_h') if hasattr(bar, name)]
self.assertEqual(foo_methods, ['f', 'g'])
self.assertEqual(bar_methods, ['f', '_h'])
self.assertEqual(foo.f(), 'f()')
self.assertRaises(ValueError, foo.g)
self.assertEqual(foo._callmethod('f'), 'f()')
self.assertRaises(RemoteError, foo._callmethod, '_h')
self.assertEqual(bar.f(), 'f()')
self.assertEqual(bar._h(), '_h()')
self.assertEqual(bar._callmethod('f'), 'f()')
self.assertEqual(bar._callmethod('_h'), '_h()')
self.assertEqual(list(baz), [i*i for i in range(10)])
manager.shutdown()
#
# Test of connecting to a remote server and using xmlrpclib for serialization
#
_queue = Queue.Queue()
def get_queue():
return _queue
class QueueManager(BaseManager):
'''manager class used by server process'''
QueueManager.register('get_queue', callable=get_queue)
class QueueManager2(BaseManager):
'''manager class which specifies the same interface as QueueManager'''
QueueManager2.register('get_queue')
SERIALIZER = 'xmlrpclib'
class _TestRemoteManager(BaseTestCase):
ALLOWED_TYPES = ('manager',)
values = ['hello world', None, True, 2.25,
#'hall\xc3\xa5 v\xc3\xa4rlden'] # UTF-8
]
result = values[:]
if test_support.have_unicode:
#result[-1] = u'hall\xe5 v\xe4rlden'
uvalue = test_support.u(r'\u043f\u0440\u0438\u0432\u0456\u0442 '
r'\u0441\u0432\u0456\u0442')
values.append(uvalue)
result.append(uvalue)
@classmethod
def _putter(cls, address, authkey):
manager = QueueManager2(
address=address, authkey=authkey, serializer=SERIALIZER
)
manager.connect()
queue = manager.get_queue()
# Note that xmlrpclib will deserialize object as a list not a tuple
queue.put(tuple(cls.values))
def test_remote(self):
authkey = os.urandom(32)
manager = QueueManager(
address=(test.test_support.HOST, 0), authkey=authkey, serializer=SERIALIZER
)
manager.start()
p = self.Process(target=self._putter, args=(manager.address, authkey))
p.daemon = True
p.start()
manager2 = QueueManager2(
address=manager.address, authkey=authkey, serializer=SERIALIZER
)
manager2.connect()
queue = manager2.get_queue()
self.assertEqual(queue.get(), self.result)
# Because we are using xmlrpclib for serialization instead of
# pickle this will cause a serialization error.
self.assertRaises(Exception, queue.put, time.sleep)
# Make queue finalizer run before the server is stopped
del queue
manager.shutdown()
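# A minimal illustrative helper (not used by the tests) showing why the tuple
# put by _putter comes back as a list above: XML-RPC has no tuple type, so a
# tuple marshalled through the 'xmlrpclib' serializer round-trips as a list.
def _xmlrpclib_tuple_roundtrip_demo():
    import xmlrpclib
    params, method = xmlrpclib.loads(xmlrpclib.dumps(((1, 2, 3),)))
    assert params[0] == [1, 2, 3]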
class _TestManagerRestart(BaseTestCase):
@classmethod
def _putter(cls, address, authkey):
manager = QueueManager(
address=address, authkey=authkey, serializer=SERIALIZER)
manager.connect()
queue = manager.get_queue()
queue.put('hello world')
def test_rapid_restart(self):
authkey = os.urandom(32)
manager = QueueManager(
address=(test.test_support.HOST, 0), authkey=authkey, serializer=SERIALIZER)
srvr = manager.get_server()
addr = srvr.address
# Close the connection.Listener socket which gets opened as a part
# of manager.get_server(). It's not needed for the test.
srvr.listener.close()
manager.start()
p = self.Process(target=self._putter, args=(manager.address, authkey))
p.start()
p.join()
queue = manager.get_queue()
self.assertEqual(queue.get(), 'hello world')
del queue
manager.shutdown()
manager = QueueManager(
address=addr, authkey=authkey, serializer=SERIALIZER)
manager.start()
manager.shutdown()
#
#
#
SENTINEL = latin('')
class _TestConnection(BaseTestCase):
ALLOWED_TYPES = ('processes', 'threads')
@classmethod
def _echo(cls, conn):
for msg in iter(conn.recv_bytes, SENTINEL):
conn.send_bytes(msg)
conn.close()
def test_connection(self):
conn, child_conn = self.Pipe()
p = self.Process(target=self._echo, args=(child_conn,))
p.daemon = True
p.start()
seq = [1, 2.25, None]
msg = latin('hello world')
longmsg = msg * 10
arr = array.array('i', range(4))
if self.TYPE == 'processes':
self.assertEqual(type(conn.fileno()), int)
self.assertEqual(conn.send(seq), None)
self.assertEqual(conn.recv(), seq)
self.assertEqual(conn.send_bytes(msg), None)
self.assertEqual(conn.recv_bytes(), msg)
if self.TYPE == 'processes':
buffer = array.array('i', [0]*10)
expected = list(arr) + [0] * (10 - len(arr))
self.assertEqual(conn.send_bytes(arr), None)
self.assertEqual(conn.recv_bytes_into(buffer),
len(arr) * buffer.itemsize)
self.assertEqual(list(buffer), expected)
buffer = array.array('i', [0]*10)
expected = [0] * 3 + list(arr) + [0] * (10 - 3 - len(arr))
self.assertEqual(conn.send_bytes(arr), None)
self.assertEqual(conn.recv_bytes_into(buffer, 3 * buffer.itemsize),
len(arr) * buffer.itemsize)
self.assertEqual(list(buffer), expected)
buffer = bytearray(latin(' ' * 40))
self.assertEqual(conn.send_bytes(longmsg), None)
try:
res = conn.recv_bytes_into(buffer)
except multiprocessing.BufferTooShort, e:
self.assertEqual(e.args, (longmsg,))
else:
self.fail('expected BufferTooShort, got %s' % res)
poll = TimingWrapper(conn.poll)
self.assertEqual(poll(), False)
self.assertTimingAlmostEqual(poll.elapsed, 0)
self.assertEqual(poll(TIMEOUT1), False)
self.assertTimingAlmostEqual(poll.elapsed, TIMEOUT1)
conn.send(None)
time.sleep(.1)
self.assertEqual(poll(TIMEOUT1), True)
self.assertTimingAlmostEqual(poll.elapsed, 0)
self.assertEqual(conn.recv(), None)
really_big_msg = latin('X') * (1024 * 1024 * 16) # 16Mb
conn.send_bytes(really_big_msg)
self.assertEqual(conn.recv_bytes(), really_big_msg)
conn.send_bytes(SENTINEL) # tell child to quit
child_conn.close()
if self.TYPE == 'processes':
self.assertEqual(conn.readable, True)
self.assertEqual(conn.writable, True)
self.assertRaises(EOFError, conn.recv)
self.assertRaises(EOFError, conn.recv_bytes)
p.join()
def test_duplex_false(self):
reader, writer = self.Pipe(duplex=False)
self.assertEqual(writer.send(1), None)
self.assertEqual(reader.recv(), 1)
if self.TYPE == 'processes':
self.assertEqual(reader.readable, True)
self.assertEqual(reader.writable, False)
self.assertEqual(writer.readable, False)
self.assertEqual(writer.writable, True)
self.assertRaises(IOError, reader.send, 2)
self.assertRaises(IOError, writer.recv)
self.assertRaises(IOError, writer.poll)
def test_spawn_close(self):
        # We test that a pipe connection can be closed by the parent
        # process immediately after the child is spawned.  On Windows
        # this sometimes failed on old versions because child_conn
        # would be closed before the child got a chance to duplicate
        # it.
conn, child_conn = self.Pipe()
p = self.Process(target=self._echo, args=(child_conn,))
p.daemon = True
p.start()
child_conn.close() # this might complete before child initializes
msg = latin('hello')
conn.send_bytes(msg)
self.assertEqual(conn.recv_bytes(), msg)
conn.send_bytes(SENTINEL)
conn.close()
p.join()
def test_sendbytes(self):
if self.TYPE != 'processes':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
msg = latin('abcdefghijklmnopqrstuvwxyz')
a, b = self.Pipe()
a.send_bytes(msg)
self.assertEqual(b.recv_bytes(), msg)
a.send_bytes(msg, 5)
self.assertEqual(b.recv_bytes(), msg[5:])
a.send_bytes(msg, 7, 8)
self.assertEqual(b.recv_bytes(), msg[7:7+8])
a.send_bytes(msg, 26)
self.assertEqual(b.recv_bytes(), latin(''))
a.send_bytes(msg, 26, 0)
self.assertEqual(b.recv_bytes(), latin(''))
self.assertRaises(ValueError, a.send_bytes, msg, 27)
self.assertRaises(ValueError, a.send_bytes, msg, 22, 5)
self.assertRaises(ValueError, a.send_bytes, msg, 26, 1)
self.assertRaises(ValueError, a.send_bytes, msg, -1)
self.assertRaises(ValueError, a.send_bytes, msg, 4, -1)
@classmethod
def _is_fd_assigned(cls, fd):
try:
os.fstat(fd)
except OSError as e:
if e.errno == errno.EBADF:
return False
raise
else:
return True
@classmethod
def _writefd(cls, conn, data, create_dummy_fds=False):
if create_dummy_fds:
for i in range(0, 256):
if not cls._is_fd_assigned(i):
os.dup2(conn.fileno(), i)
fd = reduction.recv_handle(conn)
if msvcrt:
fd = msvcrt.open_osfhandle(fd, os.O_WRONLY)
os.write(fd, data)
os.close(fd)
@unittest.skipUnless(HAS_REDUCTION, "test needs multiprocessing.reduction")
def test_fd_transfer(self):
if self.TYPE != 'processes':
self.skipTest("only makes sense with processes")
conn, child_conn = self.Pipe(duplex=True)
p = self.Process(target=self._writefd, args=(child_conn, b"foo"))
p.daemon = True
p.start()
with open(test_support.TESTFN, "wb") as f:
fd = f.fileno()
if msvcrt:
fd = msvcrt.get_osfhandle(fd)
reduction.send_handle(conn, fd, p.pid)
p.join()
with open(test_support.TESTFN, "rb") as f:
self.assertEqual(f.read(), b"foo")
@unittest.skipUnless(HAS_REDUCTION, "test needs multiprocessing.reduction")
@unittest.skipIf(sys.platform == "win32",
"test semantics don't make sense on Windows")
@unittest.skipIf(MAXFD <= 256,
"largest assignable fd number is too small")
@unittest.skipUnless(hasattr(os, "dup2"),
"test needs os.dup2()")
def test_large_fd_transfer(self):
# With fd > 256 (issue #11657)
if self.TYPE != 'processes':
self.skipTest("only makes sense with processes")
conn, child_conn = self.Pipe(duplex=True)
p = self.Process(target=self._writefd, args=(child_conn, b"bar", True))
p.daemon = True
p.start()
with open(test_support.TESTFN, "wb") as f:
fd = f.fileno()
for newfd in range(256, MAXFD):
if not self._is_fd_assigned(newfd):
break
else:
self.fail("could not find an unassigned large file descriptor")
os.dup2(fd, newfd)
try:
reduction.send_handle(conn, newfd, p.pid)
finally:
os.close(newfd)
p.join()
with open(test_support.TESTFN, "rb") as f:
self.assertEqual(f.read(), b"bar")
@classmethod
def _send_data_without_fd(self, conn):
os.write(conn.fileno(), b"\0")
@unittest.skipUnless(HAS_REDUCTION, "test needs multiprocessing.reduction")
@unittest.skipIf(sys.platform == "win32", "doesn't make sense on Windows")
def test_missing_fd_transfer(self):
# Check that exception is raised when received data is not
# accompanied by a file descriptor in ancillary data.
if self.TYPE != 'processes':
self.skipTest("only makes sense with processes")
conn, child_conn = self.Pipe(duplex=True)
p = self.Process(target=self._send_data_without_fd, args=(child_conn,))
p.daemon = True
p.start()
self.assertRaises(RuntimeError, reduction.recv_handle, conn)
p.join()
class _TestListenerClient(BaseTestCase):
ALLOWED_TYPES = ('processes', 'threads')
@classmethod
def _test(cls, address):
conn = cls.connection.Client(address)
conn.send('hello')
conn.close()
def test_listener_client(self):
for family in self.connection.families:
l = self.connection.Listener(family=family)
p = self.Process(target=self._test, args=(l.address,))
p.daemon = True
p.start()
conn = l.accept()
self.assertEqual(conn.recv(), 'hello')
p.join()
l.close()
def test_issue14725(self):
l = self.connection.Listener()
p = self.Process(target=self._test, args=(l.address,))
p.daemon = True
p.start()
time.sleep(1)
        # On Windows the client process should by now have connected,
        # written its data and closed the pipe handle.  This causes
        # ConnectNamedPipe() to fail with ERROR_NO_DATA.  See Issue
        # 14725.
conn = l.accept()
self.assertEqual(conn.recv(), 'hello')
conn.close()
p.join()
l.close()
#
# Test of sending connection and socket objects between processes
#
"""
class _TestPicklingConnections(BaseTestCase):
ALLOWED_TYPES = ('processes',)
def _listener(self, conn, families):
for fam in families:
l = self.connection.Listener(family=fam)
conn.send(l.address)
new_conn = l.accept()
conn.send(new_conn)
if self.TYPE == 'processes':
l = socket.socket()
l.bind(('localhost', 0))
conn.send(l.getsockname())
l.listen(1)
new_conn, addr = l.accept()
conn.send(new_conn)
conn.recv()
def _remote(self, conn):
for (address, msg) in iter(conn.recv, None):
client = self.connection.Client(address)
client.send(msg.upper())
client.close()
if self.TYPE == 'processes':
address, msg = conn.recv()
client = socket.socket()
client.connect(address)
client.sendall(msg.upper())
client.close()
conn.close()
def test_pickling(self):
try:
multiprocessing.allow_connection_pickling()
except ImportError:
return
families = self.connection.families
lconn, lconn0 = self.Pipe()
lp = self.Process(target=self._listener, args=(lconn0, families))
lp.daemon = True
lp.start()
lconn0.close()
rconn, rconn0 = self.Pipe()
rp = self.Process(target=self._remote, args=(rconn0,))
rp.daemon = True
rp.start()
rconn0.close()
for fam in families:
msg = ('This connection uses family %s' % fam).encode('ascii')
address = lconn.recv()
rconn.send((address, msg))
new_conn = lconn.recv()
self.assertEqual(new_conn.recv(), msg.upper())
rconn.send(None)
if self.TYPE == 'processes':
msg = latin('This connection uses a normal socket')
address = lconn.recv()
rconn.send((address, msg))
if hasattr(socket, 'fromfd'):
new_conn = lconn.recv()
self.assertEqual(new_conn.recv(100), msg.upper())
else:
# XXX On Windows with Py2.6 need to backport fromfd()
discard = lconn.recv_bytes()
lconn.send(None)
rconn.close()
lconn.close()
lp.join()
rp.join()
"""
#
#
#
class _TestHeap(BaseTestCase):
ALLOWED_TYPES = ('processes',)
def test_heap(self):
iterations = 5000
maxblocks = 50
blocks = []
# create and destroy lots of blocks of different sizes
for i in xrange(iterations):
size = int(random.lognormvariate(0, 1) * 1000)
b = multiprocessing.heap.BufferWrapper(size)
blocks.append(b)
if len(blocks) > maxblocks:
i = random.randrange(maxblocks)
del blocks[i]
# get the heap object
heap = multiprocessing.heap.BufferWrapper._heap
# verify the state of the heap
all = []
occupied = 0
heap._lock.acquire()
self.addCleanup(heap._lock.release)
for L in heap._len_to_seq.values():
for arena, start, stop in L:
all.append((heap._arenas.index(arena), start, stop,
stop-start, 'free'))
for arena, start, stop in heap._allocated_blocks:
all.append((heap._arenas.index(arena), start, stop,
stop-start, 'occupied'))
occupied += (stop-start)
all.sort()
for i in range(len(all)-1):
(arena, start, stop) = all[i][:3]
(narena, nstart, nstop) = all[i+1][:3]
self.assertTrue((arena != narena and nstart == 0) or
(stop == nstart))
def test_free_from_gc(self):
# Check that freeing of blocks by the garbage collector doesn't deadlock
# (issue #12352).
# Make sure the GC is enabled, and set lower collection thresholds to
# make collections more frequent (and increase the probability of
# deadlock).
if not gc.isenabled():
gc.enable()
self.addCleanup(gc.disable)
thresholds = gc.get_threshold()
self.addCleanup(gc.set_threshold, *thresholds)
gc.set_threshold(10)
# perform numerous block allocations, with cyclic references to make
# sure objects are collected asynchronously by the gc
for i in range(5000):
a = multiprocessing.heap.BufferWrapper(1)
b = multiprocessing.heap.BufferWrapper(1)
# circular references
a.buddy = b
b.buddy = a
#
#
#
class _Foo(Structure):
_fields_ = [
('x', c_int),
('y', c_double)
]
class _TestSharedCTypes(BaseTestCase):
ALLOWED_TYPES = ('processes',)
def setUp(self):
if not HAS_SHAREDCTYPES:
self.skipTest("requires multiprocessing.sharedctypes")
@classmethod
def _double(cls, x, y, foo, arr, string):
x.value *= 2
y.value *= 2
foo.x *= 2
foo.y *= 2
string.value *= 2
for i in range(len(arr)):
arr[i] *= 2
def test_sharedctypes(self, lock=False):
x = Value('i', 7, lock=lock)
y = Value(c_double, 1.0/3.0, lock=lock)
foo = Value(_Foo, 3, 2, lock=lock)
arr = self.Array('d', range(10), lock=lock)
string = self.Array('c', 20, lock=lock)
string.value = latin('hello')
p = self.Process(target=self._double, args=(x, y, foo, arr, string))
p.daemon = True
p.start()
p.join()
self.assertEqual(x.value, 14)
self.assertAlmostEqual(y.value, 2.0/3.0)
self.assertEqual(foo.x, 6)
self.assertAlmostEqual(foo.y, 4.0)
for i in range(10):
self.assertAlmostEqual(arr[i], i*2)
self.assertEqual(string.value, latin('hellohello'))
def test_synchronize(self):
self.test_sharedctypes(lock=True)
def test_copy(self):
foo = _Foo(2, 5.0)
bar = copy(foo)
foo.x = 0
foo.y = 0
self.assertEqual(bar.x, 2)
self.assertAlmostEqual(bar.y, 5.0)
#
#
#
class _TestFinalize(BaseTestCase):
ALLOWED_TYPES = ('processes',)
def setUp(self):
self.registry_backup = util._finalizer_registry.copy()
util._finalizer_registry.clear()
def tearDown(self):
self.assertFalse(util._finalizer_registry)
util._finalizer_registry.update(self.registry_backup)
@classmethod
def _test_finalize(cls, conn):
class Foo(object):
pass
a = Foo()
util.Finalize(a, conn.send, args=('a',))
del a # triggers callback for a
b = Foo()
close_b = util.Finalize(b, conn.send, args=('b',))
close_b() # triggers callback for b
close_b() # does nothing because callback has already been called
del b # does nothing because callback has already been called
c = Foo()
util.Finalize(c, conn.send, args=('c',))
d10 = Foo()
util.Finalize(d10, conn.send, args=('d10',), exitpriority=1)
d01 = Foo()
util.Finalize(d01, conn.send, args=('d01',), exitpriority=0)
d02 = Foo()
util.Finalize(d02, conn.send, args=('d02',), exitpriority=0)
d03 = Foo()
util.Finalize(d03, conn.send, args=('d03',), exitpriority=0)
util.Finalize(None, conn.send, args=('e',), exitpriority=-10)
util.Finalize(None, conn.send, args=('STOP',), exitpriority=-100)
# call multiprocessing's cleanup function then exit process without
# garbage collecting locals
util._exit_function()
conn.close()
os._exit(0)
def test_finalize(self):
conn, child_conn = self.Pipe()
p = self.Process(target=self._test_finalize, args=(child_conn,))
p.daemon = True
p.start()
p.join()
result = [obj for obj in iter(conn.recv, 'STOP')]
self.assertEqual(result, ['a', 'b', 'd10', 'd03', 'd02', 'd01', 'e'])
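    # Reading the expected order above: 'a' and 'b' come first because their
    # callbacks already fired (del a / close_b()) before _exit_function() ran;
    # the remaining registered finalizers then run highest exitpriority first
    # (d10 before the priority-0 group), equal priorities in reverse creation
    # order (d03, d02, d01), then 'e' at priority -10.  'c' never appears
    # because its finalizer has no exitpriority and the object is still alive
    # when the process exits via os._exit().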
def test_thread_safety(self):
# bpo-24484: _run_finalizers() should be thread-safe
def cb():
pass
class Foo(object):
def __init__(self):
self.ref = self # create reference cycle
# insert finalizer at random key
util.Finalize(self, cb, exitpriority=random.randint(1, 100))
finish = False
exc = []
def run_finalizers():
while not finish:
time.sleep(random.random() * 1e-1)
try:
# A GC run will eventually happen during this,
# collecting stale Foo's and mutating the registry
util._run_finalizers()
except Exception as e:
exc.append(e)
def make_finalizers():
d = {}
while not finish:
try:
# Old Foo's get gradually replaced and later
# collected by the GC (because of the cyclic ref)
d[random.getrandbits(5)] = {Foo() for i in range(10)}
except Exception as e:
exc.append(e)
d.clear()
old_interval = sys.getcheckinterval()
old_threshold = gc.get_threshold()
try:
sys.setcheckinterval(10)
gc.set_threshold(5, 5, 5)
threads = [threading.Thread(target=run_finalizers),
threading.Thread(target=make_finalizers)]
with test_support.start_threads(threads):
time.sleep(4.0) # Wait a bit to trigger race condition
finish = True
if exc:
raise exc[0]
finally:
sys.setcheckinterval(old_interval)
gc.set_threshold(*old_threshold)
gc.collect() # Collect remaining Foo's
#
# Test that from ... import * works for each module
#
class _TestImportStar(BaseTestCase):
ALLOWED_TYPES = ('processes',)
def test_import(self):
modules = [
'multiprocessing', 'multiprocessing.connection',
'multiprocessing.heap', 'multiprocessing.managers',
'multiprocessing.pool', 'multiprocessing.process',
'multiprocessing.synchronize', 'multiprocessing.util'
]
if HAS_REDUCTION:
modules.append('multiprocessing.reduction')
if c_int is not None:
# This module requires _ctypes
modules.append('multiprocessing.sharedctypes')
for name in modules:
__import__(name)
mod = sys.modules[name]
for attr in getattr(mod, '__all__', ()):
self.assertTrue(
hasattr(mod, attr),
'%r does not have attribute %r' % (mod, attr)
)
#
# Quick test that logging works -- does not test logging output
#
class _TestLogging(BaseTestCase):
ALLOWED_TYPES = ('processes',)
def test_enable_logging(self):
logger = multiprocessing.get_logger()
logger.setLevel(util.SUBWARNING)
self.assertTrue(logger is not None)
logger.debug('this will not be printed')
logger.info('nor will this')
logger.setLevel(LOG_LEVEL)
@classmethod
def _test_level(cls, conn):
logger = multiprocessing.get_logger()
conn.send(logger.getEffectiveLevel())
def test_level(self):
LEVEL1 = 32
LEVEL2 = 37
logger = multiprocessing.get_logger()
root_logger = logging.getLogger()
root_level = root_logger.level
reader, writer = multiprocessing.Pipe(duplex=False)
logger.setLevel(LEVEL1)
p = self.Process(target=self._test_level, args=(writer,))
p.daemon = True
p.start()
self.assertEqual(LEVEL1, reader.recv())
logger.setLevel(logging.NOTSET)
root_logger.setLevel(LEVEL2)
p = self.Process(target=self._test_level, args=(writer,))
p.daemon = True
p.start()
self.assertEqual(LEVEL2, reader.recv())
root_logger.setLevel(root_level)
logger.setLevel(level=LOG_LEVEL)
# class _TestLoggingProcessName(BaseTestCase):
#
# def handle(self, record):
# assert record.processName == multiprocessing.current_process().name
# self.__handled = True
#
# def test_logging(self):
# handler = logging.Handler()
# handler.handle = self.handle
# self.__handled = False
# # Bypass getLogger() and side-effects
# logger = logging.getLoggerClass()(
# 'multiprocessing.test.TestLoggingProcessName')
# logger.addHandler(handler)
# logger.propagate = False
#
# logger.warn('foo')
# assert self.__handled
#
# Check that Process.join() retries if os.waitpid() fails with EINTR
#
class _TestPollEintr(BaseTestCase):
ALLOWED_TYPES = ('processes',)
@classmethod
def _killer(cls, pid):
time.sleep(0.5)
os.kill(pid, signal.SIGUSR1)
@unittest.skipUnless(hasattr(signal, 'SIGUSR1'), 'requires SIGUSR1')
def test_poll_eintr(self):
got_signal = [False]
def record(*args):
got_signal[0] = True
pid = os.getpid()
oldhandler = signal.signal(signal.SIGUSR1, record)
try:
killer = self.Process(target=self._killer, args=(pid,))
killer.start()
p = self.Process(target=time.sleep, args=(1,))
p.start()
p.join()
self.assertTrue(got_signal[0])
self.assertEqual(p.exitcode, 0)
killer.join()
finally:
signal.signal(signal.SIGUSR1, oldhandler)
#
# Test to verify handle verification, see issue 3321
#
class TestInvalidHandle(unittest.TestCase):
@unittest.skipIf(WIN32, "skipped on Windows")
def test_invalid_handles(self):
conn = _multiprocessing.Connection(44977608)
self.assertRaises(IOError, conn.poll)
self.assertRaises(IOError, _multiprocessing.Connection, -1)
#
# Functions used to create test cases from the base ones in this module
#
def get_attributes(Source, names):
d = {}
for name in names:
obj = getattr(Source, name)
if type(obj) == type(get_attributes):
obj = staticmethod(obj)
d[name] = obj
return d
def create_test_cases(Mixin, type):
result = {}
glob = globals()
Type = type.capitalize()
for name in glob.keys():
if name.startswith('_Test'):
base = glob[name]
if type in base.ALLOWED_TYPES:
newname = 'With' + Type + name[1:]
class Temp(base, unittest.TestCase, Mixin):
pass
result[newname] = Temp
Temp.__name__ = newname
Temp.__module__ = Mixin.__module__
return result
#
# Create test cases
#
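# create_test_cases() scans this module for classes whose names start with
# '_Test', and for each one whose ALLOWED_TYPES includes the requested type it
# mixes the class with unittest.TestCase and the given mixin, producing names
# like 'WithProcessesTestQueue'.  The three mixins below supply the Process
# class and synchronization primitives appropriate to each flavour.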
class ProcessesMixin(object):
TYPE = 'processes'
Process = multiprocessing.Process
locals().update(get_attributes(multiprocessing, (
'Queue', 'Lock', 'RLock', 'Semaphore', 'BoundedSemaphore',
'Condition', 'Event', 'Value', 'Array', 'RawValue',
'RawArray', 'current_process', 'active_children', 'Pipe',
'connection', 'JoinableQueue', 'Pool'
)))
testcases_processes = create_test_cases(ProcessesMixin, type='processes')
globals().update(testcases_processes)
class ManagerMixin(object):
TYPE = 'manager'
Process = multiprocessing.Process
manager = object.__new__(multiprocessing.managers.SyncManager)
locals().update(get_attributes(manager, (
'Queue', 'Lock', 'RLock', 'Semaphore', 'BoundedSemaphore',
'Condition', 'Event', 'Value', 'Array', 'list', 'dict',
'Namespace', 'JoinableQueue', 'Pool'
)))
testcases_manager = create_test_cases(ManagerMixin, type='manager')
globals().update(testcases_manager)
class ThreadsMixin(object):
TYPE = 'threads'
Process = multiprocessing.dummy.Process
locals().update(get_attributes(multiprocessing.dummy, (
'Queue', 'Lock', 'RLock', 'Semaphore', 'BoundedSemaphore',
'Condition', 'Event', 'Value', 'Array', 'current_process',
'active_children', 'Pipe', 'connection', 'dict', 'list',
'Namespace', 'JoinableQueue', 'Pool'
)))
testcases_threads = create_test_cases(ThreadsMixin, type='threads')
globals().update(testcases_threads)
class OtherTest(unittest.TestCase):
# TODO: add more tests for deliver/answer challenge.
def test_deliver_challenge_auth_failure(self):
class _FakeConnection(object):
def recv_bytes(self, size):
return b'something bogus'
def send_bytes(self, data):
pass
self.assertRaises(multiprocessing.AuthenticationError,
multiprocessing.connection.deliver_challenge,
_FakeConnection(), b'abc')
def test_answer_challenge_auth_failure(self):
class _FakeConnection(object):
def __init__(self):
self.count = 0
def recv_bytes(self, size):
self.count += 1
if self.count == 1:
return multiprocessing.connection.CHALLENGE
elif self.count == 2:
return b'something bogus'
return b''
def send_bytes(self, data):
pass
self.assertRaises(multiprocessing.AuthenticationError,
multiprocessing.connection.answer_challenge,
_FakeConnection(), b'abc')
#
# Test Manager.start()/Pool.__init__() initializer feature - see issue 5585
#
def initializer(ns):
ns.test += 1
class TestInitializers(unittest.TestCase):
def setUp(self):
self.mgr = multiprocessing.Manager()
self.ns = self.mgr.Namespace()
self.ns.test = 0
def tearDown(self):
self.mgr.shutdown()
def test_manager_initializer(self):
m = multiprocessing.managers.SyncManager()
self.assertRaises(TypeError, m.start, 1)
m.start(initializer, (self.ns,))
self.assertEqual(self.ns.test, 1)
m.shutdown()
def test_pool_initializer(self):
self.assertRaises(TypeError, multiprocessing.Pool, initializer=1)
p = multiprocessing.Pool(1, initializer, (self.ns,))
p.close()
p.join()
self.assertEqual(self.ns.test, 1)
#
# Issue 5155, 5313, 5331: Test process in processes
# Verifies os.close(sys.stdin.fileno) vs. sys.stdin.close() behavior
#
def _this_sub_process(q):
try:
item = q.get(block=False)
except Queue.Empty:
pass
def _test_process(q):
queue = multiprocessing.Queue()
subProc = multiprocessing.Process(target=_this_sub_process, args=(queue,))
subProc.daemon = True
subProc.start()
subProc.join()
def _afunc(x):
return x*x
def pool_in_process():
pool = multiprocessing.Pool(processes=4)
x = pool.map(_afunc, [1, 2, 3, 4, 5, 6, 7])
class _file_like(object):
def __init__(self, delegate):
self._delegate = delegate
self._pid = None
@property
def cache(self):
pid = os.getpid()
# There are no race conditions since fork keeps only the running thread
if pid != self._pid:
self._pid = pid
self._cache = []
return self._cache
def write(self, data):
self.cache.append(data)
def flush(self):
self._delegate.write(''.join(self.cache))
self._cache = []
class TestStdinBadfiledescriptor(unittest.TestCase):
def test_queue_in_process(self):
queue = multiprocessing.Queue()
proc = multiprocessing.Process(target=_test_process, args=(queue,))
proc.start()
proc.join()
def test_pool_in_process(self):
p = multiprocessing.Process(target=pool_in_process)
p.start()
p.join()
def test_flushing(self):
sio = StringIO()
flike = _file_like(sio)
flike.write('foo')
proc = multiprocessing.Process(target=lambda: flike.flush())
flike.flush()
assert sio.getvalue() == 'foo'
class TestWait(unittest.TestCase):
@classmethod
def _child_test_wait(cls, w, slow):
for i in range(10):
if slow:
time.sleep(random.random()*0.1)
w.send((i, os.getpid()))
w.close()
def test_wait(self, slow=False):
from multiprocessing import Pipe, Process
from multiprocessing.connection import wait
readers = []
procs = []
messages = []
for i in range(4):
r, w = Pipe(duplex=False)
p = Process(target=self._child_test_wait, args=(w, slow))
p.daemon = True
p.start()
w.close()
readers.append(r)
procs.append(p)
while readers:
for r in wait(readers):
try:
msg = r.recv()
except EOFError:
readers.remove(r)
r.close()
else:
messages.append(msg)
messages.sort()
expected = sorted((i, p.pid) for i in range(10) for p in procs)
self.assertEqual(messages, expected)
@classmethod
def _child_test_wait_socket(cls, address, slow):
s = socket.socket()
s.connect(address)
for i in range(10):
if slow:
time.sleep(random.random()*0.1)
s.sendall(('%s\n' % i).encode('ascii'))
s.close()
def test_wait_socket(self, slow=False):
from multiprocessing import Process
from multiprocessing.connection import wait
l = socket.socket()
l.bind(('', 0))
l.listen(4)
addr = ('localhost', l.getsockname()[1])
readers = []
procs = []
dic = {}
for i in range(4):
p = Process(target=self._child_test_wait_socket, args=(addr, slow))
p.daemon = True
p.start()
procs.append(p)
for i in range(4):
r, _ = l.accept()
readers.append(r)
dic[r] = []
l.close()
while readers:
for r in wait(readers):
msg = r.recv(32)
if not msg:
readers.remove(r)
r.close()
else:
dic[r].append(msg)
expected = ''.join('%s\n' % i for i in range(10)).encode('ascii')
for v in dic.values():
self.assertEqual(b''.join(v), expected)
def test_wait_slow(self):
self.test_wait(True)
def test_wait_socket_slow(self):
        self.test_wait_socket(True)
def test_wait_timeout(self):
from multiprocessing.connection import wait
expected = 1
a, b = multiprocessing.Pipe()
start = time.time()
res = wait([a, b], 1)
delta = time.time() - start
self.assertEqual(res, [])
self.assertLess(delta, expected + 0.2)
self.assertGreater(delta, expected - 0.2)
b.send(None)
start = time.time()
res = wait([a, b], 1)
delta = time.time() - start
self.assertEqual(res, [a])
self.assertLess(delta, 0.2)
def test_wait_integer(self):
from multiprocessing.connection import wait
expected = 5
sorted_ = lambda l: sorted(l, key=lambda x: id(x))
a, b = multiprocessing.Pipe()
p = multiprocessing.Process(target=time.sleep, args=(expected,))
p.start()
self.assertIsInstance(p.sentinel, int)
start = time.time()
res = wait([a, p.sentinel, b], expected + 20)
delta = time.time() - start
self.assertEqual(res, [p.sentinel])
self.assertLess(delta, expected + 1)
self.assertGreater(delta, expected - 1)
a.send(None)
start = time.time()
res = wait([a, p.sentinel, b], 20)
delta = time.time() - start
self.assertEqual(sorted_(res), sorted_([p.sentinel, b]))
self.assertLess(delta, 0.2)
b.send(None)
start = time.time()
res = wait([a, p.sentinel, b], 20)
delta = time.time() - start
self.assertEqual(sorted_(res), sorted_([a, p.sentinel, b]))
self.assertLess(delta, 0.2)
p.join()
#
# Test interaction with socket timeouts - see Issue #6056
#
class TestTimeouts(unittest.TestCase):
@classmethod
def _test_timeout(cls, child, address):
time.sleep(1)
child.send(123)
child.close()
conn = multiprocessing.connection.Client(address)
conn.send(456)
conn.close()
def test_timeout(self):
old_timeout = socket.getdefaulttimeout()
try:
socket.setdefaulttimeout(0.1)
parent, child = multiprocessing.Pipe(duplex=True)
l = multiprocessing.connection.Listener(family='AF_INET')
p = multiprocessing.Process(target=self._test_timeout,
args=(child, l.address))
p.start()
child.close()
self.assertEqual(parent.recv(), 123)
parent.close()
conn = l.accept()
self.assertEqual(conn.recv(), 456)
conn.close()
l.close()
p.join(10)
finally:
socket.setdefaulttimeout(old_timeout)
#
# Test what happens with no "if __name__ == '__main__'"
#
class TestNoForkBomb(unittest.TestCase):
def test_noforkbomb(self):
name = os.path.join(os.path.dirname(__file__), 'mp_fork_bomb.py')
if WIN32:
rc, out, err = test.script_helper.assert_python_failure(name)
self.assertEqual(out, '')
self.assertIn('RuntimeError', err)
else:
rc, out, err = test.script_helper.assert_python_ok(name)
self.assertEqual(out.rstrip(), '123')
self.assertEqual(err, '')
#
# Issue 12098: check sys.flags of child matches that for parent
#
class TestFlags(unittest.TestCase):
@classmethod
def run_in_grandchild(cls, conn):
conn.send(tuple(sys.flags))
@classmethod
def run_in_child(cls):
import json
r, w = multiprocessing.Pipe(duplex=False)
p = multiprocessing.Process(target=cls.run_in_grandchild, args=(w,))
p.start()
grandchild_flags = r.recv()
p.join()
r.close()
w.close()
flags = (tuple(sys.flags), grandchild_flags)
print(json.dumps(flags))
@test_support.requires_unicode # XXX json needs unicode support
def test_flags(self):
import json, subprocess
# start child process using unusual flags
prog = ('from test.test_multiprocessing import TestFlags; ' +
'TestFlags.run_in_child()')
data = subprocess.check_output(
[sys.executable, '-E', '-B', '-O', '-c', prog])
child_flags, grandchild_flags = json.loads(data.decode('ascii'))
self.assertEqual(child_flags, grandchild_flags)
#
# Issue #17555: ForkAwareThreadLock
#
class TestForkAwareThreadLock(unittest.TestCase):
    # We recursively start processes.  Issue #17555 meant that the
    # after-fork registry would get duplicate entries for the same
    # lock.  The size of the registry at generation n was ~2**n.
@classmethod
def child(cls, n, conn):
if n > 1:
p = multiprocessing.Process(target=cls.child, args=(n-1, conn))
p.start()
p.join()
else:
conn.send(len(util._afterfork_registry))
conn.close()
def test_lock(self):
r, w = multiprocessing.Pipe(False)
l = util.ForkAwareThreadLock()
old_size = len(util._afterfork_registry)
p = multiprocessing.Process(target=self.child, args=(5, w))
p.start()
new_size = r.recv()
p.join()
self.assertLessEqual(new_size, old_size)
#
# Issue #17097: EINTR should be ignored by recv(), send(), accept() etc
#
class TestIgnoreEINTR(unittest.TestCase):
@classmethod
def _test_ignore(cls, conn):
def handler(signum, frame):
pass
signal.signal(signal.SIGUSR1, handler)
conn.send('ready')
x = conn.recv()
conn.send(x)
conn.send_bytes(b'x'*(1024*1024)) # sending 1 MB should block
@unittest.skipUnless(hasattr(signal, 'SIGUSR1'), 'requires SIGUSR1')
def test_ignore(self):
conn, child_conn = multiprocessing.Pipe()
try:
p = multiprocessing.Process(target=self._test_ignore,
args=(child_conn,))
p.daemon = True
p.start()
child_conn.close()
self.assertEqual(conn.recv(), 'ready')
time.sleep(0.1)
os.kill(p.pid, signal.SIGUSR1)
time.sleep(0.1)
conn.send(1234)
self.assertEqual(conn.recv(), 1234)
time.sleep(0.1)
os.kill(p.pid, signal.SIGUSR1)
self.assertEqual(conn.recv_bytes(), b'x'*(1024*1024))
time.sleep(0.1)
p.join()
finally:
conn.close()
@classmethod
def _test_ignore_listener(cls, conn):
def handler(signum, frame):
pass
signal.signal(signal.SIGUSR1, handler)
l = multiprocessing.connection.Listener()
conn.send(l.address)
a = l.accept()
a.send('welcome')
@unittest.skipUnless(hasattr(signal, 'SIGUSR1'), 'requires SIGUSR1')
def test_ignore_listener(self):
conn, child_conn = multiprocessing.Pipe()
try:
p = multiprocessing.Process(target=self._test_ignore_listener,
args=(child_conn,))
p.daemon = True
p.start()
child_conn.close()
address = conn.recv()
time.sleep(0.1)
os.kill(p.pid, signal.SIGUSR1)
time.sleep(0.1)
client = multiprocessing.connection.Client(address)
self.assertEqual(client.recv(), 'welcome')
p.join()
finally:
conn.close()
#
#
#
testcases_other = [OtherTest, TestInvalidHandle, TestInitializers,
TestStdinBadfiledescriptor, TestWait, TestTimeouts, TestNoForkBomb,
TestFlags, TestForkAwareThreadLock, TestIgnoreEINTR]
#
#
#
def test_main(run=None):
if sys.platform.startswith("linux"):
try:
lock = multiprocessing.RLock()
except OSError:
raise unittest.SkipTest("OSError raises on RLock creation, see issue 3111!")
check_enough_semaphores()
if run is None:
from test.test_support import run_unittest as run
util.get_temp_dir() # creates temp directory for use by all processes
multiprocessing.get_logger().setLevel(LOG_LEVEL)
ProcessesMixin.pool = multiprocessing.Pool(4)
ThreadsMixin.pool = multiprocessing.dummy.Pool(4)
ManagerMixin.manager.__init__()
ManagerMixin.manager.start()
ManagerMixin.pool = ManagerMixin.manager.Pool(4)
testcases = (
sorted(testcases_processes.values(), key=lambda tc:tc.__name__) +
sorted(testcases_threads.values(), key=lambda tc:tc.__name__) +
sorted(testcases_manager.values(), key=lambda tc:tc.__name__) +
testcases_other
)
loadTestsFromTestCase = unittest.defaultTestLoader.loadTestsFromTestCase
suite = unittest.TestSuite(loadTestsFromTestCase(tc) for tc in testcases)
# (ncoghlan): Whether or not sys.exc_clear is executed by the threading
# module during these tests is at least platform dependent and possibly
# non-deterministic on any given platform. So we don't mind if the listed
# warnings aren't actually raised.
with test_support.check_py3k_warnings(
(".+__(get|set)slice__ has been removed", DeprecationWarning),
(r"sys.exc_clear\(\) not supported", DeprecationWarning),
quiet=True):
run(suite)
ThreadsMixin.pool.terminate()
ProcessesMixin.pool.terminate()
ManagerMixin.pool.terminate()
ManagerMixin.manager.shutdown()
del ProcessesMixin.pool, ThreadsMixin.pool, ManagerMixin.pool
def main():
test_main(unittest.TextTestRunner(verbosity=2).run)
if __name__ == '__main__':
main()
|
url_info_finder.py
|
#This plugin searches for URL links in each message and sends a message with
#info about each link.
import re
import urllib2
from cookielib import CookieJar
import lxml.html
import simplejson
import logging
import settings
import threading
class url_info_finder():
def url_info_finder(self, main_ref, msg_info):
        #we block private msgs, to prevent flooding/excessive API usage etc.
if msg_info["channel"] == settings.NICK:
return None
#for each message we start a new thread, because this can be pretty slow (sometimes very slow with dns lookups etc.)
thread = threading.Thread(target = self.start_thread, args=(main_ref, msg_info))
thread.start()
def start_thread(self, main_ref, msg_info):
#find all url links in the message, and send info about them, in one formatted string
info_string = ""
url_info_list = self.parse_msg(msg_info["message"])
for i, url_info in enumerate(url_info_list):
info_string = info_string + url_info
if i != len(url_info_list)-1:
info_string = info_string + "\x0F ... "
if info_string:
main_ref.send_msg(msg_info["channel"], info_string[0:450])
    def bytestring(self, n):
        tiers = ['B', 'KB', 'MB', 'GB']
        i = 0
        #stop at the last tier so the index below can never run past the list
        while n >= 1024 and i < len(tiers) - 1:
            n = n / 1024.0
            i += 1
        return "{:.0f}".format(n) + tiers[i]
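    # Quick illustrative examples for bytestring() (comments only, nothing executed):
    #   bytestring(500)          -> "500B"
    #   bytestring(2048)         -> "2KB"
    #   bytestring(5 * 1024**3)  -> "5GB"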
    def get_url_info(self, url, ignore_redirects=False):
        #add http:// to links that don't already start with a scheme (e.g. www.-only links)
        if not url.startswith(("http://", "https://")):
            url = "http://" + url
#open url
try:
cj = CookieJar()
opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cj))
opener.addheaders = [('User-agent', 'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:12.0) Gecko/20100101 Firefox/12.0')]
source = opener.open(url)
logging.debug("url open:%s", url)
except:
logging.debug("url_finder error: could not open site - %s", url)
return None
redirect_warning = ""
if source.geturl() != url and ignore_redirects is False:
redirect_warning = "|REDIRECT| "
#remove the "/" ending, if any
url = url.rstrip("/")
#looking into the header
try:
header_content_type = source.info().getheader("Content-type")
except:
logging.debug("url_finder error: header - invalid. url: %s", url)
source.close()
return None
if not header_content_type:
detected_file_header = source.read(4)
source.close()
return "!this webserver might be malicious! detected content-type: " + detected_file_header[1:4]
if "html" in header_content_type: #resolve normal text type site - get the "title"
            #if it's a normal text/html page we just grab the title, except for youtube videos and github repos which get richer info
#needs cleaning up!
if ".youtube." in source.geturl():
yt = self.yt_info(source.geturl())
if yt is None:
return_string = self.get_title(source, url)
else:
source.close()
return yt
elif "github.com" in url:
git = self.github_info(url)
if git is None:
return_string = self.get_title(source, url)
else:
source.close()
return git
else:
return_string = self.get_title(source, url)
if return_string is not None:
return_string = (return_string.lstrip()).rstrip()
source.close()
return redirect_warning + return_string
else:
source.close()
return None
        else: #other types, just show the content type and content length (if any!)
return_string = source.info().getheader("Content-type")
if source.info().getheader("Content-Length") is not None:
return_string = return_string + " | " + str(self.bytestring(int(source.info().getheader("Content-Length"))))
#check for imgur
if "i.imgur.com" in url: #we check the title of the album
rex = '(.gif|.png|.jpeg|.img|.jpg|.bmp)\Z' #common image formats, search at end of string
search_res = re.search(rex, url)
if search_res: #only if it is formatted the way we expect (with one of the image formats at the end) (I should probably use the imgur api instead though)
new_url = url.rstrip(search_res.group())
img_title = self.get_url_info(new_url, True)
if img_title is not None:
return_string = (img_title.lstrip()).rstrip() + " | " + return_string
source.close()
return redirect_warning + return_string
def github_info(self, url):
result = re.search("(\.com)(/[^ /]+/[^ /]+$)", url)
if result is not None:
result = result.group(2)
api_url = "https://api.github.com/repos" + result
logging.debug("api url:%s", api_url)
try:
result = simplejson.load(urllib2.urlopen(api_url))
except:
logging.debug("url_finder error: github error, either urllib or simplejson fail")
return None
#make sure it's a dictionary, otherwise we might not be looking at a repo at all!
if not isinstance(result, dict):
return None
return_string = "|GITHUB| "
if "name" in result and result["name"]:
return_string = return_string + result["name"]
if "description" in result and result["description"]:
return_string = return_string + " - " + result["description"]
if "language" in result and result["language"]:
return_string = return_string + " | >" + result["language"]
return return_string
else:
return None
def yt_info(self, url):
yt_ID = re.search("(\?|\&)v=([a-zA-Z0-9_-]*)", url)
if yt_ID is None:
return None
yt_ID = yt_ID.group()
if "?v=" in yt_ID:
yt_ID = yt_ID.partition("?v=")[2]
elif "&v=" in yt_ID:
yt_ID = yt_ID.partition("&v=")[2]
yt_api_key = settings.yt_api_key
yt_string_start = "https://www.googleapis.com/youtube/v3/videos?id="
yt_string_end = "&part=snippet,statistics,contentDetails"
api_url = yt_string_start + yt_ID + "&key=" + yt_api_key + yt_string_end
logging.debug("api url:%s", api_url)
try:
result = simplejson.load(urllib2.urlopen(api_url))
except:
logging.debug("url_finder error: youtube error, either urllib or simplejson fail")
return None
if not result["items"]:
logging.debug("url_finder error: youtube error, no info on video")
return None
l = result["items"][0]
stats = l["statistics"]
details = l["contentDetails"]
snippet = l["snippet"]
title = snippet["title"]
duration = (details["duration"].replace("PT", "")).lower()
views = stats["viewCount"]
dislikes = stats["dislikeCount"]
likes = stats["likeCount"]
comments = stats["commentCount"]
return "|YOUTUBE| " + title + " | " + duration +" |" # additional info, not in use views: " + views +" | d: " + dislikes +" l: " + likes +" | comments: " + comments
def get_title(self, source, url):
#get the html
try:
            t = lxml.html.fromstring(source.read(32768)) #make sure it won't load more than that, because then we might run out of memory
except:
logging.debug("url_finder error: couldn't parse with lxml")
return None
try:
string = t.find(".//title").text
except:
logging.debug("url_finder error: didn't find title tags")
return None
return string
def find_urls(self, text):
        URL_REGEX = "((http://|https://|www.)\S+)|(\S+\.(com|([a-z][a-z]|biz|gov|info|mil|net|org|name|edu|coop|aero|museum|asia|int|xxx|jobs|travel))\S*)"
url_array = []
for url in re.findall(URL_REGEX, text):
if url[0]:
                url_array.append(url[0]) #if it starts with http, https or www
            elif url[2]:
                url_array.append(url[2]) #if it's another kind of link
return url_array
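    # Illustrative examples of what find_urls() returns (comments only, not executed):
    #   "see http://example.com/a and www.example.org" -> ['http://example.com/a', 'www.example.org']
    #   "plain example.com/page"                       -> ['example.com/page'] via the TLD fallback group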
def parse_msg(self, msg):
url_info = []
        #search the message for links; for each one found, build an info string
for url in self.find_urls(msg):
info = self.get_url_info(url)
            #we encode into utf-8 (info may also be None here, which the except catches)
            try:
                info = info.encode('utf-8')
            except:
                logging.debug("url_finder error: couldn't encode url info to utf-8")
                info = None
if info is not None:
                #add a bracket at the beginning and end
                info = "[" + info + "]"
                #the colour code for the message (green by default)
color = "\x033"
#if NSFW found in msg, mark it red
if re.search(r'(nsfw|NSFW)', msg) is not None:
color = "\x030,4"
#if NSFL found in msg, mark it other color
if re.search(r'(nsfl|NSFL)', msg) is not None:
color = "\x030,6"
#sanitizing the message
#remove newlines etc.
forbidden = ["\n", "\r", "\t", "\f", "\v"]
for i in forbidden:
info = info.replace(i, " ")
#remove any empty start
info = info.lstrip()
                #make sure it isn't longer than 150 characters
info = info[0:150]
info_message = '%s%s' % (color, info)
url_info.append(info_message)
return url_info
|
backups.py
|
# Copyright (C) 2018-2019 Amano Team <contact@amanoteam.ml>
# -*- coding: utf-8 -*-
#███╗ ███╗ █████╗ ███╗ ██╗██╗ ██████╗ ██████╗ ███╗ ███╗██╗ ██████╗
#████╗ ████║██╔══██╗████╗ ██║██║██╔════╝██╔═══██╗████╗ ████║██║██╔═══██╗
#██╔████╔██║███████║██╔██╗ ██║██║██║ ██║ ██║██╔████╔██║██║██║ ██║
#██║╚██╔╝██║██╔══██║██║╚██╗██║██║██║ ██║ ██║██║╚██╔╝██║██║██║ ██║
#██║ ╚═╝ ██║██║ ██║██║ ╚████║██║╚██████╗╚██████╔╝██║ ╚═╝ ██║██║╚██████╔╝
#╚═╝ ╚═╝╚═╝ ╚═╝╚═╝ ╚═══╝╚═╝ ╚═════╝ ╚═════╝ ╚═╝ ╚═╝╚═╝ ╚═════╝
# [+] @GorpoOrko 2020 - Telegram Bot and Personal Assistant [+]
# | TCXS Project Hacker Team - https://tcxsproject.com.br |
# | Telegram: @GorpoOrko Mail:gorpoorko@protonmail.com |
# [+] Github Gorpo Dev: https://github.com/gorpo [+]
import os
import time
import sqlite3
from datetime import datetime
from multiprocessing import Process

import dropbox
import schedule

from utils import backup_sources
from config import backups_chat, backup_hours, na_bot, token_dropbox
from config import bot, version, bot_username, git_repo, logs, sudoers
#function that performs the backups, run on the configured hourly schedule
def backup_func():
cstrftime = datetime.now().strftime('%d/%m/%Y - %H:%M:%S')
file = backup_sources('Backup_automatico_bot')
targetfile = f"/TCXS-Project-Bot-IA-With-Database/{file}"
d = dropbox.Dropbox(token_dropbox)
with open(file, "rb") as f:
meta = d.files_upload(f.read(), targetfile, mode=dropbox.files.WriteMode("overwrite"))
link = d.sharing_create_shared_link(targetfile)
url = link.url
print(f"Backup automatico concluido: {cstrftime}\nDownload: {url}")
na_bot.sendMessage(logs,f"Backup automatico concluido: {cstrftime}\nDownload: {url}")
os.remove(file)
file1 = backup_sources('Backup_bot')
na_bot.sendDocument(logs, open(file1, 'rb'), caption="📅 " + cstrftime)
os.remove(file1)
    #automatic verification system for banning members from the group
try:
conexao_sqlite = sqlite3.connect('bot_database.db')
conexao_sqlite.row_factory = sqlite3.Row
cursor_sqlite = conexao_sqlite.cursor()
hoje = datetime.now().strftime('%d/%m/%Y %H:%M:%S')
cursor_sqlite.execute("""SELECT * FROM permanencia; """)
resultados = cursor_sqlite.fetchall()
        for resultado in resultados:
            data_inicial = resultado['data_inicial']
            data_ban = resultado['data_final']
            id_doador = resultado['id_doador']
            doador = resultado['doador']
            dias = resultado['dias']
            aviso = resultado['data_aviso']
            id_grupo = resultado['id_grupo']
            #WARNING NOTICE FOR THE DONOR----:
#try:
# if hoje[0:2] == aviso[0:2]:
# na_bot.sendMessage(id_grupo,f"🤖 {doador} ***Falta uma semana para você grupo, caso ainda tenha interesse em continuar usando a loja faça uma doação, envie o comprovante aqui no grupo que um de nossos administradores irá colocar mas dias em sua permanencia.***\n`Usuário:` {doador}\n`Id_Usuário:` {id_doador}\n`Início:` {data_inicial}\n`Termino:` {data_ban}\n`Permanência:` {dias}",'markdown')
            #BAN THE USER IF THE END DATE IS EQUAL TO TODAY'S DATE
# if hoje[3:5] == data_ban[3:5]:
# na_bot.kickChatMember(str(id_grupo), id_doador)
# cursor_sqlite.execute(f"""DELETE FROM permanencia WHERE doador='{doador}'""")
# conexao_sqlite.commit()
# na_bot.sendMessage(str(id_grupo),f"🤖 ***Removido do grupo pois deu a sua permanência do grupo.***\n`Usuário:` {doador}\n`Id_Usuário:` {id_doador}\n`Início:` {data_inicial}\n`Termino:` {data_ban}\n`Permanência:` {dias}",'markdown')
# na_bot.unbanChatMember(str(id_grupo), id_doador)
#except:
# pass
except Exception as e:
print(e)
def backup_scheduler(target):
for hour in backup_hours:
schedule.every().day.at(hour).do(target)
while True:
schedule.run_pending()
time.sleep(5)
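# Note: backup_hours (imported from config) is assumed to be an iterable of
# "HH:MM" strings, e.g. ["03:00", "15:00"], which is the format that
# schedule.every().day.at() accepts.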
def backup_service():
p = Process(target=backup_scheduler, args=(backup_func,))
p.start()
|
wallet_multiwallet.py
|
#!/usr/bin/env python3
# Copyright (c) 2017-2020 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test multiwallet.
Verify that a mericad node can load multiple wallet files
"""
from decimal import Decimal
from threading import Thread
import os
import shutil
import stat
import time
from test_framework.authproxy import JSONRPCException
from test_framework.test_framework import MERICATestFramework
from test_framework.test_node import ErrorMatch
from test_framework.util import (
assert_equal,
assert_raises_rpc_error,
get_rpc_proxy,
)
got_loading_error = False
def test_load_unload(node, name):
global got_loading_error
for _ in range(10):
if got_loading_error:
return
try:
node.loadwallet(name)
node.unloadwallet(name)
except JSONRPCException as e:
if e.error['code'] == -4 and 'Wallet already being loading' in e.error['message']:
got_loading_error = True
return
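# The helper above hammers loadwallet/unloadwallet from several threads; the
# concurrent-loading test below only passes once at least one call races
# another and the node answers with error code -4 ("Wallet already being
# loading").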
class MultiWalletTest(MERICATestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 2
self.rpc_timeout = 120
self.extra_args = [["-nowallet"], []]
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
def add_options(self, parser):
parser.add_argument(
'--data_wallets_dir',
default=os.path.join(os.path.dirname(os.path.realpath(__file__)), 'data/wallets/'),
help='Test data with wallet directories (default: %(default)s)',
)
def run_test(self):
node = self.nodes[0]
data_dir = lambda *p: os.path.join(node.datadir, self.chain, *p)
wallet_dir = lambda *p: data_dir('wallets', *p)
wallet = lambda name: node.get_wallet_rpc(name)
def wallet_file(name):
if name == self.default_wallet_name:
return wallet_dir(self.default_wallet_name, self.wallet_data_filename)
if os.path.isdir(wallet_dir(name)):
return wallet_dir(name, "wallet.dat")
return wallet_dir(name)
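        # wallet_file() mirrors how the node resolves a wallet name: the
        # default wallet lives in its own subdirectory under the wallet dir,
        # a name that is a directory maps to <name>/wallet.dat, and anything
        # else is treated as a plain file path.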
assert_equal(self.nodes[0].listwalletdir(), { 'wallets': [{ 'name': self.default_wallet_name }] })
# check wallet.dat is created
self.stop_nodes()
assert_equal(os.path.isfile(wallet_dir(self.default_wallet_name, self.wallet_data_filename)), True)
# create symlink to verify wallet directory path can be referenced
# through symlink
os.mkdir(wallet_dir('w7'))
os.symlink('w7', wallet_dir('w7_symlink'))
os.symlink('..', wallet_dir('recursive_dir_symlink'))
os.mkdir(wallet_dir('self_walletdat_symlink'))
os.symlink('wallet.dat', wallet_dir('self_walletdat_symlink/wallet.dat'))
# rename wallet.dat to make sure plain wallet file paths (as opposed to
# directory paths) can be loaded
# create another dummy wallet for use in testing backups later
self.start_node(0)
node.createwallet("empty")
node.createwallet("plain")
node.createwallet("created")
self.stop_nodes()
empty_wallet = os.path.join(self.options.tmpdir, 'empty.dat')
os.rename(wallet_file("empty"), empty_wallet)
shutil.rmtree(wallet_dir("empty"))
empty_created_wallet = os.path.join(self.options.tmpdir, 'empty.created.dat')
os.rename(wallet_dir("created", self.wallet_data_filename), empty_created_wallet)
shutil.rmtree(wallet_dir("created"))
os.rename(wallet_file("plain"), wallet_dir("w8"))
shutil.rmtree(wallet_dir("plain"))
# restart node with a mix of wallet names:
# w1, w2, w3 - to verify new wallets created when non-existing paths specified
# w - to verify wallet name matching works when one wallet path is prefix of another
# sub/w5 - to verify relative wallet path is created correctly
# extern/w6 - to verify absolute wallet path is created correctly
# w7_symlink - to verify symlinked wallet path is initialized correctly
# w8 - to verify existing wallet file is loaded correctly. Not tested for SQLite wallets as this is a deprecated BDB behavior.
# '' - to verify default wallet file is created correctly
to_create = ['w1', 'w2', 'w3', 'w', 'sub/w5', 'w7_symlink']
in_wallet_dir = [w.replace('/', os.path.sep) for w in to_create] # Wallets in the wallet dir
        in_wallet_dir.append('w7') # w7 is not loaded or created, but will be listed by listwalletdir because of the w7_symlink entry
to_create.append(os.path.join(self.options.tmpdir, 'extern/w6')) # External, not in the wallet dir, so we need to avoid adding it to in_wallet_dir
to_load = [self.default_wallet_name]
if not self.options.descriptors:
to_load.append('w8')
wallet_names = to_create + to_load # Wallet names loaded in the wallet
in_wallet_dir += to_load # The loaded wallets are also in the wallet dir
self.start_node(0)
for wallet_name in to_create:
self.nodes[0].createwallet(wallet_name)
for wallet_name in to_load:
self.nodes[0].loadwallet(wallet_name)
os.mkdir(wallet_dir('no_access'))
os.chmod(wallet_dir('no_access'), 0)
try:
with self.nodes[0].assert_debug_log(expected_msgs=['Too many levels of symbolic links', 'Error scanning']):
walletlist = self.nodes[0].listwalletdir()['wallets']
finally:
# Need to ensure access is restored for cleanup
os.chmod(wallet_dir('no_access'), stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR)
assert_equal(sorted(map(lambda w: w['name'], walletlist)), sorted(in_wallet_dir))
assert_equal(set(node.listwallets()), set(wallet_names))
# should raise rpc error if wallet path can't be created
err_code = -4 if self.options.descriptors else -1
assert_raises_rpc_error(err_code, "boost::filesystem::create_directory:", self.nodes[0].createwallet, "w8/bad")
# check that all requested wallets were created
self.stop_node(0)
for wallet_name in wallet_names:
assert_equal(os.path.isfile(wallet_file(wallet_name)), True)
self.nodes[0].assert_start_raises_init_error(['-walletdir=wallets'], 'Error: Specified -walletdir "wallets" does not exist')
self.nodes[0].assert_start_raises_init_error(['-walletdir=wallets'], 'Error: Specified -walletdir "wallets" is a relative path', cwd=data_dir())
self.nodes[0].assert_start_raises_init_error(['-walletdir=debug.log'], 'Error: Specified -walletdir "debug.log" is not a directory', cwd=data_dir())
self.start_node(0, ['-wallet=w1', '-wallet=w1'])
self.stop_node(0, 'Warning: Ignoring duplicate -wallet w1.')
if not self.options.descriptors:
# Only BDB doesn't open duplicate wallet files. SQLite does not have this limitation. While this may be desired in the future, it is not necessary
# should not initialize if one wallet is a copy of another
shutil.copyfile(wallet_dir('w8'), wallet_dir('w8_copy'))
in_wallet_dir.append('w8_copy')
exp_stderr = r"BerkeleyDatabase: Can't open database w8_copy \(duplicates fileid \w+ from w8\)"
self.nodes[0].assert_start_raises_init_error(['-wallet=w8', '-wallet=w8_copy'], exp_stderr, match=ErrorMatch.PARTIAL_REGEX)
# should not initialize if wallet file is a symlink
os.symlink('w8', wallet_dir('w8_symlink'))
self.nodes[0].assert_start_raises_init_error(['-wallet=w8_symlink'], r'Error: Invalid -wallet path \'w8_symlink\'\. .*', match=ErrorMatch.FULL_REGEX)
# should not initialize if the specified walletdir does not exist
self.nodes[0].assert_start_raises_init_error(['-walletdir=bad'], 'Error: Specified -walletdir "bad" does not exist')
# should not initialize if the specified walletdir is not a directory
not_a_dir = wallet_dir('notadir')
open(not_a_dir, 'a', encoding="utf8").close()
self.nodes[0].assert_start_raises_init_error(['-walletdir=' + not_a_dir], 'Error: Specified -walletdir "' + not_a_dir + '" is not a directory')
self.log.info("Do not allow -upgradewallet with multiwallet")
self.nodes[0].assert_start_raises_init_error(['-upgradewallet'], "Error: Error parsing command line arguments: Invalid parameter -upgradewallet")
# if wallets/ doesn't exist, datadir should be the default wallet dir
wallet_dir2 = data_dir('walletdir')
os.rename(wallet_dir(), wallet_dir2)
self.start_node(0)
self.nodes[0].createwallet("w4")
self.nodes[0].createwallet("w5")
assert_equal(set(node.listwallets()), {"w4", "w5"})
w5 = wallet("w5")
node.generatetoaddress(nblocks=1, address=w5.getnewaddress())
# now if wallets/ exists again, but the rootdir is specified as the walletdir, w4 and w5 should still be loaded
os.rename(wallet_dir2, wallet_dir())
self.restart_node(0, ['-nowallet', '-walletdir=' + data_dir()])
self.nodes[0].loadwallet("w4")
self.nodes[0].loadwallet("w5")
assert_equal(set(node.listwallets()), {"w4", "w5"})
w5 = wallet("w5")
w5_info = w5.getwalletinfo()
assert_equal(w5_info['immature_balance'], 50)
competing_wallet_dir = os.path.join(self.options.tmpdir, 'competing_walletdir')
os.mkdir(competing_wallet_dir)
self.restart_node(0, ['-nowallet', '-walletdir=' + competing_wallet_dir])
self.nodes[0].createwallet(self.default_wallet_name)
if self.options.descriptors:
exp_stderr = r"Error: SQLiteDatabase: Unable to obtain an exclusive lock on the database, is it being used by another mericad?"
else:
exp_stderr = r"Error: Error initializing wallet database environment \"\S+competing_walletdir\S*\"!"
self.nodes[1].assert_start_raises_init_error(['-walletdir=' + competing_wallet_dir], exp_stderr, match=ErrorMatch.PARTIAL_REGEX)
self.restart_node(0)
for wallet_name in wallet_names:
self.nodes[0].loadwallet(wallet_name)
assert_equal(sorted(map(lambda w: w['name'], self.nodes[0].listwalletdir()['wallets'])), sorted(in_wallet_dir))
wallets = [wallet(w) for w in wallet_names]
wallet_bad = wallet("bad")
# check wallet names and balances
node.generatetoaddress(nblocks=1, address=wallets[0].getnewaddress())
for wallet_name, wallet in zip(wallet_names, wallets):
info = wallet.getwalletinfo()
assert_equal(info['immature_balance'], 50 if wallet is wallets[0] else 0)
assert_equal(info['walletname'], wallet_name)
# accessing invalid wallet fails
assert_raises_rpc_error(-18, "Requested wallet does not exist or is not loaded", wallet_bad.getwalletinfo)
# accessing wallet RPC without using wallet endpoint fails
assert_raises_rpc_error(-19, "Wallet file not specified", node.getwalletinfo)
w1, w2, w3, w4, *_ = wallets
node.generatetoaddress(nblocks=101, address=w1.getnewaddress())
assert_equal(w1.getbalance(), 100)
assert_equal(w2.getbalance(), 0)
assert_equal(w3.getbalance(), 0)
assert_equal(w4.getbalance(), 0)
w1.sendtoaddress(w2.getnewaddress(), 1)
w1.sendtoaddress(w3.getnewaddress(), 2)
w1.sendtoaddress(w4.getnewaddress(), 3)
node.generatetoaddress(nblocks=1, address=w1.getnewaddress())
assert_equal(w2.getbalance(), 1)
assert_equal(w3.getbalance(), 2)
assert_equal(w4.getbalance(), 3)
batch = w1.batch([w1.getblockchaininfo.get_request(), w1.getwalletinfo.get_request()])
assert_equal(batch[0]["result"]["chain"], self.chain)
assert_equal(batch[1]["result"]["walletname"], "w1")
self.log.info('Check for per-wallet settxfee call')
assert_equal(w1.getwalletinfo()['paytxfee'], 0)
assert_equal(w2.getwalletinfo()['paytxfee'], 0)
w2.settxfee(0.001)
assert_equal(w1.getwalletinfo()['paytxfee'], 0)
assert_equal(w2.getwalletinfo()['paytxfee'], Decimal('0.00100000'))
self.log.info("Test dynamic wallet loading")
self.restart_node(0, ['-nowallet'])
assert_equal(node.listwallets(), [])
assert_raises_rpc_error(-18, "No wallet is loaded. Load a wallet using loadwallet or create a new one with createwallet. (Note: A default wallet is no longer automatically created)", node.getwalletinfo)
self.log.info("Load first wallet")
loadwallet_name = node.loadwallet(wallet_names[0])
assert_equal(loadwallet_name['name'], wallet_names[0])
assert_equal(node.listwallets(), wallet_names[0:1])
node.getwalletinfo()
w1 = node.get_wallet_rpc(wallet_names[0])
w1.getwalletinfo()
self.log.info("Load second wallet")
loadwallet_name = node.loadwallet(wallet_names[1])
assert_equal(loadwallet_name['name'], wallet_names[1])
assert_equal(node.listwallets(), wallet_names[0:2])
assert_raises_rpc_error(-19, "Wallet file not specified", node.getwalletinfo)
w2 = node.get_wallet_rpc(wallet_names[1])
w2.getwalletinfo()
self.log.info("Concurrent wallet loading")
threads = []
for _ in range(3):
n = node.cli if self.options.usecli else get_rpc_proxy(node.url, 1, timeout=600, coveragedir=node.coverage_dir)
t = Thread(target=test_load_unload, args=(n, wallet_names[2], ))
t.start()
threads.append(t)
for t in threads:
t.join()
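        # At least one of the concurrent loadwallet calls must have hit a loading
        # conflict; test_load_unload records this in the module-level got_loading_error flag.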
global got_loading_error
assert_equal(got_loading_error, True)
self.log.info("Load remaining wallets")
for wallet_name in wallet_names[2:]:
loadwallet_name = self.nodes[0].loadwallet(wallet_name)
assert_equal(loadwallet_name['name'], wallet_name)
assert_equal(set(self.nodes[0].listwallets()), set(wallet_names))
# Fail to load if wallet doesn't exist
path = os.path.join(self.options.tmpdir, "node0", "regtest", "wallets", "wallets")
assert_raises_rpc_error(-18, "Wallet file verification failed. Failed to load database path '{}'. Path does not exist.".format(path), self.nodes[0].loadwallet, 'wallets')
# Fail to load duplicate wallets
path = os.path.join(self.options.tmpdir, "node0", "regtest", "wallets", "w1", "wallet.dat")
if self.options.descriptors:
assert_raises_rpc_error(-4, "Wallet file verification failed. SQLiteDatabase: Unable to obtain an exclusive lock on the database, is it being used by another mericad?", self.nodes[0].loadwallet, wallet_names[0])
else:
assert_raises_rpc_error(-4, "Wallet file verification failed. Refusing to load database. Data file '{}' is already loaded.".format(path), self.nodes[0].loadwallet, wallet_names[0])
# This tests the default wallet that BDB makes, so SQLite wallet doesn't need to test this
# Fail to load duplicate wallets by different ways (directory and filepath)
path = os.path.join(self.options.tmpdir, "node0", "regtest", "wallets", "wallet.dat")
assert_raises_rpc_error(-4, "Wallet file verification failed. Refusing to load database. Data file '{}' is already loaded.".format(path), self.nodes[0].loadwallet, 'wallet.dat')
# Only BDB doesn't open duplicate wallet files. SQLite does not have this limitation. While this may be desired in the future, it is not necessary
# Fail to load if one wallet is a copy of another
assert_raises_rpc_error(-4, "BerkeleyDatabase: Can't open database w8_copy (duplicates fileid", self.nodes[0].loadwallet, 'w8_copy')
# Fail to load if one wallet is a copy of another, test this twice to make sure that we don't re-introduce #14304
assert_raises_rpc_error(-4, "BerkeleyDatabase: Can't open database w8_copy (duplicates fileid", self.nodes[0].loadwallet, 'w8_copy')
# Fail to load if wallet file is a symlink
assert_raises_rpc_error(-4, "Wallet file verification failed. Invalid -wallet path 'w8_symlink'", self.nodes[0].loadwallet, 'w8_symlink')
# Fail to load if a directory is specified that doesn't contain a wallet
os.mkdir(wallet_dir('empty_wallet_dir'))
path = os.path.join(self.options.tmpdir, "node0", "regtest", "wallets", "empty_wallet_dir")
assert_raises_rpc_error(-18, "Wallet file verification failed. Failed to load database path '{}'. Data is not in recognized format.".format(path), self.nodes[0].loadwallet, 'empty_wallet_dir')
self.log.info("Test dynamic wallet creation.")
# Fail to create a wallet if it already exists.
path = os.path.join(self.options.tmpdir, "node0", "regtest", "wallets", "w2")
assert_raises_rpc_error(-4, "Failed to create database path '{}'. Database already exists.".format(path), self.nodes[0].createwallet, 'w2')
# Successfully create a wallet with a new name
loadwallet_name = self.nodes[0].createwallet('w9')
in_wallet_dir.append('w9')
assert_equal(loadwallet_name['name'], 'w9')
w9 = node.get_wallet_rpc('w9')
assert_equal(w9.getwalletinfo()['walletname'], 'w9')
assert 'w9' in self.nodes[0].listwallets()
# Successfully create a wallet using a full path
new_wallet_dir = os.path.join(self.options.tmpdir, 'new_walletdir')
new_wallet_name = os.path.join(new_wallet_dir, 'w10')
loadwallet_name = self.nodes[0].createwallet(new_wallet_name)
assert_equal(loadwallet_name['name'], new_wallet_name)
w10 = node.get_wallet_rpc(new_wallet_name)
assert_equal(w10.getwalletinfo()['walletname'], new_wallet_name)
assert new_wallet_name in self.nodes[0].listwallets()
self.log.info("Test dynamic wallet unloading")
# Test `unloadwallet` errors
assert_raises_rpc_error(-1, "JSON value is not a string as expected", self.nodes[0].unloadwallet)
assert_raises_rpc_error(-18, "Requested wallet does not exist or is not loaded", self.nodes[0].unloadwallet, "dummy")
assert_raises_rpc_error(-18, "Requested wallet does not exist or is not loaded", node.get_wallet_rpc("dummy").unloadwallet)
        assert_raises_rpc_error(-8, "Both the RPC endpoint wallet and wallet_name parameter were provided (only one allowed)", w1.unloadwallet, "w2")
        assert_raises_rpc_error(-8, "Both the RPC endpoint wallet and wallet_name parameter were provided (only one allowed)", w1.unloadwallet, "w1")
# Successfully unload the specified wallet name
self.nodes[0].unloadwallet("w1")
assert 'w1' not in self.nodes[0].listwallets()
# Successfully unload the wallet referenced by the request endpoint
# Also ensure unload works during walletpassphrase timeout
w2.encryptwallet('test')
w2.walletpassphrase('test', 1)
w2.unloadwallet()
time.sleep(1.1)
assert 'w2' not in self.nodes[0].listwallets()
# Successfully unload all wallets
for wallet_name in self.nodes[0].listwallets():
self.nodes[0].unloadwallet(wallet_name)
assert_equal(self.nodes[0].listwallets(), [])
assert_raises_rpc_error(-18, "No wallet is loaded. Load a wallet using loadwallet or create a new one with createwallet. (Note: A default wallet is no longer automatically created)", self.nodes[0].getwalletinfo)
# Successfully load a previously unloaded wallet
self.nodes[0].loadwallet('w1')
assert_equal(self.nodes[0].listwallets(), ['w1'])
assert_equal(w1.getwalletinfo()['walletname'], 'w1')
assert_equal(sorted(map(lambda w: w['name'], self.nodes[0].listwalletdir()['wallets'])), sorted(in_wallet_dir))
# Test backing up and restoring wallets
self.log.info("Test wallet backup")
self.restart_node(0, ['-nowallet'])
for wallet_name in wallet_names:
self.nodes[0].loadwallet(wallet_name)
for wallet_name in wallet_names:
rpc = self.nodes[0].get_wallet_rpc(wallet_name)
addr = rpc.getnewaddress()
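            # Round trip: back up the wallet, swap in an empty wallet file (the address is
            # no longer ours), then restore the backup (the address is ours again).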
backup = os.path.join(self.options.tmpdir, 'backup.dat')
if os.path.exists(backup):
os.unlink(backup)
rpc.backupwallet(backup)
self.nodes[0].unloadwallet(wallet_name)
shutil.copyfile(empty_created_wallet if wallet_name == self.default_wallet_name else empty_wallet, wallet_file(wallet_name))
self.nodes[0].loadwallet(wallet_name)
assert_equal(rpc.getaddressinfo(addr)['ismine'], False)
self.nodes[0].unloadwallet(wallet_name)
shutil.copyfile(backup, wallet_file(wallet_name))
self.nodes[0].loadwallet(wallet_name)
assert_equal(rpc.getaddressinfo(addr)['ismine'], True)
# Test .walletlock file is closed
self.start_node(1)
wallet = os.path.join(self.options.tmpdir, 'my_wallet')
self.nodes[0].createwallet(wallet)
if self.options.descriptors:
assert_raises_rpc_error(-4, "Unable to obtain an exclusive lock", self.nodes[1].loadwallet, wallet)
else:
assert_raises_rpc_error(-4, "Error initializing wallet database environment", self.nodes[1].loadwallet, wallet)
self.nodes[0].unloadwallet(wallet)
self.nodes[1].loadwallet(wallet)
if __name__ == '__main__':
MultiWalletTest().main()
|
serial_connection.py
|
from logging import getLogger
import pathlib
import platform
import sys
import threading
import time
from textwrap import dedent
from thonny.plugins.micropython.bare_metal_backend import (
NORMAL_PROMPT,
FIRST_RAW_PROMPT,
OUTPUT_ENQ,
OUTPUT_ACK,
)
from thonny.common import ConnectionFailedException
from thonny.plugins.micropython.connection import MicroPythonConnection
logger = getLogger(__name__)
class SerialConnection(MicroPythonConnection):
def __init__(self, port, baudrate, dtr=None, rts=None, skip_reader=False):
import serial
from serial.serialutil import SerialException
super().__init__()
try:
self._serial = serial.Serial(
port=None, baudrate=baudrate, timeout=None, write_timeout=2, exclusive=True
)
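            # The port is assigned and opened only further below, after DTR/RTS have been
            # configured on the not-yet-open port.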
# Tweaking dtr and rts was proposed by
# https://github.com/thonny/thonny/pull/1187
# but in some cases it messes up communication.
# At the same time, in some cases it is required
# https://github.com/thonny/thonny/issues/1462
if dtr is not None:
logger.debug("Setting DTR to %s", dtr)
self._serial.dtr = dtr
if rts is not None:
logger.debug("Setting RTS to %s", rts)
self._serial.rts = rts
self._serial.port = port
logger.debug("Opening serial port %s", port)
self._serial.open()
except SerialException as error:
err_str = str(error)
if "FileNotFoundError" in err_str:
err_str = "port not found"
message = "Unable to connect to " + port + ": " + err_str
# TODO: check if these error codes also apply to Linux and Mac
if error.errno == 13 and sys.platform == "linux":
try:
group = pathlib.Path(self._serial.port).group()
except Exception as e:
logger.warning("Could not query group for '%s'", self._serial.port)
group = "dialoutfb"
# TODO: check if user already has this group
message += "\n\n" + dedent(
"""\
Try adding yourself to the '{group}' group:
> sudo usermod -a -G {group} <username>
(NB! You may need to reboot your system after this!)""".format(
group=group
)
)
elif "PermissionError" in message or "Could not exclusively lock" in message:
message += "\n\n" + dedent(
"""\
If you have serial connection to the device from another program, then disconnect it there first."""
)
elif error.errno == 16:
message += "\n\n" + "Try restarting the device."
raise ConnectionFailedException(message)
if skip_reader:
self._reading_thread = None
else:
self._reading_thread = threading.Thread(target=self._listen_serial, daemon=True)
self._reading_thread.start()
def write(self, data):
size = self._serial.write(data)
# print(data.decode(), end="")
assert size == len(data)
return len(data)
def _listen_serial(self):
"NB! works in background thread"
try:
data = b""
while not self._reader_stopped:
data += self._serial.read(1) # To avoid busy loop
if len(data) == 0:
self._error = "EOF"
# print("LISTEN EOFFFFFFFFFF")
break
data += self._serial.read_all()
# logger.debug("GOT %r", data)
if data.endswith(OUTPUT_ENQ) and self.text_mode:
# Flow control.
logger.debug("Read ENQ, responding with ACK")
# Assuming connection is idle and it is safe to write in this thread
self._serial.write(OUTPUT_ACK)
self._serial.flush()
data = data[:-1]
continue
# don't publish incomplete utf-8 data
try:
if self.text_mode:
data.decode("utf-8") # testing if data decodes
to_be_published = data
data = b""
except UnicodeDecodeError as e:
if e.start == 0:
# Invalid start byte, ie. we have missed first byte(s) of the codepoint.
# No use of waiting, output everything
to_be_published = data
data = b""
else:
to_be_published = data[: e.start]
data = data[e.start :]
if to_be_published:
self._make_output_available(to_be_published)
except Exception as e:
self._error = str(e)
def incoming_is_empty(self):
return self._serial.in_waiting == 0 and super().incoming_is_empty()
def outgoing_is_empty(self):
return self._serial.out_waiting == 0
def reset_output_buffer(self):
self._serial.reset_output_buffer()
def close(self):
if self._serial is not None:
try:
self._serial.cancel_read()
if self._reading_thread:
self._reading_thread.join()
finally:
try:
self._serial.close()
self._serial = None
except Exception:
logger.exception("Couldn't close serial")
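# Illustrative usage (not part of Thonny; the port name and baud rate are placeholder
# assumptions, not values taken from this project):
#
#     conn = SerialConnection("/dev/ttyACM0", baudrate=115200)
#     conn.write(b"\x03")  # e.g. interrupt the running MicroPython program
#     conn.close()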
class DifficultSerialConnection(SerialConnection):
"""For hardening the communication protocol"""
def _make_output_available(self, data, block=True):
# output prompts in parts
if FIRST_RAW_PROMPT in data or NORMAL_PROMPT in data:
if FIRST_RAW_PROMPT in data:
start = data.find(FIRST_RAW_PROMPT)
end = start + len(FIRST_RAW_PROMPT)
else:
start = data.find(NORMAL_PROMPT)
end = start + len(NORMAL_PROMPT)
super()._make_output_available(data[: start + 1], block=block)
time.sleep(0.1)
super()._make_output_available(data[start + 1 : end - 1], block=block)
time.sleep(0.1)
super()._make_output_available(data[end - 1 :], block=block)
else:
super()._make_output_available(data, block=block)
def debug(*args, file=sys.stderr):
print(*args, file=file)
|
test.py
|
import os.path as p
import random
import threading
import time
import pytest
from random import randrange
import pika
from sys import getdefaultencoding
from helpers.cluster import ClickHouseCluster
from helpers.test_tools import TSV
from helpers.client import QueryRuntimeException
from helpers.network import PartitionManager
import json
import subprocess
from google.protobuf.internal.encoder import _VarintBytes
cluster = ClickHouseCluster(__file__)
instance = cluster.add_instance('instance',
config_dir='configs',
main_configs=['configs/rabbitmq.xml','configs/log_conf.xml'],
with_rabbitmq=True)
rabbitmq_id = ''
# Helpers
def check_rabbitmq_is_available():
p = subprocess.Popen(('docker',
'exec',
'-i',
rabbitmq_id,
'rabbitmqctl',
'await_startup'),
stdout=subprocess.PIPE)
p.communicate()
return p.returncode == 0
def enable_consistent_hash_plugin():
p = subprocess.Popen(('docker',
'exec',
'-i',
rabbitmq_id,
"rabbitmq-plugins", "enable", "rabbitmq_consistent_hash_exchange"),
stdout=subprocess.PIPE)
p.communicate()
return p.returncode == 0
def wait_rabbitmq_is_available(max_retries=50):
retries = 0
while True:
if check_rabbitmq_is_available():
break
else:
retries += 1
if retries > max_retries:
raise "RabbitMQ is not available"
print("Waiting for RabbitMQ to start up")
time.sleep(1)
def wait_rabbitmq_plugin_enabled(max_retries=50):
retries = 0
while True:
if enable_consistent_hash_plugin():
break
else:
retries += 1
if retries > max_retries:
raise "RabbitMQ plugin is not available"
print("Waiting for plugin")
time.sleep(1)
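# Compare query output with the reference TSV file. With check=True a mismatch raises an
# AssertionError; otherwise the boolean comparison result is returned. Typical usage:
#   result = instance.query('SELECT * FROM test.rabbitmq', ignore_error=True)
#   rabbitmq_check_result(result, True)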
def rabbitmq_check_result(result, check=False, ref_file='test_rabbitmq_json.reference'):
fpath = p.join(p.dirname(__file__), ref_file)
with open(fpath) as reference:
if check:
assert TSV(result) == TSV(reference)
else:
return TSV(result) == TSV(reference)
# Fixtures
@pytest.fixture(scope="module")
def rabbitmq_cluster():
try:
global rabbitmq_id
cluster.start()
rabbitmq_id = instance.cluster.rabbitmq_docker_id
print("rabbitmq_id is {}".format(rabbitmq_id))
instance.query('CREATE DATABASE test')
yield cluster
finally:
cluster.shutdown()
@pytest.fixture(autouse=True)
def rabbitmq_setup_teardown():
wait_rabbitmq_is_available()
wait_rabbitmq_plugin_enabled()
print("RabbitMQ is available - running test")
yield # run test
instance.query('DROP TABLE IF EXISTS test.rabbitmq')
# Tests
@pytest.mark.timeout(180)
def test_rabbitmq_select_from_new_syntax_table(rabbitmq_cluster):
instance.query('''
CREATE TABLE test.rabbitmq (key UInt64, value UInt64)
ENGINE = RabbitMQ
SETTINGS rabbitmq_host_port = 'rabbitmq1:5672',
rabbitmq_routing_key_list = 'new',
rabbitmq_exchange_name = 'clickhouse-exchange',
rabbitmq_format = 'JSONEachRow',
rabbitmq_row_delimiter = '\\n';
''')
credentials = pika.PlainCredentials('root', 'clickhouse')
parameters = pika.ConnectionParameters('localhost', 5672, '/', credentials)
connection = pika.BlockingConnection(parameters)
channel = connection.channel()
channel.exchange_declare(exchange='clickhouse-exchange', exchange_type='fanout')
messages = []
for i in range(25):
messages.append(json.dumps({'key': i, 'value': i}))
for message in messages:
channel.basic_publish(exchange='clickhouse-exchange', routing_key='new', body=message)
messages = []
for i in range(25, 50):
messages.append(json.dumps({'key': i, 'value': i}))
for message in messages:
channel.basic_publish(exchange='clickhouse-exchange', routing_key='new', body=message)
connection.close()
result = ''
while True:
result += instance.query('SELECT * FROM test.rabbitmq', ignore_error=True)
if rabbitmq_check_result(result):
break
rabbitmq_check_result(result, True)
@pytest.mark.timeout(180)
def test_rabbitmq_select_from_old_syntax_table(rabbitmq_cluster):
instance.query('''
CREATE TABLE test.rabbitmq (key UInt64, value UInt64)
ENGINE = RabbitMQ('rabbitmq1:5672', 'old', 'clickhouse-exchange', 'JSONEachRow', '\\n');
''')
credentials = pika.PlainCredentials('root', 'clickhouse')
parameters = pika.ConnectionParameters('localhost', 5672, '/', credentials)
connection = pika.BlockingConnection(parameters)
channel = connection.channel()
channel.exchange_declare(exchange='clickhouse-exchange', exchange_type='fanout')
messages = []
for i in range(50):
messages.append(json.dumps({'key': i, 'value': i}))
for message in messages:
channel.basic_publish(exchange='clickhouse-exchange', routing_key='old', body=message)
connection.close()
result = ''
while True:
result += instance.query('SELECT * FROM test.rabbitmq', ignore_error=True)
if rabbitmq_check_result(result):
break
rabbitmq_check_result(result, True)
@pytest.mark.timeout(180)
def test_rabbitmq_select_empty(rabbitmq_cluster):
instance.query('''
CREATE TABLE test.rabbitmq (key UInt64, value UInt64)
ENGINE = RabbitMQ
SETTINGS rabbitmq_host_port = 'rabbitmq1:5672',
rabbitmq_routing_key_list = 'empty',
rabbitmq_format = 'TSV',
rabbitmq_row_delimiter = '\\n';
''')
assert int(instance.query('SELECT count() FROM test.rabbitmq')) == 0
@pytest.mark.timeout(180)
def test_rabbitmq_json_without_delimiter(rabbitmq_cluster):
instance.query('''
CREATE TABLE test.rabbitmq (key UInt64, value UInt64)
ENGINE = RabbitMQ
SETTINGS rabbitmq_host_port = 'rabbitmq1:5672',
rabbitmq_routing_key_list = 'json',
rabbitmq_exchange_name = 'clickhouse-exchange',
rabbitmq_format = 'JSONEachRow'
''')
credentials = pika.PlainCredentials('root', 'clickhouse')
parameters = pika.ConnectionParameters('localhost', 5672, '/', credentials)
connection = pika.BlockingConnection(parameters)
channel = connection.channel()
channel.exchange_declare(exchange='clickhouse-exchange', exchange_type='fanout')
messages = ''
for i in range(25):
messages += json.dumps({'key': i, 'value': i}) + '\n'
all_messages = [messages]
for message in all_messages:
channel.basic_publish(exchange='clickhouse-exchange', routing_key='json', body=message)
messages = ''
for i in range(25, 50):
messages += json.dumps({'key': i, 'value': i}) + '\n'
all_messages = [messages]
for message in all_messages:
channel.basic_publish(exchange='clickhouse-exchange', routing_key='json', body=message)
result = ''
while True:
result += instance.query('SELECT * FROM test.rabbitmq', ignore_error=True)
if rabbitmq_check_result(result):
break
connection.close()
rabbitmq_check_result(result, True)
@pytest.mark.timeout(180)
def test_rabbitmq_csv_with_delimiter(rabbitmq_cluster):
instance.query('''
CREATE TABLE test.rabbitmq (key UInt64, value UInt64)
ENGINE = RabbitMQ
SETTINGS rabbitmq_host_port = 'rabbitmq1:5672',
rabbitmq_routing_key_list = 'csv',
rabbitmq_exchange_name = 'clickhouse-exchange',
rabbitmq_format = 'CSV',
rabbitmq_row_delimiter = '\\n';
''')
credentials = pika.PlainCredentials('root', 'clickhouse')
parameters = pika.ConnectionParameters('localhost', 5672, '/', credentials)
connection = pika.BlockingConnection(parameters)
channel = connection.channel()
channel.exchange_declare(exchange='clickhouse-exchange', exchange_type='fanout')
messages = []
for i in range(50):
messages.append('{i}, {i}'.format(i=i))
for message in messages:
channel.basic_publish(exchange='clickhouse-exchange', routing_key='csv', body=message)
result = ''
while True:
result += instance.query('SELECT * FROM test.rabbitmq', ignore_error=True)
if rabbitmq_check_result(result):
break
connection.close()
rabbitmq_check_result(result, True)
@pytest.mark.timeout(180)
def test_rabbitmq_tsv_with_delimiter(rabbitmq_cluster):
instance.query('''
CREATE TABLE test.rabbitmq (key UInt64, value UInt64)
ENGINE = RabbitMQ
SETTINGS rabbitmq_host_port = 'rabbitmq1:5672',
rabbitmq_routing_key_list = 'tsv',
rabbitmq_exchange_name = 'clickhouse-exchange',
rabbitmq_format = 'TSV',
rabbitmq_row_delimiter = '\\n';
''')
credentials = pika.PlainCredentials('root', 'clickhouse')
parameters = pika.ConnectionParameters('localhost', 5672, '/', credentials)
connection = pika.BlockingConnection(parameters)
channel = connection.channel()
channel.exchange_declare(exchange='clickhouse-exchange', exchange_type='fanout')
messages = []
for i in range(50):
messages.append('{i}\t{i}'.format(i=i))
for message in messages:
channel.basic_publish(exchange='clickhouse-exchange', routing_key='tsv', body=message)
result = ''
while True:
result += instance.query('SELECT * FROM test.rabbitmq', ignore_error=True)
if rabbitmq_check_result(result):
break
connection.close()
rabbitmq_check_result(result, True)
@pytest.mark.timeout(180)
def test_rabbitmq_materialized_view(rabbitmq_cluster):
instance.query('''
DROP TABLE IF EXISTS test.view;
DROP TABLE IF EXISTS test.consumer;
CREATE TABLE test.rabbitmq (key UInt64, value UInt64)
ENGINE = RabbitMQ
SETTINGS rabbitmq_host_port = 'rabbitmq1:5672',
rabbitmq_routing_key_list = 'mv',
rabbitmq_format = 'JSONEachRow',
rabbitmq_row_delimiter = '\\n';
CREATE TABLE test.view (key UInt64, value UInt64)
ENGINE = MergeTree()
ORDER BY key;
CREATE MATERIALIZED VIEW test.consumer TO test.view AS
SELECT * FROM test.rabbitmq;
''')
credentials = pika.PlainCredentials('root', 'clickhouse')
parameters = pika.ConnectionParameters('localhost', 5672, '/', credentials)
connection = pika.BlockingConnection(parameters)
channel = connection.channel()
messages = []
for i in range(50):
messages.append(json.dumps({'key': i, 'value': i}))
for message in messages:
channel.basic_publish(exchange='clickhouse-exchange', routing_key='mv', body=message)
while True:
result = instance.query('SELECT * FROM test.view')
        if rabbitmq_check_result(result):
            break
instance.query('''
DROP TABLE test.consumer;
DROP TABLE test.view;
''')
connection.close()
rabbitmq_check_result(result, True)
@pytest.mark.timeout(180)
def test_rabbitmq_materialized_view_with_subquery(rabbitmq_cluster):
instance.query('''
DROP TABLE IF EXISTS test.view;
DROP TABLE IF EXISTS test.consumer;
CREATE TABLE test.rabbitmq (key UInt64, value UInt64)
ENGINE = RabbitMQ
SETTINGS rabbitmq_host_port = 'rabbitmq1:5672',
rabbitmq_routing_key_list = 'mvsq',
rabbitmq_format = 'JSONEachRow',
rabbitmq_row_delimiter = '\\n';
CREATE TABLE test.view (key UInt64, value UInt64)
ENGINE = MergeTree()
ORDER BY key;
CREATE MATERIALIZED VIEW test.consumer TO test.view AS
SELECT * FROM (SELECT * FROM test.rabbitmq);
''')
credentials = pika.PlainCredentials('root', 'clickhouse')
parameters = pika.ConnectionParameters('localhost', 5672, '/', credentials)
connection = pika.BlockingConnection(parameters)
channel = connection.channel()
messages = []
for i in range(50):
messages.append(json.dumps({'key': i, 'value': i}))
for message in messages:
channel.basic_publish(exchange='clickhouse-exchange', routing_key='mvsq', body=message)
while True:
result = instance.query('SELECT * FROM test.view')
if rabbitmq_check_result(result):
break
instance.query('''
DROP TABLE test.consumer;
DROP TABLE test.view;
''')
    connection.close()
rabbitmq_check_result(result, True)
@pytest.mark.timeout(180)
def test_rabbitmq_many_materialized_views(rabbitmq_cluster):
instance.query('''
DROP TABLE IF EXISTS test.view1;
DROP TABLE IF EXISTS test.view2;
DROP TABLE IF EXISTS test.consumer1;
DROP TABLE IF EXISTS test.consumer2;
CREATE TABLE test.rabbitmq (key UInt64, value UInt64)
ENGINE = RabbitMQ
SETTINGS rabbitmq_host_port = 'rabbitmq1:5672',
rabbitmq_routing_key_list = 'mmv',
rabbitmq_format = 'JSONEachRow',
rabbitmq_row_delimiter = '\\n';
CREATE TABLE test.view1 (key UInt64, value UInt64)
ENGINE = MergeTree()
ORDER BY key;
CREATE TABLE test.view2 (key UInt64, value UInt64)
ENGINE = MergeTree()
ORDER BY key;
CREATE MATERIALIZED VIEW test.consumer1 TO test.view1 AS
SELECT * FROM test.rabbitmq;
CREATE MATERIALIZED VIEW test.consumer2 TO test.view2 AS
SELECT * FROM test.rabbitmq;
''')
credentials = pika.PlainCredentials('root', 'clickhouse')
parameters = pika.ConnectionParameters('localhost', 5672, '/', credentials)
connection = pika.BlockingConnection(parameters)
channel = connection.channel()
messages = []
for i in range(50):
messages.append(json.dumps({'key': i, 'value': i}))
for message in messages:
channel.basic_publish(exchange='clickhouse-exchange', routing_key='mmv', body=message)
while True:
result1 = instance.query('SELECT * FROM test.view1')
result2 = instance.query('SELECT * FROM test.view2')
if rabbitmq_check_result(result1) and rabbitmq_check_result(result2):
break
instance.query('''
DROP TABLE test.consumer1;
DROP TABLE test.consumer2;
DROP TABLE test.view1;
DROP TABLE test.view2;
''')
rabbitmq_check_result(result1, True)
rabbitmq_check_result(result2, True)
@pytest.mark.timeout(240)
def test_rabbitmq_big_message(rabbitmq_cluster):
    # Create batches of messages, each roughly 100 KB in size
rabbitmq_messages = 1000
batch_messages = 1000
messages = [json.dumps({'key': i, 'value': 'x' * 100}) * batch_messages for i in range(rabbitmq_messages)]
credentials = pika.PlainCredentials('root', 'clickhouse')
parameters = pika.ConnectionParameters('localhost', 5672, '/', credentials)
connection = pika.BlockingConnection(parameters)
channel = connection.channel()
instance.query('''
DROP TABLE IF EXISTS test.view;
DROP TABLE IF EXISTS test.consumer;
CREATE TABLE test.rabbitmq (key UInt64, value String)
ENGINE = RabbitMQ
SETTINGS rabbitmq_host_port = 'rabbitmq1:5672',
rabbitmq_routing_key_list = 'big',
rabbitmq_format = 'JSONEachRow';
CREATE TABLE test.view (key UInt64, value String)
ENGINE = MergeTree
ORDER BY key;
CREATE MATERIALIZED VIEW test.consumer TO test.view AS
SELECT * FROM test.rabbitmq;
''')
for message in messages:
channel.basic_publish(exchange='clickhouse-exchange', routing_key='big', body=message)
while True:
result = instance.query('SELECT count() FROM test.view')
print("Result", result, "Expected", batch_messages * rabbitmq_messages)
if int(result) == batch_messages * rabbitmq_messages:
break
connection.close()
instance.query('''
DROP TABLE test.consumer;
DROP TABLE test.view;
''')
    assert int(result) == rabbitmq_messages * batch_messages, 'ClickHouse lost some messages: {}'.format(result)
@pytest.mark.timeout(420)
def test_rabbitmq_sharding_between_channels_publish(rabbitmq_cluster):
NUM_CHANNELS = 5
instance.query('''
CREATE TABLE test.rabbitmq (key UInt64, value UInt64)
ENGINE = RabbitMQ
SETTINGS rabbitmq_host_port = 'rabbitmq1:5672',
rabbitmq_num_consumers = 5,
rabbitmq_format = 'JSONEachRow',
rabbitmq_row_delimiter = '\\n';
DROP TABLE IF EXISTS test.view;
DROP TABLE IF EXISTS test.consumer;
CREATE TABLE test.view (key UInt64, value UInt64)
ENGINE = MergeTree
ORDER BY key;
CREATE MATERIALIZED VIEW test.consumer TO test.view AS
SELECT * FROM test.rabbitmq;
''')
time.sleep(1)
i = [0]
messages_num = 10000
credentials = pika.PlainCredentials('root', 'clickhouse')
parameters = pika.ConnectionParameters('localhost', 5672, '/', credentials)
def produce():
connection = pika.BlockingConnection(parameters)
channel = connection.channel()
channel.exchange_declare(exchange='clickhouse-exchange', exchange_type='fanout')
messages = []
for _ in range(messages_num):
messages.append(json.dumps({'key': i[0], 'value': i[0]}))
i[0] += 1
key = str(randrange(1, NUM_CHANNELS))
for message in messages:
channel.basic_publish(exchange='clickhouse-exchange', routing_key=key, body=message)
connection.close()
threads = []
threads_num = 20
for _ in range(threads_num):
threads.append(threading.Thread(target=produce))
for thread in threads:
time.sleep(random.uniform(0, 1))
thread.start()
while True:
result = instance.query('SELECT count() FROM test.view')
time.sleep(1)
print("Result", result, "Expected", messages_num * threads_num)
if int(result) == messages_num * threads_num:
break
for thread in threads:
thread.join()
assert int(result) == messages_num * threads_num, 'ClickHouse lost some messages: {}'.format(result)
@pytest.mark.timeout(420)
def test_rabbitmq_sharding_between_queues_publish(rabbitmq_cluster):
NUM_QUEUES = 4
instance.query('''
CREATE TABLE test.rabbitmq (key UInt64, value UInt64)
ENGINE = RabbitMQ
SETTINGS rabbitmq_host_port = 'rabbitmq1:5672',
rabbitmq_num_queues = 4,
rabbitmq_format = 'JSONEachRow',
rabbitmq_row_delimiter = '\\n';
DROP TABLE IF EXISTS test.view;
DROP TABLE IF EXISTS test.consumer;
CREATE TABLE test.view (key UInt64, value UInt64)
ENGINE = MergeTree
ORDER BY key;
CREATE MATERIALIZED VIEW test.consumer TO test.view AS
SELECT * FROM test.rabbitmq;
''')
time.sleep(1)
i = [0]
messages_num = 10000
credentials = pika.PlainCredentials('root', 'clickhouse')
parameters = pika.ConnectionParameters('localhost', 5672, '/', credentials)
def produce():
connection = pika.BlockingConnection(parameters)
channel = connection.channel()
channel.exchange_declare(exchange='clickhouse-exchange', exchange_type='fanout')
messages = []
for _ in range(messages_num):
messages.append(json.dumps({'key': i[0], 'value': i[0]}))
i[0] += 1
key = str(randrange(1, NUM_QUEUES))
for message in messages:
channel.basic_publish(exchange='clickhouse-exchange', routing_key=key, body=message)
connection.close()
threads = []
threads_num = 20
for _ in range(threads_num):
threads.append(threading.Thread(target=produce))
for thread in threads:
time.sleep(random.uniform(0, 1))
thread.start()
while True:
result = instance.query('SELECT count() FROM test.view')
time.sleep(1)
if int(result) == messages_num * threads_num:
break
for thread in threads:
thread.join()
assert int(result) == messages_num * threads_num, 'ClickHouse lost some messages: {}'.format(result)
@pytest.mark.timeout(420)
def test_rabbitmq_sharding_between_channels_and_queues_publish(rabbitmq_cluster):
NUM_CONSUMERS = 10
NUM_QUEUES = 2
instance.query('''
CREATE TABLE test.rabbitmq (key UInt64, value UInt64)
ENGINE = RabbitMQ
SETTINGS rabbitmq_host_port = 'rabbitmq1:5672',
rabbitmq_num_queues = 2,
rabbitmq_num_consumers = 10,
rabbitmq_format = 'JSONEachRow',
rabbitmq_row_delimiter = '\\n';
DROP TABLE IF EXISTS test.view;
DROP TABLE IF EXISTS test.consumer;
CREATE TABLE test.view (key UInt64, value UInt64)
ENGINE = MergeTree
ORDER BY key
SETTINGS old_parts_lifetime=5, cleanup_delay_period=2, cleanup_delay_period_random_add=3;
CREATE MATERIALIZED VIEW test.consumer TO test.view AS
SELECT * FROM test.rabbitmq;
''')
time.sleep(1)
i = [0]
messages_num = 10000
credentials = pika.PlainCredentials('root', 'clickhouse')
parameters = pika.ConnectionParameters('localhost', 5672, '/', credentials)
def produce():
connection = pika.BlockingConnection(parameters)
channel = connection.channel()
channel.exchange_declare(exchange='clickhouse-exchange', exchange_type='fanout')
messages = []
for _ in range(messages_num):
messages.append(json.dumps({'key': i[0], 'value': i[0]}))
i[0] += 1
key = str(randrange(1, NUM_QUEUES * NUM_CONSUMERS))
for message in messages:
channel.basic_publish(exchange='clickhouse-exchange', routing_key=key, body=message)
connection.close()
threads = []
threads_num = 20
for _ in range(threads_num):
threads.append(threading.Thread(target=produce))
for thread in threads:
time.sleep(random.uniform(0, 1))
thread.start()
while True:
result = instance.query('SELECT count() FROM test.view')
time.sleep(1)
if int(result) == messages_num * threads_num:
break
for thread in threads:
thread.join()
assert int(result) == messages_num * threads_num, 'ClickHouse lost some messages: {}'.format(result)
@pytest.mark.timeout(420)
def test_rabbitmq_read_only_combo(rabbitmq_cluster):
    NUM_MV = 5
NUM_CONSUMERS = 4
instance.query('''
CREATE TABLE test.rabbitmq (key UInt64, value UInt64)
ENGINE = RabbitMQ
SETTINGS rabbitmq_host_port = 'rabbitmq1:5672',
rabbitmq_num_consumers = 4,
rabbitmq_format = 'JSONEachRow',
rabbitmq_row_delimiter = '\\n';
''')
for mv_id in range(NUM_MV):
table_name = 'view{}'.format(mv_id)
print("Setting up {}".format(table_name))
instance.query('''
DROP TABLE IF EXISTS test.{0};
DROP TABLE IF EXISTS test.{0}_mv;
CREATE TABLE test.{0} (key UInt64, value UInt64)
ENGINE = MergeTree()
ORDER BY key;
CREATE MATERIALIZED VIEW test.{0}_mv TO test.{0} AS
SELECT * FROM test.rabbitmq;
'''.format(table_name))
time.sleep(2)
i = [0]
messages_num = 10000
credentials = pika.PlainCredentials('root', 'clickhouse')
parameters = pika.ConnectionParameters('localhost', 5672, '/', credentials)
def produce():
connection = pika.BlockingConnection(parameters)
channel = connection.channel()
channel.exchange_declare(exchange='clickhouse-exchange', exchange_type='fanout')
messages = []
for _ in range(messages_num):
messages.append(json.dumps({'key': i[0], 'value': i[0]}))
i[0] += 1
key = str(randrange(1, NUM_CONSUMERS))
for message in messages:
channel.basic_publish(exchange='clickhouse-exchange', routing_key=key, body=message)
connection.close()
threads = []
threads_num = 20
for _ in range(threads_num):
threads.append(threading.Thread(target=produce))
for thread in threads:
time.sleep(random.uniform(0, 1))
thread.start()
while True:
result = 0
for view in range(NUM_MV):
result += int(instance.query('SELECT count() FROM test.view{0}'.format(view)))
if int(result) == messages_num * threads_num * NUM_MV:
break
time.sleep(1)
for thread in threads:
thread.join()
for mv_id in range(NUM_MV):
table_name = 'view{}'.format(mv_id)
instance.query('''
DROP TABLE IF EXISTS test.{0};
'''.format(table_name))
assert int(result) == messages_num * threads_num * NUM_MV, 'ClickHouse lost some messages: {}'.format(result)
@pytest.mark.timeout(240)
def test_rabbitmq_insert(rabbitmq_cluster):
instance.query('''
CREATE TABLE test.rabbitmq (key UInt64, value UInt64)
ENGINE = RabbitMQ
SETTINGS rabbitmq_host_port = 'rabbitmq1:5672',
rabbitmq_exchange_name = 'insert',
rabbitmq_routing_key_list = 'insert1',
rabbitmq_format = 'TSV',
rabbitmq_row_delimiter = '\\n';
''')
credentials = pika.PlainCredentials('root', 'clickhouse')
parameters = pika.ConnectionParameters('localhost', 5672, '/', credentials)
consumer_connection = pika.BlockingConnection(parameters)
consumer = consumer_connection.channel()
consumer.exchange_declare(exchange='insert_rabbitmq_direct', exchange_type='direct')
result = consumer.queue_declare(queue='')
queue_name = result.method.queue
consumer.queue_bind(exchange='insert_rabbitmq_direct', queue=queue_name, routing_key='insert1')
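    # The freshly declared queue is bound to the exchange that ClickHouse publishes to on
    # INSERT, so consuming 50 messages below verifies the insert path end to end.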
values = []
for i in range(50):
values.append("({i}, {i})".format(i=i))
values = ','.join(values)
while True:
try:
instance.query("INSERT INTO test.rabbitmq VALUES {}".format(values))
break
except QueryRuntimeException as e:
if 'Local: Timed out.' in str(e):
continue
else:
raise
insert_messages = []
    def onReceived(channel, method, properties, body):
        insert_messages.append(body.decode())
        if len(insert_messages) == 50:
            channel.stop_consuming()
consumer.basic_qos(prefetch_count=50)
consumer.basic_consume(onReceived, queue_name)
consumer.start_consuming()
consumer_connection.close()
result = '\n'.join(insert_messages)
rabbitmq_check_result(result, True)
@pytest.mark.timeout(240)
def test_rabbitmq_many_inserts(rabbitmq_cluster):
instance.query('''
DROP TABLE IF EXISTS test.rabbitmq_many;
DROP TABLE IF EXISTS test.view_many;
DROP TABLE IF EXISTS test.consumer_many;
CREATE TABLE test.rabbitmq_many (key UInt64, value UInt64)
ENGINE = RabbitMQ
SETTINGS rabbitmq_host_port = 'rabbitmq1:5672',
rabbitmq_routing_key_list = 'insert2',
rabbitmq_format = 'TSV',
rabbitmq_row_delimiter = '\\n';
CREATE TABLE test.view_many (key UInt64, value UInt64)
ENGINE = MergeTree
ORDER BY key
SETTINGS old_parts_lifetime=5, cleanup_delay_period=2, cleanup_delay_period_random_add=3;
CREATE MATERIALIZED VIEW test.consumer_many TO test.view_many AS
SELECT * FROM test.rabbitmq_many;
''')
messages_num = 1000
def insert():
values = []
for i in range(messages_num):
values.append("({i}, {i})".format(i=i))
values = ','.join(values)
while True:
try:
instance.query("INSERT INTO test.rabbitmq_many VALUES {}".format(values))
break
except QueryRuntimeException as e:
if 'Local: Timed out.' in str(e):
continue
else:
raise
threads = []
threads_num = 20
for _ in range(threads_num):
threads.append(threading.Thread(target=insert))
for thread in threads:
time.sleep(random.uniform(0, 1))
thread.start()
while True:
result = instance.query('SELECT count() FROM test.view_many')
time.sleep(1)
if int(result) == messages_num * threads_num:
break
instance.query('''
DROP TABLE IF EXISTS test.rabbitmq_many;
DROP TABLE IF EXISTS test.consumer_many;
DROP TABLE IF EXISTS test.view_many;
''')
for thread in threads:
thread.join()
assert int(result) == messages_num * threads_num, 'ClickHouse lost some messages: {}'.format(result)
@pytest.mark.timeout(240)
def test_rabbitmq_sharding_between_channels_and_queues_insert(rabbitmq_cluster):
instance.query('''
DROP TABLE IF EXISTS test.view_sharding;
DROP TABLE IF EXISTS test.consumer_sharding;
CREATE TABLE test.rabbitmq_sharding (key UInt64, value UInt64)
ENGINE = RabbitMQ
SETTINGS rabbitmq_host_port = 'rabbitmq1:5672',
rabbitmq_num_consumers = 5,
rabbitmq_num_queues = 2,
rabbitmq_format = 'TSV',
rabbitmq_row_delimiter = '\\n';
CREATE TABLE test.view_sharding (key UInt64, value UInt64)
ENGINE = MergeTree
ORDER BY key
SETTINGS old_parts_lifetime=5, cleanup_delay_period=2, cleanup_delay_period_random_add=3;
CREATE MATERIALIZED VIEW test.consumer_sharding TO test.view_sharding AS
SELECT * FROM test.rabbitmq_sharding;
''')
messages_num = 10000
def insert():
values = []
for i in range(messages_num):
values.append("({i}, {i})".format(i=i))
values = ','.join(values)
while True:
try:
instance.query("INSERT INTO test.rabbitmq_sharding VALUES {}".format(values))
break
except QueryRuntimeException as e:
if 'Local: Timed out.' in str(e):
continue
else:
raise
threads = []
threads_num = 20
for _ in range(threads_num):
threads.append(threading.Thread(target=insert))
for thread in threads:
time.sleep(random.uniform(0, 1))
thread.start()
while True:
result = instance.query('SELECT count() FROM test.view_sharding')
time.sleep(1)
if int(result) == messages_num * threads_num:
break
instance.query('''
DROP TABLE IF EXISTS test.rabbitmq_sharding;
DROP TABLE IF EXISTS test.consumer_sharding;
DROP TABLE IF EXISTS test.view_sharding;
''')
for thread in threads:
thread.join()
assert int(result) == messages_num * threads_num, 'ClickHouse lost some messages: {}'.format(result)
@pytest.mark.timeout(420)
def test_rabbitmq_overloaded_insert(rabbitmq_cluster):
instance.query('''
DROP TABLE IF EXISTS test.view_overload;
DROP TABLE IF EXISTS test.consumer_overload;
CREATE TABLE test.rabbitmq_overload (key UInt64, value UInt64)
ENGINE = RabbitMQ
SETTINGS rabbitmq_host_port = 'rabbitmq1:5672',
rabbitmq_num_consumers = 10,
rabbitmq_format = 'TSV',
rabbitmq_row_delimiter = '\\n';
CREATE TABLE test.view_overload (key UInt64, value UInt64)
ENGINE = MergeTree
ORDER BY key
SETTINGS old_parts_lifetime=5, cleanup_delay_period=2, cleanup_delay_period_random_add=3;
CREATE MATERIALIZED VIEW test.consumer_overload TO test.view_overload AS
SELECT * FROM test.rabbitmq_overload;
''')
messages_num = 100000
def insert():
values = []
for i in range(messages_num):
values.append("({i}, {i})".format(i=i))
values = ','.join(values)
while True:
try:
instance.query("INSERT INTO test.rabbitmq_overload VALUES {}".format(values))
break
except QueryRuntimeException as e:
if 'Local: Timed out.' in str(e):
continue
else:
raise
threads = []
threads_num = 5
for _ in range(threads_num):
threads.append(threading.Thread(target=insert))
for thread in threads:
time.sleep(random.uniform(0, 1))
thread.start()
while True:
result = instance.query('SELECT count() FROM test.view_overload')
time.sleep(1)
print("Result", int(result), "Expected", messages_num * threads_num)
if int(result) == messages_num * threads_num:
break
instance.query('''
DROP TABLE IF EXISTS test.rabbitmq_overload;
DROP TABLE IF EXISTS test.consumer_overload;
DROP TABLE IF EXISTS test.view_overload;
''')
for thread in threads:
thread.join()
assert int(result) == messages_num * threads_num, 'ClickHouse lost some messages: {}'.format(result)
@pytest.mark.timeout(420)
def test_rabbitmq_direct_exchange(rabbitmq_cluster):
instance.query('''
DROP TABLE IF EXISTS test.destination;
CREATE TABLE test.destination(key UInt64, value UInt64,
_consumed_by LowCardinality(String))
ENGINE = MergeTree()
ORDER BY key
SETTINGS old_parts_lifetime=5, cleanup_delay_period=2, cleanup_delay_period_random_add=3;
''')
num_tables = 5
for consumer_id in range(num_tables):
print("Setting up table {}".format(consumer_id))
instance.query('''
DROP TABLE IF EXISTS test.direct_exchange_{0};
DROP TABLE IF EXISTS test.direct_exchange_{0}_mv;
CREATE TABLE test.direct_exchange_{0} (key UInt64, value UInt64)
ENGINE = RabbitMQ
SETTINGS rabbitmq_host_port = 'rabbitmq1:5672',
rabbitmq_num_consumers = 5,
rabbitmq_exchange_name = 'direct_exchange_testing',
rabbitmq_exchange_type = 'direct',
rabbitmq_routing_key_list = 'direct_{0}',
rabbitmq_format = 'JSONEachRow',
rabbitmq_row_delimiter = '\\n';
CREATE MATERIALIZED VIEW test.direct_exchange_{0}_mv TO test.destination AS
SELECT key, value, '{0}' as _consumed_by FROM test.direct_exchange_{0};
'''.format(consumer_id))
i = [0]
messages_num = 1000
credentials = pika.PlainCredentials('root', 'clickhouse')
parameters = pika.ConnectionParameters('localhost', 5672, '/', credentials)
connection = pika.BlockingConnection(parameters)
channel = connection.channel()
channel.exchange_declare(exchange='direct_exchange_testing', exchange_type='direct')
messages = []
for _ in range(messages_num):
messages.append(json.dumps({'key': i[0], 'value': i[0]}))
i[0] += 1
key_num = 0
for num in range(num_tables):
key = "direct_" + str(key_num)
key_num += 1
for message in messages:
mes_id = str(randrange(10))
channel.basic_publish(
exchange='direct_exchange_testing', routing_key=key,
properties=pika.BasicProperties(message_id=mes_id), body=message)
connection.close()
while True:
result = instance.query('SELECT count() FROM test.destination')
time.sleep(1)
if int(result) == messages_num * num_tables:
break
for consumer_id in range(num_tables):
instance.query('''
DROP TABLE IF EXISTS test.direct_exchange_{0};
DROP TABLE IF EXISTS test.direct_exchange_{0}_mv;
'''.format(consumer_id))
instance.query('''
DROP TABLE IF EXISTS test.destination;
''')
assert int(result) == messages_num * num_tables, 'ClickHouse lost some messages: {}'.format(result)
@pytest.mark.timeout(420)
def test_rabbitmq_fanout_exchange(rabbitmq_cluster):
instance.query('''
DROP TABLE IF EXISTS test.destination;
CREATE TABLE test.destination(key UInt64, value UInt64,
_consumed_by LowCardinality(String))
ENGINE = MergeTree()
ORDER BY key;
''')
num_tables = 5
for consumer_id in range(num_tables):
print("Setting up table {}".format(consumer_id))
instance.query('''
DROP TABLE IF EXISTS test.fanout_exchange_{0};
DROP TABLE IF EXISTS test.fanout_exchange_{0}_mv;
CREATE TABLE test.fanout_exchange_{0} (key UInt64, value UInt64)
ENGINE = RabbitMQ
SETTINGS rabbitmq_host_port = 'rabbitmq1:5672',
rabbitmq_num_consumers = 5,
rabbitmq_routing_key_list = 'key_{0}',
rabbitmq_exchange_name = 'fanout_exchange_testing',
rabbitmq_exchange_type = 'fanout',
rabbitmq_format = 'JSONEachRow',
rabbitmq_row_delimiter = '\\n';
CREATE MATERIALIZED VIEW test.fanout_exchange_{0}_mv TO test.destination AS
SELECT key, value, '{0}' as _consumed_by FROM test.fanout_exchange_{0};
'''.format(consumer_id))
i = [0]
messages_num = 1000
credentials = pika.PlainCredentials('root', 'clickhouse')
parameters = pika.ConnectionParameters('localhost', 5672, '/', credentials)
connection = pika.BlockingConnection(parameters)
channel = connection.channel()
channel.exchange_declare(exchange='fanout_exchange_testing', exchange_type='fanout')
messages = []
for _ in range(messages_num):
messages.append(json.dumps({'key': i[0], 'value': i[0]}))
i[0] += 1
key_num = 0
for message in messages:
mes_id = str(randrange(10))
channel.basic_publish(
exchange='fanout_exchange_testing', routing_key='',
properties=pika.BasicProperties(message_id=mes_id), body=message)
connection.close()
while True:
result = instance.query('SELECT count() FROM test.destination')
time.sleep(1)
if int(result) == messages_num * num_tables:
break
for consumer_id in range(num_tables):
instance.query('''
DROP TABLE IF EXISTS test.fanout_exchange_{0};
DROP TABLE IF EXISTS test.fanout_exchange_{0}_mv;
'''.format(consumer_id))
instance.query('''
DROP TABLE IF EXISTS test.destination;
''')
assert int(result) == messages_num * num_tables, 'ClickHouse lost some messages: {}'.format(result)
@pytest.mark.timeout(420)
def test_rabbitmq_topic_exchange(rabbitmq_cluster):
instance.query('''
DROP TABLE IF EXISTS test.destination;
CREATE TABLE test.destination(key UInt64, value UInt64,
_consumed_by LowCardinality(String))
ENGINE = MergeTree()
ORDER BY key;
''')
num_tables = 5
for consumer_id in range(num_tables):
print("Setting up table {}".format(consumer_id))
instance.query('''
DROP TABLE IF EXISTS test.topic_exchange_{0};
DROP TABLE IF EXISTS test.topic_exchange_{0}_mv;
CREATE TABLE test.topic_exchange_{0} (key UInt64, value UInt64)
ENGINE = RabbitMQ
SETTINGS rabbitmq_host_port = 'rabbitmq1:5672',
rabbitmq_num_consumers = 5,
rabbitmq_exchange_name = 'topic_exchange_testing',
rabbitmq_exchange_type = 'topic',
rabbitmq_routing_key_list = '*.{0}',
rabbitmq_format = 'JSONEachRow',
rabbitmq_row_delimiter = '\\n';
CREATE MATERIALIZED VIEW test.topic_exchange_{0}_mv TO test.destination AS
SELECT key, value, '{0}' as _consumed_by FROM test.topic_exchange_{0};
'''.format(consumer_id))
for consumer_id in range(num_tables):
print("Setting up table {}".format(num_tables + consumer_id))
instance.query('''
DROP TABLE IF EXISTS test.topic_exchange_{0};
DROP TABLE IF EXISTS test.topic_exchange_{0}_mv;
CREATE TABLE test.topic_exchange_{0} (key UInt64, value UInt64)
ENGINE = RabbitMQ
SETTINGS rabbitmq_host_port = 'rabbitmq1:5672',
rabbitmq_num_consumers = 4,
rabbitmq_exchange_name = 'topic_exchange_testing',
rabbitmq_exchange_type = 'topic',
rabbitmq_routing_key_list = '*.logs',
rabbitmq_format = 'JSONEachRow',
rabbitmq_row_delimiter = '\\n';
CREATE MATERIALIZED VIEW test.topic_exchange_{0}_mv TO test.destination AS
SELECT key, value, '{0}' as _consumed_by FROM test.topic_exchange_{0};
'''.format(num_tables + consumer_id))
i = [0]
messages_num = 1000
credentials = pika.PlainCredentials('root', 'clickhouse')
parameters = pika.ConnectionParameters('localhost', 5672, '/', credentials)
connection = pika.BlockingConnection(parameters)
channel = connection.channel()
channel.exchange_declare(exchange='topic_exchange_testing', exchange_type='topic')
messages = []
for _ in range(messages_num):
messages.append(json.dumps({'key': i[0], 'value': i[0]}))
i[0] += 1
key_num = 0
for num in range(num_tables):
key = "topic." + str(key_num)
key_num += 1
for message in messages:
channel.basic_publish(exchange='topic_exchange_testing', routing_key=key, body=message)
key = "random.logs"
for message in messages:
mes_id = str(randrange(10))
channel.basic_publish(
exchange='topic_exchange_testing', routing_key=key,
properties=pika.BasicProperties(message_id=mes_id), body=message)
connection.close()
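    # Each '*.<N>' table receives the messages published with its own 'topic.<N>' key, and
    # every '*.logs' table receives the 'random.logs' batch, so both groups contribute
    # messages_num * num_tables rows to test.destination.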
while True:
result = instance.query('SELECT count() FROM test.destination')
time.sleep(1)
if int(result) == messages_num * num_tables + messages_num * num_tables:
break
for consumer_id in range(num_tables * 2):
instance.query('''
DROP TABLE IF EXISTS test.topic_exchange_{0};
DROP TABLE IF EXISTS test.topic_exchange_{0}_mv;
'''.format(consumer_id))
instance.query('''
DROP TABLE IF EXISTS test.destination;
''')
assert int(result) == messages_num * num_tables + messages_num * num_tables, 'ClickHouse lost some messages: {}'.format(result)
@pytest.mark.timeout(420)
def test_rabbitmq_hash_exchange(rabbitmq_cluster):
instance.query('''
DROP TABLE IF EXISTS test.destination;
CREATE TABLE test.destination(key UInt64, value UInt64,
_consumed_by LowCardinality(String))
ENGINE = MergeTree()
ORDER BY key;
''')
num_tables = 4
for consumer_id in range(num_tables):
table_name = 'rabbitmq_consumer{}'.format(consumer_id)
print("Setting up {}".format(table_name))
instance.query('''
DROP TABLE IF EXISTS test.{0};
DROP TABLE IF EXISTS test.{0}_mv;
CREATE TABLE test.{0} (key UInt64, value UInt64)
ENGINE = RabbitMQ
SETTINGS rabbitmq_host_port = 'rabbitmq1:5672',
rabbitmq_num_consumers = 10,
rabbitmq_exchange_type = 'consistent_hash',
rabbitmq_exchange_name = 'hash_exchange_testing',
rabbitmq_format = 'JSONEachRow',
rabbitmq_row_delimiter = '\\n';
CREATE MATERIALIZED VIEW test.{0}_mv TO test.destination AS
SELECT key, value, '{0}' as _consumed_by FROM test.{0};
'''.format(table_name))
i = [0]
messages_num = 500
credentials = pika.PlainCredentials('root', 'clickhouse')
parameters = pika.ConnectionParameters('localhost', 5672, '/', credentials)
def produce():
# init connection here because otherwise python rabbitmq client might fail
connection = pika.BlockingConnection(parameters)
channel = connection.channel()
channel.exchange_declare(exchange='hash_exchange_testing', exchange_type='x-consistent-hash')
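        # The x-consistent-hash exchange type needs the rabbitmq_consistent_hash_exchange
        # plugin, which the autouse fixture enables before each test.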
messages = []
for _ in range(messages_num):
messages.append(json.dumps({'key': i[0], 'value': i[0]}))
i[0] += 1
for message in messages:
key = str(randrange(10))
channel.basic_publish(exchange='hash_exchange_testing', routing_key=key, body=message)
connection.close()
threads = []
threads_num = 10
for _ in range(threads_num):
threads.append(threading.Thread(target=produce))
for thread in threads:
time.sleep(random.uniform(0, 1))
thread.start()
while True:
result = instance.query('SELECT count() FROM test.destination')
time.sleep(1)
if int(result) == messages_num * threads_num:
break
for consumer_id in range(num_tables):
table_name = 'rabbitmq_consumer{}'.format(consumer_id)
instance.query('''
DROP TABLE IF EXISTS test.{0};
DROP TABLE IF EXISTS test.{0}_mv;
'''.format(table_name))
instance.query('''
DROP TABLE IF EXISTS test.destination;
''')
for thread in threads:
thread.join()
assert int(result) == messages_num * threads_num, 'ClickHouse lost some messages: {}'.format(result)
@pytest.mark.timeout(420)
def test_rabbitmq_multiple_bindings(rabbitmq_cluster):
instance.query('''
DROP TABLE IF EXISTS test.destination;
CREATE TABLE test.destination(key UInt64, value UInt64,
_consumed_by LowCardinality(String))
ENGINE = MergeTree()
ORDER BY key;
''')
instance.query('''
DROP TABLE IF EXISTS test.bindings_1;
DROP TABLE IF EXISTS test.bindings_1_mv;
CREATE TABLE test.bindings_1 (key UInt64, value UInt64)
ENGINE = RabbitMQ
SETTINGS rabbitmq_host_port = 'rabbitmq1:5672',
rabbitmq_num_consumers = 5,
rabbitmq_num_queues = 2,
rabbitmq_exchange_name = 'multiple_bindings_testing',
rabbitmq_exchange_type = 'direct',
rabbitmq_routing_key_list = 'key1,key2,key3,key4,key5',
rabbitmq_format = 'JSONEachRow',
rabbitmq_row_delimiter = '\\n';
CREATE MATERIALIZED VIEW test.bindings_1_mv TO test.destination AS
SELECT * FROM test.bindings_1;
''')
# in case num_consumers and num_queues are not set - multiple bindings are implemented differently, so test them too
instance.query('''
DROP TABLE IF EXISTS test.bindings_2;
DROP TABLE IF EXISTS test.bindings_2_mv;
CREATE TABLE test.bindings_2 (key UInt64, value UInt64)
ENGINE = RabbitMQ
SETTINGS rabbitmq_host_port = 'rabbitmq1:5672',
rabbitmq_exchange_name = 'multiple_bindings_testing',
rabbitmq_exchange_type = 'direct',
rabbitmq_routing_key_list = 'key1,key2,key3,key4,key5',
rabbitmq_format = 'JSONEachRow',
rabbitmq_row_delimiter = '\\n';
CREATE MATERIALIZED VIEW test.bindings_2_mv TO test.destination AS
SELECT * FROM test.bindings_2;
''')
i = [0]
messages_num = 500
credentials = pika.PlainCredentials('root', 'clickhouse')
parameters = pika.ConnectionParameters('localhost', 5672, '/', credentials)
def produce():
# init connection here because otherwise python rabbitmq client might fail
connection = pika.BlockingConnection(parameters)
channel = connection.channel()
channel.exchange_declare(exchange='multiple_bindings_testing', exchange_type='direct')
messages = []
for _ in range(messages_num):
messages.append(json.dumps({'key': i[0], 'value': i[0]}))
i[0] += 1
keys = ['key1', 'key2', 'key3', 'key4', 'key5']
for key in keys:
for message in messages:
mes_id = str(randrange(10))
channel.basic_publish(exchange='multiple_bindings_testing', routing_key=key,
properties=pika.BasicProperties(message_id=mes_id), body=message)
connection.close()
threads = []
threads_num = 10
for _ in range(threads_num):
threads.append(threading.Thread(target=produce))
for thread in threads:
time.sleep(random.uniform(0, 1))
thread.start()
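    # Every message is published once per routing key (5 keys) and consumed by both
    # bindings_1 and bindings_2, hence the expected total of messages_num * threads_num * 5 * 2.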
while True:
result = instance.query('SELECT count() FROM test.destination')
time.sleep(1)
if int(result) == messages_num * threads_num * 5 * 2:
break
for thread in threads:
thread.join()
instance.query('''
DROP TABLE IF EXISTS test.bindings_1;
DROP TABLE IF EXISTS test.bindings_2;
DROP TABLE IF EXISTS test.destination;
''')
assert int(result) == messages_num * threads_num * 5 * 2, 'ClickHouse lost some messages: {}'.format(result)
@pytest.mark.timeout(420)
def test_rabbitmq_headers_exchange(rabbitmq_cluster):
instance.query('''
DROP TABLE IF EXISTS test.destination;
CREATE TABLE test.destination(key UInt64, value UInt64,
_consumed_by LowCardinality(String))
ENGINE = MergeTree()
ORDER BY key;
''')
num_tables_to_receive = 3
for consumer_id in range(num_tables_to_receive):
print("Setting up table {}".format(consumer_id))
instance.query('''
DROP TABLE IF EXISTS test.headers_exchange_{0};
DROP TABLE IF EXISTS test.headers_exchange_{0}_mv;
CREATE TABLE test.headers_exchange_{0} (key UInt64, value UInt64)
ENGINE = RabbitMQ
SETTINGS rabbitmq_host_port = 'rabbitmq1:5672',
rabbitmq_num_consumers = 4,
rabbitmq_exchange_name = 'headers_exchange_testing',
rabbitmq_exchange_type = 'headers',
rabbitmq_routing_key_list = 'x-match=all,format=logs,type=report,year=2020',
rabbitmq_format = 'JSONEachRow',
rabbitmq_row_delimiter = '\\n';
CREATE MATERIALIZED VIEW test.headers_exchange_{0}_mv TO test.destination AS
SELECT key, value, '{0}' as _consumed_by FROM test.headers_exchange_{0};
'''.format(consumer_id))
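    # The next tables bind with year=2019 while the messages below are published with
    # year=2020 headers, so with x-match=all they must not receive anything.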
num_tables_to_ignore = 2
for consumer_id in range(num_tables_to_ignore):
print("Setting up table {}".format(consumer_id + num_tables_to_receive))
instance.query('''
DROP TABLE IF EXISTS test.headers_exchange_{0};
DROP TABLE IF EXISTS test.headers_exchange_{0}_mv;
CREATE TABLE test.headers_exchange_{0} (key UInt64, value UInt64)
ENGINE = RabbitMQ
SETTINGS rabbitmq_host_port = 'rabbitmq1:5672',
rabbitmq_exchange_name = 'headers_exchange_testing',
rabbitmq_exchange_type = 'headers',
rabbitmq_routing_key_list = 'x-match=all,format=logs,type=report,year=2019',
rabbitmq_format = 'JSONEachRow',
rabbitmq_row_delimiter = '\\n';
CREATE MATERIALIZED VIEW test.headers_exchange_{0}_mv TO test.destination AS
SELECT key, value, '{0}' as _consumed_by FROM test.headers_exchange_{0};
'''.format(consumer_id + num_tables_to_receive))
i = [0]
messages_num = 1000
credentials = pika.PlainCredentials('root', 'clickhouse')
parameters = pika.ConnectionParameters('localhost', 5672, '/', credentials)
connection = pika.BlockingConnection(parameters)
channel = connection.channel()
channel.exchange_declare(exchange='headers_exchange_testing', exchange_type='headers')
messages = []
for _ in range(messages_num):
messages.append(json.dumps({'key': i[0], 'value': i[0]}))
i[0] += 1
    fields = {'format': 'logs', 'type': 'report', 'year': '2020'}
key_num = 0
for message in messages:
mes_id = str(randrange(10))
channel.basic_publish(exchange='headers_exchange_testing', routing_key='',
properties=pika.BasicProperties(headers=fields, message_id=mes_id), body=message)
connection.close()
while True:
result = instance.query('SELECT count() FROM test.destination')
time.sleep(1)
if int(result) == messages_num * num_tables_to_receive:
break
for consumer_id in range(num_tables_to_receive + num_tables_to_ignore):
instance.query('''
            DROP TABLE IF EXISTS test.headers_exchange_{0};
            DROP TABLE IF EXISTS test.headers_exchange_{0}_mv;
'''.format(consumer_id))
instance.query('''
DROP TABLE IF EXISTS test.destination;
''')
assert int(result) == messages_num * num_tables_to_receive, 'ClickHouse lost some messages: {}'.format(result)
if __name__ == '__main__':
cluster.start()
    input("Cluster created, press Enter to destroy...")
cluster.shutdown()
|
mesos.py
|
#!/usr/bin/env python3
"""
DMLC submission script for Mesos.
One needs to make sure all slave machines are SSH-able.
"""
from __future__ import absolute_import
import os
import sys
import json
import uuid
import logging
from threading import Thread
from . import tracker
try:
import pymesos.subprocess
logging.getLogger('pymesos').setLevel(logging.WARNING)
def _run(prog, env, resources):
cwd = os.getcwd()
pymesos.subprocess.check_call(
prog, shell=True, env=env, cwd=cwd,
cpus=resources['cpus'], mem=resources['mem']
)
_USE_PYMESOS = True
except ImportError:
import subprocess
DEVNULL = open(os.devnull, 'w')
def _run(prog, env, resources):
master = os.environ['MESOS_MASTER']
if ':' not in master:
master += ':5050'
name = str(uuid.uuid4())
cwd = os.getcwd()
prog = "cd %s && %s" % (cwd, prog)
resources = ';'.join('%s:%s' % (k, v) for k, v in resources.items())
prog = prog.replace('\'', '\\\'')
env = json.dumps(env).replace('\'', '\\\'')
resources = resources.replace('\'', '\\\'')
cmd = (
'mesos-execute --master=%s --name=\'%s\''
' --command=\'%s\' --env=\'%s\' --resources=\'%s\'' %
(master, name, prog, env, resources)
)
subprocess.check_call(
cmd,
shell=True,
stdout=DEVNULL,
stderr=subprocess.STDOUT)
_USE_PYMESOS = False
def get_env():
# get system envs
keys = set(['OMP_NUM_THREADS', 'KMP_AFFINITY', 'LD_LIBRARY_PATH'])
return {k: v for k, v in os.environ.items() if k in keys}
def submit(args):
def mesos_submit(nworker, nserver, pass_envs):
"""
customized submit script
"""
# launch jobs
for i in range(nworker + nserver):
resources = {}
pass_envs['DMLC_ROLE'] = 'server' if i < nserver else 'worker'
if i < nserver:
pass_envs['DMLC_SERVER_ID'] = i
resources['cpus'] = args.server_cores
resources['mem'] = args.server_memory_mb
else:
pass_envs['DMLC_WORKER_ID'] = i - nserver
resources['cpus'] = args.worker_cores
resources['mem'] = args.worker_memory_mb
env = {str(k): str(v) for k, v in pass_envs.items()}
env.update(get_env())
prog = ' '.join(args.command)
thread = Thread(target=_run, args=(prog, env, resources))
            thread.daemon = True
thread.start()
return mesos_submit
if not _USE_PYMESOS:
logging.warning('No PyMesos found, use mesos-execute instead,'
' no task output available')
if args.mesos_master:
os.environ['MESOS_MASTER'] = args.mesos_master
assert 'MESOS_MASTER' in os.environ, 'No mesos master configured!'
tracker.submit(args.num_workers, args.num_servers,
fun_submit=mesos_submit,
pscmd=(' '.join(args.command)))
|
SearchEngine.py
|
from ScienceSearcher.DownloadClient import DownloadClient
from ScienceSearcher.ESClient import ESClient
from time import sleep
import threading
import os
import re
class SearchEngine:
def __init__(self, download_server_ip, download_server_port, download_client_ip, download_client_port,
es_ip, es_port, index_name, video_index_name, group_name):
"""
1.调用DowonloadClient中的file_tag()检查文件是否下好,
如果没下好,先调用DownloadClient中的download()进行下载
2.初始化一个ESClient对象
:param download_server_ip: 数据服务器ip
:param download_server_port: 数据服务器端口
:param download_client_ip: 接收数据的ip
:param download_client_port: 接收数据的端口
:param es_ip: es服务的ip
:param es_port: es服务的端口
"""
self.client = DownloadClient(download_client_ip, download_client_port, group_name)
self.es = ESClient([{"host": es_ip, "port": es_port}], index_name=index_name, video_index_name=video_index_name)
self.titles = []
self.send_flag = [False]
# thread for update data
t = threading.Thread(target=self.client.send, args=(download_server_ip, download_server_port, self.send_flag))
        t.daemon = True
t.start()
sleep(1)
if self.send_flag[0]:
print("receiving data, please don't stop the process until \"extracting end\" appears in terminal")
while self.send_flag[0]:
sleep(1)
# thread for update title list
t = threading.Thread(target=self.es.get_all_title, args=(self.titles,))
        t.daemon = True
t.start()
while not self.titles:
sleep(1)
def search(self, query):
"""
        Wraps Elasticsearch search so the presentation (front-end) team can call it.
        :param query: the query sent by the presentation team
        :return: a list of results
"""
if query["type"] == 1:
complete_new_query = dict()
new_query = dict()
new_query["title"] = query["query_text"]["title"].lower()
new_query["authors"] = query["query_text"]["authors"].lower()
new_query["abstract"] = query["query_text"]["abstract"].lower()
new_query["pdfText"] = query["query_text"]["content"].lower()
# new_query["year"] = query["query_text"]["year"].lower()
complete_new_query["type"] = query["type"]
complete_new_query["top_number"] = query["top_number"]
complete_new_query["query_text"] = new_query
complete_new_query["operator"] = query["operator"]
query = complete_new_query
res = self.es.search(query)
processed_res = []
abs_dirname = os.path.dirname(os.path.abspath(__file__))
if query["type"] == 2:
for item in res:
content = dict()
content["timeStart"] = item["timeStart"]
content["timeEnd"] = item["timeEnd"]
content["sentence"] = item["sentence"]
paper = self.es.search_by_id(item['paper_id'])
paper["videoStruct"] = [content]
paper["pdfPath"] = abs_dirname + paper["pdfPath"]
paper["videoPath"] = abs_dirname + paper["videoPath"]
processed_res.append(paper)
else:
for item in res:
item["pdfPath"] = abs_dirname + item["pdfPath"]
item["videoPath"] = abs_dirname + item["videoPath"]
processed_res.append(item)
return processed_res
def auto_complete(self, query):
cnt = 0
res = []
for title in self.titles:
if re.search(query, title, re.IGNORECASE):
res.append(title)
cnt += 1
if cnt > 10:
break
return res
def search_by_id(self, id_):
try:
res = self.es.search_by_id(id_)
except BaseException as e:
print(e)
print("id doesn't exist")
return {}
abs_dirname = os.path.dirname(os.path.abspath(__file__))
res["pdfPath"] = abs_dirname + res["pdfPath"]
res["videoPath"] = abs_dirname + res["videoPath"]
return res
|
get_video.py
|
import json
import threading
import cv2
import PySimpleGUI as sg
import trt_pose.coco
import trt_pose.models
from flask import Flask
from flask_restful import Api, Resource
from trt_pose.parse_objects import ParseObjects
from camera import Camera
from exercise import LeftBicepCurl, RightBicepCurl, ShoulderPress, Squat
from helper import HEIGHT, WIDTH, preprocess
from model import Model
executing = False # global flag for session start
exercise = None # global exercise object required for model inference and drawing
stopExercise = False # global flag for stopping exercise after loop ends
drawn = None # global for our image
class LeftCurlAPI(Resource):
def get(self):
global exercise, executing
exercise = LeftBicepCurl()
executing = True
return {"leftCurl": f"{id}"}
class RightCurlAPI(Resource):
def get(self):
global exercise, executing
exercise = RightBicepCurl()
executing = True
return {"rightCurl": f"{id}"}
class ShoulderPressAPI(Resource):
def get(self):
global exercise, executing
exercise = ShoulderPress()
executing = True
return {"shoulderPress": f"{id}"}
class SquatAPI(Resource):
def get(self):
global exercise, executing
exercise = Squat()
executing = True
return {"squat": f"{id}"}
class RepCountAPI(Resource):
def get(self):
global exercise
reps = exercise.rep_count if exercise else 0
return {"repCount": f"{reps}"}
class EndExerciseAPI(Resource):
def get(self):
global stopExercise
stopExercise = True
return {"endExercise": f"{id}"}
class StartSessionAPI(Resource):
def get(self):
return {"startSession": f"{id}"}
class DebugAPI(Resource):
def get(self):
return {"debug": f"{id}"}
# ------ Begin GUI layout ------
video_viewer_column = [
# image will be flab2ab image
[sg.Image(filename="", key="image")],
]
repcount_list_column = [
[
# current rep count
sg.Text("Rep Count"),
# change folder to pull actual rep count
sg.In(size=(25, 1), enable_events=True, key="repCount"),
],
[
# previous exercise list
sg.Listbox(values=[], enable_events=True, size=(40, 20), key="exerciseList")
],
]
# finally builds layout of gui
layout = [
[
sg.Column(video_viewer_column),
sg.VSeperator(),
sg.Column(repcount_list_column),
]
]
# ------ End GUI layout ------
def main():
global exercise, stopExercise, drawn
print("Beginning script")
# Load the annotation file and create a topology tensor
with open("human_pose.json", "r") as f:
human_pose = json.load(f)
# Create a topology tensor (intermediate DS that describes part linkages)
topology = trt_pose.coco.coco_category_to_topology(human_pose)
# Construct and load the model
model = Model(pose_annotations=human_pose)
model.load_model("resnet")
model.get_optimized()
model.log_fps()
print("Set up model")
# Set up the camera
camera = Camera(width=640, height=480)
camera.capture_video("mp4v", "/tmp/output.mp4")
assert camera.cap is not None, "Camera Open Error"
print("Set up camera")
# Set up callable class used to parse the objects from the neural network
parse_objects = ParseObjects(topology)
app = Flask(__name__)
api = Api(app)
# add endpoints
api.add_resource(LeftCurlAPI, "/leftCurl")
api.add_resource(RightCurlAPI, "/rightCurl")
api.add_resource(ShoulderPressAPI, "/shoulderPress")
api.add_resource(SquatAPI, "/squat")
api.add_resource(RepCountAPI, "/repCount")
api.add_resource(EndExerciseAPI, "/endExercise")
api.add_resource(StartSessionAPI, "/startSession")
api.add_resource(DebugAPI, "/debug")
t = threading.Thread(target=app.run, kwargs={"host": "0.0.0.0"})
t.start()
print("After networking")
while not executing:
pass
window = sg.Window("Flab2Ab", location=(800, 400))
window.Layout(layout).Finalize()
print("Executing...")
while True:
while camera.cap.isOpened() and exercise:
succeeded, image = camera.cap.read()
if not succeeded:
print("Camera read Error")
break
resized_img = cv2.resize(
image, dsize=(WIDTH, HEIGHT), interpolation=cv2.INTER_AREA
)
preprocessed = preprocess(resized_img)
counts, objects, peaks = model.execute_neural_net(
data=preprocessed, parser=parse_objects
)
drawn = exercise.draw(image, counts, objects, peaks, t)
encoded_img = cv2.imencode(".png", image)[1].tobytes()
window.FindElement("image").update(data=encoded_img)
if camera.out:
camera.out.write(drawn)
cv2.waitKey(1)
if stopExercise:
exercise = None
stopExercise = False
print("exercise ended successfully")
# Clean up resources
print("Cleaning up")
cv2.destroyAllWindows()
camera.out.release()
camera.cap.release()
if __name__ == "__main__":
main()
|
scale_config.py
|
#python scale_v3.py --api_server_ip '10.204.216.64' --keystone_ip '10.204.216.150' --n_vns 10 --vnc --cleanup --n_process 2
#python scale_v3.py --api_server_ip '10.204.216.64' --keystone_ip '10.204.216.150' --n_policies 5 --n_policy_rules 2 --vnc --cleanup --n_process 2
#python scale_v3.py --api_server_ip '10.204.216.64' --keystone_ip '10.204.216.150' --n_sgs 5 --n_sg_rules 2 --vnc --cleanup --n_process 2
#python scale_v3.py --api_server_ip '10.204.216.64' --keystone_ip '10.204.216.150' --n_fips 1200 --vnc --cleanup --n_process 120
#python scale_v3.py --api_server_ip '10.204.216.64' --keystone_ip '10.204.216.150' --n_vns 1 --n_ports 10 --vnc --cleanup --n_process 10
from servicechain import ServiceChain
import argparse
import random
import socket
import struct
import os
import sys
import time
import uuid
import copy
import signal
import string
#import MySQLdb
from Queue import Empty
from netaddr import *
from datetime import datetime
from multiprocessing import Process, Queue
from neutronclient.neutron import client as neutron_client
from novaclient import client as nova_client
from keystoneclient.v3 import client as ks_client
from keystoneauth1 import identity as ks_identity
from keystoneauth1 import session as ks_session
try:
from vnc_api.vnc_api import *
except:
pass
from copy_reg import pickle
from types import MethodType
import threading
import thread
import logging
lock = dict()
def get_lock(key):
global lock
if key not in lock.keys():
lock[key] = threading.Lock()
return lock[key]
def _pickle_lock(lock):
return _unpickle_lock, (lock.__hash__(),)
def _unpickle_lock(key):
return get_lock(key)
pickle(thread.LockType, _pickle_lock, _unpickle_lock)
def _pickle_method(method):
func_name = method.im_func.__name__
obj = method.im_self
cls = method.im_class
return _unpickle_method, (func_name, obj, cls)
def _unpickle_method(func_name, obj, cls):
for cls in cls.mro():
try:
func = cls.__dict__[func_name]
except KeyError:
pass
else:
break
return func.__get__(obj, cls)
pickle(MethodType, _pickle_method, _unpickle_method)
alloc_addr_list = list()
debug = True
max_inst = 20
def retry(tries=12, delay=5):
def deco_retry(f):
def f_retry(*args, **kwargs):
mtries, result = tries, False
while mtries > 0:
mtries -= 1
try:
result = f(*args, **kwargs)
except:
if not mtries:
raise
if result is True:
break
time.sleep(delay)
if not result:
return False
else:
return (tries - mtries)*delay
return f_retry
return deco_retry
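# Note on retry(): the wrapped function is polled until it returns True; on success
# f_retry returns the approximate elapsed time ((tries - mtries) * delay), and on
# persistent failure it returns False, so callers treat the return value as a duration.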
class DB(object):
def __init__(self, user, password, host, database):
self.db = MySQLdb.connect(user=user, passwd=password,
host=host, db=database)
self.cursor = self.db.cursor(MySQLdb.cursors.DictCursor)
def query_db(self, query):
self.db.rollback()
self.cursor.execute(query)
return self.cursor.fetchall()
class ScaleTest(object):
def __init__ (self, args):
self._args = args
self._args.auth_url = 'http://%s:35357/v3' % self._args.keystone_ip
self._args.admin_obj = Openstack(self._args.auth_url,
self._args.admin_username,
self._args.admin_password,
self._args.admin_project)
self.obj = self._args.admin_obj
self.userid = self.obj.get_user_id(self._args.username)
role = '_member_'
if self._args.vnc or self._args.n_svc_chains or self._args.n_svc_templates:
role = 'admin'
self.roleid = self.obj.get_role_id(role)
if self._args.project:
self.project_id = Openstack(self._args.auth_url,
self._args.username,
self._args.password,
self._args.project).project_id
self._args.computes = self.get_compute_hosts()
self._args.timeout = 60 if self._args.rate \
else self._args.timeout
self.db = None
if self._args.n_vms or self._args.n_svc_chains:
self.db = DB(user='root', password=self._args.mysql_passwd,
host=self._args.keystone_ip, database='nova')
self.initial_vm_count = self.get_active_vm_count()
self.initial_port_count = self.get_port_count()
def get_compute_hosts(self):
''' Check whether all compute nodes are up '''
hosts = self.obj.nova.services.list(binary='nova-compute')
computes = [h.host for h in hosts if h.status == 'enabled'
and h.state == 'up']
hosts = list(map((lambda x: x.host), hosts))
if set(hosts) - set(computes):
            raise Exception('Some hosts are down: %s' % list(set(hosts)
                            - set(computes)))
return computes
def is_per_project_obj_reqd(self):
if self._args.n_vns or self._args.n_vms or self._args.n_sgs or \
self._args.n_sg_rules or self._args.n_routers or \
self._args.n_fips or self._args.n_ports or \
self._args.n_svc_templates or self._args.n_svc_chains or \
self._args.n_policies or self._args.n_policy_rules:
return True
return False
@retry(30, 2)
def get_vm_active_time(self, initial, expected):
current = self.get_active_vm_count()
if current - initial < expected:
print('Not all VMs are active, actual: %s, expected: %s'%(
current-initial, expected))
return False
return True
def get_active_vm_count(self):
query = 'select count(vm_state) from instances '+\
'where vm_state = "active" and power_state = "1";'
count_dict = self.db.query_db(query)[0]
return count_dict['count(vm_state)']
def get_port_count(self):
return len(self.obj.list_ports())
def setUp(self):
kwargs_list = list()
queues = list()
self.projects_list = list()
for index in range(self._args.n_process):
queues.append(Queue())
kwargs_list.append({'queue': queues[index]})
(success, timediff) = create_n_process(self.create,
self._args.n_process,
kwargs_list,
self._args.timeout,
callback=self.read_from_queue)
print('Time to create all projects %s'%timediff)
kwargs_list = list()
for projects in self.projects_list:
kwargs_list.append({'projects': projects})
# Get the boot time of VMs
self.post_create()
# Verify the created objects
if self._args.verify:
(success, timediff) = create_n_process(self.verify,
len(self.projects_list),
kwargs_list)
print( 'Time to verify all projects %s Success Percentage: %s'%(timediff, success))
def read_from_queue(self, process, kwargs):
try:
self.projects_list.append(kwargs['queue'].get(
timeout=self._args.timeout))
except Empty:
process.terminate()
def create(self, queue):
try:
projects = list()
create = False if self._args.project else True
for index in range(self._args.n_projects):
project_dict = {}
if create is True:
self._args.project = random_string('Project')
self.project_id = self.obj.create_project(self._args.project,domain_id='default')
self.obj.add_user_to_project(self.userid, self.roleid,
self.project_id)
project_dict['name'] = self._args.project
project_dict['id'] = self.project_id
# Check if any objs has to be created per project
if self.is_per_project_obj_reqd():
self._args.auth_token = None #self.obj.get_auth_token()
perprojectobj = PerprojectWrapper(self._args)
perprojectobj.setUp()
project_dict['perprojectobj'] = perprojectobj
projects.append(project_dict)
except:
queue.put_nowait(projects)
raise
queue.put(projects)
def post_create(self):
# Check VM active count
if self._args.n_vms or self._args.n_svc_chains:
expected = self._args.n_process * self._args.n_projects *\
self._args.n_threads * ((self._args.n_vns or 1) * self._args.n_vms * max_inst + self._args.n_svc_chains)
exp_ports = self._args.n_process * self._args.n_projects *\
self._args.n_threads * ((self._args.n_vns or 1) * self._args.n_vms * max_inst + self._args.n_svc_chains*2)
print( "Took %s secs to have all vms in active state" %(
self.get_vm_active_time(self.initial_vm_count, expected)))
current_port_count = self.get_port_count()
if current_port_count - self.initial_port_count != exp_ports:
                print('Port count mismatch, current:%s, expected:%s' % (
                    current_port_count - self.initial_port_count, exp_ports))
for projects in self.projects_list:
for project in projects:
project['perprojectobj'].populate_vm_obj()
def verify(self, projects, delete=False):
# Verify project got created
# Call perproject verify
for project_dict in projects:
if project_dict.has_key('perprojectobj'):
project_dict['perprojectobj'].verify()
def cleanup(self):
kwargs_list = list()
for projects in self.projects_list:
kwargs_list.append({'projects': projects})
(success, timediff) = create_n_process(self.delete,
len(self.projects_list),
kwargs_list,
self._args.timeout)
print( 'Time to delete all projects %s'%timediff)
def delete(self, projects):
for project_dict in projects:
if project_dict.has_key('perprojectobj'):
project_dict['perprojectobj'].cleanup()
if not self._args.project:
self.obj.delete_project(project_dict['id'])
class PerprojectWrapper(object):
def __init__(self, args):
self._args = args
self.admin_obj = self._args.admin_obj
self.get_handles()
def get_handles(self):
if self._args.vnc:
self.obj = VNC(self._args.auth_url,
self._args.username,
self._args.password,
self._args.project,
self._args.api_server_ip,
self._args.api_server_port,
self._args.keystone_ip,
self._args.auth_token)
else:
self.obj = Openstack(self._args.auth_url,
self._args.username,
self._args.password,
self._args.project,
self._args.auth_token)
if self._args.n_svc_templates or self._args.n_svc_chains or self._args.n_policies:
self.sc = ServiceChain(self.obj, self._args.username,
self._args.password,
self._args.project,
self._args.api_server_ip,
self._args.api_server_port,
self._args.keystone_ip,
self._args.auth_token)
def pre_conf(self):
        ''' Create certain objects before starting the test '''
if self._args.vdns:
self.obj.create_vdns(self.get_name('vdns', ''))
if self._args.ipam:
self.obj.create_ipam(self.get_name('ipam', ''))
if (self._args.n_ports or self._args.n_vms)\
and not self._args.n_vns:
vn_name = self.get_name('VN', 'G')
self.obj.create_network(vn_name, mask=16)
if self._args.n_sg_rules and not self._args.n_sgs:
sg_name = self.get_name('SG', 'G')
self.obj.create_sg(sg_name)
if self._args.n_fips or self._args.n_routers:
if not self._args.public_vn_id:
vn_name = random_string('EXT-VN')
self.admin_obj.create_network(vn_name, mask=16, external=True)
else:
self.admin_obj.ext_vn_uuid = self._args.public_vn_id
if self._args.n_svc_chains:
st_name = random_string('ServiceT')
self.sc.create_svc_template(name=st_name,
image_id= self._args.image_id,
service_mode='in-network')
if self._args.n_policies or self._args.n_policy_rules:
left_vn = random_string('Left-VN')
right_vn = random_string('Right-VN')
self.obj.create_network(left_vn)
self.obj.create_network(right_vn)
if self._args.n_policy_rules and not self._args.n_policies:
self._args.n_policies = 1
def setUp(self):
''' Create N objects '''
self.pre_conf()
queues = list()
self.id_obj = list()
kwargs_list = list()
# Create Processes
for i in range(self._args.n_threads):
queues.append(Queue())
kwargs_list.append({'queue':queues[i], 'index':i})
(success, timediff) = create_n_process(self.start_create,
self._args.n_threads,
kwargs_list,
self._args.timeout,
callback=self.read_from_queue)
print( 'Time to create objects for project %s is %s. Success %% %s' %(
self._args.project, timediff, success))
def read_from_queue(self, process, kwargs):
try:
self.id_obj.append(kwargs['queue'].get(timeout=self._args.timeout))
except Empty:
process.terminate()
def merge_to_self(self, parent):
for attr in parent.__dict__:
if type(parent.__dict__[attr]) is list:
self.__dict__[attr].extend(parent.__dict__[attr])
if type(parent.__dict__[attr]) is dict:
self.__dict__[attr].update(parent.__dict__[attr])
def start_create(self, index, queue):
parent_id = self.obj.id
self.get_handles()
self.obj.id = copy.deepcopy(parent_id)
try:
# Create virtual network
for vn_index in range(index, index+self._args.n_vns):
vn_name = self.get_name('VN', vn_index)
self.obj.create_network(vn_name=vn_name)
# Create Ports
for vn_name in self.obj.id.vn_uuid.keys():
for port_index in range(index, index+self._args.n_ports):
port_name = vn_name+'-Port'+str(port_index)
self.obj.create_port(vn_name, port_name)
# Create Security Group
for sg_index in range(index, index+self._args.n_sgs):
sg_name = self.get_name('SG', sg_index)
self.obj.create_sg(sg_name)
# Create Security Group Rules
for sg_name in self.obj.id.sg_id.keys():
cidr = get_randmon_cidr(mask=29)
for rule_index in range(index, index+self._args.n_sg_rules):
self.obj.create_sg_rule(sg_name, rule_index+1000,
rule_index+1000, cidr)
# Create Router
for rtr_index in range(index, index+self._args.n_routers):
router_name = self.get_name('RTR', rtr_index)
self.obj.create_router(router_name)
self.obj.add_gateway_router(router_name, self.admin_obj.ext_vn_uuid)
# Attach all the VNs to a LR
for vn_name in self.obj.id.vn_uuid.keys():
if self.obj.id.router_id.keys():
rtr_name = self.obj.id.router_id.keys()[0]
self.obj.add_interface_router(rtr_name, vn_name)
# Create Floating IP
port_ids = self.obj.id.port_id.values()
for fip_index in range(index, index+self._args.n_fips):
self.obj.create_floatingip(self.admin_obj.ext_vn_uuid)
if fip_index < len(port_ids):
port_id = port_ids[fip_index]
fip_id = self.obj.id.fip_id[fip_index]
self.obj.assoc_floatingip(fip_id, port_id)
# Create virtual machines
for vn_name in self.obj.id.vn_uuid.keys():
for vm_index in range(index, index+self._args.n_vms):
vm_name = vn_name+'-VM'+random_string(str(vm_index))
port_name = vn_name+'-Port'+str(vm_index)
self.obj.create_vm(image_id=self._args.image_id,
vm_name=vm_name, port_name=port_name,
vn_name=vn_name)
# Create Service Template
for st_index in range(index, index+self._args.n_svc_templates):
st_name = random_string('ServiceT')
self.sc.create_svc_template(name=st_name,
image_id= self._args.image_id,
service_mode='in-network')
for st_name in self.obj.id.st_obj.keys():
for si_index in range(index, index+self._args.n_svc_chains):
si_name = self.get_name('ServiceI', si_index)
pol_name = 'Policy-'+si_name
left_vn = self.get_name('leftVN', si_index)
right_vn = self.get_name('rightVN', si_index)
self.obj.create_network(vn_name=left_vn)
self.obj.create_network(vn_name=right_vn)
self.sc.create_svc_instance(si_name, st_name, left_vn, right_vn)
self.sc.create_policy(name=pol_name, si_name=si_name,
src_vn=left_vn, dst_vn=right_vn)
# Create Policies
for policy_index in range(index, index+self._args.n_policies):
policy_name = self.get_name('Policy', policy_index)
vn_list = self.obj.id.vn_uuid.keys()
self.sc.create_policy(name=policy_name,
src_vn=vn_list[0],
dst_vn=vn_list[1],
n_rules=self._args.n_policy_rules)
except:
queue.put_nowait(self.obj.id)
raise
queue.put(self.obj.id)
def cleanup(self):
''' Cleanup created objects '''
kwargs_list = list()
# Create Processes
for i in range(self._args.n_threads):
kwargs_list.append({'id':self.id_obj[i]})
(success, timediff) = create_n_process(self.start_cleanup,
self._args.n_threads,
kwargs_list,
self._args.timeout)
print( 'Time to delete objects for project %s is %s. Success %% %s' %(
self._args.project, timediff, success))
self.post_cleanup()
def start_cleanup(self, id):
# Delete VM
for vm_obj in id.vm_obj.values():
self.obj.delete_vm(vm_obj)
# Delete Floating IP
for fip_id in id.fip_id:
self.obj.delete_floatingip(fip_id)
# Delete Port
if not id.vm_id:
for port_id in id.port_id.values():
self.obj.delete_port(port_id)
# Delete Security Group rule
for rules in id.rule_id.values():
for rule in rules:
self.obj.delete_sg_rule(rule)
# Delete Security Group
for sg_id in id.sg_id.values():
self.obj.delete_sg(sg_id)
# Delete Router
for router_id in id.router_id.values():
for subnet_id in id.subnet_uuid.values():
self.obj.remove_interface_router(router_id, subnet_id)
self.obj.remove_gateway_router(router_id)
self.obj.delete_router(router_id)
# Delete VN
if not id.vm_id and not id.port_id:
for vn_id in id.vn_uuid.values():
self.obj.delete_network(vn_id)
# Delete Policies
for policy_id in id.policy_id.values():
self.obj.delete_policy(policy_id)
def post_cleanup(self):
''' Cleanup the parent created objects '''
# If child has failed to cleanup certain objects then parent cleanup will fail too
self.start_cleanup(self.obj.id)
def populate_vm_obj(self):
vm_objs = self.obj.list_vms()
vm_dict = dict()
for vm_obj in vm_objs:
vm_dict[vm_obj.name] = vm_obj
'''
vm_obj.delete()
logger.info( vm_dict.keys(), vm_dict.values()
'''
for id in self.id_obj:
for actual_name in vm_dict.keys():
for vm_name in id.vm_id.keys():
if vm_name in actual_name:
id.vm_obj[actual_name] = vm_dict[actual_name]
def verify(self, op=None):
pass
def get_name(self, prefix, index):
return random_string(self._args.project + '-' + str(prefix) + str(index))
# A Class of UUIDs
class UUID(object):
def __init__(self):
self.vn_uuid = dict()
self.subnet_uuid = dict()
self.port_id = dict()
self.sg_id = dict()
self.rule_id = dict()
self.router_id = dict()
self.policy_id = dict()
self.fip_id = list()
self.vm_id = dict()
self.vn_obj = dict()
self.sg_obj = dict()
self.fip_pool_obj = None
self.vm_obj = dict()
self.st_obj = dict()
self.si_obj = dict()
self.policy_obj = dict()
self.ipam_obj = None
self.vdns_obj = None
class Openstack(object):
def __init__(self, auth_url, username, password, project, auth_token=None,
domain_name=None, domain_id='default', insecure=True, region_name='RegionOne',
logger=None, scope='domain'):
''' Get keystone client obj '''
self.session = None
self.auth_url = auth_url
self.username = username
self.password = password
self.project = project
self.domain_name = domain_name or 'Default'
self.domain_id = domain_id
self.keystone = self.get_client()
self.session = self.keystone.session
self.project_id = self.keystone.projects.find(name=self.project, domain_id=domain_id).id
''' Get nova client handle '''
self.nova = nova_client.Client('2',session=self.session,region_name=region_name)
''' Get neutron client handle '''
self.neutron = neutron_client.Client('2.0',session=self.session,region_name=region_name)
self.id = UUID()
def get_session(self):
self.auth = ks_identity.v3.Password(auth_url=self.auth_url, username=self.username,
password=self.password, project_name=self.project,
user_domain_id="default", project_domain_id="default")
session = ks_session.Session(auth=self.auth)
return session
def get_client(self):
return ks_client.Client(session=self.get_session())
def get_handle(self):
return self.keystone
def get_auth_token(self):
return self.keystone.session.get_token()
def create_project(self, project_name,domain_id):
return self.keystone.projects.create(project_name,domain_id).id
def get_user_id(self, username):
users = self.keystone.users.list()
for user in users:
if user.name == username:
return user.id
return None
def get_role_id(self, role_name):
roles = self.keystone.roles.list()
for role in roles:
if role.name == role_name:
return role.id
return None
def add_user_to_project(self, userid, roleid, projectid):
self.keystone.roles.grant(project=projectid,
user=userid,
role=roleid,
group=None)
def delete_project(self, project_id):
return self.keystone.projects.delete(project_id)
def create_network(self, vn_name, mask=24, external=False):
''' Create Network via Neutron client call '''
cidr = get_randmon_cidr(mask=mask)
vn_dict = {'name': vn_name}
if external:
vn_dict['router:external'] = True
response = self.neutron.create_network({'network': vn_dict})
''' Store VN uuid and subnet uuid dicts '''
net_id = response['network']['id']
if external:
self.ext_vn_uuid = net_id
else:
self.id.vn_uuid[vn_name] = net_id
response = self.neutron.create_subnet({'subnet':
{'cidr': cidr,
'ip_version': 4,
'network_id': net_id
}})
self.id.subnet_uuid[vn_name] = response['subnet']['id']
def update_network(self, vn_name, network_dict):
vn_id = self.id.vn_uuid[vn_name]
self.neutron.update_network(vn_id, {'network': network_dict})
@retry(15, 2)
def delete_network(self, vn_id):
''' Delete network '''
self.neutron.delete_network(vn_id)
return True
def create_port(self, vn_name, port_name):
''' Create port using Neutron api '''
port_dict = {'network_id': self.id.vn_uuid[vn_name]}
response = self.neutron.create_port({'port': port_dict})
''' Store Port UUID's '''
self.id.port_id[port_name] = response['port']['id']
def delete_port(self, port_id):
''' Delete Port '''
self.neutron.delete_port(port_id)
def list_ports(self):
''' List Port '''
return self.neutron.list_ports()['ports']
def create_floatingip(self, ext_vn_uuid):
''' Create Floating IP '''
fip_dict = {'floating_network_id': ext_vn_uuid}
response = self.neutron.create_floatingip({'floatingip': fip_dict})
self.id.fip_id.append(response['floatingip']['id'])
def assoc_floatingip(self, fip_id, port_id):
fip_dict = {'floatingip': {'port_id': port_id}}
return self.neutron.update_floatingip(fip_id, fip_dict)
def delete_floatingip(self, fip_id):
''' Delete FloatingIP '''
self.neutron.delete_floatingip(fip_id)
def create_router(self, router_name):
''' Create Logical Router '''
router_dict = {'name': router_name, 'admin_state_up': True}
response = self.neutron.create_router({'router': router_dict})
self.id.router_id[router_name] = response['router']['id']
def add_interface_router(self, router_name, vn_name):
router_id = self.id.router_id[router_name]
subnet_id = self.id.subnet_uuid[vn_name]
self.neutron.add_interface_router(router_id, {'subnet_id': subnet_id})
def remove_interface_router(self, router_id, subnet_id):
self.neutron.remove_interface_router(router_id, {'subnet_id': subnet_id})
def add_gateway_router(self, router_name, vn_uuid):
router_id = self.id.router_id[router_name]
self.neutron.add_gateway_router(router_id, {'network_id': vn_uuid})
def remove_gateway_router(self, router_id):
self.neutron.remove_gateway_router(router_id)
def delete_router(self, router_id):
''' Delete Logical Router '''
self.neutron.delete_router(router_id)
def create_sg(self, sg_name):
''' Create Security Group '''
sg_dict = {'name': sg_name}
res = self.neutron.create_security_group({'security_group': sg_dict})
self.id.sg_id[sg_name] = res['security_group']['id']
def delete_sg(self, sg_id):
self.neutron.delete_security_group(sg_id)
def create_sg_rule(self, sg_name, min, max, cidr='0.0.0.0/0',
direction='ingress', proto='tcp'):
sg_id = self.id.sg_id[sg_name]
rule_dict = {'security_group_id': sg_id, 'direction': direction,
'remote_ip_prefix': cidr, 'protocol': proto,
'port_range_min': min, 'port_range_max': max}
response = self.neutron.create_security_group_rule(
{'security_group_rule': rule_dict})
if sg_name not in self.id.rule_id:
self.id.rule_id[sg_name] = list()
self.id.rule_id[sg_name].append(response['security_group_rule']['id'])
def delete_sg_rule(self, rule_id):
self.neutron.delete_security_group_rule(rule_id)
def create_vm(self, vm_name, image_id, port_name=None,
vn_name=None, compute_host=None, zone='nova'):
''' Create virtual machine '''
nics = []
launch_on = None
port_id = None
        ''' A few hardcoded values '''
flavor = 1 # m1.tiny
if port_name in self.id.port_id:
port_id = self.id.port_id[port_name]
if port_id is not None:
nics = [{'port-id': port_id}]
else:
nics = [{'net-id': self.id.vn_uuid[vn_name]}]
if compute_host:
launch_on = zone + ':' + compute_host
'''
with open("/tmp/userdata.sh", "w") as f:
f.write("""#!/bin/sh
ls -al / | tee /tmp/output.txt
""")
'''
response = self.nova.servers.create(name=vm_name,
flavor=flavor,
image=image_id,
nics=nics,
availability_zone=launch_on,
max_count=max_inst)
# userdata='/tmp/userdata.sh')
# logger.info( '%s, %s' %(response, response.__dict__)
self.id.vm_id[vm_name] = response.id
def list_vms(self, all_projects=False):
return self.nova.servers.list(search_opts={"all_projects": all_projects},
detailed=False)
def delete_vm(self, vm_obj):
vm_obj.delete()
def delete_policy(self, policy_id):
self.neutron.delete_policy(policy_id)
class VNC(Openstack):
def __init__(self, auth_url, username, password, project, ip, port, auth_host, auth_token=None):
super(VNC, self).__init__(auth_url, username, password, project)
self.vnc = VncApi(api_server_host=ip,
api_server_port=port,
username=username,
password=password,
tenant_name=project,
auth_host=auth_host)
#print(self.project_id)
#print(project)
#print(self.vnc.projects_list())
self.project_obj = self.vnc.project_read(id=str(uuid.UUID(self.project_id)))
def create_vdns(self, vdns_name):
vdns_type = VirtualDnsType(domain_name='juniper.net',
dynamic_records_from_client=True,
default_ttl_seconds=100,
record_order='random')
vdns_obj = VirtualDns(vdns_name, self.project_obj, virtual_DNS_data=vdns_type)
vdns_id = self.vnc.virtual_DNS_create(vdns_obj)
self.id.vdns_obj = self.vnc.virtual_DNS_read(id=vdns_id)
def create_ipam(self, ipam_name):
ipam_obj = NetworkIpam(ipam_name, self.project_obj, network_ipam_mgmt=IpamType("dhcp"))
if self.id.vdns_obj:
ipam_obj.add_virtual_DNS(self.id.vdns_obj)
ipam_uuid = self.vnc.network_ipam_create(ipam_obj)
self.id.ipam_obj = self.vnc.network_ipam_read(id=ipam_uuid)
def create_network(self, vn_name, mask=24, external=False):
''' Create virtual network using VNC api '''
cidr = get_randmon_cidr(mask=mask).split('/')[0]
vn_obj = VirtualNetwork(vn_name, self.project_obj,
router_external=external)
vn_obj.add_network_ipam(self.id.ipam_obj or NetworkIpam(),
VnSubnetsType([IpamSubnetType(
subnet=SubnetType(cidr, mask))]))
net_id = self.vnc.virtual_network_create(vn_obj)
if external:
fip_pool_obj = FloatingIpPool(vn_name, vn_obj)
self.vnc.floating_ip_pool_create(fip_pool_obj)
self.project_obj.add_floating_ip_pool(fip_pool_obj)
self.vnc.project_update(self.project_obj)
self.id.fip_pool_obj = self.vnc.floating_ip_pool_read(fq_name=fip_pool_obj.get_fq_name())
self.ext_vn_uuid = net_id
else:
self.id.vn_uuid[vn_name] = net_id
self.id.vn_obj[vn_name] = self.vnc.virtual_network_read(fq_name=vn_obj.get_fq_name())
def create_port(self, vn_name, port_name):
''' Create Port through VNC api '''
port_obj = VirtualMachineInterface(port_name, parent_obj=self.project_obj)
self.id.port_id[port_name] = port_obj.uuid = str(uuid.uuid4())
port_obj.add_virtual_network(self.id.vn_obj[vn_name])
self.vnc.virtual_machine_interface_create(port_obj)
iip_id = str(uuid.uuid4())
iip_obj = InstanceIp(name=iip_id)
iip_obj.uuid = iip_id
iip_obj.add_virtual_network(self.id.vn_obj[vn_name])
iip_obj.add_virtual_machine_interface(port_obj)
self.vnc.instance_ip_create(iip_obj)
def create_floatingip(self, ext_vn_uuid):
''' Create Floating IP using VNC api '''
vn_obj = self.vnc.virtual_network_read(id=ext_vn_uuid)
fip_pool_obj = FloatingIpPool('floating-ip-pool', vn_obj)
fip_id = str(uuid.uuid4())
fip_obj = FloatingIp(name=fip_id, parent_obj=fip_pool_obj)
fip_obj.uuid = fip_id
fip_obj.set_project(self.project_obj)
fip_obj.set_virtual_machine_interface_list([])
fip_obj.set_floating_ip_fixed_ip_address(None)
self.vnc.floating_ip_create(fip_obj)
self.id.fip_id.append(fip_id)
def create_sg(self, sg_name):
''' Create Security group using VNC api '''
def _get_rule(prefix, ethertype):
dst_addr = AddressType(subnet=SubnetType(prefix, 0))
src_addr = AddressType(security_group='local')
rule = PolicyRuleType(rule_uuid=str(uuid.uuid4()), direction='>',
protocol='any', src_addresses=[src_addr],
src_ports=[PortType(0, 65535)],
dst_addresses=[dst_addr],
dst_ports=[PortType(0, 65535)],
ethertype=ethertype)
return rule
rules = [_get_rule('0.0.0.0', 'IPv4'), _get_rule('::', 'IPv6')]
sg_obj = SecurityGroup(name=sg_name, parent_obj=self.project_obj,
security_group_entries=PolicyEntriesType(rules))
self.id.sg_id[sg_name] = sg_obj.uuid = str(uuid.uuid4())
self.vnc.security_group_create(sg_obj)
self.id.sg_obj[sg_name] = self.vnc.security_group_read(id=sg_obj.uuid)
def create_sg_rule(self, sg_name, min, max, cidr='0.0.0.0/0',
direction='ingress', proto='tcp'):
''' Create Security Group Rule using VNC api '''
def _get_rule(dir, cidr, min, max, proto, ethertype):
prefix = cidr.split('/')
if dir == 'ingress':
src_addr = AddressType(subnet=SubnetType(prefix[0], int(prefix[1])))
dst_addr = AddressType(security_group='local')
else:
dst_addr = AddressType(subnet=SubnetType(prefix[0], int(prefix[1])))
src_addr = AddressType(security_group='local')
rule = PolicyRuleType(rule_uuid=str(uuid.uuid4()), direction='>',
protocol=proto, src_addresses=[src_addr],
src_ports=[PortType(0, 65535)],
dst_addresses=[dst_addr],
dst_ports=[PortType(min, max)],
ethertype=ethertype)
return rule
rule = _get_rule(direction, cidr, min, max, proto, 'IPv4')
sg_obj = self.id.sg_obj[sg_name]
rules = sg_obj.get_security_group_entries()
if rules is None:
rules = PolicyEntriesType([rule])
else:
rules.add_policy_rule(rule)
sg_obj.set_security_group_entries(rules)
self.vnc.security_group_update(sg_obj)
if sg_name not in self.id.rule_id:
self.id.rule_id[sg_name] = list()
self.id.rule_id[sg_name].append(rule.rule_uuid)
self.id.sg_obj[sg_name] = self.vnc.security_group_read(id=sg_obj.uuid)
def create_router(self, router_name):
''' Create Logical Router using VNC api '''
router_obj = LogicalRouter(router_name, self.project_obj,
id_perms=IdPermsType(enable=True))
self.id.router_id[router_name] = router_obj.uuid = str(uuid.uuid4())
self.vnc.logical_router_create(router_obj)
def delete_policy(self, policy_id):
self.vnc.network_policy_delete(id=policy_id)
def get_randmon_cidr(mask=16):
''' Generate random non-overlapping cidr '''
global alloc_addr_list
address = socket.inet_ntop(socket.AF_INET,
struct.pack('>I',
random.randint(2**24, 2**32 - 2**29 - 1)))
address = str(IPNetwork(address+'/'+str(mask)).network)
if address.startswith('169.254') or address in alloc_addr_list:
cidr = get_randmon_cidr()
else:
alloc_addr_list.append(address)
cidr = address+'/'+str(mask)
return cidr
def parse_cli(args):
'''Define and Parse arguments for the script'''
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument('--api_server_ip',
action='store',
default='127.0.0.1',
help='API Server IP [127.0.0.1]')
parser.add_argument('--keystone_ip',
action='store',
default='127.0.0.1',
help='Keystone IP [127.0.0.1]')
parser.add_argument('--api_server_port',
action='store',
default='8082',
help='API Server Port [8082]')
parser.add_argument('--admin_username',
action='store',
default='admin',
help='Admin user name [admin]')
parser.add_argument('--admin_password',
action='store',
default='c0ntrail123',
help="Admin user's password [contrail123]")
parser.add_argument('--admin_project',
action='store',
default='admin',
help='Admin project name [admin]')
parser.add_argument('--username',
action='store',
default='admin',
help='project user name [admin]')
parser.add_argument('--password',
action='store',
default='c0ntrail123',
help="project user's password [contrail123]")
parser.add_argument('--mysql_passwd',
action='store',
default=None,
help="Root password for mysql, reqd in case of n_vms")
parser.add_argument('--project',
action='store',
default='',
help='project name []')
parser.add_argument('--n_projects',
action='store',
default='1', type=int,
help='No of projects to create [1]')
parser.add_argument('--n_process',
action='store',
default='1', type=int,
help='No of Parallel processes to run [1]')
parser.add_argument('--n_threads',
action='store',
default='1', type=int,
help='No of threads to run per process [1]')
parser.add_argument('--image_id',
action='store',
default=None,
help='Image ID [None]')
parser.add_argument('--public_vn_id',
action='store',
default=None,
help='UUID of public network')
parser.add_argument('--n_vns',
action='store',
default='0', type=int,
help='No of Vns to create per project [0]')
parser.add_argument('--n_ports',
action='store',
default='0', type=int,
help='No of Ports to create per VN [0]')
parser.add_argument('--n_sgs',
action='store',
default='0', type=int,
help='No of Security Groups to create per project [0]')
parser.add_argument('--n_sg_rules',
action='store',
default='0', type=int,
help='No of Security Group Rules to create per SG [0]')
parser.add_argument('--n_routers',
action='store',
default='0', type=int,
help='No of Routers to create per project [0]')
parser.add_argument('--n_vms',
action='store',
default='0', type=int,
help='No of VMs to create per VN [0]. Each create spawns 20 vms by default')
parser.add_argument('--n_fips',
action='store',
default='0', type=int,
help='No of Floating-IPs to create per project [0]')
parser.add_argument('--n_svc_chains',
action='store',
default='0', type=int,
help='No of Service chains(instances+policy) to create per project [0]')
parser.add_argument('--n_svc_templates',
action='store',
default='0', type=int,
help='No of Service templates to create per project [0]')
parser.add_argument('--n_policies',
action='store',
default='0', type=int,
help='No of policies to create per project [0]')
parser.add_argument('--n_policy_rules',
action='store',
default='0', type=int,
help='No of policy rules to create per policy [0]')
parser.add_argument('--vnc',
action='store_true',
help='Use VNC client to configure objects [False]')
parser.add_argument('--vdns',
action='store_true',
help='Create VDNS per project [False]')
parser.add_argument('--ipam',
action='store_true',
help='Create IPAM per project [False]')
parser.add_argument('--cleanup',
action='store_true',
help='Cleanup the created objects [False]')
parser.add_argument('--verify',
action='store_true',
help='Verify the created objects [False]')
parser.add_argument('--rate',
action='store_true',
help='Terminate children after a min [False]')
parser.add_argument('--timeout',
action='store',
default='180000', type=int,
                        help='Max wait time in secs [180000]')
pargs = parser.parse_args(args)
if pargs.n_projects > 1 and pargs.project:
pargs.project = None
print( 'Overriding --project as --n_projects is set')
time.sleep(1)
if pargs.n_vms and pargs.n_ports and (pargs.n_vms != pargs.n_ports):
pargs.n_ports = pargs.n_vms
print( 'Setting n_ports to be same as n_vms')
time.sleep(1)
return pargs
def create_n_process(target, n_process, kwargs_list, timeout=None, callback=None):
process = list()
events = list()
for i in range(n_process):
process.append(Process(target=target, kwargs=kwargs_list[i]))
start_time = datetime.now()
if debug is True:
print( 'Time at start %s'%str(start_time))
start_process(process)
if callback:
callback_process(callback, process, kwargs_list)
join_process(process, timeout)
success = get_success_percentile(process)
end_time = datetime.now()
if debug is True:
print( 'Time at End %s'%str(end_time))
return (success, end_time-start_time)
def start_process(processes):
for process in processes:
time.sleep(2)
process.start()
def callback_process(callback, processes, kwargs_list):
for i in xrange(len(processes)):
callback(processes[i], kwargs_list[i])
def join_process(processes, timeout):
for process in processes:
process.join(timeout=timeout)
process.terminate()
def get_success_percentile(processes):
success = 0
for process in processes:
if process.exitcode == 0:
success += 1
return (success * 100)/len(processes)
def random_string(prefix):
return prefix+''.join(random.choice(string.hexdigits) for _ in range(4))
def sig_handler(_signo, _stack_frame):
raise KeyboardInterrupt
def main():
signal.signal(signal.SIGTERM, sig_handler)
pargs = parse_cli(sys.argv[1:])
obj = ScaleTest(pargs)
obj.setUp()
if pargs.cleanup:
print('Cleaning up all the objects')
import pdb; pdb.set_trace()
obj.cleanup()
if __name__ == '__main__':
#logging.basicConfig(filename='scale.log', filemode='w')
#logger=logging.getLogger()
#logger.setLevel(logging.INFO)
main()
|
b2.py
|
import json
import time
from requests import get, post
from requests.auth import HTTPBasicAuth
from threading import Thread
from typing import Callable, Dict, List, Tuple
from b2py import constants, utils
class B2Error(Exception):
"""General exception type when interacting with the B2 API."""
pass
class B2:
"""This module is a wrapper around the Backblaze B2 API to read and write files
to a remote object store."""
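  # Rough usage sketch (bucket and file names are illustrative; credentials come from
  # the constructor arguments or the environment variables described below):
  #   b2 = B2()
  #   bucket = b2.create_bucket('my-backups')
  #   info = b2.upload_file(bucket['bucketId'], 'notes.txt', b'hello world')
  #   data = b2.download_file(info['fileId'])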
def __init__(self, account_id: str = None, account_key: str = None):
"""To initialize the interface we need access to a B2 account.
Args:
account_id: The ID of the account to connect to. This can be found by logging
into your Backblaze account. If not provided, we try to read from
the ENV variable `B2_ACCOUNT_ID`.
account_key: The key to perform operations on your behalf. Once you reset
it, the old key will no longer work. If not provided, we try
to read from the ENV variable `B2_APPLICATION_KEY`.
"""
self.account_id = account_id or constants.B2_ACCOUNT_ID
self.account_key = account_key or constants.B2_ACCOUNT_KEY
if not (self.account_id and self.account_key):
raise B2Error('No ID or key for B2 account.')
# Map from a bucket id to a tuple (upload_url, auth_token)
self.upload_urls = {}
# Map from a file id to a tuple(upload_url, auth_token)
self.file_upload_urls = {}
self.auth_token = None
self._authorize()
@property
def authorized(self) -> bool:
"""
Returns:
      Whether we have already authenticated and received a token.
"""
return bool(self.auth_token)
def _call(self,
host: str,
endpoint: str = '',
headers: Dict = {},
body: Dict = {},
requires_auth: bool = True,
method: Callable = get,
num_retries: int = 3,
backoff_time: float = 0.5,
**kwargs) -> Dict:
"""Makes a B2 API call and catches any errors.
Args:
host: The host to use for the request.
endpoint: The endpoint to get.
headers: HTTP headers to send.
body: Data to send in the body of the request.
requires_auth: Whether the request requires the user to be logged in.
method: The type of request for the URL fetch.
num_retries: The number of retries after receiving a 5xx error.
backoff_time: Time to wait between retries.
kwargs: Any extra params to pass.
Returns:
The return call of the method on the parameters, if successful.
"""
# Add authorization header
if requires_auth:
if not self.authorized:
raise B2Error(
'Must be authorized to make this call! Endpoint: {0}'.format(
endpoint))
headers['Authorization'] = self.auth_token
# Call the API
url = utils.construct_url(host, endpoint)
response = method(url, headers=headers, params=body, **kwargs)
# Attempt to retry bad calls
if response.status_code >= 500 and num_retries > 0:
time.sleep(backoff_time)
return self._call(host, endpoint, headers, body, requires_auth, method,
num_retries - 1, backoff_time * 2, **kwargs)
# Out of retries and we still have an error
if response.status_code >= 400:
raise B2Error(
'Received status code {0} making request to url {1}. {2}'.format(
response.status_code, url, response.json()))
return response
def _authorize(self):
"""Authorize the client to access the B2 account."""
auth = HTTPBasicAuth(self.account_id, self.account_key)
response = self._call(
constants.B2_API_BASE,
'/b2_authorize_account',
requires_auth=False,
auth=auth)
data = response.json()
self.auth_token = data['authorizationToken']
self.api_url = data['apiUrl'] + constants.B2_API_VERSION
self.download_url = data['downloadUrl']
self.file_part_size = data['absoluteMinimumPartSize']
if not self.authorized:
      raise B2Error('Failed to authorize with account id {0}.'.format(self.account_id))
def create_bucket(self, bucket_name: str, private: bool = True) -> Dict:
"""Create a new bucket.
Args:
bucket_name: The name of the new bucket.
private: Whether files in the bucket should be all private.
Returns:
The new bucket data.
"""
bucket_type = 'allPrivate' if private else 'allPublic'
body = {
'accountId': self.account_id,
'bucketName': bucket_name,
'bucketType': bucket_type
}
response = self._call(self.api_url, '/b2_create_bucket', body=body)
return response.json()
def delete_bucket(self, bucket_id: str):
"""Delete a bucket.
Args:
bucket_id: The bucket to delete.
"""
body = {'accountId': self.account_id, 'bucketId': bucket_id}
self._call(self.api_url, '/b2_delete_bucket', body=body)
def list_buckets(self) -> List[Dict]:
"""List all the buckets.
Returns:
A list of all buckets in the account.
"""
body = {'accountId': self.account_id}
response = self._call(self.api_url, '/b2_list_buckets', body=body)
return response.json()['buckets']
def _get_upload_url(self, bucket_id: str):
"""In order to upload a file, we first request an upload URL.
This method will update the `upload_urls` dict. We cannot upload to a bucket
until that bucket has an entry in this dict.
Args:
bucket_id: The bucket to upload to.
"""
body = {'bucketId': bucket_id}
response = self._call(self.api_url, '/b2_get_upload_url', body=body)
data = response.json()
self.upload_urls[bucket_id] = (data['uploadUrl'],
data['authorizationToken'])
def _get_file_part_upload_url(self, file_id: str):
"""Get the URL to upload parts of a large file and save it to a dict.
Args:
file_id: The id of the large file.
"""
body = {'fileId': file_id}
response = self._call(self.api_url, '/b2_get_upload_part_url', body=body)
data = response.json()
self.file_upload_urls[file_id] = (data['uploadUrl'],
data['authorizationToken'])
def _start_large_file_upload(self,
bucket_id: str,
file_name: str,
content_type: str = None) -> str:
"""For large files, we upload in multiple parts.
Args:
bucket_id: The bucket to put the file in.
file_name: The name of the file in the object store.
content_type: The value of the Content-Type header to send.
Returns:
The file id of the created file.
"""
body = {
'bucketId': bucket_id,
'fileName': file_name,
'contentType': content_type or 'b2/x-auto'
}
response = self._call(self.api_url, '/b2_start_large_file', body=body)
data = response.json()
return data['fileId']
def _upload_large_file_part(self, file_id: str, part_idx: int,
contents: str) -> Dict:
"""Upload a chunk of a large file.
Args:
      file_id: The id of the large file whose part is being uploaded.
      part_idx: The 1-based part number.
      contents: The content to write.
"""
if file_id not in self.file_upload_urls:
self._get_file_part_upload_url(file_id)
upload_url, auth_token = self.file_upload_urls[file_id]
headers = {
'Authorization': auth_token,
'X-Bz-Part-Number': str(part_idx),
'Content-Length': str(len(contents)),
'X-Bz-Content-Sha1': utils.sha1(contents)
}
response = self._call(
upload_url,
method=post,
headers=headers,
requires_auth=False,
data=contents)
return response.json()
def _finish_large_file_upload(self, file_id: str, hashes: List[str]) -> Dict:
"""Finish the upload of a large file.
Args:
file_id: The file to finish uploading.
hashes: A list of SHA1 hashes for the file parts to verify correct upload.
Returns:
The file information.
"""
body = json.dumps({'fileId': file_id, 'partSha1Array': hashes})
response = self._call(
self.api_url, '/b2_finish_large_file', data=body, method=post)
return response.json()
def _upload_large_file(self,
bucket_id: str,
file_name: str,
contents: str,
content_type: str = None) -> Dict:
"""For large files, we upload in multiple parts.
We break up the file into chunks and upload each sequentially.
Args:
bucket_id: The bucket to put the file in.
file_name: The name of the file in the object store.
contents: The contents of the file.
content_type: The value of the Content-Type header to send.
Returns:
Information about the created large file.
"""
file_id = self._start_large_file_upload(bucket_id, file_name, content_type)
    num_parts = -(-len(contents) // self.file_part_size)  # ceiling division avoids an empty trailing part
if num_parts < 2:
raise B2Error('Large file uploads should have at least 2 parts.')
parts = [
contents[i * self.file_part_size:(i + 1) * self.file_part_size]
for i in range(num_parts)
]
# Each worker uploads one part of the file and stores the content hash.
hashes = {}
def thread_worker(i, part):
hashes[i] = self._upload_large_file_part(file_id, i + 1,
part)['contentSha1']
# Launch threads in parallel
threads = [
Thread(target=thread_worker, args=(i, part))
for i, part in enumerate(parts)
]
for thread in threads:
thread.start()
for thread in threads:
thread.join()
# Collect the hashes and finish the upload
content_hashes = [hashes[i] for i in range(len(parts))]
return self._finish_large_file_upload(file_id, content_hashes)
def upload_file(self,
bucket_id: str,
file_name: str,
contents: str,
content_type: str = None,
num_retries: int = 5) -> Dict:
"""Upload a file to a given bucket.
Args:
bucket_id: The bucket to put the file in.
file_name: The name of the file in the object store.
contents: The file contents
content_type: The value of the Content-Type header to send.
Returns:
Information about the created file.
"""
if len(contents) > self.file_part_size:
return self._upload_large_file(bucket_id, file_name, contents,
content_type)
# If the upload fails, retry a few times with new auth tokens
for _ in range(num_retries):
if bucket_id not in self.upload_urls:
self._get_upload_url(bucket_id)
upload_url, auth_token = self.upload_urls[bucket_id]
headers = {
'Authorization': auth_token,
'X-Bz-File-Name': file_name,
'Content-Type': content_type or 'b2/x-auto',
'Content-Length': str(len(contents)),
'X-Bz-Content-Sha1': utils.sha1(contents)
}
try:
response = self._call(
upload_url,
method=post,
headers=headers,
requires_auth=False,
data=contents,
num_retries=0)
        # Call succeeded
return response.json()
      except B2Error:
        # Remove the cached upload url and auth token, then retry with fresh ones
        self.upload_urls.pop(bucket_id, None)
    raise B2Error('Failed to upload {0} after {1} attempts.'.format(file_name, num_retries))
def download_file(self,
file_id: str,
byte_range: Tuple[int, int] = None) -> str:
"""Downloads a file.
Args:
file_id: The Id of the file to download.
byte_range: Tuple of start and end byte offsets to retrieve.
Returns:
The file contents.
"""
headers = {}
if byte_range:
start, end = byte_range
headers['Range'] = 'bytes={0}-{1}'.format(start, end)
body = {'fileId': file_id}
response = self._call(
self.download_url,
'/b2api/v1/b2_download_file_by_id',
headers=headers,
body=body)
return response.content
def list_files(self,
bucket_id: str,
start_file_name: str = None,
prefix: str = None,
list_directory: bool = False,
limit: int = None) -> List[Dict]:
"""List files in a bucket.
Args:
bucket_id: The bucket to search.
start_file_name: Name of the first file to list from.
prefix: Filter files that begin with a given prefix.
list_directory: If True, treat 'prefix' as a directory name
and list all the files in that directory.
limit: The maximum number of files returned.
Returns:
The files in the bucket.
"""
body = {
'bucketId': bucket_id,
}
if start_file_name:
body['startFileName'] = start_file_name
if prefix:
body['prefix'] = prefix
if list_directory:
body['delimiter'] = '/'
if limit:
body['maxFileCount'] = str(limit)
response = self._call(self.api_url, '/b2_list_file_versions', body=body)
return response.json()['files']
def get_file_info(self, file_id: str) -> Dict:
"""Get metadata for a file.
Args:
file_id: The file to retrieve.
Returns:
The details of the file.
"""
body = {'fileId': file_id}
response = self._call(self.api_url, '/b2_get_file_info', body=body)
return response.json()
def delete_file(self, file_id: str, file_name: str):
"""Delete a file.
Args:
file_id: The Id of the file to delete.
file_name: The name of the file.
"""
body = {'fileId': file_id, 'fileName': file_name}
self._call(self.api_url, '/b2_delete_file_version', body=body)
|
can_bridge.py
|
#!/usr/bin/env python3
#pylint: skip-file
import os
import time
#import math
#import atexit
#import numpy as np
#import threading
#import random
import cereal.messaging as messaging
#import argparse
from common.params import Params
from common.realtime import Ratekeeper
from selfdrive.golden.can import can_function, sendcan_function
from selfdrive.car.honda.values import CruiseButtons
#import subprocess
import sys
import signal
import threading
from queue import Queue
from selfdrive.golden.keyboard_ctrl import keyboard_poll_thread, keyboard_shutdown
params = Params()
def shutdown():
global params
global pm
print('shutdown !')
keyboard_shutdown()
#params.delete("CalibrationParams")
dat = messaging.new_message('pandaStates', 1)
dat.valid = True
dat.pandaStates[0] = {
'ignitionLine': False,
'pandaType': "uno",
'controlsAllowed': True,
'safetyModel': "hondaNidec"
}
for seq in range(10):
pm.send('pandaStates', dat)
time.sleep(0.1)
print ("exiting")
sys.exit(0)
def main():
global params
global pm
params.delete("Offroad_ConnectivityNeeded")
#params.delete("CalibrationParams")
#params.put("CalibrationParams", '{"calib_radians": [0,0,0], "valid_blocks": 20}')
os.system('rm /tmp/op_git_updated')
os.system('touch /tmp/op_simulation')
start_loggerd = False
if len(sys.argv) > 1:
start_loggerd = (sys.argv[1] == '1')
print ('start_loggerd=', start_loggerd)
if start_loggerd:
os.system('cd /data/openpilot/; ./selfdrive/loggerd/loggerd &')
os.system('echo 1 > /tmp/force_calibration')
# make volume 0
os.system('service call audio 3 i32 3 i32 0 i32 1')
q = Queue()
t = threading.Thread(target=keyboard_poll_thread, args=[q])
t.start()
pm = messaging.PubMaster(['can', 'pandaStates'])
# can loop
sendcan = messaging.sub_sock('sendcan')
rk = Ratekeeper(100, print_delay_threshold=None)
steer_angle = 0.0
speed = 50.0 / 3.6
cruise_button = 0
btn_list = []
btn_hold_times = 2
frames = 0
while 1:
# check keyboard input
if not q.empty():
message = q.get()
print (message)
if (message == 'quit'):
shutdown()
return
m = message.split('_')
if m[0] == "cruise":
if m[1] == "down":
cruise_button = CruiseButtons.DECEL_SET
if len(btn_list) == 0:
for x in range(btn_hold_times):
btn_list.append(cruise_button)
if m[1] == "up":
cruise_button = CruiseButtons.RES_ACCEL
if len(btn_list) == 0:
for x in range(btn_hold_times):
btn_list.append(cruise_button)
if m[1] == "cancel":
cruise_button = CruiseButtons.CANCEL
if len(btn_list) == 0:
for x in range(btn_hold_times):
btn_list.append(cruise_button)
btn = 0
if len(btn_list) > 0:
btn = btn_list[0]
btn_list.pop(0)
# print ('cruise_button=', cruise_button)
can_function(pm, speed * 3.6, steer_angle, rk.frame, cruise_button=btn, is_engaged=1)
if rk.frame%5 == 0:
throttle, brake, steer = sendcan_function(sendcan)
steer_angle += steer/10000.0 # torque
#print(speed * 3.6, steer, throttle, brake)
if frames % 20 == 0:
dat = messaging.new_message('pandaStates', 1)
dat.valid = True
dat.pandaStates[0] = {
'ignitionLine': True,
'pandaType': "uno",
'controlsAllowed': True,
'safetyModel': "hondaNidec",
}
pm.send('pandaStates', dat)
frames += 1
rk.keep_time()
shutdown()
def signal_handler(sig, frame):
print('You pressed Ctrl+C!')
shutdown()
if __name__ == "__main__":
signal.signal(signal.SIGINT, signal_handler)
print (sys.argv)
print ("input 1 to curse resume/+")
print ("input 2 to curse set/-")
print ("input 3 to curse cancel")
print ("input q to quit")
main()
|
axonwebsocket.py
|
import logging
import time
import json
import datetime
from threading import Thread
from websocket import WebSocketApp
logger = logging.getLogger(__name__)
class AxonWebsocket:
"""
A simple-to-use Python wrapper for automatic trading based on Axon's websocket. Enhance your trading decisions by
leveraging Axon's reinforcement learning signals derived from a wide range of market features.
Instance Variables:
timestamp: int - keeps track of the system's time to compare and validate incoming websocket messages.
wsapp: WebSocketApp object - a live websocket object that connects to Axon's API
"""
def __init__(self, forecast_queue, api_key="n1MLgSRxLP86iHUUjAFEv6PDC9NCWoVP5DBTAcN6"):
self.timestamp = int(time.time())
# Initialize logger
self.log = logging.getLogger()
self.log.setLevel(logging.INFO)
# Initialize parameters
self.uri = "wss://api.axonintellex.com"
self.api_key = api_key
self.header = {"x-api-key": self.api_key}
self.ping_interval = 60
self.time_out = 30
# self.wsapp = WebSocketApp(self.uri,
# on_message=self.on_message,
# on_close=self.on_websocket_close,
# header=self.header)
self.wsapp = WebSocketApp(self.uri,
on_message=lambda ws, msg: self.on_message(ws, msg),
on_close=lambda ws: self.on_websocket_close(ws),
header=self.header)
self.forecast = None
self.new_forecast = False
self.thread = None
self.qu = forecast_queue
def connect(self):
self.timestamp = int(time.time())
self.thread = Thread(target=self.wsapp.run_forever, args=(None, None, self.ping_interval, self.time_out))
self.thread.start()
return True
def on_message(self, wsapp, msg):
"""
This is the main function in the class and where strategy should be built based on the latest notification from
Axon. wsapp was added there to allow for the class websocket to be initialized
:param message: string - received message
:return: puts a message in queue so it can be read by the creator of the object (example axonbot)
"""
try:
msg = json.loads(msg)
# assert self.valid_axon_forecast(msg)
self.forecast = msg
if self.is_new_message(msg['timestamp']):
self.new_forecast = True
self.log.info("NEW notification came in: " + str(msg))
else:
self.new_forecast = False
self.log.info("OLD notification after a websocket initial connect/re-connect: " + str(msg))
# self.execute_strategy()
self.qu.put(msg)
except Exception as e:
self.log.info("Caught exception when parsing message from websocket\n" + str(e))
def valid_axon_forecast(self, msg):
"""
Validates that the message follows axon's standards and has the information needed for decision making
:param msg: str notification received by axon
:return: True for valid and False for not valid or incomplete
"""
ts = msg['timestamp']
if ts:
forecast = msg['forecast']
assert datetime.datetime.utcfromtimestamp(ts).replace(hour=0, minute=0,
second=0, microsecond=0) == datetime.datetime.utcnow() \
.replace(hour=0, minute=0, second=0, microsecond=0)
assert forecast['candle'] == datetime.datetime.utcnow().strftime("%Y-%m-%d")
return True
else:
return False
def is_new_message(self, ts):
"""
Compares timestamps of the incoming message from the websocket to determine if a new notification came in.
Sets class timestamp to the newest incoming timestamp
:param ts: integer timestamp from incoming notification
:return: True or False
"""
if ts > self.timestamp:
self.timestamp = ts
return True
else:
return False
def notify(self, message):
"""
Displays a message to the screen the same way print does.
:param message: str - message to be displayed
:return: prints out to user
"""
print(message)
def on_websocket_close(self, ws):
self.qu.put("websocket_disconnected")
|
utils.py
|
# -*- coding: utf-8 -*-
from __future__ import division
import os
import sys
import socket
import signal
import functools
import atexit
import tempfile
from subprocess import Popen, PIPE, STDOUT
from threading import Thread
try:
from Queue import Queue, Empty
except ImportError:
from queue import Queue, Empty
from time import sleep
try:
import simplejson as json
except ImportError:
import json
from .exceptions import CommandError, TimeoutWaitingFor
USED_PORTS = set()
ON_POSIX = 'posix' in sys.builtin_module_names
# Directory relative to basetest module location
CURRENT_DIR = os.path.dirname(os.path.abspath(__file__))
# Location of binary files (usually the src/ folder)
BIN_PREFIX = os.path.abspath(
os.path.join(CURRENT_DIR, "..", "..", "src")
)
# Default location of test certificates
DEFAULT_CERT_PATH = os.path.abspath(
os.path.join(CURRENT_DIR, "..", "test_certs")
)
# Default location of test hooks
DEFAULT_HOOK_PATH = os.path.abspath(
os.path.join(CURRENT_DIR, "..", "test_hooks")
)
# Environment flags to control skipping of task and taskd tests
TASKW_SKIP = os.environ.get("TASKW_SKIP", False)
TASKD_SKIP = os.environ.get("TASKD_SKIP", False)
# Environment flags to control use of PATH or in-tree binaries
TASK_USE_PATH = os.environ.get("TASK_USE_PATH", False)
TASKD_USE_PATH = os.environ.get("TASKD_USE_PATH", False)
UUID_REGEXP = ("[0-9A-Fa-f]{8}-" + ("[0-9A-Fa-f]{4}-" * 3) + "[0-9A-Fa-f]{12}")
def taskd_binary_location(cmd="taskd"):
"""If TASKD_USE_PATH is set rely on PATH to look for taskd binaries.
Otherwise ../src/ is used by default.
"""
return binary_location(cmd, TASKD_USE_PATH)
def binary_location(cmd, USE_PATH=False):
"""If USE_PATH is True rely on PATH to look for taskd binaries.
Otherwise ../src/ is used by default.
"""
if USE_PATH:
return cmd
else:
return os.path.join(BIN_PREFIX, cmd)
def wait_condition(cond, timeout=1, sleeptime=.01):
"""Wait for condition to return anything other than None
"""
# NOTE Increasing sleeptime can dramatically increase testsuite runtime
# It also reduces CPU load significantly
if timeout is None:
timeout = 1
if timeout < sleeptime:
print("Warning, timeout cannot be smaller than", sleeptime)
timeout = sleeptime
# Max number of attempts until giving up
tries = int(timeout / sleeptime)
for i in range(tries):
val = cond()
if val is not None:
break
sleep(sleeptime)
return val
def wait_process(pid, timeout=None):
"""Wait for process to finish
"""
def process():
try:
os.kill(pid, 0)
except OSError:
# Process is dead
return True
else:
# Process is still ticking
return None
return wait_condition(process, timeout)
def _queue_output(arguments, pidq, outputq):
"""Read/Write output/input of given process.
This function is meant to be executed in a thread as it may block
"""
kwargs = arguments["process"]
input = arguments["input"]
try:
proc = Popen(**kwargs)
except OSError as e:
# pid None is read by the main thread as a crash of the process
pidq.put(None)
outputq.put((
"",
("Unexpected exception caught during execution of taskw: '{0}' . "
"If you are running out-of-tree tests set TASK_USE_PATH=1 or "
"TASKD_USE_PATH=1 in shell env before execution and add the "
"location of the task(d) binary to the PATH".format(e)),
255)) # false exitcode
return
# Put the PID in the queue for main process to know.
pidq.put(proc.pid)
# Send input and wait for finish
out, err = proc.communicate(input)
if sys.version_info > (3,):
out, err = out.decode('utf-8'), err.decode('utf-8')
# Give the output back to the caller
outputq.put((out, err, proc.returncode))
def _retrieve_output(thread, timeout, queue, thread_error):
"""Fetch output from taskw subprocess queues
"""
# Try to join the thread on failure abort
thread.join(timeout)
if thread.is_alive():
# Join should have killed the thread. This is unexpected
raise TimeoutWaitingFor(thread_error + ". Unexpected error")
# Thread died so we should have output
try:
# data = (stdout, stderr, exitcode)
data = queue.get(timeout=timeout)
except Empty:
data = TimeoutWaitingFor("streams from TaskWarrior")
return data
def _get_output(arguments, timeout=None):
"""Collect output from the subprocess without blocking the main process if
subprocess hangs.
"""
# NOTE Increase this value if tests fail with None being received as
# stdout/stderr instead of the expected content
output_timeout = 0.1 # seconds
pidq = Queue()
outputq = Queue()
t = Thread(target=_queue_output, args=(arguments, pidq, outputq))
t.daemon = True
t.start()
try:
pid = pidq.get(timeout=timeout)
except Empty:
pid = None
# Process crashed or timed out for some reason
if pid is None:
return _retrieve_output(t, output_timeout, outputq,
"TaskWarrior to start")
# Wait for process to finish (normal execution)
state = wait_process(pid, timeout)
if state:
# Process finished
return _retrieve_output(t, output_timeout, outputq,
"TaskWarrior thread to join")
# If we reach this point we assume the process got stuck or timed out
for sig in (signal.SIGABRT, signal.SIGTERM, signal.SIGKILL):
# Start with lower signals and escalate if process ignores them
try:
os.kill(pid, sig)
except OSError as e:
# 3 means the process finished/died between last check and now
if e.errno != 3:
raise
# Wait for process to finish (should die/exit after signal)
state = wait_process(pid, timeout)
if state:
# Process finished
return _retrieve_output(t, output_timeout, outputq,
"TaskWarrior to die")
# This should never happen but in case something goes really bad
raise OSError("TaskWarrior stopped responding and couldn't be killed")
def run_cmd_wait(cmd, input=None, stdout=PIPE, stderr=PIPE,
merge_streams=False, env=os.environ, timeout=None):
"Run a subprocess and wait for it to finish"
if input is None:
stdin = None
else:
stdin = PIPE
if merge_streams:
stderr = STDOUT
else:
stderr = PIPE
arguments = {
"process": {
"args": cmd,
"stdin": stdin,
"stdout": stdout,
"stderr": stderr,
"bufsize": 1,
"close_fds": ON_POSIX,
"env": env,
},
"input": input,
}
out, err, exit = _get_output(arguments, timeout)
if merge_streams:
if exit != 0:
raise CommandError(cmd, exit, out)
else:
return exit, out
else:
if exit != 0:
raise CommandError(cmd, exit, out, err)
else:
return exit, out, err
def run_cmd_wait_nofail(*args, **kwargs):
"Same as run_cmd_wait but silence the exception if it happens"
try:
return run_cmd_wait(*args, **kwargs)
except CommandError as e:
return e.code, e.out, e.err
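# Illustrative usage (added sketch, not part of the original test utilities):
# how run_cmd_wait / run_cmd_wait_nofail are typically driven. The commands
# and timeout are arbitrary example values.
#
# code, out, err = run_cmd_wait(["task", "--version"], timeout=5)
# code, out, err = run_cmd_wait_nofail(["task", "no-such-command"], timeout=5)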
def get_IPs(hostname):
output = {}
addrs = socket.getaddrinfo(hostname, 0, 0, 0, socket.IPPROTO_TCP)
for family, socktype, proto, canonname, sockaddr in addrs:
addr = sockaddr[0]
output[family] = addr
return output
def port_used(addr="localhost", port=None):
"Return True if port is in use, False otherwise"
if port is None:
raise TypeError("Argument 'port' may not be None")
# If we got an address name, resolve it both to IPv6 and IPv4.
IPs = get_IPs(addr)
# Taskd seems to prefer IPv6 so we do it first
for family in (socket.AF_INET6, socket.AF_INET):
try:
addr = IPs[family]
except KeyError:
continue
s = socket.socket(family, socket.SOCK_STREAM)
result = s.connect_ex((addr, port))
s.close()
if result == 0:
# connection was successful
return True
else:
return False
def find_unused_port(addr="localhost", start=53589, track=True):
"""Find an unused port starting at `start` port
If track=False the returned port will not be marked as in-use and the code
will rely entirely on the ability to connect to addr:port as detection
mechanism. Note this may cause problems if ports are assigned but not used
immediately
"""
maxport = 65535
unused = None
for port in range(start, maxport):
if not port_used(addr, port):
if track and port in USED_PORTS:
continue
unused = port
break
if unused is None:
raise ValueError("No available port in the range {0}-{1}".format(
start, maxport))
if track:
USED_PORTS.add(unused)
return unused
def release_port(port):
"""Forget that given port was marked as'in-use
"""
try:
USED_PORTS.remove(port)
except KeyError:
pass
def memoize(obj):
"""Keep an in-memory cache of function results given it's inputs
"""
cache = obj.cache = {}
@functools.wraps(obj)
def memoizer(*args, **kwargs):
key = str(args) + str(kwargs)
if key not in cache:
cache[key] = obj(*args, **kwargs)
return cache[key]
return memoizer
try:
from shutil import which
which = memoize(which)
except ImportError:
# NOTE: This is shutil.which backported from python-3.3.3
@memoize
def which(cmd, mode=os.F_OK | os.X_OK, path=None):
"""Given a command, mode, and a PATH string, return the path which
conforms to the given mode on the PATH, or None if there is no such
file.
`mode` defaults to os.F_OK | os.X_OK. `path` defaults to the result
of os.environ.get("PATH"), or can be overridden with a custom search
path.
"""
# Check that a given file can be accessed with the correct mode.
# Additionally check that `file` is not a directory, as on Windows
# directories pass the os.access check.
def _access_check(fn, mode):
return (os.path.exists(fn) and os.access(fn, mode) and
not os.path.isdir(fn))
# If we're given a path with a directory part, look it up directly
# rather than referring to PATH directories. This includes checking
# relative to the current directory, e.g. ./script
if os.path.dirname(cmd):
if _access_check(cmd, mode):
return cmd
return None
if path is None:
path = os.environ.get("PATH", os.defpath)
if not path:
return None
path = path.split(os.pathsep)
if sys.platform == "win32":
# The current directory takes precedence on Windows.
if os.curdir not in path:
path.insert(0, os.curdir)
# PATHEXT is necessary to check on Windows.
pathext = os.environ.get("PATHEXT", "").split(os.pathsep)
# See if the given file matches any of the expected path
# extensions. This will allow us to short circuit when given
# "python.exe". If it does match, only test that one, otherwise we
# have to try others.
if any(cmd.lower().endswith(ext.lower()) for ext in pathext):
files = [cmd]
else:
files = [cmd + ext for ext in pathext]
else:
# On other platforms you don't have things like PATHEXT to tell you
# what file suffixes are executable, so just pass on cmd as-is.
files = [cmd]
seen = set()
for dir in path:
normdir = os.path.normcase(dir)
if normdir not in seen:
seen.add(normdir)
for thefile in files:
name = os.path.join(dir, thefile)
if _access_check(name, mode):
return name
return None
def parse_datafile(file):
"""Parse .data files on the client and server treating files as JSON
"""
data = []
with open(file) as fh:
for line in fh:
line = line.rstrip("\n")
# Turn [] strings into {} to be treated properly as JSON hashes
if line.startswith('[') and line.endswith(']'):
line = '{' + line[1:-1] + '}'
if line.startswith("{"):
data.append(json.loads(line))
else:
data.append(line)
return data
def mkstemp(data):
"""
Create a temporary file that is removed at process exit
"""
def rmtemp(name):
try:
os.remove(name)
except OSError:
pass
f = tempfile.NamedTemporaryFile(delete=False)
f.write(data)
f.close()
# Ensure removal at end of python session
atexit.register(rmtemp, f.name)
return f.name
def mkstemp_exec(data):
"""Create a temporary executable file that is removed at process exit
"""
name = mkstemp(data)
os.chmod(name, 0o755)
return name
# vim: ai sts=4 et sw=4
|
get-panw-interfaces.py
|
#!/usr/bin/env python3.8
''' Remove nargs='*' from firewalls arg and split to make compatible with Golang calls and
Comment out stdin.isatty() code block
Print header to sys.stdout
Add blank line in between firewalls (line 192)
'''
'''Get firewall interfaces
get-panw-interfaces.py
Author: David Cruz (davidcruz72@gmail.com)
Python version >= 3.6
Required Python packages:
lxml
Features:
Returns a list of firewalls interfaces
Output can be pasted directly into Excel
Terse output option for piping to other commands
Command line options
Platform independent
Save API key and default firewall
Update saved settings
Override/supply API key on the command line
Filter on interface properties
Multi-threaded
'''
import argparse
import json
import os
import os.path
import queue
import re
import signal
import ssl
import sys
import threading
import urllib.request
import lxml.etree as ET
results = []
print_queue = queue.Queue()
results_queue = queue.Queue()
def sigint_handler(signum, frame):
sys.exit(1)
def query_api(host, params):
# Disable certificate verification
ctx = ssl.create_default_context()
ctx.check_hostname = False
ctx.verify_mode = ssl.CERT_NONE
# Get connected firewalls
params = urllib.parse.urlencode(params)
url = f'https://{host}/api/?{params}'
try:
with urllib.request.urlopen(url, context=ctx) as response:
xml = response.read().decode('utf-8')
except OSError as err:
sys.stderr.write(f'{host}: Unable to connect to host ({err})\n')
sys.exit(1)
return
return xml
def parse_interfaces(root, hostname):
interfaces = {}
hw = root.findall('./result/hw/entry')
for int in hw:
ifname = int.find('name').text
mac = int.find('mac').text
status = int.find('st').text
interfaces[ifname] = {
'Firewall': hostname,
'MacAddress': mac,
'Status': status
}
ifnet = root.findall('./result/ifnet/entry')
for int in ifnet:
ifname = int.find('name').text
ip = int.find('ip').text or 'N/A'
zone = int.find('zone').text or 'N/A'
vsys = int.find("vsys").text
vsys = f'vsys{int.find("vsys").text}' if vsys != '0' else 'N/A'
interfaces[ifname] = {
**interfaces.get(ifname, {}),
'Firewall': hostname,
'Zone': zone,
'IpAddress': ip,
'vSys': vsys
}
return interfaces
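# Illustrative shape of the XML that parse_interfaces() expects, inferred
# from the XPath expressions above; the values are placeholders and this is
# not an official PAN-OS schema excerpt:
#
# <response>
#   <result>
#     <hw>
#       <entry><name>ethernet1/1</name><mac>00:11:22:33:44:55</mac><st>up</st></entry>
#     </hw>
#     <ifnet>
#       <entry><name>ethernet1/1</name><ip>10.0.0.1/24</ip><zone>trust</zone><vsys>1</vsys></entry>
#     </ifnet>
#   </result>
# </response>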
def parse_interface_config(root, interfaces):
for ifname, attrs in interfaces.items():
try:
attrs['Comment'] = root.find(f'./result/network/interface/ethernet/entry[@name="{ifname}"]/comment').text
except AttributeError:
attrs['Comment'] = ''
# Collect the link state of physical interfaces only
if re.match(r'^ethernet\d+/\d+$', ifname):
try:
attrs['LinkState'] = root.find(
f'./result/network/interface/ethernet/entry[@name="{ifname}"]/link-state').text
except AttributeError:
# Default interface state auto returns nothing
attrs['LinkState'] = 'auto'
# Collect the aggregate-group
if re.match(r'^ethernet\d+/\d+$', ifname):
try:
attrs['AggGrp'] = root.find(
f'./result/network/interface/ethernet/entry[@name="{ifname}"]/aggregate-group').text
except AttributeError:
# Interfaces without an aggregate-group return nothing
attrs['AggGrp'] = 'N/A'
try:
vrouter = root.xpath(f'//member[text()="{ifname}"]')[0].getparent().getparent().get('name')
attrs['VirtualRouter'] = vrouter if vrouter != None else 'N/A'
except IndexError:
attrs['VirtualRouter'] = 'N/A'
return interfaces
def print_results(args, results):
if args.terse:
regex = re.compile(r'.*?(\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}).*$')
else:
fields = {
'Firewall': {
'width': max([len('Firewall')] + [len(attrs.get('Firewall', '')) for int in results for attrs in int.values()]),
'na': 'N/A'
},
'Interface': {
'width': max([len('Interface')] + [len(ifname) for i in results for ifname in i.keys()]),
'na': 'N/A'
},
'LinkState': {
'width': max([len('LinkState')] + [len(attrs.get('LinkState', '')) for int in results for attrs in int.values()]),
'na': 'N/A'
},
'Status': {
'width': max([len('Status')] + [len(attrs.get('Status', '')) for int in results for attrs in int.values()]),
'na': 'N/A'
},
'MacAddress': {
'width': max([len('MacAddress')] + [len(attrs.get('MacAddress', '')) for int in results for attrs in int.values()]),
'na': 'N/A'
},
'AggGrp': {
'width': max([len('AggGrp')] + [len(attrs.get('AggGrp', '')) for int in results for attrs in int.values()]),
'na': 'N/A'
},
'Zone': {
'width': max([len('Zone')] + [len(attrs.get('Zone', '')) for int in results for attrs in int.values()]),
'na': 'N/A'
},
'IpAddress': {
'width': max([len('IpAddress')] + [len(attrs.get('IpAddress', '')) for int in results for attrs in int.values()]),
'na': 'N/A'
},
'vSys': {
'width': max([len('vSys')] + [len(attrs.get('vSys', '')) for int in results for attrs in int.values()]),
'na': 'N/A'
},
'VirtualRouter': {
'width': max([len('VirtualRouter')] + [len(attrs.get('VirtualRouter', '')) for int in results for attrs in int.values()]),
'na': 'N/A'
},
'Comment': {
'width': max([len('Comment')] + [len(attrs.get('Comment', '')) for int in results for attrs in int.values()]),
'na': ''
},
}
# Print header
header = ''
hr = ''
first_iter = True
for field, attrs in fields.items():
if not first_iter:
header += '\t'
hr += '\t'
else:
first_iter = False
header += f'{field :<{attrs["width"]}}'
hr += f'{("=" * attrs["width"]) :<{attrs["width"]}}'
print(header, file=sys.stdout)
print(hr, file=sys.stdout)
# Print interfaces info
current_hostname = ''
for interfaces in results:
for ifname, if_attrs in sorted(interfaces.items()):
hostname = if_attrs['Firewall']
if not current_hostname:
current_hostname = hostname
if_status = if_attrs.get('Status', 'N/A')
if args.terse:
try:
ip = re.match(regex, if_attrs.get('IpAddress', '')).group(1)
except AttributeError:
continue
if not args.if_status or args.if_status in if_status:
print(ip)
else:
# Insert a newline between hosts
if current_hostname != hostname:
print()
if not args.if_status or args.if_status in if_status:
line = ''
first_iter = True
for field in fields.keys():
if not first_iter:
line += '\t'
else:
first_iter = False
if field == 'Interface':
line += f'{ifname :<{fields["Interface"]["width"]}}'
continue
attr = if_attrs.get(field, fields[field]["na"])
line += f'{attr :<{fields[field]["width"]}}'
print(line)
current_hostname = hostname
def worker(args, host):
url_params = {
'type': 'op',
'cmd': '<show><interface>all</interface></show>',
'key': args.key,
}
xml = query_api(host, url_params)
if args.raw_output:
print_queue.put(xml.split('\n'))
return
# Parse interface operational information
try:
interfaces = parse_interfaces(ET.fromstring(xml), host)
except TypeError as err:
sys.stderr.write(f'Unable to parse XML! ({err})\n')
sys.exit(1)
url_params = {
'type': 'config',
'action': 'show',
'xpath': 'devices/entry/network',
'key': args.key,
}
xml = query_api(host, url_params)
root = ET.fromstring(xml)
# Parse interface configuration
try:
interfaces = parse_interface_config(root, interfaces)
except TypeError as err:
sys.stderr.write(f'Unable to parse XML! ({err})\n')
sys.exit(1)
results_queue.put(interfaces)
def print_manager():
while True:
job = print_queue.get()
for line in job:
print(line)
print_queue.task_done()
def results_manager():
global results
while True:
result = results_queue.get()
results.append(result)
results_queue.task_done()
def main():
# Ctrl+C graceful exit
signal.signal(signal.SIGINT, sigint_handler)
parser = argparse.ArgumentParser(description='Returns a list of firewalls interfaces')
parser.add_argument('firewalls', type=str, help='Space separated list of firewalls to query')
parser.add_argument('-k', '--key', metavar='', type=str, help='API key')
parser.add_argument('-r', '--raw-output', action='store_true', help='Raw XML output')
parser.add_argument('-t', '--terse', action='store_true', help='Output IP addresses only')
parser.add_argument('-U', '--update', action='store_true', help='Update saved settings')
parser.add_argument('--if-status', metavar='', choices=['up', 'down'], help='Filter on interface state')
parser.add_argument('--if-state', metavar='', choices=['up', 'down'], help='DEPRECATED: Filter on interface state')
args = parser.parse_args()
args.firewalls = args.firewalls.split()
# Deprecated: Remove args.if_state in the near future
if args.if_state:
args.if_status = args.if_state
if 'USERPROFILE' in os.environ:
settings_path = os.path.join(os.environ["USERPROFILE"], '.panw-settings.json')
else:
settings_path = os.path.join(os.environ["HOME"], '.panw-settings.json')
# Import saved settings
if os.path.exists(settings_path):
with open(settings_path, 'r') as f:
settings = json.load(f)
# Check for the existence of settings and add if missing
changed = False
if not 'default_firewall' in settings:
settings['default_firewall'] = input(f'Default Firewall: ')
changed = True
if not 'key' in settings:
settings['key'] = input('API Key: ')
changed = True
if changed:
with open(settings_path, 'w') as f:
json.dump(settings, f, sort_keys=True, indent=2)
else:
settings = {
'default_firewall': input('Default Firewall: '),
'key': input('API Key: '),
}
with open(settings_path, 'w') as f:
json.dump(settings, f, sort_keys=True, indent=2)
os.chmod(settings_path, 0o600)
# Update saved settings
if args.update:
print('\nUpdating saved settings ...\n')
settings['key'] = input(f'New API Key [{settings["key"]}]: ') or settings['key']
settings['default_firewall'] = input(
f'New Default Firewall [{settings["default_firewall"]}]: ') or settings['default_firewall']
with open(settings_path, 'w') as f:
json.dump(settings, f, sort_keys=True, indent=2)
print('\nSettings updated!')
sys.exit(0)
# Receive firewalls from stdin
# if not sys.stdin.isatty():
# args.firewalls = [i.strip() for i in sys.stdin]
# # Remove empty strings (Windows PowerShell Select-String cmdlet issue)
# args.firewalls = list(filter(None, args.firewalls))
# elif not args.firewalls:
# args.firewalls = [settings['default_firewall']]
if not args.key:
args.key = settings['key']
if not args.firewalls:
args.firewalls = [settings['default_firewall']]
# Start print manager
t = threading.Thread(target=print_manager)
t.daemon = True
t.start()
del t
# Results manager
t = threading.Thread(target=results_manager)
t.daemon = True
t.start()
del t
worker_threads = []
for host in args.firewalls:
t = threading.Thread(target=worker, args=(args, host))
worker_threads.append(t)
t.start()
for t in worker_threads:
t.join()
results_queue.join()
print_queue.join()
print_results(args, results)
sys.exit(0)
if __name__ == '__main__':
main()
|
main.py
|
import numpy as np
import os
import tensorflow as tf
import gym
import multiprocessing
from src.networks.ac_network import AC_Network
from src.worker import Worker
import threading
from time import sleep
import shutil
from gym.envs.box2d import LunarLanderContinuous
max_global_steps = 200000
max_episode_length = 20
gamma = .99
entropy_beta = 0.005
model_path = './net/a3c.ckpt'
output_graph = True
graph_dir = './graph_log'
env = gym.make("Festium-v2")
tf.reset_default_graph()
with tf.device("/cpu:0"):
global_episodes = tf.Variable(0, dtype=tf.int32)
trainer = tf.train.AdamOptimizer(learning_rate=1e-4)
master_net = AC_Network(env, 'global',model_path, None, None)
num_workers = multiprocessing.cpu_count()
workers = []
for i in range(num_workers):
workers.append(Worker(env, i, trainer, model_path, global_episodes, max_global_steps,entropy_beta))
saver = tf.train.Saver(max_to_keep=5)
with tf.Session() as sess:
try:
coord = tf.train.Coordinator()
sess.run(tf.global_variables_initializer())
if output_graph:
if os.path.exists(graph_dir):
shutil.rmtree(graph_dir)
tf.summary.FileWriter(graph_dir, sess.graph)
worker_threads = []
for worker in workers:
worker_work = lambda: worker.work(max_episode_length, gamma, sess, coord, saver)
t = threading.Thread(target=(worker_work))
t.start()
sleep(0.1)
worker_threads.append(t)
coord.join(worker_threads)
except Exception as e:
print(str(e) + " Try to save model")
master_net.save_ckpt(sess,saver)
|
meter.py
|
#!/usr/bin/env python
#-*- coding: UTF-8 -*-
from pathlib import Path
cur_file_path = Path(__file__).absolute()
workdir = Path(cur_file_path).parent.parent
import argparse
import configparser
import cv2
import numpy as np
import os
import signal
import threading
from gevent import monkey
from gevent.pywsgi import WSGIServer
monkey.patch_all()
from flask import Flask,request,jsonify
from detection import MeterDetection
from log import detlog
def signal_handler(signal, frame):
print('You pressed Ctrl+C!')
os._exit(0)
app = Flask(__name__)
@app.route('/meter',methods=['POST'])
def demo():
name=request.form['filename']
logger.info(name)
img=cv2.imread(name)
predict=model(img).tolist()
print(predict)
res={'predict':predict}
return res
def parse_args():
parser = argparse.ArgumentParser(description='Flask demo')
parser.add_argument('--gpu', dest='gpu',type=int,default=0)
parser.add_argument('--port',dest='port',type=int,default=1111)
parser.add_argument('--gpuRatio',dest='gpuRatio',type=float,default=0.1)
parser.add_argument('--host',dest='host',type=str,default='0.0.0.0')
parser.add_argument('--logID',dest='logID',type=str,default='0')
args = parser.parse_args()
return args
def serv_start():
global host, portNum
print(host,portNum)
logger.info('serv starting...')
http_server = WSGIServer((host, portNum), app)
http_server.serve_forever()
logger.info('serv started')
if __name__ == '__main__':
args = parse_args()
portNum = args.port
host = args.host
cf=configparser.ConfigParser()
cf.read('config.ini')
modelname=Path(cf.get('common','model_path')).stem
logfilename='mmm'
logger=detlog(modelname,logfilename,args.logID)
model=MeterDetection(args,cf,logger=logger)
logger.info('instance of model created')
threads = []
t0 = threading.Thread(target=serv_start)
threads.append(t0)
t1=threading.Thread(target=model.model_restore)
threads.append(t1)
print('-*'*20)
signal.signal(signal.SIGINT, signal_handler)
for t in threads:
t.start()
for t in threads:
t.join()
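# --- Illustrative client-side sketch (added, not part of the original service) ---
# One way to exercise the /meter endpoint once the server is running; the
# host, port and image path are placeholder assumptions, and returning a
# plain dict from the Flask view assumes a Flask version that serialises it
# to JSON automatically.
#
# import requests
# resp = requests.post('http://127.0.0.1:1111/meter',
#                      data={'filename': '/path/to/meter_image.jpg'})
# print(resp.json()['predict'])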
|
lyr_fillin.py
|
#!/usr/bin/python
import pxlBuffer as pxb
import random
from time import sleep
import time
def wheel(pos, brightness):
"""Generate rainbow colors across 0-255 positions."""
if pos < 85:
return pxb.Color(pos * 3 * brightness, (255 - pos * 3) * brightness, 0)
elif pos < 170:
pos -= 85
return pxb.Color((255 - pos * 3) * brightness, 0, pos * 3 * brightness)
else:
pos -= 170
return pxb.Color(0, pos * 3 * brightness, (255 - pos * 3) * brightness)
def randomDrop(q, led_count, layerNum, wait_ms=.1, runtime=70):
layer = pxb.pixelLayer(q, led_count, layerNum)
count=0
endTime=time.time()+runtime
while time.time() < endTime:
if count < 500:
layer.setPixelColor(random.randrange(layer.numPixels()), pxb.Color(0,0,255))
elif count < 1000:
layer.setPixelColor(random.randrange(layer.numPixels()), pxb.Color(255,0,0))
elif count < 1500:
layer.setPixelColor(random.randrange(layer.numPixels()), pxb.Color(0,255,0))
elif count < 2000:
layer.setPixelColor(random.randrange(layer.numPixels()), pxb.Color(255,255,255))
elif count < 2500:
layer.setPixelColor(random.randrange(layer.numPixels()), wheel(random.randrange(255),random.randrange(100)/100.0))
layer.setPixelColor(random.randrange(layer.numPixels()), wheel(random.randrange(255),random.randrange(100)/100.0))
layer.setPixelColor(random.randrange(layer.numPixels()), wheel(random.randrange(255),random.randrange(100)/100.0))
else:
count = 0
count += 1
#layer.setPixelColor(random.randrange(layer.numPixels()), wheel(random.randrange(255),random.randrange(100)/100.0))
#layer.setPixelColor(random.randrange(layer.numPixels()), wheel(random.randrange(255),random.randrange(100)/100.0))
#layer.setPixelColor(random.randrange(layer.numPixels()), wheel(random.randrange(255),random.randrange(100)/100.0))
for pixel in range(layer.numPixels()):
layer.pixelBrightness(pixel, -0.00001)
layer.show()
sleep(wait_ms/1000.0)
layer.die()
# entry function
def NeoFX(q, led_count, layerNum, *args):
randomDrop(q, led_count, layerNum, *args)
# if we're testing the module, setup and execute
if __name__ == "__main__":
from neopixel import *
import multiprocessing
import time
from pprint import pprint
# target FPS
#TARGET_FPS = 1
TARGET_FPS = 24
# LED strip configuration:
LED_COUNT = 632 # Number of LED pixels.
LED_PIN = 18 # GPIO pin connected to the pixels (must support PWM!).
LED_FREQ_HZ = 800000 # LED signal frequency in hertz (usually 800khz)
LED_DMA = 5 # DMA channel to use for generating signal (try 5)
LED_BRIGHTNESS = 128 # Set to 0 for darkest and 255 for brightest
LED_INVERT = False # True to invert the signal (when using NPN transistor level shift)
q = multiprocessing.Queue()
def masterThread(q):
# Create NeoPixel object with appropriate configuration.
strip = Adafruit_NeoPixel(LED_COUNT, LED_PIN, LED_FREQ_HZ, LED_DMA, LED_INVERT, LED_BRIGHTNESS)
# Intialize the library (must be called once before other functions).
strip.begin()
master = pxb.pixelMaster(strip, q)
master.show()
#pprint(master.layers)
#pprint(master.ledsColorBuffer)
startTime=time.time()
iterTime=startTime
count=1
targetSleep=1/float(TARGET_FPS)
print "target FPS: %s" % TARGET_FPS
print "target runtime per frame: %s" % targetSleep
updateFreq=TARGET_FPS*10 # every 10 seconds
while master.die == False:
iterTime=time.time()
runTime=(time.time()-startTime)
master.show()
if count % updateFreq == 0:
print "Time: %2.3f FPS: %2.3f" % (runTime, count/runTime)
print master.layers
startTime=time.time()
count = 1
else:
count += 1
sleepTime=targetSleep-(time.time()-iterTime)
if sleepTime > 0:
sleep(sleepTime)
m = multiprocessing.Process(target=masterThread, args=(q,))
m.daemon=True
m.start()
try:
layer = 1
while True:
NeoFX(q, LED_COUNT, layer)
layer += 1
except KeyboardInterrupt:
q.put("die")
m.join()
|
client.py
|
# -*- coding: utf-8 -*-
#
# Cloud Robotics FX client
#
# @author: Hiroki Wakabayashi <hiroki.wakabayashi@jbs.com>
# @version: 0.0.1
import os
import time, datetime
import json
import urllib
import ssl
import base64
import hashlib, hmac
from threading import Thread,Lock
import logging
import paho.mqtt.client as mqtt
from cloudrobotics.message import CRFXMessage
class CRFXClient(object):
def __init__(self, hostname, deviceid, shared_accesskey):
self.hostname = hostname
self.deviceid = deviceid
# Configure the paho MQTT client
self.mqtt_client = mqtt.Client(client_id=self.deviceid, protocol=mqtt.MQTTv311)
self.mqtt_client.on_connect = self.__on_connect
self.mqtt_client.on_disconnect = self.__on_disconnect
self.mqtt_client.on_message = self.__on_message
self.mqtt_client.on_publish = self.__on_publish
# Callback
self.on_connect_successful = None
self.on_connect_failed = None
self.on_disconnect = None
self.on_message = None
self.on_publish = None
# C2D MQTT topic assigned to this device
self.topic = "devices/"+self.deviceid+"/messages/devicebound/#"
# Generate the SAS token
sas = self._create_sas_token(self.hostname, self.deviceid, shared_accesskey)
self.mqtt_client.username_pw_set(username=self.hostname + "/" + self.deviceid, password=sas)
self.mqtt_client.tls_set(os.path.join(os.path.dirname(__file__), 'cert/ca.cer'), tls_version=ssl.PROTOCOL_TLSv1)
self.mqtt_port = 8883
self.lock = Lock()
self.started = False
self.seqno = 0
self.retry_count = 0
logging.basicConfig()
self.logger = logging.getLogger(__name__)
self.logger.setLevel(logging.DEBUG)
# Handler called after connecting
#
def __on_connect(self, client, userdata, flags, rc):
if rc == 0:
# Start receiving messages from the IoT Hub.
self.mqtt_client.subscribe(self.topic)
self.logger.info('Succeeded to connect to the Azure IoT Hub.')
if self.on_connect_successful: self.on_connect_successful()
# Reset the connection retry counter.
self.retry_count = 0
# Handler called after disconnecting
#
def __on_disconnect(self, client, userdata, rc):
if rc != 0:
self.mqtt_client.disconnect() # stop loop_forever()
# On an abnormal disconnection, retry the connection up to 5 times at 1-second intervals.
if self.retry_count < 5:
self.retry_count += 1
self.logger.error('Failed to connect to the Azure IoT Hub, rc: %d. Trying to reconnect in %d times.', rc, self.retry_count)
time.sleep(1)
self.start()
else:
self.logger.error("Failed to connect to the Azure IoT Hub even if tried 5 times, gave up reconnecting.")
if self.on_connect_failed: self.on_connect_failed()
elif rc == 0 and not self.started:
if self.on_disconnect: self.on_disconnect()
# Handler called when a message is received
#
def __on_message(self, client, userdata, msg):
received_message = CRFXMessage()
received_message.loads(msg.payload)
self.logger.debug("Received message. header: %s, body: %s", received_message.header, received_message.body)
if self.on_message: self.on_message(received_message)
# Handler called after a message is published
#
def __on_publish(self, client, userdata, mid):
self.logger.debug("Publish message: [%d]", mid)
if self.on_publish: self.on_publish()
# Create a Shared Access Signature (SAS) token.
# The default expiry is 20 hours (60*60*20 = 72000 seconds).
def _create_sas_token(self, hostname, deviceid, shared_accesskey, expire_term=72000):
expiry = time.mktime(datetime.datetime.now().utctimetuple())+expire_term
expiry = str(int(expiry))
# urllib.quote does not encode slashes by default, so pass an empty 'safe' string.
uri = "{hostname}/devices/{deviceId}".format(hostname=hostname, deviceId=deviceid)
uri_enc = urllib.quote(uri, safe='')
signature = uri_enc + '\n' + expiry
# The SharedAccessKey is Base64-encoded, so decode it first.
k = bytes(base64.b64decode(shared_accesskey))
v = bytes(signature)
# The signature is computed with HMAC-SHA256.
sig_enc = base64.b64encode(hmac.new(k, v, digestmod=hashlib.sha256).digest())
sig_enc = urllib.quote(sig_enc, safe='')
# Including a keyName in 'skn' causes an authentication error.
token = 'SharedAccessSignature sr=' + uri_enc + '&sig=' + sig_enc + '&se=' + expiry
return token
# Increment the sequence number
#
def _increment_seq(self):
with self.lock:
self.seqno += 1
return self.seqno
# Start the client.
#
def start(self):
try:
self.mqtt_client.connect(self.hostname, port=self.mqtt_port)
self.started = True
except Exception as e:
self.logger.error("Failed to connect to the Azure IoT Hub: %s, because: %s", self.hostname, str(e))
self.started = False
return
# Run in a separate thread.
thread = Thread(target=self.mqtt_client.loop_forever, args=())
thread.start()
# Stop the client.
#
def stop(self):
if not self.started:
return
try:
self.mqtt_client.unsubscribe(self.topic)
self.mqtt_client.disconnect() # stop loop_forever()
except Exception as e:
pass
finally:
self.started = False
# Send a message.
#
def send_message(self, message):
seq = None
try:
# Set the sequence number
seq = self._increment_seq()
message.set_seq(seq)
self.logger.debug('send[%d]: %s', seq, message.payload())
self.mqtt_client.publish('devices/%s/messages/events/' % (self.deviceid), message.payload(), qos=1)
except Exception as e:
self.logger.error("Failed to send this message, because: %s", str(e))
return seq
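# --- Illustrative usage sketch (added, not part of the original module) ---
# A minimal sketch of driving CRFXClient. The hostname, device id and key are
# placeholders, and setting `body` directly on a CRFXMessage is an assumption
# about its interface (only `header`, `body`, `set_seq()` and `payload()` are
# visible from this file).
#
# client = CRFXClient('example.azure-devices.net', 'device-001', '<base64-shared-access-key>')
# client.on_message = lambda msg: print(msg.header, msg.body)
# client.start()
# msg = CRFXMessage()
# msg.body = {'temperature': 21.5}
# client.send_message(msg)
# client.stop()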
|
test_path_utils.py
|
# -*- coding: utf-8 -*-
u"""
Copyright 2018 Telefónica Investigación y Desarrollo, S.A.U.
This file is part of Toolium.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
try:
import Queue as queue # Py2
except ImportError:
import queue as queue # Py3
import threading
import uuid
import pytest
from toolium.path_utils import get_valid_filename, makedirs_safe
filename_tests = (
('hola_pepito', 'hola_pepito'),
(' hola:pep /ito* ', 'hola_pep__ito'),
('successful login -- @1.1 john.doe', 'successful_login_1_1_john_doe'),
('successful login -- @1.2 Mark options: {Length=10 Mark=mark File=file_name.jpg}',
'successful_login_1_2_Mark_options___Length_10_Mark_mark_File_file_name_jpg'),
)
@pytest.mark.parametrize('input_filename, expected_filename', filename_tests)
def test_get_valid_filename(input_filename, expected_filename):
valid_filename = get_valid_filename(input_filename)
assert expected_filename == valid_filename
def test_get_valid_filename_length():
input_filename = ' hola:pep /ito* '
expected_filename = 'hola_pep__it'
valid_filename = get_valid_filename(input_filename, 12)
assert expected_filename == valid_filename
def test_create_new_folder():
folder = os.path.join('output', str(uuid.uuid4()))
makedirs_safe(folder)
assert os.path.isdir(folder)
os.rmdir(folder)
def test_create_existing_folder():
folder = os.path.join('output', str(uuid.uuid4()))
os.makedirs(folder)
makedirs_safe(folder)
assert os.path.isdir(folder)
os.rmdir(folder)
def test_create_new_folder_parallel():
folder = os.path.join('output', str(uuid.uuid4()))
def run_makedirs(folder, exceptions):
try:
makedirs_safe(folder)
except Exception as exc:
exceptions.put(exc)
for _ in range(5):
exceptions = queue.Queue()
thread1 = threading.Thread(target=run_makedirs, args=(folder, exceptions))
thread2 = threading.Thread(target=run_makedirs, args=(folder, exceptions))
thread1.start()
thread2.start()
thread1.join()
thread2.join()
assert exceptions.qsize() == 0
assert os.path.isdir(folder)
os.rmdir(folder)
|
presentation.py
|
#!/usr/bin/env python3
import os
import shutil
import time
import cairosvg
import validators
import wget
import urllib
import threading
from PyPDF2 import PdfFileMerger
def download_and_convert(url: str, i: int, out: str):
try:
wget.download('{}/svg/{}'.format(url, i), out='{}/{}/'.format(out, 'svg'))
except urllib.error.HTTPError:
return -1
print(' ' + str(i))
# Path to svg and pdf
svg = '{}/{}/{}'.format(out, 'svg', i)
pdf = '{}/{}/{}'.format(out, 'pdf', i)
# Convert svg to pdf
cairosvg.svg2pdf(url = svg, write_to = pdf)
return 0
def presentation(url: str):
if not validators.url(url):
print("Not an url.")
return -1
# Processing the url
arr = url.split('/')
if arr[-2] == "svg":
arr = arr[:-2]
url = '/'.join(arr)
else:
print("Incorrect patern.")
return -1
out = str(time.time()) + '.tmp'
os.mkdir(out)
os.mkdir(out + '/svg')
os.mkdir(out + '/pdf')
# Find the total number of slides in the presentation
slides = []
i = 0
total = 0
found = True
while found:
i += 8
slides.append(i)
if download_and_convert(url, i, out) == -1:
while download_and_convert(url, i, out) == -1:
slides.append(i)
i -= 1
total = i
found = False
# Start threads to download the remaining slides
threads = []
for i in range(1, total):
if i not in slides:
t = threading.Thread(target=download_and_convert, args=(url, i, out))
t.start()
threads.append(t)
lock = threading.Lock()
for i in threads:
with lock:
i.join()
# Merge .pdf files to one
if total != 0:
merger = PdfFileMerger()
for i in range(1, total + 1):
merger.append('{}/pdf/{}'.format(out, i))
merger.write(('{}.pdf').format(out))
merger.close()
print("Final .pdf was created with {} slides.".format(total))
else:
print("Download fail.")
shutil.rmtree(out)
return 0
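# Illustrative usage (added sketch): the function expects a slide URL whose
# path ends in .../svg/<n>; the URL below is a placeholder, not a real
# presentation.
#
# presentation('https://example.org/presentation/some-id/svg/1')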
|
cache_agent.py
|
import logging
import threading
import time
import datetime
#import gdrivefs.report
import gdrivefs.state
from gdrivefs.conf import Conf
from gdrivefs.cache_registry import CacheRegistry, CacheFault
_logger = logging.getLogger(__name__)
_logger.setLevel(logging.INFO)
class CacheAgent(object):
"""A particular namespace within the cache."""
registry = None
resource_name = None
max_age = None
fault_handler = None
cleanup_pretrigger = None
report = None
report_source_name = None
def __init__(self, resource_name, max_age, fault_handler=None,
cleanup_pretrigger=None):
_logger.debug("CacheAgent(%s,%s,%s,%s)" % (resource_name, max_age,
type(fault_handler),
cleanup_pretrigger))
self.registry = CacheRegistry.get_instance(resource_name)
self.resource_name = resource_name
self.max_age = max_age
self.fault_handler = fault_handler
self.cleanup_pretrigger = cleanup_pretrigger
# self.report = Report.get_instance()
# self.report_source_name = ("cache-%s" % (self.resource_name))
self.__t = None
self.__t_quit_ev = threading.Event()
self.__start_cleanup()
def __del__(self):
self.__stop_cleanup()
# TODO(dustin): Currently disabled. The system doesn't rely on it, and it's
# just another thread that unnecessarily runs, and trips up our
# ability to test individual components in simple isolation. It
# needs to be refactored.
#
# We'd like to either refactor into a multiprocessing worker, or
# just send to statsd (which would be kindof cool).
# self.__post_status()
# def __del__(self):
#
# if self.report.is_source(self.report_source_name):
# self.report.remove_all_values(self.report_source_name)
# pass
# def __post_status(self):
# """Send the current status to our reporting tool."""
#
# num_values = self.registry.count(self.resource_name)
#
# self.report.set_values(self.report_source_name, 'count',
# num_values)
#
# status_post_interval_s = Conf.get('cache_status_post_frequency_s')
# status_timer = Timer(status_post_interval_s, self.__post_status)
#
# Timers.get_instance().register_timer('status', status_timer)
def __cleanup(self):
"""Scan the current cache and determine items old-enough to be
removed.
"""
cleanup_interval_s = Conf.get('cache_cleanup_check_frequency_s')
_logger.info("Cache-cleanup thread running: %s", self)
while self.__t_quit_ev.is_set() is False and \
gdrivefs.state.GLOBAL_EXIT_EVENT.is_set() is False:
_logger.debug("Doing clean-up for cache resource with name [%s]." %
(self.resource_name))
cache_dict = self.registry.list_raw(self.resource_name)
total_keys = [ (key, value_tuple[1]) for key, value_tuple \
in cache_dict.items() ]
cleanup_keys = [ key for key, value_tuple \
in cache_dict.items() \
if (datetime.datetime.now() - value_tuple[1]).seconds > \
self.max_age ]
_logger.debug("Found (%d) entries to clean-up from entry-cache." %
(len(cleanup_keys)))
if cleanup_keys:
for key in cleanup_keys:
_logger.debug("Cache entry [%s] under resource-name [%s] "
"will be cleaned-up." %
(key, self.resource_name))
if not self.exists(key, no_fault_check=True):
_logger.debug("Entry with ID [%s] has already been "
"cleaned-up." % (key))
else:
self.remove(key)
else:
_logger.debug("No cache-cleanup required.")
time.sleep(cleanup_interval_s)
_logger.info("Cache-cleanup thread terminating: %s", self)
def __start_cleanup(self):
_logger.info("Starting cache-cleanup thread: %s", self)
self.__t = threading.Thread(target=self.__cleanup)
self.__t.start()
def __stop_cleanup(self):
_logger.info("Stopping cache-cleanup thread: %s", self)
self.__t_quit_ev.set()
self.__t.join()
def set(self, key, value):
_logger.debug("CacheAgent.set(%s,%s)" % (key, value))
return self.registry.set(self.resource_name, key, value)
def remove(self, key):
_logger.debug("CacheAgent.remove(%s)" % (key))
return self.registry.remove(self.resource_name,
key,
cleanup_pretrigger=self.cleanup_pretrigger)
def get(self, key, handle_fault=None):
if handle_fault is None:
handle_fault = True
_logger.debug("CacheAgent.get(%s)" % (key))
try:
result = self.registry.get(self.resource_name,
key,
max_age=self.max_age,
cleanup_pretrigger=self.cleanup_pretrigger)
except CacheFault:
_logger.debug("There was a cache-miss while requesting item with "
"ID (key).")
if self.fault_handler == None or not handle_fault:
raise
result = self.fault_handler(self.resource_name, key)
if result is None:
raise
return result
def exists(self, key, no_fault_check=False):
_logger.debug("CacheAgent.exists(%s)" % (key))
return self.registry.exists(self.resource_name, key,
max_age=self.max_age,
cleanup_pretrigger=self.cleanup_pretrigger,
no_fault_check=no_fault_check)
def __getitem__(self, key):
return self.get(key)
def __setitem__(self, key, value):
return self.set(key, value)
def __delitem__(self, key):
return self.remove(key)
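# --- Illustrative usage sketch (added, not part of the original module) ---
# A minimal illustration of a fault handler that repopulates the cache on a
# miss. The resource name, max age and `expensive_lookup` loader are
# hypothetical.
#
# def _load_entry(resource_name, key):
#     value = expensive_lookup(key)  # hypothetical loader
#     agent.set(key, value)
#     return value
#
# agent = CacheAgent('entries', max_age=60, fault_handler=_load_entry)
# agent['some-id'] = {'title': 'example'}
# print(agent['some-id'])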
|
upgrade_tests_collections.py
|
from .newupgradebasetest import NewUpgradeBaseTest
import queue
import copy
import threading
from random import randint
from remote.remote_util import RemoteMachineShellConnection
from couchbase_helper.tuq_helper import N1QLHelper
from pytests.eventing.eventing_helper import EventingHelper
from eventing.eventing_base import EventingBaseTest
from lib.testconstants import STANDARD_BUCKET_PORT
from membase.api.rest_client import RestConnection, RestHelper
from membase.helper.bucket_helper import BucketOperationHelper
from membase.helper.cluster_helper import ClusterOperationHelper
from pytests.eventing.eventing_constants import HANDLER_CODE
from remote.remote_util import RemoteMachineShellConnection
from .newupgradebasetest import NewUpgradeBaseTest
from rebalance.rebalance_base import RebalanceBaseTest
from couchbase_helper.documentgenerator import BlobGenerator
from collection.collections_cli_client import CollectionsCLI
from collection.collections_rest_client import CollectionsRest
from collection.collections_stats import CollectionsStats
class UpgradeTestsCollections(NewUpgradeBaseTest):
def setUp(self):
super(UpgradeTestsCollections, self).setUp()
self.queue = queue.Queue()
self.graceful = self.input.param("graceful", False)
self.after_upgrade_nodes_in = self.input.param("after_upgrade_nodes_in", 1)
self.after_upgrade_nodes_out = self.input.param("after_upgrade_nodes_out", 1)
self.verify_vbucket_info = self.input.param("verify_vbucket_info", True)
self.initialize_events = self.input.param("initialize_events", "").split(":")
self.upgrade_services_in = self.input.param("upgrade_services_in", None)
self.after_upgrade_services_in = \
self.input.param("after_upgrade_services_in", None)
self.after_upgrade_services_out_dist = \
self.input.param("after_upgrade_services_out_dist", None)
self.in_between_events = self.input.param("in_between_events", "").split(":")
self.after_events = self.input.param("after_events", "").split(":")
self.before_events = self.input.param("before_events", "").split(":")
self.upgrade_type = self.input.param("upgrade_type", "online")
self.sherlock_upgrade = self.input.param("sherlock", False)
self.max_verify = self.input.param("max_verify", None)
self.verify_after_events = self.input.param("verify_after_events", True)
self.online_upgrade_type = self.input.param("online_upgrade_type", "swap")
self.offline_upgrade_type = self.input.param("offline_upgrade_type", "offline_shutdown")
self.src_bucket_name = self.input.param('src_bucket_name', 'src_bucket')
self.eventing_log_level = self.input.param('eventing_log_level', 'INFO')
self.dst_bucket_name = self.input.param('dst_bucket_name', 'dst_bucket')
self.dst_bucket_name1 = self.input.param('dst_bucket_name1', 'dst_bucket1')
self.metadata_bucket_name = self.input.param('metadata_bucket_name', 'metadata')
self.source_bucket_mutation_name = self.input.param('source_bucket_mutation_name', 'source_bucket_mutation')
self.dst_bucket_curl_name = self.input.param('dst_bucket_curl_name', 'dst_bucket_curl')
self.create_functions_buckets = self.input.param('create_functions_buckets', True)
self.use_memory_manager = self.input.param('use_memory_manager', True)
self.test_upgrade_with_xdcr = self.input.param('xdcr', False)
self.final_events = []
self.n1ql_helper = None
self.total_buckets = 1
self.in_servers_pool = self._convert_server_map(self.servers[:self.nodes_init])
""" Init nodes to not upgrade yet """
for key in list(self.in_servers_pool.keys()):
self.in_servers_pool[key].upgraded = False
self.out_servers_pool = self._convert_server_map(self.servers[self.nodes_init:])
self.gen_initial_create = BlobGenerator('upgrade', 'upgrade',
self.value_size,
end=self.num_items)
self.gen_create = BlobGenerator('upgrade', 'upgrade', self.value_size,
start=self.num_items + 1,
end=self.num_items * 1.5)
self.gen_update = BlobGenerator('upgrade', 'upgrade', self.value_size,
start=self.num_items // 2,
end=self.num_items)
self.gen_delete = BlobGenerator('upgrade', 'upgrade', self.value_size,
start=self.num_items // 4,
end=self.num_items // 2 - 1)
self.after_gen_create = BlobGenerator('upgrade', 'upgrade',
self.value_size,
start=self.num_items * 1.6,
end=self.num_items * 2)
self.after_gen_update = BlobGenerator('upgrade', 'upgrade',
self.value_size, start=1,
end=self.num_items/4)
self.after_gen_delete = BlobGenerator('upgrade', 'upgrade',
self.value_size,
start=self.num_items * .5,
end=self.num_items * 0.75)
initial_services_setting = self.input.param("initial-services-setting", None)
if initial_services_setting is not None and initial_services_setting.count("kv") < 2:
raise Exception("This test needs at least 2 kv nodes to run")
""" Install original cb server """
self._install(self.servers[:self.nodes_init])
if not self.init_nodes and initial_services_setting is not None:
if "-" in initial_services_setting:
self.multi_nodes_services = True
initial_services = initial_services_setting.split("-")[0]
else:
initial_services = initial_services_setting
self.initialize_nodes([self.servers[:self.nodes_init][0]],
services=initial_services)
RestConnection(self.master).set_indexer_storage_mode()
self._log_start(self)
if len(self.servers[:self.nodes_init]) > 1:
if initial_services_setting is None:
self.cluster.rebalance(self.servers[:1],
self.servers[1:self.nodes_init],
[],
use_hostnames=self.use_hostnames)
else:
set_services = self.initial_services(initial_services_setting)
for i in range(1, len(set_services)):
self.cluster.rebalance([self.servers[0]],
[self.servers[i]],
[],
use_hostnames=self.use_hostnames,
services=[set_services[i]])
self.sleep(10)
else:
self.cluster.rebalance([self.servers[0]], self.servers[1:], [],
use_hostnames=self.use_hostnames)
self.sleep(5)
self.upgrade_master_node = self.servers[self.nodes_init:][0]
self.rest = RestConnection(self.upgrade_master_node)
self.rest_col = CollectionsRest(self.upgrade_master_node)
self.cli_col = CollectionsCLI(self.upgrade_master_node)
self.stat_col = CollectionsStats(self.upgrade_master_node)
""" sometimes, when upgrade failed and node does not install couchbase
server yet, we could not set quota at beginning of the test. We
have to wait to install new couchbase server to set it properly here """
servers_available = copy.deepcopy(self.servers)
if len(self.servers) > int(self.nodes_init):
servers_available = servers_available[:self.nodes_init]
self.quota = self._initialize_nodes(
self.cluster, servers_available, self.disabled_consistent_view,
self.rebalanceIndexWaitingDisabled,
self.rebalanceIndexPausingDisabled, self.maxParallelIndexers,
self.maxParallelReplicaIndexers, self.port)
self.add_built_in_server_user(node=self.master)
self.bucket_size = self._get_bucket_size(self.quota, self.total_buckets)
#self.bucket_size = "256"
self.create_buckets()
self.buckets = RestConnection(self.master).get_buckets() # remove this
self.n1ql_server = None
self.success_run = True
self.failed_thread = None
self.generate_map_nodes_out_dist_upgrade(self.after_upgrade_services_out_dist)
if self.upgrade_services_in != "same":
self.upgrade_services_in = self.get_services(list(self.in_servers_pool.values()),
self.upgrade_services_in, start_node = 0)
self.after_upgrade_services_in = self.get_services(list(self.out_servers_pool.values()),
self.after_upgrade_services_in, start_node = 0)
self.fts_obj = None
self.index_name_prefix = None
if self.test_upgrade_with_xdcr:
from pytests.xdcr.xdcr_callable import XDCRCallable
# Setup XDCR src and target clusters
self.xdcr_handle = XDCRCallable(self.servers[:self.nodes_init])
def tearDown(self):
super(UpgradeTestsCollections, self).tearDown()
"""
This test_upgrade is written to upgrade from 6.x.x to 7.0 and later.
This test_upgrade function can run with many different test cases; all you need are the params.
params:
**** Must include when run test_upgrade in job config or in conf file ****
upgrade_test=True (this param must include to run this test_upgrade)
skip_init_check_cbserver=true (this param bypasses the ns_server check inside the node)
*** these params could change its value ***
items=10000 (can be any number)
initial_version=6.6.0-7909 (original cb version in cluster.
Must be in format x.x.x-xxxx )
released_upgrade_version=7.0.0-3254 (upgrade cluster to Cheshire cat.
Must be in format x.x.x-xxxx )
nodes_init=2 (number of node cluster will form)
upgrade_type=offline (if this param not pass, default value is online.
If value is offline, default value of
offline_upgrade_type is normal offline upgrade)
offline_upgrade_type=offline_failover (this param is used with upgrade_type=offline
if do offline failover, it needs to pass
offline_upgrade_type=offline_failover)
initialize_events=event_before_upgrade (it must be separated with dash like
kv_ops_initialize-create_fts_index_query_compare.
Function called must be in underscore format)
initial-services-setting=kv,index-kv,n1ql,fts-kv,eventing,index,n1ql
Services for each node is separated with dash.
Remember, no space around comma
In example above, node 1 with services kv,index
node 2 with services kv,n1ql,fts
node 3 with services kv,eventing,index
init_nodes=False (default value is true and will get service from ini file, disable
initial-services-setting param above)
upgrade_services_in=same (if not pass this param, it will get services in ini file)
after_events=rebalance_in-create_scope_collection-load_collection-verify_collection_data
(event must separate with dash)
after_upgrade_services_in=kv,fts (this param will pass services to rebalance_in a
node above. If add 2 nodes in, it needs 2
services separated by dash. Otherwise, it will
get service from ini file)
Here is example of an offline failover upgrade test with fts
-t upgrade.upgrade_tests.UpgradeTests.test_upgrade,items=5000,initial_version=6.0.0-7909,
nodes_init=3,initialize_events=kv_ops_initialize-create_fts_index_query_compare,
initial-services-setting=kv,index-kv,n1ql,fts-kv,eventing,index,n1ql,
upgrade_services_in=same,after_events=rebalance_in-create_scope_collection-load_collection-verify_collection_data,
after_upgrade_services_in=kv,fts,disable_HTP=True,upgrade_test=True,init_nodes=False,
skip_init_check_cbserver=true,released_upgrade_version=7.0.0-3154,dgm_run=true,
upgrade_type=offline,offline_upgrade_type=offline_failover
"""
def test_upgrade(self):
self.event_threads = []
self.after_event_threads = []
try:
self.log.info("\n*** Start init operations before upgrade begins ***")
if self.initialize_events:
print("\ninit event: ", self.initialize_events)
initialize_events = self.run_event(self.initialize_events)
self.finish_events(initialize_events)
if not self.success_run and self.failed_thread is not None:
raise Exception("*** Failed to {0} ***".format(self.failed_thread))
self.cluster_stats(self.servers[:self.nodes_init])
if self.before_events:
self.event_threads += self.run_event(self.before_events)
self.log.info("\n*** Start upgrade cluster ***")
self.event_threads += self.upgrade_event()
self.finish_events(self.event_threads)
self.log.info("\nWill install upgrade version to any free nodes")
out_nodes = self._get_free_nodes()
self.log.info("Here is free nodes {0}".format(out_nodes))
""" only install nodes out when there is cluster operation """
cluster_ops = ["rebalance_in", "rebalance_out", "rebalance_in_out"]
for event in self.after_events[0].split("-"):
if event in cluster_ops:
self.log.info(
"\n\nThere are cluster ops after upgrade. "
"Need to install free nodes in upgrade version")
self.initial_version = self.upgrade_versions[0]
self._install(out_nodes)
break
self.generate_map_nodes_out_dist_upgrade(
self.after_upgrade_services_out_dist)
self.log.info("\n\n*** Start operations after upgrade is done ***")
self.add_built_in_server_user()
if self.after_events:
self.after_event_threads = self.run_event(self.after_events)
self.finish_events(self.after_event_threads)
if not self.success_run and self.failed_thread is not None:
raise Exception("*** Failed to {0} ***".format(self.failed_thread))
""" Default set to always verify data """
if self.after_events[0]:
self.log.info("*** Start after events ***")
for event in self.after_events[0].split("-"):
if "delete_buckets" in event:
self.log.info("After events has delete buckets event. "
"No items verification needed")
self.verify_after_events = False
break
if self.verify_after_events:
self.log.info("*** Start data verification ***")
self.cluster_stats(list(self.in_servers_pool.values()))
self._verify_data_active_replica()
except Exception as ex:
self.log.info(ex)
print("*** Stop all events to stop the test ***")
self.stop_all_events(self.event_threads)
self.stop_all_events(self.after_event_threads)
raise
finally:
self.log.info("any events for which we need to cleanup")
self.cleanup_events()
def _record_vbuckets(self, master, servers):
bucket_map = dict()
for bucket in self.buckets:
self.log.info("Record vbucket for the bucket {0}"
.format(bucket.name))
bucket_map[bucket.name] = RestHelper(RestConnection(master))\
._get_vbuckets(servers, bucket_name=bucket.name)
return bucket_map
def _find_master(self):
self.master = list(self.in_servers_pool.values())[0]
def _verify_data_active_replica(self):
""" set data_analysis True by default """
self.data_analysis = self.input.param("data_analysis", False)
self.total_vbuckets = self.initial_vbuckets
if self.data_analysis:
disk_replica_dataset, disk_active_dataset = \
self.get_and_compare_active_replica_data_set_all(
list(self.in_servers_pool.values()),
self.buckets, path=None)
self.data_analysis_active_replica_all(
disk_active_dataset, disk_replica_dataset,
list(self.in_servers_pool.values()), self.buckets, path=None)
""" check vbucket distribution analysis after rebalance """
self.vb_distribution_analysis(
servers=list(self.in_servers_pool.values()),
buckets=self.buckets, std=1.0,
total_vbuckets=self.total_vbuckets)
def _verify_vbuckets(self, old_vbucket_map, new_vbucket_map):
for bucket in self.buckets:
self._verify_vbucket_nums_for_swap(old_vbucket_map[bucket.name],
new_vbucket_map[bucket.name])
def stop_all_events(self, thread_list):
for t in thread_list:
try:
if t.is_alive():
t.stop()
except Exception as ex:
self.log.info(ex)
def cleanup_events(self):
thread_list = []
for event in self.final_events:
t = threading.Thread(target=self.find_function(event), args=())
t.daemon = True
t.start()
thread_list.append(t)
for t in thread_list:
t.join()
def run_event_in_sequence(self, events):
q = self.queue
self.log.info("run_event_in_sequence")
print("\nevent run: ", events)
for event in events.split("-"):
print("\n 1 event: ", event)
t = threading.Thread(target=self.find_function(event), args=(q,))
t.daemon = True
t.start()
t.join()
self.success_run = True
while not self.queue.empty():
self.success_run &= self.queue.get()
if not self.success_run:
self.failed_thread = event
break
def run_event(self, events):
thread_list = []
for event in events:
if "-" in event:
t = threading.Thread(target=self.run_event_in_sequence, args=(event,))
t.start()
t.join()
elif event != '':
t = threading.Thread(target=self.find_function(event), args=())
t.daemon = True
t.start()
thread_list.append(t)
return thread_list
def find_function(self, event):
return getattr(self, event)
def finish_events(self, thread_list):
for t in thread_list:
t.join()
def upgrade_event(self):
self.log.info("upgrade_event")
thread_list = []
if self.upgrade_type == "online":
t = threading.Thread(target=self.online_upgrade, args=())
elif self.upgrade_type == "offline":
t = threading.Thread(target=self.offline_upgrade, args=())
t.daemon = True
t.start()
thread_list.append(t)
return thread_list
def server_crash(self):
try:
self.log.info("server_crash")
self.targetProcess = self.input.param("targetProcess", 'memcached')
for node in self.nodes_out_list:
remote = RemoteMachineShellConnection(node)
remote.terminate_process(process_name=self.targetProcess)
except Exception as ex:
self.log.info(ex)
raise
def server_stop(self):
try:
self.log.info("server_stop")
for node in self.nodes_out_list:
remote = RemoteMachineShellConnection(node)
remote.stop_server()
self.final_events.append("start_server")
except Exception as ex:
self.log.info(ex)
raise
def start_server(self):
try:
self.log.info("start_server")
for node in self.nodes_out_list:
remote = RemoteMachineShellConnection(node)
remote.start_server()
except Exception as ex:
self.log.info(ex)
raise
def failover(self, queue=None):
failover_node = False
try:
self.log.info("VVVVVV failover a node ")
print("failover node ", self.nodes_out_list)
nodes = self.get_nodes_in_cluster_after_upgrade()
failover_task = self.cluster.async_failover([self.master],
failover_nodes = self.nodes_out_list, graceful=self.graceful)
failover_task.result()
if self.graceful:
""" Check if rebalance is still running """
msg = "graceful failover failed for nodes"
self.assertTrue(RestConnection(self.master).monitorRebalance(\
stop_if_loop=True), msg=msg)
rebalance = self.cluster.async_rebalance(self.servers[:self.nodes_init],
[], self.nodes_out_list)
rebalance.result()
failover_node = True
else:
msg = "Failed to failover a node"
self.assertTrue(RestConnection(self.master).monitorRebalance(\
stop_if_loop=True), msg=msg)
rebalance = self.cluster.async_rebalance(nodes, [],
self.nodes_out_list)
rebalance.result()
failover_node = True
except Exception as ex:
self.log.info(ex)
if queue is not None:
queue.put(False)
if failover_node and queue is not None:
queue.put(True)
def autofailover(self):
try:
self.log.info("autofailover")
autofailover_timeout = 30
status = RestConnection(self.master).update_autofailover_settings(True, autofailover_timeout)
self.assertTrue(status, 'failed to change autofailover_settings!')
servr_out = self.nodes_out_list
remote = RemoteMachineShellConnection(self.nodes_out_list[0])
remote.stop_server()
self.sleep(autofailover_timeout + 10, "Wait for autofailover")
rebalance = self.cluster.async_rebalance(self.servers[:self.nodes_init],
[], [self.nodes_out_list[0]])
rebalance.result()
except Exception as ex:
self.log.info(ex)
raise
def network_partitioning(self):
try:
self.log.info("network_partitioning")
for node in self.nodes_out_list:
self.start_firewall_on_node(node)
self.final_events.append("undo_network_partitioning")
except Exception as ex:
self.log.info(ex)
raise
def undo_network_partitioning(self):
try:
self.log.info("remove_network_partitioning")
for node in self.nodes_out_list:
self.stop_firewall_on_node(node)
except Exception as ex:
self.log.info(ex)
raise
def bucket_compaction(self):
try:
self.log.info("couchbase_bucket_compaction")
compact_tasks = []
for bucket in self.buckets:
compact_tasks.append(self.cluster.async_compact_bucket(self.master, bucket))
except Exception as ex:
self.log.info(ex)
raise
def warmup(self, queue=None):
node_warmuped = False
try:
self.log.info("Start warmup operation")
nodes = self.get_nodes_in_cluster_after_upgrade()
for server in nodes:
remote = RemoteMachineShellConnection(server)
remote.stop_server()
remote.start_server()
remote.disconnect()
ClusterOperationHelper.wait_for_ns_servers_or_assert(nodes, self)
node_warmuped = True
except Exception as ex:
self.log.info(ex)
if queue is not None:
queue.put(False)
if node_warmuped and queue is not None:
queue.put(True)
def create_lww_bucket(self):
self.time_synchronization='enabledWithOutDrift'
bucket='default'
print('time_sync {0}'.format(self.time_synchronization))
helper = RestHelper(self.rest)
if not helper.bucket_exists(bucket):
node_ram_ratio = BucketOperationHelper.base_bucket_ratio(
self.servers)
info = self.rest.get_nodes_self()
self.rest.create_bucket(bucket=bucket,
ramQuotaMB=512, timeSynchronization=self.time_synchronization)
try:
ready = BucketOperationHelper.wait_for_memcached(self.master,
bucket)
self.assertTrue(ready, msg='[ERROR] Expected the lww bucket to be created.')
finally:
self.log.info("Success, created lww bucket")
def bucket_flush(self, queue=None):
bucket_flushed = False
try:
self.log.info("bucket_flush ops")
self.rest = RestConnection(self.master)
for bucket in self.buckets:
self.rest.flush_bucket(bucket.name)
bucket_flushed = True
except Exception as ex:
self.log.info(ex)
if queue is not None:
queue.put(False)
if bucket_flushed and queue is not None:
queue.put(True)
def delete_buckets(self, queue=None):
bucket_deleted = False
try:
self.log.info("delete_buckets")
self.rest = RestConnection(self.master)
for bucket in self.buckets:
self.log.info("delete bucket {0}".format(bucket.name))
self.rest.delete_bucket(bucket.name)
bucket_deleted = True
except Exception as ex:
self.log.info(ex)
if queue is not None:
queue.put(False)
if bucket_deleted and queue is not None:
queue.put(True)
def create_buckets(self, queue=None):
bucket_created = False
try:
self.log.info("create_buckets")
if self.dgm_run:
self.bucket_size = 256
self.default_bucket = False
self.sasl_buckets = 1
self.sasl_bucket_name = self.sasl_bucket_name + "_" \
+ str(self.total_buckets)
self.rest = RestConnection(self.master)
self._bucket_creation()
self.sleep(5, "sleep after create bucket")
self.total_buckets +=1
bucket_created = True
except Exception as ex:
self.log.info(ex)
if queue is not None:
queue.put(False)
if bucket_created and queue is not None:
queue.put(True)
def change_bucket_properties(self):
try:
self.rest = RestConnection(self.master)
#Change Bucket Properties
for bucket in self.buckets:
self.rest.change_bucket_props(bucket, ramQuotaMB=None,\
replicaNumber=0,\
proxyPort=None, replicaIndex=None, flushEnabled=False)
except Exception as ex:
self.log.info(ex)
raise
def rebalance_in(self, queue=None):
rebalance_in = False
service_in = copy.deepcopy(self.after_upgrade_services_in)
if service_in is None:
service_in = ["kv"]
free_nodes = self._convert_server_map(self._get_free_nodes())
if not list(free_nodes.values()):
raise Exception("No free node available to rebalance in")
try:
self.nodes_in_list = list(self.out_servers_pool.values())[:self.nodes_in]
if int(self.nodes_in) == 1:
if len(list(free_nodes.keys())) > 1:
free_node_in = [list(free_nodes.values())[0]]
if len(self.after_upgrade_services_in) > 1:
service_in = [self.after_upgrade_services_in[0]]
else:
free_node_in = list(free_nodes.values())
self.log.info("<<<=== rebalance_in node {0} with services {1}"\
.format(free_node_in, service_in[0]))
rebalance = \
self.cluster.async_rebalance(self.servers[:self.nodes_init],
free_node_in,
[], services = service_in)
rebalance.result()
self.in_servers_pool.update(free_nodes)
rebalance_in = True
if any("index" in services for services in service_in):
self.log.info("Set storageMode to forestdb after add "
"index node {0} to cluster".format(list(free_nodes.keys())))
RestConnection(list(free_nodes.values())[0]).set_indexer_storage_mode()
if self.after_upgrade_services_in and \
len(self.after_upgrade_services_in) > 1:
self.log.info("remove service '{0}' from service list after "
"rebalance done ".format(self.after_upgrade_services_in[0]))
self.after_upgrade_services_in.pop(0)
self.sleep(10, "wait 10 seconds after rebalance")
if free_node_in and free_node_in[0] not in self.servers:
self.servers.append(free_node_in[0])
except Exception as ex:
self.log.info(ex)
if queue is not None:
queue.put(False)
if rebalance_in and queue is not None:
queue.put(True)
def rebalance_out(self, queue=None):
rebalance_out = False
try:
self.log.info("=====>>>> rebalance_out node {0}"\
.format(self.nodes_out_list))
rebalance = self.cluster.async_rebalance(self.servers[:self.nodes_init],\
[], self.nodes_out_list)
rebalance.result()
rebalance_out = True
except Exception as ex:
self.log.info(ex)
if queue is not None:
queue.put(False)
if rebalance_out and queue is not None:
queue.put(True)
def rebalance_in_out(self, queue=None):
rebalance_in_out = False
try:
self.nodes_in_list = list(self.out_servers_pool.values())[:self.nodes_in]
self.log.info("<<<<<===== rebalance_in node {0}"\
.format(self.nodes_in_list))
self.log.info("=====>>>>> rebalance_out node {0}"\
.format(self.nodes_out_list))
rebalance = self.cluster.async_rebalance(self.servers[:self.nodes_init],\
self.nodes_in_list, self.nodes_out_list,\
services = self.after_upgrade_services_in)
rebalance.result()
rebalance_in_out = True
except Exception as ex:
self.log.info(ex)
if queue is not None:
queue.put(False)
if rebalance_in_out and queue is not None:
queue.put(True)
def incremental_backup(self):
self.log.info("incremental_backup")
def full_backup(self):
self.log.info("full_backup")
def cb_collect_info(self):
try:
self.log.info("cb_collect_info")
log_file_name = "/tmp/sample.zip"
output, error = self.shell.execute_cbcollect_info("%s" % log_file_name)
except Exception as ex:
self.log.info(ex)
raise
def create_index(self, queue=None):
self.log.info("create_index")
self.index_list = {}
create_index = False
self._initialize_n1ql_helper()
try:
self.n1ql_helper.create_primary_index(using_gsi = True,
server = self.n1ql_server)
#self.n1ql_helper.create_primary_index(using_gsi = False,
# server = self.n1ql_server)
self.log.info("done create_index")
create_index = True
except Exception as e:
self.log.info(e)
if queue is not None:
queue.put(False)
if create_index and queue is not None:
queue.put(True)
def create_index_with_replica_and_query(self, queue=None):
""" ,groups=simple,reset_services=True
"""
self.log.info("Create index with replica and query")
self.n1ql_node = self.get_nodes_from_services_map(service_type="n1ql")
self._initialize_n1ql_helper()
self.index_name_prefix = "random_index_" + str(randint(100000, 999999))
create_index_query = "CREATE INDEX " + self.index_name_prefix + \
" ON default(age) USING GSI WITH {{'num_replica': {0}}};"\
.format(self.num_index_replicas)
try:
self.create_index()
self.n1ql_helper.run_cbq_query(query=create_index_query,
server=self.n1ql_node)
except Exception as e:
self.log.info(e)
self.sleep(30)
index_map = self.get_index_map()
self.log.info(index_map)
if not self.expected_err_msg:
self.n1ql_helper.verify_replica_indexes([self.index_name_prefix],
index_map,
self.num_index_replicas)
def verify_index_with_replica_and_query(self, queue=None):
index_map = self.get_index_map()
try:
self.n1ql_helper.verify_replica_indexes([self.index_name_prefix],
index_map,
self.num_index_replicas)
except Exception as e:
self.log.info(e)
if queue is not None:
queue.put(False)
def create_views(self, queue=None):
self.log.info("*** create_views ***")
""" default is 1 ddoc. Change number of ddoc by param ddocs_num=new_number
default is 2 views. Change number of views by param
view_per_ddoc=new_view_per_doc """
try:
self.create_ddocs_and_views(queue)
except Exception as e:
self.log.info(e)
def query_views(self, queue=None):
self.log.info("*** query_views ***")
try:
self.verify_all_queries(queue)
except Exception as e:
self.log.info(e)
def drop_views(self):
self.log.info("drop_views")
def drop_index(self):
self.log.info("drop_index")
for bucket_name in list(self.index_list.keys()):
query = "drop index {0} on {1} using gsi"\
.format(self.index_list[bucket_name], bucket_name)
self.n1ql_helper.run_cbq_query(query, self.n1ql_server)
def query_explain(self):
self.log.info("query_explain")
for bucket in self.buckets:
query = "select count(*) from {0}".format(bucket.name)
self.n1ql_helper.run_cbq_query(query, self.n1ql_server)
query = "explain select count(*) from {0}".format(bucket.name)
self.n1ql_helper.run_cbq_query(query, self.n1ql_server)
query = "select count(*) from {0} where field_1 = 1".format(bucket.name)
self.n1ql_helper.run_cbq_query(query, self.n1ql_server)
query = "explain select count(*) from {0} where field_1 = 1".format(bucket.name)
self.n1ql_helper.run_cbq_query(query, self.n1ql_server)
def change_settings(self):
try:
status = True
if "update_notifications" in self.input.test_params:
status &= self.rest.update_notifications(str(self.input.param("update_notifications", 'true')).lower())
if "autofailover_timeout" in self.input.test_params:
status &= self.rest.update_autofailover_settings(True, self.input.param("autofailover_timeout", None))
if "autofailover_alerts" in self.input.test_params:
status &= self.rest.set_alerts_settings('couchbase@localhost', 'root@localhost', 'user', 'pwd')
if "autocompaction" in self.input.test_params:
tmp, _, _ = self.rest.set_auto_compaction(viewFragmntThresholdPercentage=
self.input.param("autocompaction", 50))
status &= tmp
if not status:
self.fail("some settings were not set correctly!")
except Exception as ex:
self.log.info(ex)
raise
def create_cbas_services(self, queue=None):
"""
This test only need max 4 servers to run
Command to run:
upgrade.upgrade_tests_collections.UpgradeTestsCollections.test_upgrade,items=5000,initial_version=6.0.2-2413,initial-services-setting=kv,index-kv,n1ql,fts-cbas
nodes_init=3,initialize_events=create_cbas_services-kv_ops_initialize-create_n1ql_index_query-create_fts_index,after_upgrade_services_in=same,
dgm_run=true,upgrade_test=True,skip_init_check_cbserver=true,released_upgrade_version=7.0.0-4502
"""
try:
self.validate_error = False
rest = RestConnection(self.master)
cb_version = rest.get_nodes_version()
if 5.5 > float(cb_version[:3]):
self.log.info("This analytic test is only for cb version 5.5 and later.")
return
self.log.info("Get cbas nodes in cluster")
cbas_node = self.get_nodes_from_services_map(service_type="cbas")
cbas_rest = RestConnection(cbas_node)
self.get_services_map()
kv_nodes = []
kv_maps = [x.replace(":8091", "") for x in self.services_map["kv"]]
self.log.info("Get kv node in cluster")
for server in self.servers:
if server.ip in kv_maps:
kv_nodes.append(server)
self.cbas_node = cbas_node
self.load_sample_buckets(servers=kv_nodes, bucketName="travel-sample",
total_items=31591, rest=cbas_rest)
self.test_create_dataset_on_bucket()
except Exception as e:
self.log.info(e)
if queue is not None:
queue.put(False)
if queue is not None:
queue.put(True)
def online_upgrade(self):
try:
self.log.info("online_upgrade")
self.initial_version = self.upgrade_versions[0]
self.sleep(self.sleep_time,
"Pre-setup of old version is done. "
"Wait for online upgrade to {0} version"
.format(self.initial_version))
self.product = 'couchbase-server'
if self.online_upgrade_type == "swap":
self.online_upgrade_swap_rebalance()
else:
self.online_upgrade_incremental()
except Exception as ex:
self.log.info(ex)
raise
def online_upgrade_swap_rebalance(self):
self.log.info("online_upgrade_swap_rebalance")
self.swap_num_servers = self.input.param('swap_num_servers', 1)
servers = self._convert_server_map(self.servers[:self.nodes_init])
out_servers = self._convert_server_map(self.servers[self.nodes_init:])
self.swap_num_servers = min(self.swap_num_servers, len(out_servers))
start_services_num = 0
for i in range(self.nodes_init // self.swap_num_servers):
servers_in = {}
new_servers = copy.deepcopy(servers)
servicesNodeOut = ""
for key in list(out_servers.keys()):
servers_in[key] = out_servers[key]
out_servers[key].upgraded = True
out_servers.pop(key)
if len(servers_in) == self.swap_num_servers:
break
servers_out = {}
node_out = None
new_servers.update(servers_in)
for key in list(servers.keys()):
if len(servers_out) == self.swap_num_servers:
break
elif not servers[key].upgraded:
servers_out[key] = servers[key]
new_servers.pop(key)
out_servers.update(servers_out)
rest = RestConnection(list(servers.values())[0])
self.log.info("****************************************".format(servers))
self.log.info("cluster nodes = {0}".format(list(servers.values())))
self.log.info("cluster service map = {0}".format(rest.get_nodes_services()))
self.log.info("cluster version map = {0}".format(rest.get_nodes_version()))
self.log.info("to include in cluster = {0}".format(list(servers_in.values())))
self.log.info("to exclude from cluster = {0}".format(list(servers_out.values())))
self.log.info("****************************************".format(servers))
rest = RestConnection(list(servers_out.values())[0])
servicesNodeOut = rest.get_nodes_services()
servicesNodeOut = ",".join(servicesNodeOut[list(servers_out.keys())[0]] )
self._install(list(servers_in.values()))
self.sleep(10, "Wait for ns server is ready")
old_vbucket_map = self._record_vbuckets(self.master, list(servers.values()))
try:
if self.upgrade_services_in == "same":
self.cluster.rebalance(list(servers.values()),
list(servers_in.values()),
list(servers_out.values()),
services=[servicesNodeOut])
elif self.upgrade_services_in is not None \
and len(self.upgrade_services_in) > 0:
tem_services = self.upgrade_services_in[
start_services_num:start_services_num
+ len(list(servers_in.values()))]
self.cluster.rebalance(list(servers.values()),
list(servers_in.values()),
list(servers_out.values()),
services=tem_services)
start_services_num += len(list(servers_in.values()))
else:
self.cluster.rebalance(list(servers.values()),
list(servers_in.values()),
list(servers_out.values()))
except Exception as ex:
self.log.info(ex)
raise
self.out_servers_pool = servers_out
self.in_servers_pool = new_servers
servers = new_servers
self.servers = list(servers.values())
self.master = self.servers[0]
if self.verify_vbucket_info:
new_vbucket_map = self._record_vbuckets(self.master, self.servers)
self._verify_vbuckets(old_vbucket_map, new_vbucket_map)
# in the middle of online upgrade events
if self.in_between_events:
self.event_threads = []
self.event_threads += self.run_event(self.in_between_events)
self.finish_events(self.event_threads)
self.in_between_events = None
def online_upgrade_incremental(self):
self.log.info("online_upgrade_incremental")
try:
for server in self.servers[1:]:
self.cluster.rebalance(self.servers, [], [server])
self.initial_version = self.upgrade_versions[0]
self.sleep(self.sleep_time, "Pre-setup of old version is done. Wait for online upgrade to {0} version".\
format(self.initial_version))
self.product = 'couchbase-server'
self._install([server])
self.sleep(self.sleep_time, "Installation of new version is done. Wait for rebalance")
self.cluster.rebalance(self.servers, [server], [])
self.log.info("Rebalanced in upgraded nodes")
self.sleep(self.sleep_time)
self._new_master(self.servers[1])
self.cluster.rebalance(self.servers, [], [self.servers[0]])
self.log.info("Rebalanced out all old version nodes")
except Exception as ex:
self.log.info(ex)
raise
def offline_upgrade(self):
if self.offline_upgrade_type == "offline_shutdown":
self._offline_upgrade()
elif self.offline_upgrade_type == "offline_failover":
self._offline_failover_upgrade()
def failover_add_back(self):
try:
rest = RestConnection(self.master)
recoveryType = self.input.param("recoveryType", "full")
servr_out = self.nodes_out_list
failover_task =self.cluster.async_failover([self.master],
failover_nodes = servr_out, graceful=self.graceful)
failover_task.result()
nodes_all = rest.node_statuses()
nodes = []
if servr_out[0].ip == "127.0.0.1":
for failover_node in servr_out:
nodes.extend([node for node in nodes_all
if (str(node.port) == failover_node.port)])
else:
for failover_node in servr_out:
nodes.extend([node for node in nodes_all
if node.ip == failover_node.ip])
for node in nodes:
self.log.info(node)
rest.add_back_node(node.id)
rest.set_recovery_type(otpNode=node.id, recoveryType=recoveryType)
rebalance = self.cluster.async_rebalance(self.servers[:self.nodes_init],
[], [])
rebalance.result()
except Exception as ex:
raise
def auto_retry_with_rebalance_in(self, queue=None):
self.change_retry_rebalance_settings(True, 300, 1)
rebalance_in = False
service_in = copy.deepcopy(self.after_upgrade_services_in)
if service_in is None:
service_in = ["kv"]
free_nodes = self._convert_server_map(self._get_free_nodes())
free_node_in = []
if not free_nodes.values():
raise Exception("No free node available to rebalance in")
try:
self.nodes_in_list = list(self.out_servers_pool.values())[:self.nodes_in]
if int(self.nodes_in) == 1:
if len(free_nodes.keys()) > 1:
free_node_in = [list(free_nodes.values())[0]]
if len(self.after_upgrade_services_in) > 1:
service_in = [self.after_upgrade_services_in[0]]
else:
free_node_in = list(free_nodes.values())
self.log.info("<<<=== rebalance_in node {0} with services {1}" \
.format(free_node_in, service_in[0]))
shell = RemoteMachineShellConnection(free_node_in[0])
shell.stop_server()
rebalance = \
self.cluster.async_rebalance(self.servers[:self.nodes_init],
free_node_in,
[], services=service_in)
rebalance.result()
self.in_servers_pool.update(free_nodes)
rebalance_in = True
if any("index" in services for services in service_in):
self.log.info("Set storageMode to forestdb after add "
"index node {0} to cluster".format(free_nodes.keys()))
RestConnection(free_nodes.values()[0]).set_indexer_storage_mode()
if self.after_upgrade_services_in and \
len(self.after_upgrade_services_in) > 1:
self.log.info("remove service '{0}' from service list after "
"rebalance done ".format(self.after_upgrade_services_in[0]))
self.after_upgrade_services_in.pop(0)
self.sleep(10, "wait 10 seconds after rebalance")
if free_node_in and free_node_in[0] not in self.servers:
self.servers.append(free_node_in[0])
except Exception as ex:
self.log.info("Rebalance failed with : {0}".format(str(ex)))
self.check_retry_rebalance_succeeded()
if queue is not None:
queue.put(False)
else:
self.fail("Rebalance did not fail as expected. Hence could not validate auto-retry feature..")
finally:
RemoteMachineShellConnection(free_node_in[0]).start_server()
if rebalance_in and queue is not None:
queue.put(True)
def kv_ops_initialize(self, queue=None):
try:
self.log.info("kv_ops_initialize")
self._load_all_buckets(self.master, self.gen_initial_create,
"create", self.expire_time,
flag=self.item_flag)
self.log.info("done kv_ops_initialize")
except Exception as ex:
self.log.info(ex)
if queue is not None:
queue.put(False)
raise
if queue is not None:
queue.put(True)
def kv_after_ops_create(self, queue=None):
try:
self.log.info("kv_after_ops_create")
self._load_all_buckets(self.master, self.after_gen_create, "create",\
self.expire_time, flag=self.item_flag)
for bucket in self.buckets:
self.log.info(" record vbucket for the bucket {0}"\
.format(bucket.name))
curr_items = \
RestConnection(self.master).get_active_key_count(bucket.name)
self.log.info("{0} curr_items in bucket {1} "\
.format(curr_items, bucket.name))
except Exception as ex:
self.log.info(ex)
if queue is not None:
queue.put(False)
if queue is not None:
queue.put(True)
def kv_after_ops_update(self):
try:
self.log.info("kv_after_ops_update")
self._load_all_buckets(self.master, self.after_gen_update, "update",
self.expire_time, flag=self.item_flag)
except Exception as ex:
self.log.info(ex)
raise
def kv_after_ops_delete(self):
try:
self.log.info("kv_after_ops_delete")
self._load_all_buckets(self.master, self.after_gen_delete,
"delete", self.expire_time,
flag=self.item_flag)
except Exception as ex:
self.log.info(ex)
raise
def doc_ops_initialize(self, queue=None):
try:
self.log.info("load doc to all buckets")
self._load_doc_data_all_buckets(data_op="create", batch_size=1000,
gen_load=None)
self.log.info("done initialize load doc to all buckets")
except Exception as ex:
self.log.info(ex)
if queue is not None:
queue.put(False)
if queue is not None:
queue.put(True)
def kv_ops_create(self):
try:
self.log.info("kv_ops_create")
self._load_all_buckets(self.master, self.gen_create, "create",
self.expire_time, flag=self.item_flag)
except Exception as ex:
self.log.info(ex)
raise
def kv_ops_update(self):
try:
self.log.info("kv_ops_update")
self._load_all_buckets(self.master, self.gen_update, "update",
self.expire_time, flag=self.item_flag)
except Exception as ex:
self.log.info(ex)
raise
def kv_ops_delete(self):
try:
self.log.info("kv_ops_delete")
self._load_all_buckets(self.master, self.gen_delete, "delete",
self.expire_time, flag=self.item_flag)
except Exception as ex:
self.log.info(ex)
raise
def add_sub_doc(self):
try:
self.log.info("add sub doc")
"""add sub doc code here"""
except Exception as ex:
self.log.info(ex)
raise
def create_fts_index(self, queue=None):
try:
self.log.info("Checking if index already exists ...")
name = "default"
""" test on one bucket """
for bucket in self.buckets:
name = bucket.name
break
SOURCE_CB_PARAMS = {
"authUser": "default",
"authPassword": "",
"authSaslUser": "",
"authSaslPassword": "",
"clusterManagerBackoffFactor": 0,
"clusterManagerSleepInitMS": 0,
"clusterManagerSleepMaxMS": 20000,
"dataManagerBackoffFactor": 0,
"dataManagerSleepInitMS": 0,
"dataManagerSleepMaxMS": 20000,
"feedBufferSizeBytes": 0,
"feedBufferAckThreshold": 0
}
self.index_type = 'fulltext-index'
self.index_definition = {
"type": "fulltext-index",
"name": "",
"uuid": "",
"params": {},
"sourceType": "couchbase",
"sourceName": "",
"sourceUUID": "",
"sourceParams": SOURCE_CB_PARAMS,
"planParams": {}
}
self.name = self.index_definition['name'] = \
self.index_definition['sourceName'] = name
fts_node = self.get_nodes_from_services_map(
"fts", servers=self.get_nodes_in_cluster_after_upgrade())
if fts_node:
rest = RestConnection(fts_node)
status, _ = rest.get_fts_index_definition(self.name)
if status != 400:
rest.delete_fts_index(self.name)
self.log.info("Creating {0} {1} on {2}"
.format(self.index_type, self.name, rest.ip))
rest.create_fts_index(self.name, self.index_definition)
else:
raise("No FTS node in cluster")
self.ops_dist_map = self.calculate_data_change_distribution(
create_per=self.create_ops_per, update_per=self.update_ops_per,
delete_per=self.delete_ops_per, expiry_per=self.expiry_ops_per,
start=0, end=self.docs_per_day)
self.log.info(self.ops_dist_map)
self.dataset = "default"
self.docs_gen_map = self.generate_ops_docs(self.docs_per_day, 0)
self.async_ops_all_buckets(self.docs_gen_map, batch_size=100)
except Exception as ex:
self.log.info(ex)
def create_fts_index_query(self, queue=None):
try:
self.fts_obj = self.create_fts_index_query_compare()
return self.fts_obj
except Exception as ex:
self.log.info(ex)
if queue is not None:
queue.put(False)
if queue is not None:
queue.put(True)
def xdcr_create_replication(self):
try:
self.xdcr_handle._create_replication()
except Exception as ex:
self.log.info(ex)
def xdcr_set_replication_properties(self):
try:
param_str = self.input.param(
"%s@%s" %
("default", "C"), None)
self.xdcr_handle._set_replication_properties(param_str)
except Exception as ex:
self.log.info(ex)
def xdcr_get_replication_properties(self):
try:
self.xdcr_handle._get_replication_properties()
except Exception as ex:
self.log.info(ex)
def create_n1ql_index_query(self, queue=None):
try:
self.create_n1ql_index_and_query()
#return self.n1ql_obj
except Exception as ex:
self.log.info(ex)
if queue is not None:
queue.put(False)
if queue is not None:
queue.put(True)
def cluster_stats(self, servers):
self._wait_for_stats_all_buckets(servers)
def _initialize_n1ql_helper(self):
if self.n1ql_helper is None:
self.n1ql_server = self.get_nodes_from_services_map(
service_type="n1ql", servers=self.input.servers)
self.n1ql_helper = N1QLHelper(
version="sherlock", shell=None,
use_rest=True, max_verify=self.max_verify,
buckets=self.buckets, item_flag=None,
n1ql_port=self.n1ql_server.n1ql_port, full_docs_list=[],
log=self.log, input=self.input, master=self.master)
def _get_free_nodes(self):
self.log.info("Get free nodes in pool not in cluster yet")
nodes = self.get_nodes_in_cluster_after_upgrade()
free_nodes = copy.deepcopy(self.input.servers)
for node in nodes:
for server in free_nodes:
if str(server.ip).strip() == str(node.ip).strip():
self.log.info("this node {0} is in cluster".format(server))
free_nodes.remove(server)
if not free_nodes:
self.log.info("No free node")
else:
self.log.info("Here is the list of free nodes {0}"
.format(free_nodes))
return free_nodes
def get_nodes_in_cluster_after_upgrade(self, master_node=None):
if master_node is None:
rest = RestConnection(self.master)
else:
rest = RestConnection(master_node)
nodes = rest.node_statuses()
server_set = []
for node in nodes:
for server in self.input.servers:
if server.ip == node.ip:
server_set.append(server)
return server_set
def create_scope_collection(self, queue=None):
try:
self._create_scope_collection()
except Exception as ex:
self.log.info(ex)
if queue is not None:
queue.put(False)
if queue is not None:
queue.put(True)
def load_collection(self, queue=None):
try:
self.load_collection_id = self.get_collection_load_id()
option = " -c {0} ".format(self.load_collection_id)
self.sleep(10)
self.load_collection_all_buckets(command_options=option )
except Exception as ex:
self.log.info(ex)
if queue is not None:
queue.put(False)
if queue is not None:
queue.put(True)
def verify_collection_data(self, queue=None):
try:
self._verify_collection_data()
except Exception as ex:
self.log.info(ex)
if queue is not None:
queue.put(False)
if queue is not None:
queue.put(True)
websocket_unittest.py
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from __future__ import absolute_import
import base64
import hashlib
import socket
import threading
import unittest
import six
import six.moves.BaseHTTPServer # pylint: disable=import-error
from telemetry.internal.backends.chrome_inspector import websocket
# Minimal handler for a local websocket server.
class _FakeWebSocketHandler(six.moves.BaseHTTPServer.BaseHTTPRequestHandler):
def do_GET(self): # pylint: disable=invalid-name
key = self.headers.get('Sec-WebSocket-Key')
value = (key + "258EAFA5-E914-47DA-95CA-C5AB0DC85B11").encode('utf-8')
if six.PY3:
hashed = base64.encodebytes(hashlib.sha1(value).digest()).strip().lower()
else:
hashed = base64.encodestring(hashlib.sha1(value).digest()).strip().lower()
self.send_response(101)
self.send_header('Sec-Websocket-Accept', hashed.decode('utf-8'))
self.send_header('upgrade', 'websocket')
self.send_header('connection', 'upgrade')
self.end_headers()
self.wfile.flush()
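# The handshake above implements the RFC 6455 accept-key derivation: the server
# appends the fixed GUID "258EAFA5-E914-47DA-95CA-C5AB0DC85B11" to the client's
# Sec-WebSocket-Key, SHA-1 hashes the result and base64-encodes the digest.
# A quick self-contained check using the example values from RFC 6455 itself:
#
#   >>> import base64, hashlib
#   >>> key = "dGhlIHNhbXBsZSBub25jZQ=="
#   >>> digest = hashlib.sha1((key + "258EAFA5-E914-47DA-95CA-C5AB0DC85B11").encode()).digest()
#   >>> base64.b64encode(digest).decode()
#   's3pPLMBiTxaQ9kYGzzhZRbK+xOo='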
class TestWebSocket(unittest.TestCase):
def testExports(self):
self.assertNotEqual(websocket.CreateConnection, None)
self.assertNotEqual(websocket.WebSocketException, None)
self.assertNotEqual(websocket.WebSocketTimeoutException, None)
def testSockOpts(self):
httpd = six.moves.BaseHTTPServer.HTTPServer(
('127.0.0.1', 0), _FakeWebSocketHandler)
ws_url = 'ws://127.0.0.1:%d' % httpd.server_port
threading.Thread(target=httpd.handle_request).start()
ws = websocket.CreateConnection(ws_url)
try:
self.assertNotEqual(
ws.sock.getsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR), 0)
finally:
ws.close()
threading.Thread(target=httpd.handle_request).start()
ws = websocket.CreateConnection(
ws_url,
sockopt=[(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)])
try:
self.assertNotEqual(
ws.sock.getsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR), 0)
self.assertNotEqual(
ws.sock.getsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY), 0)
finally:
ws.close()
api.py
"""Remotely control your Coinbase Pro account via their API"""
import re
import json
import hmac
import hashlib
import time
import requests
import base64
import sys
import pandas as pd
from numpy import floor
from datetime import datetime, timedelta
from requests.auth import AuthBase
from requests import Request
from threading import Thread
from websocket import create_connection, WebSocketConnectionClosedException
from models.helper.LogHelper import Logger
MARGIN_ADJUSTMENT = 0.0025
DEFAULT_MAKER_FEE_RATE = 0.005
DEFAULT_TAKER_FEE_RATE = 0.005
MINIMUM_TRADE_AMOUNT = 10
DEFAULT_GRANULARITY = 3600
SUPPORTED_GRANULARITY = [60, 300, 900, 3600, 21600, 86400]
FREQUENCY_EQUIVALENTS = ["T", "5T", "15T", "H", "6H", "D"]
MAX_GRANULARITY = max(SUPPORTED_GRANULARITY)
DEFAULT_MARKET = "BTC-GBP"
class AuthAPIBase:
def _isMarketValid(self, market: str) -> bool:
p = re.compile(r"^[1-9A-Z]{2,5}\-[1-9A-Z]{2,5}$")
if p.match(market):
return True
return False
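# For example, the pattern above accepts markets such as "BTC-GBP" or "ETH-USDC"
# (2-5 characters from 1-9/A-Z on each side of the dash); note that the digit 0
# is not in the character class, so a product code containing "0" would be rejected.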
class AuthAPI(AuthAPIBase):
def __init__(
self,
api_key="",
api_secret="",
api_passphrase="",
api_url="https://api.pro.coinbase.com",
) -> None:
"""Coinbase Pro API object model
Parameters
----------
api_key : str
Your Coinbase Pro account portfolio API key
api_secret : str
Your Coinbase Pro account portfolio API secret
api_passphrase : str
Your Coinbase Pro account portfolio API passphrase
api_url
Coinbase Pro API URL
"""
# options
self.debug = False
self.die_on_api_error = False
valid_urls = [
"https://api.pro.coinbase.com",
"https://api.pro.coinbase.com/",
"https://public.sandbox.pro.coinbase.com",
"https://public.sandbox.pro.coinbase.com/",
]
# validate Coinbase Pro API
if api_url not in valid_urls:
raise ValueError("Coinbase Pro API URL is invalid")
if api_url[-1] != "/":
api_url = api_url + "/"
# validates the api key is syntactically correct
p = re.compile(r"^[a-f0-9]{32}$")
if not p.match(api_key):
self.handle_init_error("Coinbase Pro API key is invalid")
# validates the api secret is syntactically correct
p = re.compile(r"^[A-z0-9+\/]+==$")
if not p.match(api_secret):
self.handle_init_error("Coinbase Pro API secret is invalid")
# validates the api passphrase is syntactically correct
p = re.compile(r"^[A-z0-9#$%=@!{},`~&*()<>?.:;_|^/+\[\]]{8,32}$")
if not p.match(api_passphrase):
self.handle_init_error("Coinbase Pro API passphrase is invalid")
self._api_key = api_key
self._api_secret = api_secret
self._api_passphrase = api_passphrase
self._api_url = api_url
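# A minimal usage sketch (the credentials below are made-up placeholders that merely
# satisfy the format checks above; substitute your own portfolio's key, secret and
# passphrase):
#
#   api = AuthAPI(
#       api_key="0123456789abcdef0123456789abcdef",
#       api_secret="c2VjcmV0c2VjcmV0c2VjcmV0c2VjcmV0c2VjcmV0c2VjcmV0cw==",
#       api_passphrase="examplepassphrase1",
#   )
#   accounts = api.getAccounts()  # pandas DataFrame of accounts with a non-zero balance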
def handle_init_error(self, err: str) -> None:
"""Handle initialisation error"""
if self.debug:
raise TypeError(err)
else:
raise SystemExit(err)
def __call__(self, request) -> Request:
"""Signs the request"""
timestamp = str(time.time())
body = (request.body or b"").decode()
message = f"{timestamp}{request.method}{request.path_url}{body}"
hmac_key = base64.b64decode(self._api_secret)
signature = hmac.new(hmac_key, message.encode(), hashlib.sha256)
signature_b64 = base64.b64encode(signature.digest()).decode()
request.headers.update(
{
"CB-ACCESS-SIGN": signature_b64,
"CB-ACCESS-TIMESTAMP": timestamp,
"CB-ACCESS-KEY": self._api_key,
"CB-ACCESS-PASSPHRASE": self._api_passphrase,
"Content-Type": "application/json",
}
)
return request
def getAccounts(self) -> pd.DataFrame:
"""Retrieves your list of accounts"""
# GET /accounts
try:
df = self.authAPI("GET", "accounts")
except:
return pd.DataFrame()
if len(df) == 0:
return pd.DataFrame()
# exclude accounts with a nil balance
df = df[df.balance != "0.0000000000000000"]
# reset the dataframe index to start from 0
df = df.reset_index()
return df
def getAccount(self, account: str) -> pd.DataFrame:
"""Retrieves a specific account"""
# validates the account is syntactically correct
p = re.compile(r"^[a-f0-9\-]{36,36}$")
if not p.match(account):
self.handle_init_error("Coinbase Pro account is invalid")
try:
return self.authAPI("GET", f"accounts/{account}")
except:
return pd.DataFrame()
def getFees(self, market: str = "") -> pd.DataFrame:
"""Retrieves market fees"""
try:
df = self.authAPI("GET", "fees")
if len(df) == 0:
return pd.DataFrame()
if len(market):
df["market"] = market
else:
df["market"] = ""
return df
except:
return pd.DataFrame()
def getMakerFee(self, market: str = "") -> float:
"""Retrieves maker fee"""
if len(market):
fees = self.getFees(market)
else:
fees = self.getFees()
if len(fees) == 0 or "maker_fee_rate" not in fees:
Logger.error(
f"error: 'maker_fee_rate' not in fees (using {DEFAULT_MAKER_FEE_RATE} as a fallback)"
)
return DEFAULT_MAKER_FEE_RATE
return float(fees["maker_fee_rate"].to_string(index=False).strip())
def getTakerFee(self, market: str = "") -> float:
"""Retrieves taker fee"""
if len(market):
fees = self.getFees(market)
else:
fees = self.getFees()
if len(fees) == 0 or "taker_fee_rate" not in fees:
Logger.error(
f"error: 'taker_fee_rate' not in fees (using {DEFAULT_TAKER_FEE_RATE} as a fallback)"
)
return DEFAULT_TAKER_FEE_RATE
return float(fees["taker_fee_rate"].to_string(index=False).strip())
def getUSDVolume(self) -> float:
"""Retrieves USD volume"""
try:
fees = self.getFees()
if "usd_volume" in fees:
return float(fees["usd_volume"].to_string(index=False).strip())
else:
return 0
except:
return 0
def getOrders(
self, market: str = "", action: str = "", status: str = "all"
) -> pd.DataFrame:
"""Retrieves your list of orders with optional filtering"""
# if market provided
if market != "":
# validates the market is syntactically correct
if not self._isMarketValid(market):
raise ValueError("Coinbase Pro market is invalid.")
# if action provided
if action != "":
# validates action is either a buy or sell
if not action in ["buy", "sell"]:
raise ValueError("Invalid order action.")
# validates status is either open, pending, done, active, or all
if not status in ["open", "pending", "done", "active", "all"]:
raise ValueError("Invalid order status.")
try:
# GET /orders?status
resp = self.authAPI("GET", f"orders?status={status}")
if len(resp) > 0:
if status == "open":
df = resp.copy()[
[
"created_at",
"product_id",
"side",
"type",
"size",
"price",
"status",
]
]
df["value"] = float(df["price"]) * float(df["size"]) - (
float(df["price"]) * MARGIN_ADJUSTMENT
)
else:
if "specified_funds" in resp:
df = resp.copy()[
[
"created_at",
"product_id",
"side",
"type",
"filled_size",
"specified_funds",
"executed_value",
"fill_fees",
"status",
]
]
else:
# manual limit orders do not contain 'specified_funds'
df_tmp = resp.copy()
df_tmp["specified_funds"] = None
df = df_tmp[
[
"created_at",
"product_id",
"side",
"type",
"filled_size",
"specified_funds",
"executed_value",
"fill_fees",
"status",
]
]
else:
return pd.DataFrame()
# replace null NaN values with 0
df.fillna(0, inplace=True)
df_tmp = df.copy()
df_tmp["price"] = 0.0
df_tmp["filled_size"] = df_tmp["filled_size"].astype(float)
df_tmp["specified_funds"] = df_tmp["specified_funds"].astype(float)
df_tmp["executed_value"] = df_tmp["executed_value"].astype(float)
df_tmp["fill_fees"] = df_tmp["fill_fees"].astype(float)
df = df_tmp
# calculates the price at the time of purchase
if status != "open":
df["price"] = df.copy().apply(
lambda row: (float(row.executed_value) * 100)
/ (float(row.filled_size) * 100)
if float(row.filled_size) > 0
else 0,
axis=1,
)
# df.loc[df['filled_size'] > 0, 'price'] = (df['executed_value'] * 100) / (df['filled_size'] * 100)
# rename the columns
if status == "open":
df.columns = [
"created_at",
"market",
"action",
"type",
"size",
"price",
"status",
"value",
]
df = df[
[
"created_at",
"market",
"action",
"type",
"size",
"value",
"status",
"price",
]
]
df["size"] = df["size"].astype(float).round(8)
else:
df.columns = [
"created_at",
"market",
"action",
"type",
"value",
"size",
"filled",
"fees",
"status",
"price",
]
df = df[
[
"created_at",
"market",
"action",
"type",
"size",
"value",
"fees",
"price",
"status",
]
]
df.columns = [
"created_at",
"market",
"action",
"type",
"size",
"filled",
"fees",
"price",
"status",
]
df_tmp = df.copy()
df_tmp["filled"] = df_tmp["filled"].astype(float).round(8)
df_tmp["size"] = df_tmp["size"].astype(float).round(8)
df_tmp["fees"] = df_tmp["fees"].astype(float).round(8)
df_tmp["price"] = df_tmp["price"].astype(float).round(8)
df = df_tmp
# convert dataframe to a time series
tsidx = pd.DatetimeIndex(
pd.to_datetime(df["created_at"]).dt.strftime("%Y-%m-%dT%H:%M:%S.%Z")
)
df.set_index(tsidx, inplace=True)
df = df.drop(columns=["created_at"])
# if marker provided
if market != "":
# filter by market
df = df[df["market"] == market]
# if action provided
if action != "":
# filter by action
df = df[df["action"] == action]
# if status provided
if status != "all":
# filter by status
df = df[df["status"] == status]
# reverse orders and reset index
df = df.iloc[::-1].reset_index()
# for sell orders size is filled
df["size"] = df["size"].fillna(df["filled"])
return df
except:
return pd.DataFrame()
def getTime(self) -> datetime:
"""Retrieves the exchange time"""
try:
resp = self.authAPI("GET", "time")
if "epoch" in resp:
epoch = int(resp["epoch"])
return datetime.fromtimestamp(epoch)
else:
Logger.error(resp)
return None
except Exception as e:
Logger.error(f"Error: {e}")
return None
def marketBuy(self, market: str = "", quote_quantity: float = 0) -> pd.DataFrame:
"""Executes a market buy providing a funding amount"""
# validates the market is syntactically correct
if not self._isMarketValid(market):
raise ValueError("Coinbase Pro market is invalid.")
# validates quote_quantity is either an integer or float
if not isinstance(quote_quantity, int) and not isinstance(
quote_quantity, float
):
Logger.critical(
"Please report this to Michael Whittle: "
+ str(quote_quantity)
+ " "
+ str(type(quote_quantity))
)
raise TypeError("The funding amount is not numeric.")
# funding amount needs to be at least MINIMUM_TRADE_AMOUNT
if quote_quantity < MINIMUM_TRADE_AMOUNT:
raise ValueError(f"Trade amount is too small (>= {MINIMUM_TRADE_AMOUNT}).")
try:
order = {
"product_id": market,
"type": "market",
"side": "buy",
"funds": self.marketQuoteIncrement(market, quote_quantity),
}
Logger.debug(order)
# connect to authenticated coinbase pro api
model = AuthAPI(
self._api_key, self._api_secret, self._api_passphrase, self._api_url
)
# place order and return result
return model.authAPI("POST", "orders", order)
except:
return pd.DataFrame()
def marketSell(self, market: str = "", base_quantity: float = 0) -> pd.DataFrame:
"""Executes a market sell providing a crypto amount"""
if not self._isMarketValid(market):
raise ValueError("Coinbase Pro market is invalid.")
if not isinstance(base_quantity, int) and not isinstance(base_quantity, float):
raise TypeError("The crypto amount is not numeric.")
try:
order = {
"product_id": market,
"type": "market",
"side": "sell",
"size": self.marketBaseIncrement(market, base_quantity),
}
Logger.debug(order)
model = AuthAPI(
self._api_key, self._api_secret, self._api_passphrase, self._api_url
)
return model.authAPI("POST", "orders", order)
except:
return pd.DataFrame()
def limitSell(
self, market: str = "", base_quantity: float = 0, future_price: float = 0
) -> pd.DataFrame:
"""Initiates a limit sell order"""
if not self._isMarketValid(market):
raise ValueError("Coinbase Pro market is invalid.")
if not isinstance(base_quantity, int) and not isinstance(base_quantity, float):
raise TypeError("The crypto amount is not numeric.")
if not isinstance(future_price, int) and not isinstance(future_price, float):
raise TypeError("The future crypto price is not numeric.")
try:
order = {
"product_id": market,
"type": "limit",
"side": "sell",
"size": self.marketBaseIncrement(market, base_quantity),
"price": future_price,
}
Logger.debug(order)
model = AuthAPI(
self._api_key, self._api_secret, self._api_passphrase, self._api_url
)
return model.authAPI("POST", "orders", order)
except:
return pd.DataFrame()
def cancelOrders(self, market: str = "") -> pd.DataFrame:
"""Cancels an order"""
if not self._isMarketValid(market):
raise ValueError("Coinbase Pro market is invalid.")
try:
model = AuthAPI(
self._api_key, self._api_secret, self._api_passphrase, self._api_url
)
return model.authAPI("DELETE", "orders")
except:
return pd.DataFrame()
def marketBaseIncrement(self, market, amount) -> float:
"""Retrives the market base increment"""
product = self.authAPI("GET", f"products/{market}")
if "base_increment" not in product:
return amount
base_increment = str(product["base_increment"].values[0])
if "." in str(base_increment):
nb_digits = len(str(base_increment).split(".")[1])
else:
nb_digits = 0
return floor(amount * 10 ** nb_digits) / 10 ** nb_digits
def marketQuoteIncrement(self, market, amount) -> float:
"""Retrieves the market quote increment"""
product = self.authAPI("GET", f"products/{market}")
if "quote_increment" not in product:
return amount
quote_increment = str(product["quote_increment"].values[0])
if "." in str(quote_increment):
nb_digits = len(str(quote_increment).split(".")[1])
else:
nb_digits = 0
return floor(amount * 10 ** nb_digits) / 10 ** nb_digits
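# Worked example of the truncation above (illustrative values): if a product reports
# quote_increment "0.01", nb_digits is 2, so an amount of 123.456 becomes
# floor(123.456 * 100) / 100 == 123.45; the amount is truncated (not rounded) to the
# market's precision so the exchange accepts the order funds/size.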
def authAPI(self, method: str, uri: str, payload: str = "") -> pd.DataFrame:
"""Initiates a REST API call"""
if not isinstance(method, str):
raise TypeError("Method is not a string.")
if not method in ["DELETE", "GET", "POST"]:
raise TypeError("Method not DELETE, GET or POST.")
if not isinstance(uri, str):
raise TypeError("URI is not a string.")
try:
if method == "DELETE":
resp = requests.delete(self._api_url + uri, auth=self)
elif method == "GET":
resp = requests.get(self._api_url + uri, auth=self)
elif method == "POST":
resp = requests.post(self._api_url + uri, json=payload, auth=self)
if "msg" in resp.json():
resp_message = resp.json()["msg"]
elif "message" in resp.json():
resp_message = resp.json()["message"]
else:
resp_message = ""
if resp.status_code == 401 and (
resp_message == "request timestamp expired"
):
message = f"{method} ({resp.status_code}) {self._api_url}{uri} - {resp_message} (hint: check your system time is using NTP)"
Logger.error(f"Error: {message}")
return {}
elif resp.status_code != 200:
if self.die_on_api_error or resp.status_code == 401:
# disable traceback
sys.tracebacklimit = 0
raise Exception(
f"{method.upper()} ({resp.status_code}) {self._api_url}{uri} - {resp_message}"
)
else:
Logger.error(
f"error: {method.upper()} ({resp.status_code}) {self._api_url}{uri} - {resp_message}"
)
return pd.DataFrame()
resp.raise_for_status()
if isinstance(resp.json(), list):
df = pd.DataFrame.from_dict(resp.json())
return df
else:
df = pd.DataFrame(resp.json(), index=[0])
return df
except requests.ConnectionError as err:
return self.handle_api_error(err, "ConnectionError")
except requests.exceptions.HTTPError as err:
return self.handle_api_error(err, "HTTPError")
except requests.Timeout as err:
return self.handle_api_error(err, "Timeout")
except json.decoder.JSONDecodeError as err:
return self.handle_api_error(err, "JSONDecodeError")
def handle_api_error(self, err: str, reason: str) -> pd.DataFrame:
"""Handle API errors"""
if self.debug:
if self.die_on_api_error:
raise SystemExit(err)
else:
Logger.error(err)
return pd.DataFrame()
else:
if self.die_on_api_error:
raise SystemExit(f"{reason}: {self._api_url}")
else:
Logger.info(f"{reason}: {self._api_url}")
return pd.DataFrame()
class PublicAPI(AuthAPIBase):
def __init__(self) -> None:
# options
self.debug = False
self.die_on_api_error = False
self._api_url = "https://api.pro.coinbase.com/"
def getHistoricalData(
self,
market: str = DEFAULT_MARKET,
granularity: int = MAX_GRANULARITY,
websocket=None,
iso8601start: str = "",
iso8601end: str = "",
) -> pd.DataFrame:
"""Retrieves historical market data"""
# validates the market is syntactically correct
if not self._isMarketValid(market):
raise TypeError("Coinbase Pro market required.")
# validates granularity is an integer
if not isinstance(granularity, int):
raise TypeError("Granularity integer required.")
# validates the granularity is supported by Coinbase Pro
if not granularity in SUPPORTED_GRANULARITY:
raise TypeError(
"Granularity options: " + ", ".join(map(str, SUPPORTED_GRANULARITY))
)
# validates the ISO 8601 start date is a string (if provided)
if not isinstance(iso8601start, str):
raise TypeError("ISO8601 start integer as string required.")
# validates the ISO 8601 end date is a string (if provided)
if not isinstance(iso8601end, str):
raise TypeError("ISO8601 end integer as string required.")
using_websocket = False
if websocket is not None:
if websocket.candles is not None:
try:
df = websocket.candles.loc[websocket.candles["market"] == market]
using_websocket = True
except:
pass
if websocket is None or (websocket is not None and using_websocket is False):
if iso8601start != "" and iso8601end == "":
resp = self.authAPI(
"GET",
f"products/{market}/candles?granularity={granularity}&start={iso8601start}",
)
elif iso8601start != "" and iso8601end != "":
resp = self.authAPI(
"GET",
f"products/{market}/candles?granularity={granularity}&start={iso8601start}&end={iso8601end}",
)
else:
resp = self.authAPI(
"GET", f"products/{market}/candles?granularity={granularity}"
)
# convert the API response into a Pandas DataFrame
df = pd.DataFrame(
resp, columns=["epoch", "low", "high", "open", "close", "volume"]
)
# reverse the order of the response with earliest last
df = df.iloc[::-1].reset_index()
try:
freq = FREQUENCY_EQUIVALENTS[SUPPORTED_GRANULARITY.index(granularity)]
except:
freq = "D"
# convert the DataFrame into a time series with the date as the index/key
try:
tsidx = pd.DatetimeIndex(
pd.to_datetime(df["epoch"], unit="s"),
dtype="datetime64[ns]",
freq=freq,
)
df.set_index(tsidx, inplace=True)
df = df.drop(columns=["epoch", "index"])
df.index.names = ["ts"]
df["date"] = tsidx
except ValueError:
tsidx = pd.DatetimeIndex(
pd.to_datetime(df["epoch"], unit="s"), dtype="datetime64[ns]"
)
df.set_index(tsidx, inplace=True)
df = df.drop(columns=["epoch", "index"])
df.index.names = ["ts"]
df["date"] = tsidx
df["market"] = market
df["granularity"] = granularity
# re-order columns
df = df[
[
"date",
"market",
"granularity",
"low",
"high",
"open",
"close",
"volume",
]
]
return df
def getTicker(self, market: str = DEFAULT_MARKET, websocket=None) -> tuple:
"""Retrives the market ticker"""
# validates the market is syntactically correct
if not self._isMarketValid(market):
raise TypeError("Coinbase Pro market required.")
now = datetime.today().strftime("%Y-%m-%d %H:%M:%S")
if websocket is not None and websocket.tickers is not None:
try:
row = websocket.tickers.loc[websocket.tickers["market"] == market]
return (
datetime.strptime(
re.sub(r".0*$", "", str(row["date"].values[0])),
"%Y-%m-%dT%H:%M:%S",
).strftime("%Y-%m-%d %H:%M:%S"),
float(row["price"].values[0]),
)
except:
return (now, 0.0)
resp = self.authAPI("GET", f"products/{market}/ticker")
if "time" in resp and "price" in resp:
return (
datetime.strptime(resp["time"], "%Y-%m-%dT%H:%M:%S.%fZ").strftime(
"%Y-%m-%d %H:%M:%S"
),
float(resp["price"]),
)
return (now, 0.0)
def getTime(self) -> datetime:
"""Retrieves the exchange time"""
try:
resp = self.authAPI("GET", "time")
if "epoch" in resp:
epoch = int(resp["epoch"])
return datetime.fromtimestamp(epoch)
else:
Logger.error(
"resp does not contain the epoch key for some reason!"
) # remove this later
Logger.error(resp)
return None
except Exception as e:
Logger.error(f"Error: {e}")
return None
    def getMarkets24HrStats(self) -> pd.DataFrame:
"""Retrieves exchange markets 24hr stats"""
try:
return self.authAPI("GET", "products/stats")
except:
return pd.DataFrame()
def authAPI(self, method: str, uri: str, payload: str = "") -> dict:
"""Initiates a REST API call"""
if not isinstance(method, str):
raise TypeError("Method is not a string.")
if not method in ["GET", "POST"]:
raise TypeError("Method not GET or POST.")
if not isinstance(uri, str):
raise TypeError("URI is not a string.")
try:
if method == "GET":
resp = requests.get(self._api_url + uri)
elif method == "POST":
resp = requests.post(self._api_url + uri, json=payload)
if resp.status_code != 200:
resp_message = resp.json()["message"]
message = f"{method} ({resp.status_code}) {self._api_url}{uri} - {resp_message}"
if self.die_on_api_error:
raise Exception(message)
else:
Logger.error(f"Error: {message}")
return {}
resp.raise_for_status()
return resp.json()
except requests.ConnectionError as err:
Logger.error("requests.ConnectionError") # remove this later
return self.handle_api_error(err, "ConnectionError")
except requests.exceptions.HTTPError as err:
Logger.error("requests.exceptions.HTTPError") # remove this later
return self.handle_api_error(err, "HTTPError")
except requests.Timeout as err:
Logger.error("requests.Timeout") # remove this later
return self.handle_api_error(err, "Timeout")
except json.decoder.JSONDecodeError as err:
Logger.error("json.decoder.JSONDecodeError") # remove this later
return self.handle_api_error(err, "JSONDecodeError")
def handle_api_error(self, err: str, reason: str) -> dict:
"""Handle API errors"""
if self.debug:
if self.die_on_api_error:
raise SystemExit(err)
else:
Logger.error(err)
return {}
else:
if self.die_on_api_error:
raise SystemExit(f"{reason}: {self._api_url}")
else:
Logger.info(f"{reason}: {self._api_url}")
return {}
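# Illustrative sketch (not part of the original module): pulling hourly candles and the
# latest ticker through the PublicAPI class above. "BTC-GBP" is only an example market
# and this assumes the public Coinbase Pro REST endpoint is reachable.
def _example_public_api_usage(market: str = "BTC-GBP") -> tuple:
    api = PublicAPI()
    candles = api.getHistoricalData(market, 3600)  # 3600 seconds = 1 hour granularity
    ts, price = api.getTicker(market)
    return candles.tail(), ts, price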
class WebSocket(AuthAPIBase):
def __init__(
self,
markets=None,
granularity=None,
api_url="https://api.pro.coinbase.com",
ws_url="wss://ws-feed.pro.coinbase.com",
) -> None:
# options
self.debug = False
valid_urls = [
"https://api.pro.coinbase.com",
"https://api.pro.coinbase.com/",
"https://public.sandbox.pro.coinbase.com",
"https://public.sandbox.pro.coinbase.com/",
]
# validate Coinbase Pro API
if api_url not in valid_urls:
raise ValueError("Coinbase Pro API URL is invalid")
if api_url[-1] != "/":
api_url = api_url + "/"
valid_ws_urls = [
"wss://ws-feed.pro.coinbase.com",
"wss://ws-feed.pro.coinbase.com/",
]
# validate Coinbase Pro Websocket URL
if ws_url not in valid_ws_urls:
raise ValueError("Coinbase Pro WebSocket URL is invalid")
if ws_url[-1] != "/":
ws_url = ws_url + "/"
self._ws_url = ws_url
self._api_url = api_url
        self.markets = markets
self.granularity = granularity
self.type = "subscribe"
self.stop = True
self.error = None
self.ws = None
self.thread = None
self.start_time = None
self.time_elapsed = 0
def start(self):
def _go():
self._connect()
self._listen()
self._disconnect()
self.stop = False
self.on_open()
self.thread = Thread(target=_go)
self.keepalive = Thread(target=self._keepalive)
self.thread.start()
def _connect(self):
if self.markets is None:
print("Error: no market specified!")
sys.exit()
elif not isinstance(self.markets, list):
self.markets = [self.markets]
self.ws = create_connection(self._ws_url)
self.ws.send(
json.dumps(
{
"type": "subscribe",
"product_ids": self.markets,
"channels": ["matches"],
}
)
)
self.start_time = datetime.now()
def _keepalive(self, interval=30):
while self.ws.connected:
self.ws.ping("keepalive")
time.sleep(interval)
def _listen(self):
self.keepalive.start()
while not self.stop:
try:
data = self.ws.recv()
if data != "":
msg = json.loads(data)
else:
msg = {}
except ValueError as e:
self.on_error(e)
except Exception as e:
self.on_error(e)
else:
self.on_message(msg)
def _disconnect(self):
try:
if self.ws:
self.ws.close()
except WebSocketConnectionClosedException:
pass
finally:
self.keepalive.join()
def close(self):
self.stop = True
self.start_time = None
self.time_elapsed = 0
self._disconnect()
self.thread.join()
def on_open(self):
Logger.info("-- Websocket Subscribed! --")
def on_close(self):
Logger.info("-- Websocket Closed --")
def on_message(self, msg):
Logger.info(msg)
def on_error(self, e, data=None):
Logger.error(e)
Logger.error("{} - data: {}".format(e, data))
self.stop = True
try:
self.ws = None
self.tickers = None
self.candles = None
self.start_time = None
self.time_elapsed = 0
except:
pass
def getStartTime(self) -> datetime:
return self.start_time
def getTimeElapsed(self) -> int:
return self.time_elapsed
class WebSocketClient(WebSocket):
def __init__(
self,
markets: list = [DEFAULT_MARKET],
granularity: str = DEFAULT_GRANULARITY,
api_url="https://api.pro.coinbase.com/",
ws_url: str = "wss://ws-feed.pro.coinbase.com",
) -> None:
if len(markets) == 0:
raise ValueError("A list of one or more markets is required.")
for market in markets:
# validates the market is syntactically correct
if not self._isMarketValid(market):
raise ValueError("Coinbase Pro market is invalid.")
# validates granularity is an integer
if not isinstance(granularity, int):
raise TypeError("Granularity integer required.")
# validates the granularity is supported by Coinbase Pro
        if granularity not in SUPPORTED_GRANULARITY:
raise TypeError(
"Granularity options: " + ", ".join(map(str, SUPPORTED_GRANULARITY))
)
valid_urls = [
"https://api.pro.coinbase.com",
"https://api.pro.coinbase.com/",
"https://public.sandbox.pro.coinbase.com",
"https://public.sandbox.pro.coinbase.com/",
]
# validate Coinbase Pro API
if api_url not in valid_urls:
raise ValueError("Coinbase Pro API URL is invalid")
if api_url[-1] != "/":
api_url = api_url + "/"
valid_ws_urls = [
"wss://ws-feed.pro.coinbase.com",
"wss://ws-feed.pro.coinbase.com/",
]
# validate Coinbase Pro Websocket URL
if ws_url not in valid_ws_urls:
raise ValueError("Coinbase Pro WebSocket URL is invalid")
if ws_url[-1] != "/":
ws_url = ws_url + "/"
self._ws_url = ws_url
self.markets = markets
self.granularity = granularity
self.tickers = None
self.candles = None
self.start_time = None
self.time_elapsed = 0
def on_open(self):
self.message_count = 0
def on_message(self, msg):
if self.start_time is not None:
self.time_elapsed = round(
(datetime.now() - self.start_time).total_seconds()
)
if "time" in msg and "product_id" in msg and "price" in msg:
# create dataframe from websocket message
df = pd.DataFrame(
columns=["date", "market", "price"],
data=[
[
datetime.strptime(
msg["time"], "%Y-%m-%dT%H:%M:%S.%fZ"
).strftime("%Y-%m-%d %H:%M:%S"),
msg["product_id"],
msg["price"],
]
],
)
# set column types
df["date"] = df["date"].astype("datetime64[ns]")
df["price"] = df["price"].astype("float64")
# form candles
if self.granularity == 60:
df["candle"] = df["date"].dt.floor(freq="1T")
elif self.granularity == 300:
df["candle"] = df["date"].dt.floor(freq="5T")
elif self.granularity == 900:
df["candle"] = df["date"].dt.floor(freq="15T")
elif self.granularity == 3600:
df["candle"] = df["date"].dt.floor(freq="1H")
elif self.granularity == 21600:
df["candle"] = df["date"].dt.floor(freq="6H")
elif self.granularity == 86400:
df["candle"] = df["date"].dt.floor(freq="1D")
# candles dataframe is empty
if self.candles is None:
resp = PublicAPI().getHistoricalData(
df["market"].values[0], self.granularity
)
if len(resp) > 0:
self.candles = resp
else:
# create dataframe from websocket message
self.candles = pd.DataFrame(
columns=[
"date",
"market",
"granularity",
"open",
"high",
"close",
"low",
"volume",
],
data=[
[
df["candle"].values[0],
df["market"].values[0],
self.granularity,
df["price"].values[0],
df["price"].values[0],
df["price"].values[0],
df["price"].values[0],
msg["size"],
]
],
)
# candles dataframe contains some data
else:
# check if the current candle exists
candle_exists = (
(self.candles["date"] == df["candle"].values[0])
& (self.candles["market"] == df["market"].values[0])
).any()
if not candle_exists:
# populate historical data via api if it does not exist
if (
len(
self.candles[
self.candles["market"] == df["market"].values[0]
]
)
== 0
):
resp = PublicAPI().getHistoricalData(
df["market"].values[0], self.granularity
)
if len(resp) > 0:
df_new_candle = resp
else:
# create dataframe from websocket message
df_new_candle = pd.DataFrame(
columns=[
"date",
"market",
"granularity",
"open",
"high",
"close",
"low",
"volume",
],
data=[
[
df["candle"].values[0],
df["market"].values[0],
self.granularity,
df["price"].values[0],
df["price"].values[0],
df["price"].values[0],
df["price"].values[0],
msg["size"],
]
],
)
else:
df_new_candle = pd.DataFrame(
columns=[
"date",
"market",
"granularity",
"open",
"high",
"close",
"low",
"volume",
],
data=[
[
df["candle"].values[0],
df["market"].values[0],
self.granularity,
df["price"].values[0],
df["price"].values[0],
df["price"].values[0],
df["price"].values[0],
msg["size"],
]
],
)
self.candles = self.candles.append(df_new_candle)
else:
candle = self.candles[
(
(self.candles["date"] == df["candle"].values[0])
& (self.candles["market"] == df["market"].values[0])
)
]
# set high on high
if float(df["price"].values[0]) > float(candle.high.values[0]):
self.candles.at[candle.index.values[0], "high"] = df[
"price"
].values[0]
self.candles.at[candle.index.values[0], "close"] = df[
"price"
].values[0]
# set low on low
if float(df["price"].values[0]) < float(candle.low.values[0]):
self.candles.at[candle.index.values[0], "low"] = df[
"price"
].values[0]
# increment candle base volume
self.candles.at[candle.index.values[0], "volume"] = float(
candle["volume"].values[0]
) + float(msg["size"])
# insert first entry
if self.tickers is None and len(df) > 0:
self.tickers = df
# append future entries without duplicates
elif self.tickers is not None and len(df) > 0:
self.tickers = (
pd.concat([self.tickers, df])
.drop_duplicates(subset="market", keep="last")
.reset_index(drop=True)
)
# convert dataframes to a time series
tsidx = pd.DatetimeIndex(
pd.to_datetime(self.tickers["date"]).dt.strftime("%Y-%m-%dT%H:%M:%S.%Z")
)
self.tickers.set_index(tsidx, inplace=True)
self.tickers.index.name = "ts"
tsidx = pd.DatetimeIndex(
pd.to_datetime(self.candles["date"]).dt.strftime("%Y-%m-%dT%H:%M:%S.%Z")
)
self.candles.set_index(tsidx, inplace=True)
self.candles.index.name = "ts"
# set correct column types
self.candles["open"] = self.candles["open"].astype("float64")
self.candles["high"] = self.candles["high"].astype("float64")
self.candles["close"] = self.candles["close"].astype("float64")
self.candles["low"] = self.candles["low"].astype("float64")
self.candles["volume"] = self.candles["volume"].astype("float64")
# keep last 300 candles per market
self.candles = self.candles.groupby("market").tail(300)
# print (f'{msg["time"]} {msg["product_id"]} {msg["price"]}')
# print(json.dumps(msg, indent=4, sort_keys=True))
self.message_count += 1
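# Illustrative sketch (not part of the original module): feeding the WebSocketClient above
# into PublicAPI so candles and tickers come from the live "matches" feed instead of
# repeated REST calls. The market, granularity and 30-second wait are example values.
def _example_websocket_usage() -> tuple:
    ws = WebSocketClient(["BTC-GBP"], 3600)
    ws.start()
    try:
        time.sleep(30)  # allow a few messages to arrive so tickers/candles are populated
        api = PublicAPI()
        candles = api.getHistoricalData("BTC-GBP", 3600, websocket=ws)
        ts, price = api.getTicker("BTC-GBP", websocket=ws)
        return candles.tail(), ts, price
    finally:
        ws.close()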
|
global_handle.py
|
#!/usr/bin/python3
'''
(C) Copyright 2018-2022 Intel Corporation.
SPDX-License-Identifier: BSD-2-Clause-Patent
'''
import ctypes
import traceback
from multiprocessing import sharedctypes
from avocado import fail_on
from apricot import TestWithServers
from pydaos.raw import DaosPool, DaosContainer, DaosApiError, IOV
class GlobalHandle(TestWithServers):
"""Test the ability to share container handles among processes.
:avocado: recursive
"""
@fail_on(DaosApiError)
def check_handle(self, pool_glob_handle, uuidstr, cont_glob_handle, rank):
"""Verify that the global handles can be turned into local handles.
This gets run in a child process and verifies the global handles can be
turned into local handles in another process.
Args:
pool_glob_handle (sharedctypes.RawValue): pool handle
uuidstr (sharedctypes.RawArray): pool uuid
cont_glob_handle (sharedctypes.RawValue): container handle
rank (int): pool svc rank
Raises:
DaosApiError: if there was an error converting the pool handle or
using the local pool handle to create a container.
"""
# setup the pool and connect using global handle
pool = DaosPool(self.context)
pool.uuid = uuidstr
pool.set_svc(rank)
buf = ctypes.cast(
pool_glob_handle.iov_buf,
ctypes.POINTER(ctypes.c_byte * pool_glob_handle.iov_buf_len))
buf2 = bytearray()
buf2.extend(buf.contents)
pool_handle = pool.global2local(
self.context, pool_glob_handle.iov_len,
pool_glob_handle.iov_buf_len, buf2)
# perform an operation that will use the new handle, if it
# doesn't throw an exception, then all is well.
pool.pool_query()
# setup the container and then connect using the global handle
container = DaosContainer(self.context)
container.poh = pool_handle
buf = ctypes.cast(
cont_glob_handle.iov_buf,
ctypes.POINTER(ctypes.c_byte * cont_glob_handle.iov_buf_len))
buf2 = bytearray()
buf2.extend(buf.contents)
_ = container.global2local(
self.context, cont_glob_handle.iov_len,
cont_glob_handle.iov_buf_len, buf2)
# just try one thing to make sure handle is good
container.query()
def test_global_handle(self):
"""Test Description: Use a pool handle in another process.
:avocado: tags=all,daily_regression
:avocado: tags=tiny
:avocado: tags=container,global_handle,container_global_handle
"""
# initialize a python pool object then create the underlying
# daos storage and connect to it
self.add_pool(create=True, connect=True)
# create a pool global handle
iov_len, buf_len, buf = self.pool.pool.local2global()
buftype = ctypes.c_byte * buf_len
c_buf = buftype.from_buffer(buf)
sct_pool_handle = (
sharedctypes.RawValue(
IOV, ctypes.cast(c_buf, ctypes.c_void_p), buf_len, iov_len))
# create a container
self.add_container(self.pool)
self.container.open()
try:
# create a container global handle
iov_len, buf_len, buf = self.container.container.local2global()
buftype = ctypes.c_byte * buf_len
c_buf = buftype.from_buffer(buf)
sct_cont_handle = (
sharedctypes.RawValue(
IOV, ctypes.cast(c_buf, ctypes.c_void_p), buf_len, iov_len))
sct_pool_uuid = sharedctypes.RawArray(
ctypes.c_byte, self.pool.pool.uuid)
# this should work in the future but need on-line server addition
# arg_list = (
# p = Process(target=check_handle, args=arg_list)
# p.start()
# p.join()
# for now verifying global handle in the same process which is not
# the intended use case
self.check_handle(
sct_pool_handle, sct_pool_uuid, sct_cont_handle, 0)
except DaosApiError as error:
self.log.error(error)
self.log.error(traceback.format_exc())
self.fail("Expecting to pass but test has failed.\n")
|
api.py
|
"""
API
======
"""
import zipfile
try:
import queue
except ImportError:
import Queue as queue
import threading
import datetime
import math
import requests
import mimetypes
import os
import re
try:
from urllib.parse import urlunparse, urlencode, urlparse, parse_qs
except ImportError:
from urllib import urlencode
from urlparse import urlunparse, urlparse, parse_qs
try:
import simplejson as json
except ImportError:
import json
import time
from urllib3.exceptions import ReadTimeoutError
from pyodm.types import NodeOption, NodeInfo, TaskInfo, TaskStatus
from pyodm.exceptions import NodeConnectionError, NodeResponseError, NodeServerError, TaskFailedError, OdmError, RangeNotAvailableError
from pyodm.utils import MultipartEncoder, options_to_json, AtomicCounter
from requests_toolbelt.multipart import encoder
class Node:
"""A client to interact with NodeODM API.
Args:
host (str): Hostname or IP address of processing node
port (int): Port of processing node
token (str): token to use for authentication
timeout (int): timeout value in seconds for network requests
"""
prefixHttp = re.compile('http:', re.I)
prefixHttps = re.compile('https:', re.I)
def __init__(self, host, port, token="", timeout=30):
self.host = host
self.port = port
self.token = token
self.timeout = timeout
@staticmethod
def from_url(url, timeout=30):
"""Create a Node instance from a URL.
>>> n = Node.from_url("http://localhost:3000?token=abc")
Args:
url (str): URL in the format proto://hostname:port/?token=value
timeout (int): timeout value in seconds for network requests
Returns:
:func:`~Node`
"""
u = urlparse(url)
qs = parse_qs(u.query)
port = u.port
if port is None:
port = 443 if u.scheme == 'https' else 80
token = ""
if 'token' in qs:
token = qs['token'][0]
return Node(u.hostname, port, token, timeout)
@staticmethod
def compare_version(node_version, compare_version):
# Compare two NodeODM versions
# -1 = node version lower than compare
# 0 = equal
# 1 = node version higher than compare
if node_version is None or len(node_version) < 3:
return -1
if node_version == compare_version:
return 0
try:
(n_major, n_minor, n_build) = map(int, node_version.split("."))
(c_major, c_minor, c_build) = map(int, compare_version.split("."))
except:
return -1
n_number = 1000000 * n_major + 1000 * n_minor + n_build
c_number = 1000000 * c_major + 1000 * c_minor + c_build
if n_number < c_number:
return -1
else:
return 1
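    # Worked example of the encoding above (illustrative): "1.4.0" maps to
    # 1*1000000 + 4*1000 + 0 = 1004000 and "1.3.1" to 1*1000000 + 3*1000 + 1 = 1003001,
    # so compare_version("1.4.0", "1.3.1") returns 1 (the node version is higher).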
def url(self, url, query={}):
"""Get a URL relative to this node.
Args:
url (str): relative URL
query (dict): query values to append to the URL
Returns:
str: Absolute URL
"""
netloc = self.host if (self.port == 80 or self.port == 443) else "{}:{}".format(self.host, self.port)
proto = 'https' if self.port == 443 else 'http'
if len(self.token) > 0:
query['token'] = self.token
return urlunparse((proto, netloc, url, '', urlencode(query), ''))
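    # For example (illustrative): Node("localhost", 3000, token="abc").url("/task/new")
    # returns "http://localhost:3000/task/new?token=abc".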
def get(self, url, query={}, **kwargs):
try:
res = requests.get(self.url(url, query), timeout=self.timeout, **kwargs)
if res.status_code == 401:
raise NodeResponseError("Unauthorized. Do you need to set a token?")
            elif res.status_code not in [200, 403, 206]:
raise NodeServerError("Unexpected status code: %s" % res.status_code)
if "Content-Type" in res.headers and "application/json" in res.headers['Content-Type']:
result = res.json()
if 'error' in result:
raise NodeResponseError(result['error'])
return result
else:
return res
except json.decoder.JSONDecodeError as e:
raise NodeServerError(str(e))
except (requests.exceptions.Timeout, requests.exceptions.ConnectionError) as e:
raise NodeConnectionError(str(e))
def post(self, url, data=None, headers={}):
try:
res = requests.post(self.url(url), data=data, headers=headers, timeout=self.timeout)
if res.status_code == 401:
raise NodeResponseError("Unauthorized. Do you need to set a token?")
elif res.status_code != 200 and res.status_code != 403:
raise NodeServerError(res.status_code)
if "Content-Type" in res.headers and "application/json" in res.headers['Content-Type']:
result = res.json()
if 'error' in result:
raise NodeResponseError(result['error'])
return result
else:
return res
except json.decoder.JSONDecodeError as e:
raise NodeServerError(str(e))
except (requests.exceptions.Timeout, requests.exceptions.ConnectionError) as e:
raise NodeConnectionError(str(e))
def info(self):
"""Retrieve information about this node.
>>> n = Node('localhost', 3000)
>>> n.info().version
'1.5.3'
>>> n.info().engine
'odm'
Returns:
:func:`~pyodm.types.NodeInfo`
"""
return NodeInfo(self.get('/info'))
def options(self):
"""Retrieve the options available for creating new tasks on this node.
>>> n = Node('localhost', 3000)
>>> n.options()[0].name
'pc-classify'
Returns:
list: [:func:`~pyodm.types.NodeOption`]
"""
return list(map(lambda o: NodeOption(**o), self.get('/options')))
def version_greater_or_equal_than(self, version):
"""Checks whether this node version is greater than or equal than
a certain version number.
>>> n = Node('localhost', 3000)
>>> n.version_greater_or_equal_than('1.3.1')
True
>>> n.version_greater_or_equal_than('10.5.1')
False
Args:
version (str): version number to compare
Returns:
bool: result of comparison.
"""
node_version = self.info().version
return self.compare_version(node_version, version) >= 0
def create_task(self, files, options={}, name=None, progress_callback=None, skip_post_processing=False, webhook=None, outputs=[], parallel_uploads=10, max_retries=5, retry_timeout=5):
"""Start processing a new task.
At a minimum you need to pass a list of image paths. All other parameters are optional.
>>> n = Node('localhost', 3000)
>>> t = n.create_task(['examples/images/image_1.jpg', 'examples/images/image_2.jpg'], \
{'orthophoto-resolution': 2, 'dsm': True})
>>> info = t.info()
>>> info.status
<TaskStatus.RUNNING: 20>
>>> info.last_error
''
>>> t.info().images_count
2
>>> t.output()[0:2]
['DJI_0131.JPG - DJI_0313.JPG has 1 candidate matches', 'DJI_0131.JPG - DJI_0177.JPG has 3 candidate matches']
Args:
files (list): list of image paths + optional GCP file path.
options (dict): options to use, for example {'orthophoto-resolution': 3, ...}
name (str): name for the task
progress_callback (function): callback reporting upload progress percentage
            skip_post_processing (bool): When true, skips generation of map tiles, derivative assets, point cloud tiles.
webhook (str): Optional URL to call when processing has ended (either successfully or unsuccessfully).
outputs (list): Optional paths relative to the project directory that should be included in the all.zip result file, overriding the default behavior.
parallel_uploads (int): Number of parallel uploads.
max_retries (int): Number of attempts to make before giving up on a file upload.
            retry_timeout (int): Wait at least this many seconds before attempting to upload a file a second time, multiplied by the retry number.
Returns:
:func:`~Task`
"""
if not self.version_greater_or_equal_than("1.4.0"):
return self.create_task_fallback(files, options, name, progress_callback)
if len(files) == 0:
raise NodeResponseError("Not enough images")
fields = {
'name': name,
'options': options_to_json(options),
}
if skip_post_processing:
fields['skipPostProcessing'] = 'true'
if webhook is not None:
fields['webhook'] = webhook
if outputs:
fields['outputs'] = json.dumps(outputs)
e = MultipartEncoder(fields=fields)
result = self.post('/task/new/init', data=e, headers={'Content-Type': e.content_type})
if isinstance(result, dict) and 'error' in result:
raise NodeResponseError(result['error'])
if isinstance(result, dict) and 'uuid' in result:
uuid = result['uuid']
progress_event = None
class nonloc:
uploaded_files = AtomicCounter(0)
error = None
            # Equivalent to passing the open file descriptor, since requests
            # eventually calls read(), but this way we make sure to close
            # the file prior to reading the next, so we don't run into open-file OS limits
def read_file(file_path):
if Node.prefixHttp.match(file_path) or Node.prefixHttps.match(file_path):
return requests.get(file_path).content
else:
with open(file_path, 'rb') as f:
return f.read()
# Upload
def worker():
while True:
task = q.get()
if task is None or nonloc.error is not None:
q.task_done()
break
# Upload file
if task['wait_until'] > datetime.datetime.now():
time.sleep((task['wait_until'] - datetime.datetime.now()).seconds)
try:
file = task['file']
fields = {
'images': [(os.path.basename(file), read_file(file), (mimetypes.guess_type(file)[0] or "image/jpg"))]
}
e = MultipartEncoder(fields=fields)
result = self.post('/task/new/upload/{}'.format(uuid), data=e, headers={'Content-Type': e.content_type})
if isinstance(result, dict) and 'success' in result and result['success']:
uf = nonloc.uploaded_files.increment()
if progress_event is not None:
progress_event.set()
else:
if isinstance(result, dict) and 'error' in result:
raise NodeResponseError(result['error'])
else:
raise NodeServerError("Failed upload with unexpected result: %s" % str(result))
except OdmError as e:
if task['retries'] < max_retries:
# Put task back in queue
task['retries'] += 1
task['wait_until'] = datetime.datetime.now() + datetime.timedelta(seconds=task['retries'] * retry_timeout)
q.put(task)
else:
nonloc.error = e
except Exception as e:
nonloc.error = e
finally:
q.task_done()
q = queue.Queue()
threads = []
for i in range(parallel_uploads):
t = threading.Thread(target=worker)
t.start()
threads.append(t)
if progress_callback is not None:
progress_event = threading.Event()
now = datetime.datetime.now()
for file in files:
q.put({
'file': file,
'wait_until': now,
'retries': 0
})
# Wait for progress updates
if progress_event is not None:
current_progress = 0
while not q.empty():
if progress_event.wait(0.1):
progress_event.clear()
current_progress = 100.0 * nonloc.uploaded_files.value / len(files)
try:
progress_callback(current_progress)
except Exception as e:
nonloc.error = e
if nonloc.error is not None:
break
# Make sure to report 100% complete
if current_progress != 100 and nonloc.error is None:
try:
progress_callback(100.0)
except Exception as e:
nonloc.error = e
# block until all tasks are done
if nonloc.error is None:
q.join()
# stop workers
for i in range(parallel_uploads):
q.put(None)
for t in threads:
t.join()
if nonloc.error is not None:
raise nonloc.error
result = self.post('/task/new/commit/{}'.format(uuid))
return self.handle_task_new_response(result)
else:
raise NodeServerError("Invalid response from /task/new/init: %s" % result)
def create_task_fallback(self, files, options={}, name=None, progress_callback=None):
# Pre chunked API create task implementation, used as fallback
if len(files) == 0:
raise NodeResponseError("Not enough images")
        # Equivalent to passing the open file descriptor, since requests
        # eventually calls read(), but this way we make sure to close
        # the file prior to reading the next, so we don't run into open-file OS limits
def read_file(file_path):
if Node.prefixHttp.match(file_path) or Node.prefixHttps.match(file_path):
return requests.get(file_path).content
else:
with open(file_path, 'rb') as f:
return f.read()
fields = {
'name': name,
'options': options_to_json(options),
'images': [(os.path.basename(f), read_file(f), (mimetypes.guess_type(f)[0] or "image/jpg")) for
f in files]
}
def create_callback(mpe):
total_bytes = mpe.len
def callback(monitor):
if progress_callback is not None and total_bytes > 0:
progress_callback(100.0 * monitor.bytes_read / total_bytes)
return callback
e = MultipartEncoder(fields=fields)
m = encoder.MultipartEncoderMonitor(e, create_callback(e))
result = self.post('/task/new', data=m, headers={'Content-Type': m.content_type})
return self.handle_task_new_response(result)
def handle_task_new_response(self, result):
if isinstance(result, dict) and 'uuid' in result:
return Task(self, result['uuid'])
elif isinstance(result, dict) and 'error' in result:
raise NodeResponseError(result['error'])
else:
raise NodeServerError('Invalid response: ' + str(result))
def get_task(self, uuid):
"""Helper method to initialize a task from an existing UUID
>>> n = Node("localhost", 3000)
>>> t = n.get_task('00000000-0000-0000-0000-000000000000')
>>> t.__class__
<class 'pyodm.api.Task'>
Args:
uuid: Unique identifier of the task
"""
return Task(self, uuid)
class Task:
"""A task is created to process images. To create a task, use :func:`~Node.create_task`.
Args:
node (:func:`~Node`): node this task belongs to
uuid (str): Unique identifier assigned to this task.
"""
def __init__(self, node, uuid):
self.node = node
self.uuid = uuid
def get(self, url, query = {}, **kwargs):
result = self.node.get(url, query, **kwargs)
if isinstance(result, dict) and 'error' in result:
raise NodeResponseError(result['error'])
return result
def post(self, url, data):
result = self.node.post(url, data)
if isinstance(result, dict) and 'error' in result:
raise NodeResponseError(result['error'])
return result
def info(self, with_output=None):
"""Retrieves information about this task.
Returns:
:func:`~pyodm.types.TaskInfo`
"""
query = {}
if with_output is not None:
query['with_output'] = with_output
return TaskInfo(self.get('/task/{}/info'.format(self.uuid), query))
def output(self, line=0):
"""Retrieve console task output.
Args:
line (int): Optional line number that the console output should be truncated from. For example, passing a value of 100 will retrieve the console output starting from line 100. Negative numbers are also allowed. For example -50 will retrieve the last 50 lines of console output. Defaults to 0 (retrieve all console output).
Returns:
[str]: console output (one list item per row).
"""
return self.get('/task/{}/output'.format(self.uuid), {'line': line})
def cancel(self):
"""Cancel this task.
Returns:
bool: task was canceled or not
"""
return self.post('/task/cancel', {'uuid': self.uuid}).get('success', False)
def remove(self):
"""Remove this task.
Returns:
bool: task was removed or not
"""
return self.post('/task/remove', {'uuid': self.uuid}).get('success', False)
def restart(self, options=None):
"""Restart this task.
Args:
options (dict): options to use, for example {'orthophoto-resolution': 3, ...}
Returns:
bool: task was restarted or not
"""
data = {'uuid': self.uuid}
if options is not None: data['options'] = options_to_json(options)
return self.post('/task/restart', data).get('success', False)
def download_zip(self, destination, progress_callback=None, parallel_downloads=16, parallel_chunks_size=10):
"""Download this task's assets archive to a directory.
Args:
destination (str): directory where to download assets archive. If the directory does not exist, it will be created.
progress_callback (function): an optional callback with one parameter, the download progress percentage.
parallel_downloads (int): maximum number of parallel downloads if the node supports http range.
parallel_chunks_size (int): size in MB of chunks for parallel downloads
Returns:
str: path to archive file (.zip)
"""
info = self.info()
if info.status != TaskStatus.COMPLETED:
raise NodeResponseError("Cannot download task, task status is " + str(info.status))
if not os.path.exists(destination):
os.makedirs(destination, exist_ok=True)
try:
download_stream = self.get('/task/{}/download/all.zip'.format(self.uuid), stream=True)
headers = download_stream.headers
zip_path = os.path.join(destination, "{}_{}_all.zip".format(self.uuid, int(time.time())))
# Keep track of download progress (if possible)
content_length = download_stream.headers.get('content-length')
total_length = int(content_length) if content_length is not None else None
downloaded = 0
chunk_size = int(parallel_chunks_size * 1024 * 1024)
use_fallback = False
accept_ranges = headers.get('accept-ranges')
# Can we do parallel downloads?
if accept_ranges is not None and accept_ranges.lower() == 'bytes' and total_length is not None and total_length > chunk_size and parallel_downloads > 1:
num_chunks = int(math.ceil(total_length / float(chunk_size)))
num_workers = parallel_downloads
class nonloc:
completed_chunks = AtomicCounter(0)
merge_chunks = [False] * num_chunks
error = None
def merge():
current_chunk = 0
with open(zip_path, "wb") as out_file:
while current_chunk < num_chunks and nonloc.error is None:
if nonloc.merge_chunks[current_chunk]:
chunk_file = "%s.part%s" % (zip_path, current_chunk)
with open(chunk_file, "rb") as fd:
out_file.write(fd.read())
os.unlink(chunk_file)
current_chunk += 1
else:
time.sleep(0.1)
def worker():
while True:
task = q.get()
part_num, bytes_range = task
if bytes_range is None or nonloc.error is not None:
q.task_done()
break
try:
# Download chunk
res = self.get('/task/{}/download/all.zip'.format(self.uuid), stream=True, headers={'Range': 'bytes=%s-%s' % bytes_range})
if res.status_code == 206:
with open("%s.part%s" % (zip_path, part_num), 'wb') as fd:
bytes_written = 0
for chunk in res.iter_content(4096):
bytes_written += fd.write(chunk)
if bytes_written != (bytes_range[1] - bytes_range[0] + 1):
# Process again
q.put((part_num, bytes_range))
return
with nonloc.completed_chunks.lock:
nonloc.completed_chunks.value += 1
if progress_callback is not None:
progress_callback(100.0 * nonloc.completed_chunks.value / num_chunks)
nonloc.merge_chunks[part_num] = True
else:
nonloc.error = RangeNotAvailableError()
except OdmError as e:
time.sleep(5)
q.put((part_num, bytes_range))
except Exception as e:
nonloc.error = e
finally:
q.task_done()
q = queue.PriorityQueue()
threads = []
for i in range(num_workers):
t = threading.Thread(target=worker)
t.start()
threads.append(t)
merge_thread = threading.Thread(target=merge)
merge_thread.start()
range_start = 0
for i in range(num_chunks):
range_end = min(range_start + chunk_size - 1, total_length - 1)
q.put((i, (range_start, range_end)))
range_start = range_end + 1
# block until all tasks are done
q.join()
# stop workers
for i in range(len(threads)):
q.put((-1, None))
for t in threads:
t.join()
merge_thread.join()
if nonloc.error is not None:
if isinstance(nonloc.error, RangeNotAvailableError):
use_fallback = True
else:
raise nonloc.error
else:
use_fallback = True
if use_fallback:
# Single connection, boring download
with open(zip_path, 'wb') as fd:
for chunk in download_stream.iter_content(4096):
downloaded += len(chunk)
if progress_callback is not None and total_length is not None:
progress_callback((100.0 * float(downloaded) / total_length))
fd.write(chunk)
except (requests.exceptions.Timeout, requests.exceptions.ConnectionError, ReadTimeoutError) as e:
raise NodeConnectionError(e)
return zip_path
def download_assets(self, destination, progress_callback=None, parallel_downloads=16, parallel_chunks_size=10):
"""Download this task's assets to a directory.
Args:
destination (str): directory where to download assets. If the directory does not exist, it will be created.
progress_callback (function): an optional callback with one parameter, the download progress percentage
parallel_downloads (int): maximum number of parallel downloads if the node supports http range.
parallel_chunks_size (int): size in MB of chunks for parallel downloads
Returns:
str: path to saved assets
"""
zip_path = self.download_zip(destination, progress_callback=progress_callback, parallel_downloads=parallel_downloads, parallel_chunks_size=parallel_chunks_size)
with zipfile.ZipFile(zip_path, "r") as zip_h:
zip_h.extractall(destination)
os.remove(zip_path)
return destination
def wait_for_completion(self, status_callback=None, interval=3, max_retries=5, retry_timeout=5):
"""Wait for the task to complete. The call will block until the task status has become
:func:`~TaskStatus.COMPLETED`. If the status is set to :func:`~TaskStatus.CANCELED` or :func:`~TaskStatus.FAILED`
it raises a TaskFailedError exception.
Args:
status_callback (function): optional callback that will be called with task info updates every interval seconds.
interval (int): seconds between status checks.
max_retries (int): number of repeated attempts that should be made to receive a status update before giving up.
retry_timeout (int): wait N*retry_timeout between attempts, where N is the attempt number.
"""
retry = 0
while True:
try:
info = self.info()
except NodeConnectionError as e:
if retry < max_retries:
retry += 1
time.sleep(retry * retry_timeout)
continue
else:
raise e
retry = 0
if status_callback is not None:
status_callback(info)
if info.status in [TaskStatus.COMPLETED, TaskStatus.CANCELED, TaskStatus.FAILED]:
break
time.sleep(interval)
if info.status in [TaskStatus.FAILED, TaskStatus.CANCELED]:
raise TaskFailedError(info.status)
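# Illustrative end-to-end sketch (not part of the original module): create a task on a
# local NodeODM instance, wait for it to finish, then download its assets. The host,
# port, image paths, options and destination below are placeholder values and assume a
# reachable node; see the docstrings above for the individual calls.
def _example_process_images(image_paths, destination="odm_results"):
    node = Node("localhost", 3000)
    # Upload the images and start processing (the options are just an example set)
    task = node.create_task(image_paths, {"orthophoto-resolution": 2, "dsm": True})
    # Block until COMPLETED; raises TaskFailedError on FAILED or CANCELED
    task.wait_for_completion(status_callback=lambda info: print(info.status))
    # Download and extract all.zip into the destination directory
    return task.download_assets(destination)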
|
Linkedin DL.py
|
"""MIT License
Copyright (c) 2019 Zdravko Georgiev
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE. """
from selenium import webdriver
from colorama import init, Fore, Back, Style
from tqdm import tqdm
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
from configparser import ConfigParser
import urllib.request, time, sys, os,urllib.request, platform ,\
webbrowser,urllib.error, shutil,threading, PySimpleGUI as sg, requests
configPath = "Storage.dll"
results = ""
config = ConfigParser()
config.read(configPath)
links = str(config.get("mainConfig", "downloadlist")).split(",")
default_button_color = '#FFF', '#444'
try:
open(configPath,"r")
except:
with open(configPath, "a") as f:
f.writelines\
("""[mainConfig]
login = True
loginuser =
loginpass =
downloadlist =
savedirectory =
[GoogleConfig]
bragent = Mozilla/5.0 (Windows NT 6.1; WOW64; Trident/7.0; yie11; rv:11.0) like Gecko
extensions = True
sandbox = True
usrProfile = True
window = True
gpu = True
[Timings]
logintime = 0
loadingallvideos = 1
extractcoursename = 1
getvideolink = 2
downloaddelay = 1
""")
f.close()
for lin in links:
results += lin + "\n"
def remove(path):
""" param <path> could either be relative or absolute. """
if os.path.isfile(path):
os.remove(path) # remove the file
elif os.path.isdir(path):
shutil.rmtree(path) # remove dir and all contains
else:
raise ValueError("file {} is not a file or dir.".format(path))
def build_chrome_options ():
options = webdriver.ChromeOptions()
options.accept_untrusted_certs = True
options.assume_untrusted_cert_issuer = True
if config.getboolean("GoogleConfig", "extensions") == True:
options.add_argument("--disable-extensions")
if config.getboolean("GoogleConfig", "sandbox") == True:
options.add_argument("--no-sandbox")
if config.getboolean("GoogleConfig", "window") == False:
options.add_argument("--headless")
if config.getboolean("GoogleConfig", "gpu") == True:
options.add_argument("--disable-gpu")
if config.getboolean("GoogleConfig", "usrProfile"):
if platform.system() == "Windows":
add_default_chrome_path = os.path.expanduser('~') + r"\AppData\Local\Google\Chrome\User Data\Default"
elif platform.system() == "MacOS" :
add_default_chrome_path = os.path.expanduser('~') + r"/Library/Application/Support/Google/Chrome/Default"
elif platform.system() == "Linux" :
add_default_chrome_path = os.path.expanduser(
'~') + r"/.config/google-chrome/default"
else:
add_default_chrome_path = ""
options.add_argument("--user-data-dir=" + os.path.abspath(add_default_chrome_path))
options.add_argument(
'--user-agent=' + config.get("GoogleConfig","bragent"))
options.add_argument("--disable-impl-side-painting")
options.add_argument("--disable-setuid-sandbox")
options.add_argument("--disable-seccomp-filter-sandbox")
options.add_argument("--disable-breakpad")
options.add_argument("--disable-client-side-phishing-detection")
options.add_argument("--disable-cast")
options.add_argument("--disable-cast-streaming-hw-encoding")
options.add_argument("--disable-cloud-import")
options.add_argument("--disable-popup-blocking")
options.add_argument("--ignore-certificate-errors")
options.add_argument("--disable-session-crashed-bubble")
options.add_argument("--disable-ipv6")
options.add_argument("--allow-http-screen-capture")
options.add_experimental_option("prefs", {
"download.default_directory": "c:/",
"download.prompt_for_download": False,
"download.directory_upgrade": True,
"safebrowsing_for_trusted_sources_enabled": False,
"safebrowsing.enabled": False
})
return options
##################################################
##################################################
# Progress bar Class tqdm
###################################################
class progress_bar(tqdm):
def update_to(self, b=1, bsize=1, tsize=None):
if tsize is not None:
self.total = tsize
self.update(b * bsize - self.n)
def show_progress_bar(url, filename, output_path, type=0, sessions = {}):
with progress_bar(unit='B',smoothing=0.3, unit_scale=True,
miniters=1, desc=filename) as t:
if type == 0:
urllib.request.urlretrieve(url, filename=output_path, reporthook=t.update_to)
else:
# local_filename = url.split('?')[0]
# file_name = local_filename.split("/")
# file_name1 = file_name[len(file_name) - 1]
r = requests.get(url, cookies=sessions, stream=True)
with open(os.path.join(output_path,filename), 'wb') as f:
for chunk in r.iter_content(chunk_size=1024):
if chunk: # filter out keep-alive new chunks
f.write(chunk)
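# Illustrative sketch (not part of the original script): update_to() above follows the
# reporthook signature urllib.request.urlretrieve() expects (block count, block size,
# total size), so a plain download with a progress bar reduces to the following.
# The URL and output filename are placeholder values.
def _example_download_with_progress(url="https://example.com/file.bin", out_path="file.bin"):
    with progress_bar(unit='B', unit_scale=True, miniters=1, desc=out_path) as t:
        urllib.request.urlretrieve(url, filename=out_path, reporthook=t.update_to)
    return out_path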
class bcolors:
if os.name == "posix":
init(autoreset=True)
# colors foreground text:
fc = "\033[0;96m"
fg = "\033[0;92m"
fw = "\033[0;97m"
fr = "\033[0;91m"
fb = "\033[0;94m"
fy = "\033[0;33m"
fm = "\033[0;35m"
# colors background text:
bc = "\033[46m"
bg = "\033[42m"
bw = "\033[47m"
br = "\033[41m"
bb = "\033[44m"
by = "\033[43m"
bm = "\033[45m"
# colors style text:
sd = Style.DIM
sn = Style.NORMAL
sb = Style.BRIGHT
else:
init(autoreset=True)
# colors foreground text:
fc = Fore.CYAN
fg = Fore.GREEN
fw = Fore.WHITE
fr = Fore.RED
fb = Fore.BLUE
fy = Fore.YELLOW
fm = Fore.MAGENTA
# colors background text:
bc = Back.CYAN
bg = Back.GREEN
bw = Back.WHITE
br = Back.RED
bb = Back.BLUE
by = Fore.YELLOW
bm = Fore.MAGENTA
# colors style text:
sd = Style.DIM
sn = Style.NORMAL
sb = Style.BRIGHT
def is_bad_proxy(proxy):
try:
proxy_handler = urllib.request.ProxyHandler({'http': proxy})
opener = urllib.request.build_opener(proxy_handler)
opener.addheaders = [('User-agent', 'Mozilla/5.0')]
urllib.request.install_opener(opener)
req = urllib.request.Request('http://www.google.com') # change the URL to test here
sock = urllib.request.urlopen(req)
except Exception as detail:
return True
return False
brlist = [
"Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; WOW64; Trident/6.0)",
"Mozilla/5.0 (Windows NT 6.1; WOW64; Trident/7.0; yie11; rv:11.0) like Gecko",
"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/74.0.3729.169 Safari/537.36",
"Mozilla/5.0 (Linux; Android 8.0.0; SM-G930F Build/R16NW; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/74.0.3729.157 Mobile Safari/537.36",
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_4) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/74.0.3729.157 Safari/537.36",
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36",
"Mozilla/5.0 (Linux; Android 7.0; SM-G610M Build/NRD90M) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/68.0.3440.91 Mobile Safari/537.36",
"Mozilla/5.0 (iPhone; CPU iPhone OS 11_4_1 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/11.0 Mobile/15E148 Safari/604.1",
"Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10_5_4; de-de) AppleWebKit/525.18 (KHTML, like Gecko) Version/3.1.2 Safari/525.20.1",
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_1) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/12.0.1 Safari/605.1.15",
"Mozilla/5.0 (iPhone; CPU iPhone OS 10_3_1 like Mac OS X) AppleWebKit/603.1.30 (KHTML, like Gecko) Version/10.0 Mobile/14E304 Safari/602.1",
"Mozilla/5.0 (Linux; Android 5.0; SAMSUNG SM-G900F Build/LRX21T) AppleWebKit/537.36 (KHTML, like Gecko) SamsungBrowser/2.1 Chrome/34.0.1847.76 Mobile Safari/537.36",
"Mozilla/5.0 (compatible; bingbot/2.0; +http://www.bing.com/bingbot.htm)",
"Mozilla / 5.0(compatible; bingbot / 2.0; +http://www.bing.com/bingbot.htm)",
"AdsBot-Google (+http://www.google.com/adsbot.html)",
"Mozilla/5.0 (Linux; Android 6.0.1; Nexus 5X Build/MMB29P) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2272.96 Mobile Safari/537.36 (compatible; Googlebot/2.1; +http://www.google.com/bot.html)",
"Mozilla/5.0 (Windows NT 6.1; WOW64; rv:54.0) Gecko/20100101 Firefox/54.0",
"Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:67.0) Gecko/20100101 Firefox/67.0",
]
config1 = [
[sg.T("Login Options",size=(500, 1),auto_size_text=True,justification='center', font=("Arial", 10), background_color="#888", text_color="#FFF", pad=(5, 5))],
[sg.Text("Login", size=(14, 1), pad=(0, 5)), sg.Checkbox(text="", default=(config.getboolean("mainConfig", "login"))), ],
[sg.Text("Username", size=(14, 1), pad=(0, 5)), sg.InputText(default_text=(config.get("mainConfig", "loginUser")))],
[sg.Text("Password", size=(14, 1), pad=(0, 5)), sg.InputText(password_char="*",default_text=config.get("mainConfig", "loginPass"))],
]
config2 = [
[sg.T("Download Options",size=(500, 1), auto_size_text=True,justification='center', font=("Arial", 10), background_color="#888", text_color="#FFF", pad=(5, 5))],
[sg.Multiline(tooltip="Add each course link in a row as follows:"
"\nhttps://www.linkedin.com/learning/photoshop-cc-2015-new-features-3\nhttps://www.linkedin.com/learning/photoshop-cs6-image-optimization\n https://www.linkedin.com/learning/photoshop-cc-for-photographers-sharpening \n ",
font=("Helvetica", 7),
autoscroll=True,
enable_events=True,
enter_submits=True,
auto_size_text=True,
key="key1",
size=(650,15),
default_text=results.strip() + "\n",
background_color="#FFF")],
[sg.Text("Save Directory", size=(14, 1), pad=(0, 5)),
sg.InputText(config.get("mainConfig", "saveDirectory"),background_color="#FFF", size=(30,1)),
sg.FolderBrowse(button_text="Select", button_color=default_button_color, size=(15,1))],
]
config3 = [
[sg.T("Chrome Driver Options",size=(500, 1),auto_size_text=True,justification='center', font=("Arial", 10), background_color="#888", text_color="#FFF", pad=(5, 5))],
[sg.T("Browser Agents", size=(13,1))],
[sg.DropDown(brlist,default_value=config.get("GoogleConfig","bragent"),pad=(5,5), size=(60,10))],
[sg.Checkbox(pad=(5,5),text="Extensions",tooltip="Enable/Disable browser extensions ",default=config.getboolean("GoogleConfig", "extensions"))],
[sg.Checkbox(pad=(5,5),text="Window", tooltip="Show browser window", default=config.getboolean("GoogleConfig", "extensions"))],
[sg.Checkbox(pad=(5,5),text="GPU",tooltip="Add/Remove browser GPU rendering", default=config.getboolean("GoogleConfig", "extensions"))],
[sg.Checkbox(pad=(5,5),text="Use Profile", tooltip=("Trying to use saved login credentials"),default=config.getboolean("GoogleConfig", "extensions"))],
[sg.Checkbox(pad=(5,5),text="Sandbox",tooltip="Using Sandbox",default=config.getboolean("GoogleConfig", "extensions"))]
]
config4 = [
[sg.T("Delay Settings",size=(500, 1),auto_size_text=True,justification='center', font=("Arial", 10), background_color="#888", text_color="#FFF", pad=(5, 5))],
[sg.T(pad=(5,5),text="Manual Login Delay",size=(20, 1)),
sg.InputText(size=(5,1), default_text=(config.getint("Timings", "logintime")))],
[sg.T(pad=(5,5),text="Get Video List Delay",size=(20, 1)),
sg.InputText(size=(5,1), default_text=(config.getint("Timings", "loadingallvideos")))],
[sg.T(pad=(5,5),text="Get Course Name Delay",size=(20, 1)),
sg.InputText(size=(5,1), default_text=(config.getint("Timings", "extractcoursename"))),
],
[sg.T(pad=(5,5),text="Get Video Links Delay",size=(20, 1)),
sg.InputText(size=(5,1), default_text=(config.getint("Timings", "getvideolink")))],
[sg.T(pad=(5,5),text="Download Delay",size=(20, 1)),
sg.InputText(size=(5,1), default_text=(config.getint("Timings", "downloaddelay")))]
]
config5 = [
[sg.Frame("",layout=(
[sg.T("LinkedIn Downloader", size=(500, 1), auto_size_text=True, justification='center',
font=("Arial", 10), background_color="#888", text_color="#FFF", pad=(5, 5))],
[sg.T("Author: @r00tme", justification='center')],
[sg.T("Version: GUI 0.16", justification='center')],
[sg.T("Release Date: 26/11/2019 ", justification='center')],
[sg.Button(button_text="Bug Report", button_color=default_button_color, size=(10, 1),
key="_open_git_link_")]
), element_justification="center")]
]
layout = [[sg.TabGroup([[
sg.Tab('Login', config1),
sg.Tab('Download', config2),
sg.Tab('Browser', config3),
sg.Tab('Timings', config4),
sg.Tab('About', config5)
]])],
[
sg.Button('Start', button_color=default_button_color, size=(15,1), auto_size_button=False),
sg.Button('Stop', button_color=default_button_color, size=(15,1), auto_size_button=False),
# sg.Button('Resume',button_color=default_button_color, size=(15,1), auto_size_button=False),
# sg.Button('Pause',button_color=default_button_color, size=(15,1), auto_size_button=False)
]
]
window = sg.Window('LinkedIn Downloader', icon="",
auto_size_text=True,
auto_size_buttons=True,
background_color="#d4d0c8",
use_default_focus=True,
text_justification="left",
size=(600,350),
debugger_enabled=False,
element_justification="center",
).Layout(layout).Finalize()
window.Element('Start').Update(disabled=False)
window.Element('Stop').Update(disabled=True)
# window.Element('Resume').Update(disabled=True)
# window.Element('Pause').Update(disabled=True)
# config.set('Timings', 'pause', "False")
valueList = ""
def the_gui():
while True:
event, values = window.Read(timeout=100)
if event is None or event == "Exit":
break
# if event == "Pause":
# window.FindElement("Pause").Update(disabled=True)
# window.FindElement("Resume").Update(disabled=False)
# config.set('Timings', 'pause', "True")
# dl_run['run'] = False
# thread.join()
# elif event == "Resume":
# config.set('Timings', 'pause', "False")
# window.FindElement("Pause").Update(disabled=False)
# window.FindElement("Resume").Update(disabled=True)
# config.set("Timings", "pause", "False")
# dl_run['run'] = True
# thread.join()
if event == "_open_git_link_":
webbrowser.open('https://github.com/r00tmebaby', new=2)
if event is not sg.TIMEOUT_KEY:
config.set('mainConfig', 'loginuser', str(values[1]).strip())
config.set('mainConfig', 'loginpass', str(values[2]).strip())
config.set('GoogleConfig', 'bragent', str(values[4]))
if type(values[0]) == bool:
config.set('mainConfig', 'login', str(values[0]))
if type(values[4]) == bool:
config.set('GoogleConfig', 'extensions', str(values[5]))
if type(values[5]) == bool:
config.set('GoogleConfig', 'window', str(values[6]))
if type(values[6]) == bool:
config.set('GoogleConfig', 'gpu', str(values[7]))
if type(values[7]) == bool:
config.set('GoogleConfig', 'usrProfile', str(values[8]))
if type(values[8]) == bool:
config.set('GoogleConfig', 'sandbox', str(values[9]))
if os.path.isdir(values[3]):
config.set('mainConfig', 'savedirectory', str(values[3]))
if values[10].isdigit() :
config.set('Timings', 'logintime', str(values[10]))
else:
sg.PopupError("Value must be number")
if values[11].isdigit() :
config.set('Timings', 'loadingallvideos', str(values[11]))
else:
sg.PopupError("Value must be number")
if values[12].isdigit():
config.set('Timings', 'extractcoursename', str(values[12]))
else:
sg.PopupError("Value must be number")
if values[13].isdigit():
config.set('Timings', 'getvideolink', str(values[13]))
else:
sg.PopupError("Value must be number")
if values[14].isdigit():
config.set('Timings', 'downloaddelay', str(values[14]))
else:
sg.PopupError("Value must be number")
config.set('mainConfig', 'downloadlist', ",".join(str(values["key1"]).split()) + ",")
with open(configPath, "w+") as f:
config.write(f)
if event == "Start":
#window.FindElement("Pause").Update(disabled=False)
if not os.path.isfile("chromedriver.exe"):
sg.Popup("Chrome Driver is missing, please download it from here http://chromedriver.chromium.org/.\n The program can not be started without it", button_type=3,auto_close_duration=1, auto_close=True)
else:
event, values = window.Read(timeout=0)
if event is None or event == "Exit":
break
if config.get("mainConfig", "downloadlist") != "" and config.get("mainConfig", "savedirectory") != "":
window.Element('Start').Update(disabled=True)
window.Element('Stop').Update(disabled=False)
threading.Thread(target=downloader).start()
else:
sg.Popup("Please specify download folder and at least one course url")
elif event == "Stop":
#config.set('Timings', 'pause', "False")
# window.FindElement("Pause").Update(disabled=True)
# window.FindElement("Resume").Update(disabled=True)
os.system('taskkill /f /im chromedriver.exe')
os.system('taskkill /f /im chrome.exe')
window.Element('Start').Update(disabled=False)
window.Element('Stop').Update(disabled=True)
def DownloadFile(url,sessions):
local_filename = url.split('?')[0]
file_name = local_filename.split("/")
file_name1 = file_name[len(file_name) -1]
r = requests.get(url,cookies=sessions,stream=True)
with open(file_name1, 'wb') as f:
for chunk in r.iter_content(chunk_size=1024):
if chunk: # filter out keep-alive new chunks
f.write(chunk)
return
def downloader():
driver = webdriver.Chrome("chromedriver.exe", options=build_chrome_options())
sys.stdout.write(
"\r%s%s###############################################\n"
"# LinkedIn Learning Download #\n"
"# @author r00tme 24/11/2019 #\n"
"# @version: GUI 0.16 #\n"
"##############################################\n\n" % (
bcolors.sd, bcolors.fc))
sys.stdout.flush()
links = filter(None, str(config.get("mainConfig", "downloadlist")).split(","))
from bs4 import BeautifulSoup
time.sleep(config.getint("Timings", "logintime"))
if config.getboolean('mainConfig', 'login'):
sys.stdout.write('\n%s[%s*%s]%s Trying to login on LinkedIn' % (
bcolors.bm, bcolors.fc, bcolors.fm, bcolors.fc))
sys.stdout.flush()
driver.get("https://www.linkedin.com/login")
time.sleep(.3)
driver.find_element_by_id("username").clear()
driver.find_element_by_id("password").clear()
driver.find_element_by_id("username").send_keys(config.get('mainConfig', 'loginUser'))
time.sleep(.3)
driver.find_element_by_id("password").send_keys(config.get('mainConfig', 'loginpass'))
time.sleep(.3)
driver.find_element_by_css_selector(".btn__primary--large").click()
try:
if driver.find_element_by_id("captcha-internal"):
sys.stdout.write('\n%s[%s*%s]%s Recaptcha is detected! Solve it and restart the program with unchecked login' % (
bcolors.bm, bcolors.fr, bcolors.fm, bcolors.bm))
sys.stdout.flush()
time.sleep(1000)
except:
pass
if not os.path.isfile(configPath):
sys.stdout.write('\n%s[%s*%s]%s The configuration file does not exist' % (
bcolors.bm, bcolors.fc, bcolors.fm, bcolors.fc))
sys.stdout.flush()
sys.exit()
courses_count = 0
total_counter = 0
prefix = ""
for items_for_download in links:
counter = 0
temp_counter = 0
time.sleep(config.getint("Timings", "loadingallvideos"))
driver.get(items_for_download)
sys.stdout.write('\n%s[%s*%s]%s Working on course %s' % (
bcolors.bm, bcolors.fc, bcolors.fm, bcolors.fc, items_for_download))
sys.stdout.flush()
time.sleep(config.getint("Timings", "loadingallvideos"))
try:
driver.find_element_by_class_name(
'course-body__info-tab-name-overview').click()
except:
time.sleep(config.getint("Timings", "extractcoursename"))
try:
driver.find_element_by_class_name(
'course-body__info-tab-name-overview').click()
except:
continue
elementss = driver.find_element_by_class_name("course-info__difficulty").text
if elementss == "Intermediate":
prefix = "1-"
elif elementss == "Advanced":
prefix = "2-"
else:
prefix = "0-"
course_name = (prefix + items_for_download.split("?")[0].split("/")[4].replace("-", " ").title()).rstrip()
time.sleep(config.getint("Timings", "extractcoursename"))
if os.path.isdir(config.get('mainConfig', 'saveDirectory') + r"\\" + course_name):
remove(config.get('mainConfig', 'saveDirectory') + r"\\" + course_name)
time.sleep(1)
os.makedirs(config.get('mainConfig', 'saveDirectory') + r"\\" + course_name)
sys.stdout.write('\n%s[%s*%s]%s Directory %s has been successfully created' % (
bcolors.bm, bcolors.fc, bcolors.fm, bcolors.fc, course_name))
sys.stdout.flush()
elementss2 = driver.find_element_by_css_selector(
'.course-info__details-section.course-info__divider').get_attribute('innerHTML')
if not os.path.isfile(config.get('mainConfig', 'saveDirectory') + r"\\" + course_name + r"\info.php") or \
os.stat(config.get('mainConfig', 'saveDirectory') + r"\\" + course_name + r"\info.php").st_size == 0:
f = open(config.get('mainConfig', 'saveDirectory') + r"\\" + course_name + r"\info.php", "a+",
encoding="utf-8")
f.write(elementss2)
driver.find_element_by_css_selector(
".course-body__info-tab-name.course-body__info-tab-name-content.ember-view").click()
time.sleep(config.getint("Timings", "extractcoursename"))
time.sleep(config.getint("Timings", "extractcoursename"))
driver.find_element_by_css_selector('.video-item__link.t-black.ember-view').click()
time.sleep(config.getint("Timings", "extractcoursename"))
all_cookies = driver.get_cookies()
cookies = {}
for s_cookie in all_cookies:
cookies[s_cookie["name"]] = s_cookie["value"]
try:
driver.find_element_by_class_name("course-body__exercise-files-header-title").click()
for each_file in driver.find_elements_by_class_name("exercise-file__link"):
# req = requests.get(each_file.get_attribute("href"),stream=True,cookies=cookies)
# DownloadFile(each_file.get_attribute("href"),cookies)
excersize_file = "Downloading exercise file: %s.zip" % course_name
show_progress_bar(each_file.get_attribute("href"),
"\r" + "%s[%s*%s]%s %s" % (bcolors.bm, bcolors.fc, bcolors.fm, bcolors.fm, excersize_file),
os.path.join(config.get('mainConfig', 'saveDirectory'),course_name , course_name + ".zip"),
sessions=cookies)
except:
sys.stdout.write('\n%s[%s*%s]%s Exercise files were not found ' % (
bcolors.bm, bcolors.fc, bcolors.fm, bcolors.fr))
sys.stdout.flush()
pass
# Reload the course page with autoplay enabled and collect the per-video links from the contents tab
items_for_download = items_for_download.split("?")[0] + "?autoplay=true&" + items_for_download.split("?")[1]
driver.get(items_for_download)
driver.find_element_by_class_name("course-body__info-tab-name-content").click()
soup = BeautifulSoup(driver.page_source,features="html.parser")
video_links = []
for a in soup.find_all('a', href=True):
all_links = str(a['href']).split("&")
if len(all_links) > 1:
video_links.append("https://www.linkedin.com" + all_links[0] + "&" + all_links[1])
# Loop through all videos
for video_link in video_links:
driver.get(video_link)
try:
WebDriverWait(driver, config.getint("Timings", "getvideolink")).until(
EC.presence_of_element_located((By.TAG_NAME, "video")))
except:
pass
video_src = driver.find_element_by_tag_name("video").get_attribute("src")
video_name = driver.current_url.split("/")[5].replace("-", " ").split("?")[0].title()
videoSrt = 'https://www.lynda.com/ajax/player/transcript?courseID=' + video_src[5] + '&videoID=' + video_src[7]
show_progress_bar(videoSrt, "file.srt",os.path.join(config.get('mainConfig', 'saveDirectory'),course_name , video_name + ".srt"),
sessions=cookies)
time.sleep(config.getint("Timings", "downloaddelay"))
counter += 1
video_name = "%04d_%s_%s" % (counter, video_name, "-")
save_dir = config.get('mainConfig',
'saveDirectory') + r"\\" + course_name + r"\\" + video_name + ".mp4"
show_progress_bar(video_src, "\r" + "%s[%s*%s]%s %s" % (bcolors.bm, bcolors.fc, bcolors.fm, bcolors.fc, video_name), save_dir)
driver.find_element_by_css_selector(".vjs-control.vjs-button.vjs-next-button").click()
total_counter += 1
temp_counter += 1
time.sleep(config.getint("Timings", "downloaddelay"))
if counter == len(video_links):
courses_count += 1
sys.stdout.write(
"\n%s[%s+%s]%s%sYou have successfully downloaded course %s%s %swith %d videos. Downloaded %d courses and %d videos in total" % (
bcolors.bm, bcolors.fc, bcolors.fm, bcolors.fc,
bcolors.sd + bcolors.fc, bcolors.sb + bcolors.fc, course_name,
bcolors.sd + bcolors.fc, temp_counter, courses_count, total_counter)
)
break
sys.stdout.flush()
if __name__ == '__main__':
the_gui()
|
naccessWithComplexPDBFileGenerator_Socket.py
|
#################
## Created by Engin Cukuroglu
#################
import multiprocessing
from codesOfTools import interfaceNameSorter, naccessRSAFileReaderReturnOnlyAbsASADictionary
from multiprocessing import Queue, Process
import os, sys, time
import subprocess
import tempfile
import socket
def generateComplexPDBFilesAndRunNaccessWork(taskQueue_pdbDict, naccessResultsQueue, allPDBFilesDirectory, minInterfaceResidueCriteria, differenceBetweenComplexAndMonomerASACriteria, naccessRunFileDirectory, rsaFileKeeperDict, naccessErrorQueue):
while True:
pdbName, pdbDict = taskQueue_pdbDict.get()
taskQueue_pdbDict.task_done()
if pdbName is None:
break
pdbFileDirectory = '%s/%s.pdb' %(allPDBFilesDirectory, pdbName)
if os.path.exists(pdbFileDirectory):
createPDBFileList = []
for chainInfo in pdbDict['chains']:
createPDBFileList.append([chainInfo, '', pdbDict['chains'][chainInfo][0], pdbDict['chains'][chainInfo][1], pdbDict['chains'][chainInfo][2], pdbDict['chains'][chainInfo][3]])
pdbFile = open(pdbFileDirectory, 'r')
pdbResidueDict = {}
pdbAtomDict = {}
pdbChainIDDict = {}
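# Parse the PDB file once and distribute each ATOM record to every requested
# chain combination. Only the first alternate-location / insertion-code variant
# of each residue atom is accepted, so duplicate atom records are written only
# once per chain group.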
while True:
pdbLine = pdbFile.readline()
if pdbLine == '' or pdbLine[0:3] == 'END':
break
if pdbLine[0:4] == 'ATOM':
chainID = pdbLine[21]
pdbChainIDDict[chainID] = 1
resNo = pdbLine[22:26].strip()
resICode = pdbLine[26]
atomAlternativeLocationIndicator = pdbLine[16]
resType = pdbLine[17:20].strip()
atomType = pdbLine[12:16].strip()
resDictKey = '%s_%s' %(resNo, chainID)
resDictValue = '%s_%s_%s' %(resType, resNo, chainID)
atomDictKey = '%s_%s_%s_%s' %(resType, resNo, chainID, atomType)
if not resDictKey in pdbResidueDict:
pdbResidueDict[resDictKey] = [resDictValue, resICode, atomAlternativeLocationIndicator]
pdbAtomDict[atomDictKey] = 1
for tempPDBFileProperty in createPDBFileList:
if chainID in tempPDBFileProperty[0]:
tempPDBFileProperty[1] = '%s%s' %(tempPDBFileProperty[1], pdbLine)
else:
if pdbResidueDict[resDictKey][0] == resDictValue:
if pdbResidueDict[resDictKey][1] == resICode:
if not atomDictKey in pdbAtomDict:
pdbAtomDict[atomDictKey] = 1
for tempPDBFileProperty in createPDBFileList:
if chainID in tempPDBFileProperty[0]:
tempPDBFileProperty[1] = '%s%s' %(tempPDBFileProperty[1], pdbLine)
pdbFile.close()
for tempPDBFileProperty in createPDBFileList:
chainInfo = tempPDBFileProperty[0]
chainExistenceStatus = 1
for chainID in chainInfo:
if not chainID in pdbChainIDDict:
chainExistenceStatus = 0
break
if chainExistenceStatus == 1:
tempPDBFile = open(tempPDBFileProperty[3], 'w')
tempPDBFile.write(tempPDBFileProperty[1])
tempPDBFile.close()
# tempNaccessErrorFile = tempfile.NamedTemporaryFile(mode='w+b')
# naccessRunString = '%s %s' %(naccessRunFileDirectory, tempPDBFileProperty[3])
# proc = subprocess.Popen([naccessRunFileDirectory, tempPDBFileProperty[3]], stdout=None, stderr=tempNaccessErrorFile, close_fds=True)
# ret_code = proc.wait()
# naccessErr = False
# if not ret_code == None:
# tempNaccessErrorFile.flush()
# tempNaccessErrorFile.seek(0)
# naccessErr = tempNaccessErrorFile.readlines()
# tempNaccessErrorFile.close()
# if naccessErr:
# naccessErrorQueue.put('Error: %s' %(tempPDBFileProperty[3]))
# try:
# ret_code = subprocess.check_call([naccessRunFileDirectory, tempPDBFileProperty[3]])
# except:
# naccessErrorQueue.put('Error: %s' %(tempPDBFileProperty[3]))
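# Capture naccess' stderr through a socketpair with a short receive timeout.
# This lets the parent poll the child while draining error output, kill it if
# the accumulated error text grows beyond 4 KB, and report any captured
# message through naccessErrorQueue once the process has exited.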
stderr = socket.socketpair()
stderr[0].settimeout(0.01)
errMessage = ''
proc = subprocess.Popen([naccessRunFileDirectory, tempPDBFileProperty[3]], stdout=None, stderr=stderr[1], close_fds=True)
err = ''
while True:
proc.poll()
try:
# decode the stderr bytes so they concatenate cleanly with the error string
errtmp = stderr[0].recv(4096).decode('utf-8', 'replace')
except socket.timeout:
errtmp = ''
err = err + errtmp
if len(err) > 4096:
proc.kill()
proc.wait()
if proc.returncode is not None:
returnCode = proc.returncode
break
if err:
naccessErrorQueue.put('Error: %s, %s' %(tempPDBFileProperty[3], err))
rsaFileDirectory = '%s.rsa' %(tempPDBFileProperty[2])
asaFileDirectory = '%s.asa' %(tempPDBFileProperty[2])
logFileDirectory = '%s.log' %(tempPDBFileProperty[2])
# os.system('%s %s > %s' %(naccessRunFileDirectory, tempPDBFileProperty[3], naccessRunOutputFileDirectory))
if os.path.exists(rsaFileDirectory):
os.system('mv %s %s' %(rsaFileDirectory, tempPDBFileProperty[4]))
if os.path.exists(asaFileDirectory):
os.system('mv %s %s' %(asaFileDirectory, tempPDBFileProperty[5]))
else:
if os.path.exists(asaFileDirectory):
os.system('rm %s' %(asaFileDirectory))
if os.path.exists(logFileDirectory):
os.system('rm %s' %(logFileDirectory))
# if os.path.exists(naccessRunOutputFileDirectory):
# os.system('rm %s' %(naccessRunOutputFileDirectory))
if os.path.exists(tempPDBFileProperty[3]):
os.system('rm %s' %(tempPDBFileProperty[3]))
naccessSolutionUsageDictionary = {}
for interfaceList in pdbDict['interfaces']:
chain_1_rsaFileDirectory = interfaceList[1]
chain_2_rsaFileDirectory = interfaceList[2]
allChains_rsaFileDirectory = interfaceList[3]
chain_1_rsaDict = {}
chain_2_rsaDict = {}
allChains_rsaDict = {}
if not chain_1_rsaFileDirectory in naccessSolutionUsageDictionary:
naccessSolutionUsageDictionary[chain_1_rsaFileDirectory] = [0, interfaceList[4]]
if not chain_2_rsaFileDirectory in naccessSolutionUsageDictionary:
naccessSolutionUsageDictionary[chain_2_rsaFileDirectory] = [0, interfaceList[5]]
if not allChains_rsaFileDirectory in naccessSolutionUsageDictionary:
naccessSolutionUsageDictionary[allChains_rsaFileDirectory] = [0, interfaceList[6]]
if os.path.exists(chain_1_rsaFileDirectory):
chain_1_rsaDict = naccessRSAFileReaderReturnOnlyAbsASADictionary(chain_1_rsaFileDirectory)
if len(chain_1_rsaDict) == 0:
interfaceNaccessResultText = '%s\t2\t%s does not exist\n' %(interfaceList[0], chain_1_rsaFileDirectory)
naccessResultsQueue.put(interfaceNaccessResultText)
continue
else:
interfaceNaccessResultText = '%s\t2\t%s does not exist\n' %(interfaceList[0], chain_1_rsaFileDirectory)
naccessResultsQueue.put(interfaceNaccessResultText)
continue
if os.path.exists(chain_2_rsaFileDirectory):
chain_2_rsaDict = naccessRSAFileReaderReturnOnlyAbsASADictionary(chain_2_rsaFileDirectory)
if len(chain_2_rsaDict) == 0:
interfaceNaccessResultText = '%s\t2\t%s does not exist\n' %(interfaceList[0], chain_2_rsaFileDirectory)
naccessResultsQueue.put(interfaceNaccessResultText)
continue
else:
interfaceNaccessResultText = '%s\t2\t%s does not exist\n' %(interfaceList[0], chain_2_rsaFileDirectory)
naccessResultsQueue.put(interfaceNaccessResultText)
continue
if os.path.exists(allChains_rsaFileDirectory):
allChains_rsaDict = naccessRSAFileReaderReturnOnlyAbsASADictionary(allChains_rsaFileDirectory)
if len(allChains_rsaDict) == 0:
interfaceNaccessResultText = '%s\t2\t%s does not exist\n' %(interfaceList[0], allChains_rsaFileDirectory)
naccessResultsQueue.put(interfaceNaccessResultText)
continue
else:
interfaceNaccessResultText = '%s\t2\t%s does not exist\n' %(interfaceList[0], allChains_rsaFileDirectory)
naccessResultsQueue.put(interfaceNaccessResultText)
continue
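# Interface criterion: a residue counts as an interface residue when its
# absolute ASA in the isolated chain exceeds its ASA in the complex by more
# than differenceBetweenComplexAndMonomerASACriteria. The naccess output files
# of an interface are kept only if both chains contribute at least
# minInterfaceResidueCriteria such residues.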
residueASAChangesCounter_chain_1 = 0
residueASAChangesCounter_chain_2 = 0
for resKey in allChains_rsaDict:
if resKey in chain_1_rsaDict:
if (float(chain_1_rsaDict[resKey]) - float(allChains_rsaDict[resKey])) > differenceBetweenComplexAndMonomerASACriteria:
residueASAChangesCounter_chain_1 = residueASAChangesCounter_chain_1 + 1
elif resKey in chain_2_rsaDict:
if (float(chain_2_rsaDict[resKey]) - float(allChains_rsaDict[resKey])) > differenceBetweenComplexAndMonomerASACriteria:
residueASAChangesCounter_chain_2 = residueASAChangesCounter_chain_2 + 1
if residueASAChangesCounter_chain_1 >= minInterfaceResidueCriteria and residueASAChangesCounter_chain_2 >= minInterfaceResidueCriteria:
naccessSolutionUsageDictionary[chain_1_rsaFileDirectory][0] = 1
naccessSolutionUsageDictionary[chain_2_rsaFileDirectory][0] = 1
naccessSolutionUsageDictionary[allChains_rsaFileDirectory][0] = 1
interfaceNaccessResultText = '%s\t0\t%d\t%d\n' %(interfaceList[0], residueASAChangesCounter_chain_1, residueASAChangesCounter_chain_2)
naccessResultsQueue.put(interfaceNaccessResultText)
for naccessRSAFileDirectory in naccessSolutionUsageDictionary:
if naccessSolutionUsageDictionary[naccessRSAFileDirectory][0] == 0:
if not naccessRSAFileDirectory in rsaFileKeeperDict:
if os.path.exists(naccessRSAFileDirectory):
os.system('rm %s' %(naccessRSAFileDirectory))
if os.path.exists(naccessSolutionUsageDictionary[naccessRSAFileDirectory][1]):
os.system('rm %s' %(naccessSolutionUsageDictionary[naccessRSAFileDirectory][1]))
else:
for interfaceList in pdbDict['interfaces']:
interfaceNaccessResultText = '%s\t2\tPDB file is not present at %s.\n' %(interfaceList[0], pdbFileDirectory)
naccessResultsQueue.put(interfaceNaccessResultText)
def mainNaccessWithComplexPDBFileGenerator(interfaceListFile, allPDBFilesDirectory, generatedPDBFilesDirectory, fullPDBIDListFileDirectory, fullInterfaceNaccessResultsFileDirectory, interfaceNaccessResultsFileDirectory, naccessResultsFileDirectory, naccessRunFileDirectory, naccessLogFileDirectory, minInterfaceResidueCriteria, differenceBetweenComplexAndMonomerASACriteria, fullInterfaceListAfterNaccessFileDirectory, numberOfProcesses):
print('\n* GENERATE COMPLEX PDB FILES STARTED *\n')
print('Time stamp : %s' %(time.asctime()))
t1 = time.time()
if not os.path.exists(interfaceListFile):
sys.exit('\nThe %s does not exist\n' %(interfaceListFile))
if not os.path.exists(allPDBFilesDirectory):
sys.exit('\nThe %s path is not correct.\n' %(allPDBFilesDirectory))
if not os.path.exists(generatedPDBFilesDirectory):
os.system('mkdir %s' %(generatedPDBFilesDirectory))
if not os.path.exists(fullPDBIDListFileDirectory):
sys.exit('\nThe %s does not exist\n' %(fullPDBIDListFileDirectory))
if not os.path.exists(naccessResultsFileDirectory):
os.system('mkdir %s' %(naccessResultsFileDirectory))
if not os.path.exists(naccessRunFileDirectory):
sys.exit('\nThe %s does not exist\n' %(naccessRunFileDirectory))
fullNaccessResultDict = {}
if os.path.exists(fullInterfaceNaccessResultsFileDirectory):
fullInterfaceNaccessResultFile = open(fullInterfaceNaccessResultsFileDirectory, 'r')
for naccessResultEntry in fullInterfaceNaccessResultFile:
splittedNaccessResultEntry = naccessResultEntry.strip().split('\t')
fullNaccessResultDict[splittedNaccessResultEntry[0]] = splittedNaccessResultEntry[1:]
fullInterfaceNaccessResultFile.close()
interfaceList = open(interfaceListFile,'r')
numberOfCreatedPDBFile = 0
existedPDBFiles = 0
totalNumberOfInterface = 0
taskDict = {}
naccessResultsQueue = multiprocessing.Queue()
rsaFileKeeperDict = {}
for interface in interfaceList:
interface = interface.strip()
if interface == '':
continue
totalNumberOfInterface = totalNumberOfInterface + 1
pdbName, chain_1, chain_2, allChains, interfaceResidueStatusOfChains, interface, commonChainCounter = interfaceNameSorter(interface)
if len(chain_1) == 0:
interfaceNaccessResultText = '%s\t2\tChain 1 does not have any monomer.\n' %(interface)
naccessResultsQueue.put(interfaceNaccessResultText)
continue
if len(chain_2) == 0:
interfaceNaccessResultText = '%s\t2\tChain 2 does not have any monomer.\n' %(interface)
naccessResultsQueue.put(interfaceNaccessResultText)
continue
if commonChainCounter > 0:
interfaceNaccessResultText = '%s\t2\tChain 1 and Chain 2 have %d common monomers.\n' %(interface, commonChainCounter)
naccessResultsQueue.put(interfaceNaccessResultText)
continue
chain_1_name = '%s_%s' %(pdbName, chain_1)
chain_2_name = '%s_%s' %(pdbName, chain_2)
allChainsName = '%s_%s' %(pdbName, allChains)
pdbFileDirectoryWith_chain_1 = '%s/%s.pdb' %(generatedPDBFilesDirectory, chain_1_name)
pdbFileDirectoryWith_chain_2 = '%s/%s.pdb' %(generatedPDBFilesDirectory, chain_2_name)
pdbFileDirectoryWith_allChains = '%s/%s.pdb' %(generatedPDBFilesDirectory, allChainsName)
naccessRSAFileDirectoryWith_chain_1 = '%s/%s.rsa' %(naccessResultsFileDirectory, chain_1_name)
naccessRSAFileDirectoryWith_chain_2 = '%s/%s.rsa' %(naccessResultsFileDirectory, chain_2_name)
naccessRSAFileDirectoryWith_allChains = '%s/%s.rsa' %(naccessResultsFileDirectory, allChainsName)
naccessASAFileDirectoryWith_chain_1 = '%s/%s.asa' %(naccessResultsFileDirectory, chain_1_name)
naccessASAFileDirectoryWith_chain_2 = '%s/%s.asa' %(naccessResultsFileDirectory, chain_2_name)
naccessASAFileDirectoryWith_allChains = '%s/%s.asa' %(naccessResultsFileDirectory, allChainsName)
if interface in fullNaccessResultDict:
if int(fullNaccessResultDict[interface][0]) == 0:
if int(fullNaccessResultDict[interface][1]) < minInterfaceResidueCriteria or int(fullNaccessResultDict[interface][2]) < minInterfaceResidueCriteria:
interfaceNaccessResultText = '%s\t%s\t%s\t%s\n' %(interface, fullNaccessResultDict[interface][0], fullNaccessResultDict[interface][1], fullNaccessResultDict[interface][2])
naccessResultsQueue.put(interfaceNaccessResultText)
continue
else:
if (os.path.exists(naccessRSAFileDirectoryWith_chain_1) and os.path.exists(naccessASAFileDirectoryWith_chain_1)):
if (os.path.exists(naccessRSAFileDirectoryWith_chain_2) and os.path.exists(naccessASAFileDirectoryWith_chain_2)):
if (os.path.exists(naccessRSAFileDirectoryWith_allChains) and os.path.exists(naccessASAFileDirectoryWith_allChains)):
interfaceNaccessResultText = '%s\t%s\t%s\t%s\n' %(interface, fullNaccessResultDict[interface][0], fullNaccessResultDict[interface][1], fullNaccessResultDict[interface][2])
naccessResultsQueue.put(interfaceNaccessResultText)
rsaFileKeeperDict[naccessRSAFileDirectoryWith_chain_1] = 1
rsaFileKeeperDict[naccessRSAFileDirectoryWith_chain_2] = 1
rsaFileKeeperDict[naccessRSAFileDirectoryWith_allChains] = 1
continue
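# Group the remaining work per PDB id: each taskDict entry carries every chain
# combination that still needs a generated PDB file and naccess run, plus all
# interfaces of that PDB, so a worker handles one PDB at a time. Chain
# combinations whose .rsa and .asa files already exist are only counted in
# existedPDBFiles and are not regenerated.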
if pdbName in taskDict:
taskDict[pdbName]['interfaces'].append([interface, naccessRSAFileDirectoryWith_chain_1, naccessRSAFileDirectoryWith_chain_2, naccessRSAFileDirectoryWith_allChains, naccessASAFileDirectoryWith_chain_1, naccessASAFileDirectoryWith_chain_2, naccessASAFileDirectoryWith_allChains])
else:
taskDict[pdbName] = {}
taskDict[pdbName]['chains'] = {}
taskDict[pdbName]['interfaces'] = []
taskDict[pdbName]['interfaces'].append([interface, naccessRSAFileDirectoryWith_chain_1, naccessRSAFileDirectoryWith_chain_2, naccessRSAFileDirectoryWith_allChains, naccessASAFileDirectoryWith_chain_1, naccessASAFileDirectoryWith_chain_2, naccessASAFileDirectoryWith_allChains])
if not (os.path.exists(naccessRSAFileDirectoryWith_chain_1) and os.path.exists(naccessASAFileDirectoryWith_chain_1)):
if chain_1 in taskDict[pdbName]['chains']:
existedPDBFiles = existedPDBFiles + 1
else:
taskDict[pdbName]['chains'][chain_1] = [chain_1_name, pdbFileDirectoryWith_chain_1, naccessRSAFileDirectoryWith_chain_1, naccessASAFileDirectoryWith_chain_1]
numberOfCreatedPDBFile = numberOfCreatedPDBFile + 1
else:
existedPDBFiles = existedPDBFiles + 1
if not (os.path.exists(naccessRSAFileDirectoryWith_chain_2) and os.path.exists(naccessASAFileDirectoryWith_chain_2)):
if chain_2 in taskDict[pdbName]['chains']:
existedPDBFiles = existedPDBFiles + 1
else:
taskDict[pdbName]['chains'][chain_2] = [chain_2_name, pdbFileDirectoryWith_chain_2, naccessRSAFileDirectoryWith_chain_2, naccessASAFileDirectoryWith_chain_2]
numberOfCreatedPDBFile = numberOfCreatedPDBFile + 1
else:
existedPDBFiles = existedPDBFiles + 1
if not (os.path.exists(naccessRSAFileDirectoryWith_allChains) and os.path.exists(naccessASAFileDirectoryWith_allChains)):
if allChains in taskDict[pdbName]['chains']:
existedPDBFiles = existedPDBFiles + 1
else:
taskDict[pdbName]['chains'][allChains] = [allChainsName, pdbFileDirectoryWith_allChains, naccessRSAFileDirectoryWith_allChains, naccessASAFileDirectoryWith_allChains]
numberOfCreatedPDBFile = numberOfCreatedPDBFile + 1
else:
existedPDBFiles = existedPDBFiles + 1
interfaceList.close()
taskQueue_pdbDict = multiprocessing.JoinableQueue()
naccessErrorQueue = multiprocessing.Manager().Queue()
generateComplexWorkers = [Process(target=generateComplexPDBFilesAndRunNaccessWork, args=(taskQueue_pdbDict, naccessResultsQueue, allPDBFilesDirectory, minInterfaceResidueCriteria, differenceBetweenComplexAndMonomerASACriteria, naccessRunFileDirectory, rsaFileKeeperDict, naccessErrorQueue)) for i in range(numberOfProcesses)]
for tempWorkers in generateComplexWorkers:
tempWorkers.start()
interfaceTotal = 0
for pdbName in taskDict:
print('%s task added' %(pdbName))
interfaceTotal = interfaceTotal + len(taskDict[pdbName]['interfaces'])
taskQueue_pdbDict.put([pdbName, taskDict[pdbName]])
print('total interfaces in taskDict = %d' %(interfaceTotal))
for i in range(numberOfProcesses):
taskQueue_pdbDict.put([None,None])
taskQueue_pdbDict.join()
print('taskQueue_pdbDict join completed')
interfaceNaccessResultsFile = open(interfaceNaccessResultsFileDirectory, 'w')
interfaceNaccessResultDict = {}
tempTotalNumberOfInterface = totalNumberOfInterface
while tempTotalNumberOfInterface:
interfaceNaccessResultString = naccessResultsQueue.get()
splittedInterfaceNaccessResultString = interfaceNaccessResultString.strip().split('\t')
interfaceNaccessResultDict[splittedInterfaceNaccessResultString[0]] = [0, splittedInterfaceNaccessResultString[1], interfaceNaccessResultString]
interfaceNaccessResultsFile.write(interfaceNaccessResultString)
tempTotalNumberOfInterface = tempTotalNumberOfInterface - 1
interfaceNaccessResultsFile.close()
naccessErrorQueue.put('Done')
naccessLogFile = open(naccessLogFileDirectory, 'w')
while True:
naccessErrorEntry = naccessErrorQueue.get()
if naccessErrorEntry == 'Done':
break
naccessLogFile.write('%s\n' %(naccessErrorEntry))
naccessLogFile.close()
fullPDBIDDict = {}
fullPDBIDListFile = open(fullPDBIDListFileDirectory, 'r')
for pdbID in fullPDBIDListFile:
pdbID = pdbID.strip()
fullPDBIDDict[pdbID] = 1
fullPDBIDListFile.close()
tempFullInterfaceNaccessResultsFileDirectory = '%s_temp' %(fullInterfaceNaccessResultsFileDirectory)
tempFullInterfaceNaccessResultsFile = open(tempFullInterfaceNaccessResultsFileDirectory,'w')
tempFullInterfaceListAfterNaccessFileDirectory = '%s_temp' %(fullInterfaceListAfterNaccessFileDirectory)
tempFullInterfaceListAfterNaccessFile = open(tempFullInterfaceListAfterNaccessFileDirectory, 'w')
if os.path.exists(fullInterfaceNaccessResultsFileDirectory):
fullInterfaceNaccessResultsFile = open(fullInterfaceNaccessResultsFileDirectory,'r')
for naccessResultLine in fullInterfaceNaccessResultsFile:
splittedNaccessResultLine = naccessResultLine.strip().split('\t')
interfaceName = splittedNaccessResultLine[0]
splittedInterfaceName = interfaceName.split('_')
pdbID = splittedInterfaceName[0]
if pdbID in fullPDBIDDict:
if interfaceName in interfaceNaccessResultDict:
interfaceNaccessResultDict[interfaceName][0] = 1
if splittedNaccessResultLine[1] > interfaceNaccessResultDict[interfaceName][1]:
tempFullInterfaceNaccessResultsFile.write(interfaceNaccessResultDict[interfaceName][2])
if int(interfaceNaccessResultDict[interfaceName][1]) == 0:
splittedInterfaceNaccessResultString = interfaceNaccessResultDict[interfaceName][2].strip().split('\t')
if int(splittedInterfaceNaccessResultString[2]) >= minInterfaceResidueCriteria and int(splittedInterfaceNaccessResultString[3]) >= minInterfaceResidueCriteria:
tempFullInterfaceListAfterNaccessFile.write('%s\n' %(interfaceName))
else:
tempFullInterfaceNaccessResultsFile.write(naccessResultLine)
if int(splittedNaccessResultLine[1]) == 0:
if int(splittedNaccessResultLine[2]) >= minInterfaceResidueCriteria and int(splittedNaccessResultLine[3]) >= minInterfaceResidueCriteria:
tempFullInterfaceListAfterNaccessFile.write('%s\n' %(interfaceName))
else:
tempFullInterfaceNaccessResultsFile.write(naccessResultLine)
if int(splittedNaccessResultLine[1]) == 0:
if int(splittedNaccessResultLine[2]) >= minInterfaceResidueCriteria and int(splittedNaccessResultLine[3]) >= minInterfaceResidueCriteria:
tempFullInterfaceListAfterNaccessFile.write('%s\n' %(interfaceName))
fullInterfaceNaccessResultsFile.close()
for interfaceName in interfaceNaccessResultDict:
if interfaceNaccessResultDict[interfaceName][0] == 0:
tempFullInterfaceNaccessResultsFile.write(interfaceNaccessResultDict[interfaceName][2])
if int(interfaceNaccessResultDict[interfaceName][1]) == 0:
splittedInterfaceNaccessResultString = interfaceNaccessResultDict[interfaceName][2].strip().split('\t')
if int(splittedInterfaceNaccessResultString[2]) >= minInterfaceResidueCriteria and int(splittedInterfaceNaccessResultString[3]) >= minInterfaceResidueCriteria:
tempFullInterfaceListAfterNaccessFile.write('%s\n' %(interfaceName))
else:
for interfaceName in interfaceNaccessResultDict:
tempFullInterfaceNaccessResultsFile.write(interfaceNaccessResultDict[interfaceName][2])
if int(interfaceNaccessResultDict[interfaceName][1]) == 0:
splittedInterfaceNaccessResultString = interfaceNaccessResultDict[interfaceName][2].strip().split('\t')
if int(splittedInterfaceNaccessResultString[2]) >= minInterfaceResidueCriteria and int(splittedInterfaceNaccessResultString[3]) >= minInterfaceResidueCriteria:
tempFullInterfaceListAfterNaccessFile.write('%s\n' %(interfaceName))
tempFullInterfaceNaccessResultsFile.close()
tempFullInterfaceListAfterNaccessFile.close()
os.system('mv %s %s' %(tempFullInterfaceNaccessResultsFileDirectory, fullInterfaceNaccessResultsFileDirectory))
os.system('mv %s %s' %(tempFullInterfaceListAfterNaccessFileDirectory, fullInterfaceListAfterNaccessFileDirectory))
t2 = time.time()
print('\nTotal number of interfaces = %d\n' %(totalNumberOfInterface))
print('\nAlready existing PDB files = %d\n' %(existedPDBFiles))
print('\nNumber of created PDB files = %d\n' %(numberOfCreatedPDBFile))
print('\nElapsed time = %f seconds\n' %(t2-t1))
print('Time stamp : %s' %(time.asctime()))
print('\n* GENERATE COMPLEX PDB FILES COMPLETED *\n')
|
client.py
|
import argparse
import base64
import io
import json
import os
import re
import time
import wave
import zlib
from threading import Lock, Thread
from tkinter import *
from tkinter.ttk import *
import PIL.ImageTk as itk
import pyaudio
import pyttsx3
import requests
import speech_recognition as sr
from manage_audio import AudioSnippet
class GooseWindow(Thread):
def __init__(self, assets_dir):
Thread.__init__(self)
self.assets_dir = assets_dir
self._switch = False
self._speaking_lck = Lock()
self.start()
def run(self):
window = Tk()
window.title("Anserini")
window.resizable(0, 0)
img_width, img_height = (213, 365)
self.canvas = Canvas(width=img_width + 64, height=img_height + 64, bg="white")
self.canvas.pack()
self._init_resources()
self.draw_goose("inactive")
window.mainloop()
def _init_resources(self, names=["awake", "inactive", "open"]):
self.images = {}
for name in names:
file = os.path.join(self.assets_dir, "anserini-{}.png".format(name))
self.images[name] = itk.PhotoImage(file=file)
def _open_mouth(self, length_secs):
with self._speaking_lck:
self.draw_goose("open")
time.sleep(length_secs)
self.draw_goose("awake")
def open_mouth(self, length_secs):
Thread(target=self._open_mouth, args=(length_secs,)).start()
def draw_goose(self, name):
self.canvas.create_image(
32,
32,
image=self.images[name],
anchor=NW,
tags="image1" if self._switch else "image2",
)
self.canvas.delete("image2" if self._switch else "image1")
self._switch = not self._switch
def clean_text(text):
if not text:
return ""
pattern = re.compile(r"^([A-Za-z0-9,\.]+(\-*[A-Za-z0-9,\.]+)*)+$")
words = []
for tok in text.split():
if re.match(pattern, tok):
words.append(tok)
return " ".join(words).replace(" .", ".")
class WatsonApi(object):
def __init__(self, username, password):
self.auth = requests.auth.HTTPBasicAuth(username, password)
def fetch_tts(self, text):
ep = "https://stream.watsonplatform.net/text-to-speech/api/v1/synthesize"
response = requests.get(
ep,
params=dict(accept="audio/wav", text=text, voice="en-US_AllisonVoice"),
stream=True,
auth=self.auth,
)
buf = io.BytesIO()
for chunk in response.iter_content(chunk_size=1024):
if chunk:
buf.write(chunk)
buf.seek(0)
return buf
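# Stream a WAV buffer to the sound card in 512-frame chunks; when a callback is
# given, report the RMS amplitude of each chunk so the caller can animate the
# goose mouth while the audio plays.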
def play_audio(data, amplitude_cb=None):
with wave.open(data) as f:
audio = pyaudio.PyAudio()
stream = audio.open(
format=audio.get_format_from_width(f.getsampwidth()),
channels=f.getnchannels(),
rate=f.getframerate(),
output=True,
)
data = f.readframes(512)
while data:
if amplitude_cb:
amplitude = AudioSnippet(data).amplitude_rms()
amplitude_cb(amplitude)
stream.write(data)
data = f.readframes(512)
stream.stop_stream()
stream.close()
audio.terminate()
class Client(object):
def __init__(self, server_endpoint, qa_endpoint, goose_window, watson_api=None):
self.watson_api = watson_api
self.server_endpoint = server_endpoint
self.qa_endpoint = qa_endpoint
self.chunk_size = 16000
self.recognizer = sr.Recognizer()
self.goose_window = goose_window
if not watson_api:
self._tts = pyttsx3.init()
self._tts.connect("started-word", self._make_tts_cb())
def say_text(self, text):
if self.watson_api:
data = self.watson_api.fetch_tts(text)
play_audio(data, amplitude_cb=self._make_tts_cb())
else:
self._tts.say(text)
self._tts.runAndWait()
def _make_tts_cb(self):
def on_start(name, location, length):
self.goose_window.open_mouth(length / 40)
def on_amplitude(amplitude):
if amplitude > 0.05:
self.goose_window.draw_goose("open")
else:
self.goose_window.draw_goose("awake")
return on_amplitude if self.watson_api else on_start
def contains_command(self, data):
data = base64.b64encode(zlib.compress(data))
response = requests.post(
"{}/listen".format(self.server_endpoint),
json=dict(wav_data=data.decode(), method="command_tagging"),
)
return json.loads(response.content.decode())["contains_command"]
def query_qa(self, question):
response = requests.post(
"{}/answer".format(self.qa_endpoint),
json=dict(question=question, num_hits=1),
)
response = json.loads(response.content.decode())
try:
return response["answers"][0]["passage"]
except KeyError:
return None
def _recognize_speech(self):
self._stop_listening()
try:
with sr.Microphone() as source:
audio = self.recognizer.listen(source)
return self.recognizer.recognize_google(audio)
except sr.UnknownValueError:
return None
finally:
self._start_listening()
def _start_listening(self):
self.audio = pyaudio.PyAudio()
self.stream = self.audio.open(
format=pyaudio.paInt16,
channels=1,
rate=16000,
input=True,
frames_per_buffer=self.chunk_size,
)
def _stop_listening(self):
self.stream.stop_stream()
self.stream.close()
self.audio.terminate()
def send_retarget_data(self, data, positive=True):
data = base64.b64encode(zlib.compress(data))
requests.post(
"{}/data".format(self.server_endpoint),
json=dict(wav_data=data.decode(), positive=positive),
)
def _retarget_negative(self, n_minutes=1):
if n_minutes == 1:
self.say_text("Please speak random words for the next minute.")
else:
self.say_text(
"Please speak random words for the next {} minutes.".format(n_minutes)
)
t0 = 0
snippet = AudioSnippet()
while t0 < n_minutes * 60:
snippet.append(AudioSnippet(self.stream.read(self.chunk_size)))
t0 += self.chunk_size / 16000
for chunk in snippet.chunk(32000, 16000):
self.send_retarget_data(chunk.byte_data, positive=False)
def _retarget_positive(self, n_times=10):
self.say_text("Please speak the new command {} times.".format(n_times))
self.goose_window.draw_goose("inactive")
n_said = 0
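# Simple amplitude-gated recording: read fixed-size chunks from the microphone
# stream, treat a chunk whose RMS amplitude exceeds 0.01 as speech, accumulate
# consecutive loud chunks into one snippet, trim it to a fixed-length window
# and upload it to the server as a positive retargeting example.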
while n_said < n_times:
self.goose_window.draw_goose("inactive")
snippet = AudioSnippet(self.stream.read(self.chunk_size))
tot_snippet = AudioSnippet()
while snippet.amplitude_rms() > 0.01:
if not tot_snippet.byte_data:
self.goose_window.draw_goose("awake")
if n_said == n_times // 2 and n_said >= 5:
self.say_text("Only {} times left.".format(n_times - n_said))
elif n_said == n_times - 5:
self.say_text("Only 5 more times.")
n_said += 1
tot_snippet.append(snippet)
tot_snippet.append(AudioSnippet(self.stream.read(self.chunk_size)))
snippet = AudioSnippet(self.stream.read(self.chunk_size))
if tot_snippet.byte_data:
tot_snippet.trim_window(16000 * 2)
self.send_retarget_data(tot_snippet.byte_data)
def _do_retarget(self):
requests.post("{}/train".format(self.server_endpoint))
self.say_text("Started training your custom keyword")
while True:
time.sleep(5)
response = requests.get("{}/train".format(self.server_endpoint)).content
if not json.loads(response.decode())["in_progress"]:
self.say_text("Completed keyword retargeting!")
break
def start_retarget(self):
print("Follow the goose!")
self._start_listening()
requests.delete("{}/data".format(self.server_endpoint))
self._retarget_positive()
# self._retarget_negative()
self._do_retarget()
def start_live_qa(self):
self._start_listening()
print("Speak Anserini when ready!")
buf = [self.stream.read(self.chunk_size), self.stream.read(self.chunk_size)]
while True:
if self.contains_command(b"".join(buf)):
self.goose_window.draw_goose("awake")
print("Detected Anserini! Ask away...")
question = self._recognize_speech()
print('You asked, "{}"'.format(question))
answer = clean_text(self.query_qa(question))
if answer:
print("Answer: {}".format(answer))
self.say_text(answer)
else:
print("No answer available!")
buf = [
self.stream.read(self.chunk_size),
self.stream.read(self.chunk_size),
]
self.goose_window.draw_goose("inactive")
continue
buf[0] = buf[1]
buf[1] = self.stream.read(self.chunk_size)
def start_client(flags):
file_dir = os.path.dirname(os.path.realpath(__file__))
goose_window = GooseWindow(file_dir)
watson_api = None
if flags.watson_username and flags.watson_password:
watson_api = WatsonApi(flags.watson_username, flags.watson_password)
client = Client(flags.server_endpoint, flags.qa_endpoint, goose_window, watson_api)
if flags.mode == "query":
client.start_live_qa()
elif flags.mode == "retarget":
client.start_retarget()
def main():
parser = argparse.ArgumentParser()
parser.add_argument(
"--server-endpoint",
type=str,
default="http://127.0.0.1:16888",
help="The endpoint to use",
)
parser.add_argument(
"--mode",
type=str,
default="query",
choices=["retarget", "query"],
help="The mode to run the client in.",
)
parser.add_argument(
"--qa-endpoint", type=str, default="http://dragon00.cs.uwaterloo.ca:80"
)
parser.add_argument(
"--watson-username", type=str, default="", help="If supplied, uses Watson's TTS"
)
parser.add_argument(
"--watson-password", type=str, default="", help="If supplied, uses Watson's TTS"
)
flags, _ = parser.parse_known_args()
start_client(flags)
if __name__ == "__main__":
main()
|
nns.py
|
import os,sys,socket,re
from time import sleep
from threading import Thread,Lock
from queue import Queue
socket.setdefaulttimeout(5)
# read the topology file into a nested dict: {node: {neighbour: link_cost}}
def readpath2dict(filename):
with open(filename, 'r') as file:
c = file.read()
lines=re.split('\r?\n',c)
res={}
minlengthdict={1:0}
now=0
for i in lines:
i=i.strip()
if i:
if i and i.startswith('Node'):
now = int(i.split()[1])
res[now]={}
else:
res[now][int(i.split()[0])]=int(i.split()[1])
minlengthdict[int(i.split()[0])]=float('inf')
return res
filename=sys.argv[2]
# use the file
path=readpath2dict(filename)
node=int(sys.argv[1])
host = '127.0.0.1'
port=1000+node
node2socket={}
s=socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.bind(('127.0.0.1',port))
updateQueue=Queue()
# three locks protecting the shared state: node2socket, distance and rout
lock1=Lock()
lock2=Lock()
lock3=Lock()
sametime=-3
lastroutine={}
if node in path:
nbr = path[node]
else:
nbr={}
distance=dict(nbr)
rout={}
for i in nbr:
rout[i]=i
# receive
def actRecv(s):
recvThread(s)
def receive():
while True:
s.listen(0)
try:
conn, addr = s.accept()
except:
continue
res = conRecv(conn)
thisnode = protocol(res)
lock1.acquire()
node2socket[thisnode] = conn
lock1.release()
# hand the connection to a dedicated thread that keeps receiving
Thread(target=recvThread, args=(conn,)).start()
Thread(target=receive).start()
def recvThread(conn):
while True:
content = conRecv(conn)
# deal with message to protocol
protocol(content)
# receive message
def conRecv(conn):
alll = b''
last = b''
try:
while True:
if last != b'\n':
last = conn.recv(1)
alll += last
else:
nxt = conn.recv(1)
if nxt == b'\n':
break
else:
alll += nxt
last = b''
except:
pass
return alll.decode().split('\n')
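# Wire protocol: each message is a group of newline-separated lines terminated
# by a blank line. 'ITS <node>' names the sender, 'AWY <cost>' gives the direct
# link cost to that sender, and a 'KNW <dest>' / 'DIS <cost>' pair advertises a
# destination the sender can reach and its distance.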
def protocol(mess: list):
n=0
try:
for i in mess:
if i.startswith('ITS'):
n+=1
nodenum=int(i.split()[1])
if i.startswith('AWY'):
lock2.acquire()
distance[nodenum]=int(i.split()[1])
lock2.release()
lock3.acquire()
rout[nodenum]=nodenum
lock3.release()
if i.startswith('KNW'):
knwnode=int(i.split()[1])
if i.startswith('DIS'):
knwdis=int(i.split()[1])
if knwnode==node:
continue
# Core distance-vector update: adopt the route through this neighbour if it is shorter than (or missing from) the current table
if (knwnode in distance and distance[knwnode]>distance[nodenum]+knwdis) or (not knwnode in distance):
lock2.acquire()
distance[knwnode] = distance[nodenum] + knwdis
lock2.release()
lock3.acquire()
rout[knwnode]=nodenum
lock3.release()
return nodenum
except:
return
def getPort(a):
return int(a)+1000
def makeaddr(i):
return ('127.0.0.1',getPort(i))
def sendto(n: int, mess: list):
ns=node2socket[n]
nm='\n'.join(mess)
nm+='\n\n'
try:
ns.sendall(nm.encode())
except:
pass
# wait for all server up
sleep(1)
# start up
for i in nbr:
ns=socket.socket(socket.AF_INET, socket.SOCK_STREAM)
ns.connect(makeaddr(i))
lock1.acquire()
node2socket[i]=ns
lock1.release()
Thread(target=actRecv,args=(ns,)).start()
sendto(i, ['ITS ' + str(node), 'AWY ' + str(nbr[i])])
# send all distance
while True:
for i in dict(node2socket):
for n in dict(distance):
sendto(i,['ITS '+str(node),'KNW '+str(n),'DIS '+str(distance[n])])
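# Convergence check: keep re-advertising the full distance table; once it has
# stayed unchanged for several consecutive rounds the node assumes the routes
# have converged, stops flooding and writes its routing table to <node>.txt.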
if not lastroutine:
lastroutine=dict(distance)
else:
if lastroutine==distance:
sametime+=1
if sametime>3:
break
else:
lastroutine = dict(distance)
# wait for message sync
sleep(max(0.02*6,0.02*len(distance)))
# to file
output=''
for i in distance:
output+="go to "+str(i)+", pass through "+str(rout[i])+", length is "+str(distance[i])+'\n'
with open(str(node)+'.txt','w+') as file:
file.write(output)
# exit
os._exit(0)
|
scanrun.py
|
"""
scanrun.py
Copyright 2007 Andres Riancho
This file is part of w3af, http://w3af.org/ .
w3af is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation version 2 of the License.
w3af is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with w3af; if not, write to the Free Software
Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
"""
import gtk
import gobject
import urllib2
import sys
import re
import Queue
import webkit
import webbrowser
from multiprocessing.dummy import Process, Event
from markdown import markdown
from w3af.core.ui.gui import httpLogTab, entries
from w3af.core.ui.gui.reqResViewer import ReqResViewer
from w3af.core.ui.gui.kb.kbtree import KBTree
from w3af.core.ui.gui.tools.fuzzy_requests import FuzzyRequests
from w3af.core.ui.gui.tools.manual_requests import ManualRequests
from w3af.core.ui.gui.misc.xdot_wrapper import WrappedDotWidget
from w3af.core.data.db.history import HistoryItem
from w3af.core.data.kb.info import Info
from w3af.core.data.kb.kb_observer import KBObserver
from w3af.core.controllers.exceptions import DBException
import w3af.core.data.kb.knowledge_base as kb
RECURSION_LIMIT = sys.getrecursionlimit() - 5
RECURSION_MSG = "Recursion limit: can't go deeper"
DB_VULN_NOT_FOUND = markdown('The detailed description for this vulnerability'
' is not available in our database, please'
' contribute to the open source'
' [vulndb/data project](https://github.com/vulndb/data)'
' to improve w3af\'s output.')
FILE = 'file:///'
class FullKBTree(KBTree):
def __init__(self, w3af, kbbrowser, ifilter):
"""A tree showing all the info.
This also gives a long description of the element when clicked.
:param kbbrowser: The KB Browser
:param ifilter: The filter to show which elements
:author: Facundo Batista <facundobatista =at= taniquetil.com.ar>
"""
super(FullKBTree, self).__init__(w3af, ifilter,
'Knowledge Base', strict=False)
self._historyItem = HistoryItem()
self.kbbrowser = kbbrowser
self.connect('cursor-changed', self._show_desc)
self.show()
def _create_reference_list(self, info):
"""
:return: A list with references for this info instance in markdown
format so I can add them to the description.
"""
if not info.get_references():
return ''
output = '\n\n### References\n'
for ref in info.get_references():
output += ' * [%s](%s)\n' % (ref.title, ref.url)
return output
def _show_desc(self, tv):
"""Shows the description in the right section
:param tv: the treeview.
"""
(path, column) = tv.get_cursor()
if path is None:
return
instance = self.get_instance(path)
if not isinstance(instance, Info):
return
summary = instance.get_desc()
self.kbbrowser.explanation.set_text(summary)
self.kbbrowser.vuln_notebook.set_current_page(0)
if instance.has_db_details():
desc_markdown = instance.get_long_description()
desc_markdown += '\n\n### Fix guidance\n'
desc_markdown += instance.get_fix_guidance()
desc_markdown += self._create_reference_list(instance)
desc = markdown(desc_markdown)
self.kbbrowser.description.load_html_string(desc, FILE)
else:
self.kbbrowser.description.load_html_string(DB_VULN_NOT_FOUND, FILE)
if not instance.get_id():
self.clear_request_response_viewer()
return
#
# We have two different cases:
#
# 1) The object is related to ONLY ONE request / response
# 2) The object is related to MORE THAN ONE request / response
#
# For 1), we show the classic view, and for 2) we show the classic
# view with a "page control"
#
# Work:
#
if len(instance.get_id()) == 1:
# There is ONLY ONE id related to the object
# This is 1)
self.kbbrowser.pagesControl.deactivate()
self.kbbrowser.page_change(0)
self.kbbrowser.pagesControl.hide()
self.kbbrowser.title0.hide()
search_id = instance.get_id()[0]
try:
history_item = self._historyItem.read(search_id)
except DBException:
msg = _('The HTTP data with id %s is not inside the database.')
self._show_message(_('Error'), msg % search_id)
self.clear_request_response_viewer()
return
# Error handling for .trace file problems
# https://github.com/andresriancho/w3af/issues/1174
try:
# These lines will trigger the code that reads the .trace file
# from disk and if they aren't there an exception will rise
history_item.request
history_item.response
except IOError, ioe:
self._show_message(_('Error'), str(ioe))
return
# Now we know that these two lines will work and we won't trigger
# https://github.com/andresriancho/w3af/issues/1174
self.kbbrowser.rrV.request.show_object(history_item.request)
self.kbbrowser.rrV.response.show_object(history_item.response)
# Don't forget to highlight if necessary
severity = instance.get_severity()
for s in instance.get_to_highlight():
self.kbbrowser.rrV.response.highlight(s, severity)
else:
# There are MORE THAN ONE ids related to the object
# This is 2)
self.kbbrowser.pagesControl.show()
self.kbbrowser.title0.show()
self.kbbrowser.req_res_ids = instance.get_id()
num_ids = len(instance.get_id())
self.kbbrowser.pagesControl.activate(num_ids)
self.kbbrowser.page_change(0)
self.kbbrowser.rrV.set_sensitive(True)
def clear_request_response_viewer(self):
self.kbbrowser.rrV.request.clear_panes()
self.kbbrowser.rrV.response.clear_panes()
self.kbbrowser.rrV.set_sensitive(False)
def _show_message(self, title, msg, gtkLook=gtk.MESSAGE_WARNING):
"""Show message to user as GTK dialog."""
dlg = gtk.MessageDialog(None, gtk.DIALOG_MODAL, gtkLook,
gtk.BUTTONS_OK, msg)
dlg.set_title(title)
dlg.run()
dlg.destroy()
class KBBrowser(entries.RememberingHPaned):
"""Show the Knowledge Base, with the filter and the tree.
:author: Facundo Batista <facundobatista =at= taniquetil.com.ar>
"""
def __init__(self, w3af):
super(KBBrowser, self).__init__(w3af, "pane-kbbrowser", 250)
# Internal variables:
# Save the request and response ids to be used in the page control
self.req_res_ids = []
# This is to search the DB and print the different request and responses
# as they are requested from the page control, "page_change" method.
self._historyItem = HistoryItem()
# the filter to the tree
filterbox = gtk.HBox()
self.filters = {}
def make_but(label, signal, initial):
but = gtk.CheckButton(label)
but.set_active(initial)
but.connect('clicked', self.type_filter, signal)
self.filters[signal] = initial
but.show()
filterbox.pack_start(but, expand=False, fill=False, padding=2)
make_but('Vulnerability', 'vuln', True)
make_but('Information', 'info', True)
filterbox.show()
# the kb tree
self.kbtree = FullKBTree(w3af, self, self.filters)
# all in the first pane
kbtree_scrollwin = gtk.ScrolledWindow()
kbtree_scrollwin.set_policy(gtk.POLICY_AUTOMATIC, gtk.POLICY_AUTOMATIC)
kbtree_scrollwin.add(self.kbtree)
kbtree_scrollwin.show()
# the filter and tree box
treebox = gtk.VBox()
treebox.pack_start(filterbox, expand=False, fill=False)
treebox.pack_start(kbtree_scrollwin)
treebox.show()
# the vulnerability information
summary = self.get_notebook_summary(w3af)
description = self.get_notebook_description()
self.vuln_notebook = gtk.Notebook()
self.vuln_notebook.append_page(summary, gtk.Label('Summary'))
self.vuln_notebook.append_page(description, gtk.Label('Description'))
self.vuln_notebook.set_current_page(0)
self.vuln_notebook.show()
# pack & show
self.pack1(treebox)
self.pack2(self.vuln_notebook)
self.show()
def get_notebook_description(self):
# Make the HTML viewable area
self.description = webkit.WebView()
# Disable the plugins for the webview
ws = self.description.get_settings()
ws.set_property('enable-plugins', False)
self.description.set_settings(ws)
self.description.show()
desc_scroll = gtk.ScrolledWindow()
desc_scroll.add(self.description)
desc_scroll.set_policy(gtk.POLICY_AUTOMATIC, gtk.POLICY_AUTOMATIC)
desc_scroll.show()
return desc_scroll
def get_notebook_summary(self, w3af):
summary_tv = gtk.TextView()
summary_tv.set_editable(False)
summary_tv.set_cursor_visible(False)
summary_tv.set_wrap_mode(gtk.WRAP_WORD)
self.explanation = summary_tv.get_buffer()
summary_tv.show()
summary_scrollwin = gtk.ScrolledWindow()
summary_scrollwin.set_policy(gtk.POLICY_AUTOMATIC, gtk.POLICY_AUTOMATIC)
summary_scrollwin.add_with_viewport(summary_tv)
summary_scrollwin.show()
# The request/response viewer
self.rrV = ReqResViewer(w3af, withAudit=False)
self.rrV.set_sensitive(False)
# Create the title label to show the request id
self.title0 = gtk.Label()
self.title0.show()
# Create page changer to handle info/vuln objects that have MORE THAN
# ONE related request/response
self.pagesControl = entries.PagesControl(w3af, self.page_change, 0)
self.pagesControl.deactivate()
self.page_change(0)
center_box = gtk.HBox()
center_box.pack_start(self.pagesControl, True, False)
# Title, request/response and paginator all go together in a vbox
http_data_vbox = gtk.VBox()
http_data_vbox.pack_start(self.title0, False, True)
http_data_vbox.pack_start(self.rrV, True, True)
http_data_vbox.pack_start(center_box, False, False)
# and show
http_data_vbox.show()
self.pagesControl.show()
center_box.show()
# The summary and http data go in a vbox too
summary_data_vbox = entries.RememberingVPaned(w3af,
'pane-kbbexplainview',
100)
summary_data_vbox.pack1(summary_scrollwin)
summary_data_vbox.pack2(http_data_vbox)
summary_data_vbox.show()
return summary_data_vbox
def type_filter(self, button, ptype):
"""Changes the filter of the KB in the tree."""
self.filters[ptype] = button.get_active()
self.kbtree.set_filter(self.filters)
def page_change(self, page):
"""
Handle the page change in the page control.
"""
# Only do something if I have a list of request and responses
if self.req_res_ids:
request_id = self.req_res_ids[page]
try:
historyItem = self._historyItem.read(request_id)
except:
# the request brought problems
self.rrV.request.clear_panes()
self.rrV.response.clear_panes()
self.rrV.set_sensitive(False)
self.title0.set_markup("<b>Error</b>")
else:
self.title0.set_markup("<b>Id: %d</b>" % request_id)
self.rrV.request.show_object(historyItem.request)
self.rrV.response.show_object(historyItem.response)
self.rrV.set_sensitive(True)
class URLsGraph(gtk.VBox):
"""Graph the URLs that the system discovers.
:author: Facundo Batista <facundobatista =at= taniquetil.com.ar>
"""
def __init__(self, w3af):
super(URLsGraph, self).__init__()
self.w3af = w3af
self.toolbox = gtk.HBox()
b = entries.SemiStockButton("", gtk.STOCK_ZOOM_IN, 'Zoom In')
b.connect('clicked', self._zoom, "in")
self.toolbox.pack_start(b, False, False)
b = entries.SemiStockButton("", gtk.STOCK_ZOOM_OUT, 'Zoom Out')
b.connect('clicked', self._zoom, "out")
self.toolbox.pack_start(b, False, False)
b = entries.SemiStockButton("", gtk.STOCK_ZOOM_FIT, 'Zoom Fit')
b.connect('clicked', self._zoom, "fit")
self.toolbox.pack_start(b, False, False)
b = entries.SemiStockButton("", gtk.STOCK_ZOOM_100, 'Zoom 100%')
b.connect('clicked', self._zoom, "100")
self.toolbox.pack_start(b, False, False)
self.pack_start(self.toolbox, False, False)
self.toolbox.set_sensitive(False)
# no graph yet
self.widget = gtk.Label(_("No info yet"))
self.widget.set_sensitive(False)
self.nodos_code = []
self._somethingnew = False
self.pack_start(self.widget)
self.show_all()
gobject.timeout_add(500, self._draw_start)
def _zoom(self, widg, what):
f = getattr(self.widget, "on_zoom_" + what)
f(None)
def _draw_start(self):
if not self._somethingnew:
return True
# let's draw!
q = Queue.Queue()
evt = Event()
th = Process(target=self._draw_real, args=(q, evt), name='GTKDraw')
th.start()
gobject.timeout_add(500, self._draw_end, q, evt)
return False
def _draw_real(self, q, evt):
new_widget = WrappedDotWidget()
self._somethingnew = False
dotcode = "graph G {%s}" % "\n".join(self.nodos_code)
try:
new_widget.set_dotcode(dotcode)
except ValueError, ve:
msg = ('A ValueError exception with message "%s" was found while'
' trying to render a new dotcode. Please create a new'
' bug report at %s including the following info:\n\n%s')
new_issue = 'https://github.com/andresriancho/w3af/issues/new'
args = (ve, new_issue, dotcode)
raise ValueError(msg % args)
else:
evt.set()
q.put(new_widget)
def _draw_end(self, q, evt):
if not evt:
return True
new_widget = q.get()
new_widget.zoom_to_fit()
# put that drawing in the widget
self.remove(self.widget)
self.pack_start(new_widget)
self.widget = new_widget
new_widget.show()
self.toolbox.set_sensitive(True)
gobject.timeout_add(500, self._draw_start)
def limit_node(self, parent, node, name):
# I have to escape the quotes, because I don't want a "dot code
# injection". This was sourceforge bug #2675512
node = str(node).replace('"', '\\"')
name = str(name).replace('"', '\\"')
self.nodos_code.append('"%s" [label="%s"]' % (node, name))
if parent:
parent = str(parent).replace('"', '\\"')
nline = '"%s" -- "%s"' % (parent, node)
self.nodos_code.append(nline)
self._somethingnew = True
def new_node(self, parent, node, name, isLeaf):
# I have to escape the quotes, because I don't want a "dot code
# injection" This was bug #2675512
node = str(node).replace('"', '\\"')
name = str(name).replace('"', '\\"')
if not isLeaf:
self.nodos_code.append('"%s" [shape=box]' % node)
self.nodos_code.append('"%s" [label="%s"]' % (node, name))
if parent:
parent = str(parent).replace('"', '\\"')
nline = '"%s" -- "%s"' % (parent, node)
self.nodos_code.append(nline)
self._somethingnew = True
HEAD_TO_SEND = """\
GET %s HTTP/1.0
Host: %s
User-Agent: w3af.org
"""
class URLObserver(KBObserver):
def __init__(self, urls_tree):
self.urls_tree = urls_tree
def add_url(self, url):
self.urls_tree.urls.put(url)
class URLsTree(gtk.TreeView):
"""Show the URLs that the system discovers.
:author: Facundo Batista <facundobatista =at= taniquetil.com.ar>
"""
def __init__(self, w3af, grapher):
self.w3af = w3af
self.grapher = grapher
# simple empty Tree Store
self.treestore = gtk.TreeStore(str)
gtk.TreeView.__init__(self, self.treestore)
self.connect('button-release-event', self.popup_menu)
self.connect('button-press-event', self._doubleClick)
# the TreeView column
tvcolumn = gtk.TreeViewColumn('URLs')
tvcolumn.set_sort_column_id(0)
cell = gtk.CellRendererText()
tvcolumn.pack_start(cell, True)
tvcolumn.add_attribute(cell, "text", 0)
self.append_column(tvcolumn)
# this tree structure will keep the parents where to insert nodes
self.treeholder = {}
# get the queue and go live
self.urls = Queue.Queue()
kb.kb.add_observer(URLObserver(self))
gobject.timeout_add(250, self.add_url)
self.show()
def _doubleClick(self, widg, event):
"""If double click, expand/collapse the row."""
if event.type == gtk.gdk._2BUTTON_PRESS:
path = self.get_cursor()[0]
# This "if path" fixed bug #2205544
if path:
if self.row_expanded(path):
self.collapse_row(path)
else:
self.expand_row(path, False)
def add_url(self):
"""Adds periodically the new URLs to the tree.
:return: True to keep being called by gobject, False when it's done.
"""
try:
url = self.urls.get_nowait()
except Queue.Empty:
pass
else:
path = url.get_path()
params = url.get_params_string()
query = str(url.querystring)
fragment = url.get_fragment()
scheme = url.get_protocol()
netloc = url.get_domain()
ini = "%s://%s" % (scheme, netloc)
end = ""
if params:
end += ";" + params
if query:
end += "?" + query
if fragment:
end += "#" + fragment
splittedPath = re.split('(\\\\|/)', path)
nodes = []
for i in splittedPath:
if i not in ['\\', '/']:
nodes.append(i)
nodes.insert(0, ini)
nodes.append(end)
parts = [x for x in nodes if x]
self._insertNodes(None, parts, self.treeholder, 1)
# TODO: Automatically sort after each insertion
# Order the treeview
self.treestore.sort_column_changed()
return True
def _insertNodes(self, parent, parts, holder, rec_cntr):
"""Insert a new node in the tree.
It's recursive: it walks the path of nodes, each node being a part of
the URL, checking at every step whether it needs to create a new node
or just descend into an existing one.
:param parent: the parent to insert the node
:param parts: the rest of the parts to walk the path
:param holder: the dict in which already-existing nodes are stored.
:param rec_cntr: the recursion counter
:return: The new or modified holder
"""
if not parts:
return {}
node = parts[0]
rest = parts[1:]
if rec_cntr >= RECURSION_LIMIT:
newtreenode = self.treestore.append(parent, [RECURSION_MSG])
self.grapher.limit_node(parent, newtreenode, RECURSION_MSG)
return holder
if node in holder:
# already exists, use it if have more nodes
(treenode, children) = holder[node]
return self._insertNodes(treenode, rest, children, rec_cntr + 1)
# does not exist, create it
newtreenode = self.treestore.append(parent, [node])
self.grapher.new_node(parent, newtreenode, node, not rest)
newholdnode = self._insertNodes(newtreenode, rest, {}, rec_cntr + 1)
holder[node] = (newtreenode, newholdnode)
return holder
def popup_menu(self, tv, event):
"""Shows a menu when you right click on a URL in the treeview.
:param tv: the treeview.
:param event: The GTK event
"""
if event.button != 3:
return
(path, column) = tv.get_cursor()
# Is it over a URL?
if path is None:
return
# Get the information about the click
fullurl = "/".join(
self.treestore[path[:i + 1]][0] for i in range(len(path)))
host = urllib2.urlparse.urlparse(fullurl)[1]
sendtext = HEAD_TO_SEND % (fullurl, host)
gm = gtk.Menu()
e = gtk.ImageMenuItem(_("Open with Manual Request Editor..."))
image = gtk.Image()
image.set_from_stock(gtk.STOCK_INDEX, gtk.ICON_SIZE_MENU)
e.set_image(image)
e.connect('activate', self._send_request, sendtext,
ManualRequests)
gm.append(e)
image = gtk.Image()
image.set_from_stock(gtk.STOCK_PROPERTIES, gtk.ICON_SIZE_MENU)
e = gtk.ImageMenuItem(_("Open with Fuzzy Request Editor..."))
e.set_image(image)
e.connect('activate', self._send_request, sendtext,
FuzzyRequests)
gm.append(e)
e = gtk.ImageMenuItem(_("Open with default browser..."))
e.connect('activate', self._open_browser, fullurl)
gm.append(e)
gm.show_all()
gm.popup(None, None, None, event.button, event.time)
def _open_browser(self, widg, text):
"""Opens the text with an external browser."""
webbrowser.open_new_tab(text)
def _send_request(self, widg, text, func):
func(self.w3af, (text, ""))
class ScanRunBody(gtk.Notebook):
"""The whole body of scan run.
:author: Facundo Batista <facundobatista =at= taniquetil.com.ar>
"""
def __init__(self, w3af):
super(ScanRunBody, self).__init__()
self.w3af = w3af
self.helpChapter = ("Browsing_the_Knowledge_Base",
"Site_structure", "Requests_and_Responses")
self.connect("switch-page", self.changed_page)
# KB Browser
# this one does not go inside a scrolled window, because that's handled
# in each widget of itself
kbbrowser = KBBrowser(w3af)
l = gtk.Label(_("KB Browser"))
self.append_page(kbbrowser, l)
# urlstree, the tree
pan = entries.RememberingHPaned(w3af, "pane-urltreegraph")
urlsgraph = URLsGraph(w3af)
urlstree = URLsTree(w3af, urlsgraph)
scrollwin1 = gtk.ScrolledWindow()
scrollwin1.set_policy(gtk.POLICY_AUTOMATIC, gtk.POLICY_AUTOMATIC)
scrollwin1.add_with_viewport(urlstree)
scrollwin1.show()
pan.pack1(scrollwin1)
pan.pack2(urlsgraph)
pan.show()
l = gtk.Label("URLs")
self.append_page(pan, l)
# Request Response navigator
httplog = httpLogTab.httpLogTab(w3af)
l = gtk.Label(_("Request/Response navigator"))
self.append_page(httplog, l)
self.show()
def changed_page(self, notebook, page, page_num):
"""Changed the page in the Notebook."""
self.w3af.helpChapters["scanrun"] = self.helpChapter[page_num]
|
pyminer.py
|
#!/usr/bin/python
#
# Copyright (c) 2011 The Bitcoin developers
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
import time
import json
import pprint
import hashlib
import struct
import re
import base64
import httplib
import sys
from multiprocessing import Process
ERR_SLEEP = 15
MAX_NONCE = 1000000L
settings = {}
pp = pprint.PrettyPrinter(indent=4)
class BitcoinRPC:
OBJID = 1
def __init__(self, host, port, username, password):
authpair = "%s:%s" % (username, password)
self.authhdr = "Basic %s" % (base64.b64encode(authpair))
self.conn = httplib.HTTPConnection(host, port, False, 30)
def rpc(self, method, params=None):
self.OBJID += 1
obj = { 'version' : '1.1',
'method' : method,
'id' : self.OBJID }
if params is None:
obj['params'] = []
else:
obj['params'] = params
self.conn.request('POST', '/', json.dumps(obj),
{ 'Authorization' : self.authhdr,
'Content-type' : 'application/json' })
resp = self.conn.getresponse()
if resp is None:
print "JSON-RPC: no response"
return None
body = resp.read()
resp_obj = json.loads(body)
if resp_obj is None:
print "JSON-RPC: cannot JSON-decode body"
return None
if 'error' in resp_obj and resp_obj['error'] != None:
return resp_obj['error']
if 'result' not in resp_obj:
print "JSON-RPC: no result in object"
return None
return resp_obj['result']
def getblockcount(self):
return self.rpc('getblockcount')
def getwork(self, data=None):
return self.rpc('getwork', data)
def uint32(x):
return x & 0xffffffffL
def bytereverse(x):
return uint32(( ((x) << 24) | (((x) << 8) & 0x00ff0000) |
(((x) >> 8) & 0x0000ff00) | ((x) >> 24) ))
def bufreverse(in_buf):
out_words = []
for i in range(0, len(in_buf), 4):
word = struct.unpack('@I', in_buf[i:i+4])[0]
out_words.append(struct.pack('@I', bytereverse(word)))
return ''.join(out_words)
def wordreverse(in_buf):
out_words = []
for i in range(0, len(in_buf), 4):
out_words.append(in_buf[i:i+4])
out_words.reverse()
return ''.join(out_words)
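# Worked example for the helpers above (illustrative only):
#   bytereverse(0x01020304) == 0x04030201
# bufreverse() applies this byte swap to every 4-byte word of a buffer and
# wordreverse() reverses the order of the 4-byte words; Miner.work() uses them
# to put the getwork data/target into the byte order expected by the
# double-SHA256 proof-of-work check.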
class Miner:
def __init__(self, id):
self.id = id
self.max_nonce = MAX_NONCE
def work(self, datastr, targetstr):
# decode work data hex string to binary
static_data = datastr.decode('hex')
static_data = bufreverse(static_data)
# the first 76b of 80b do not change
blk_hdr = static_data[:76]
# decode 256-bit target value
targetbin = targetstr.decode('hex')
targetbin = targetbin[::-1] # byte-swap and dword-swap
targetbin_str = targetbin.encode('hex')
target = long(targetbin_str, 16)
# pre-hash first 76b of block header
static_hash = hashlib.sha256()
static_hash.update(blk_hdr)
for nonce in xrange(self.max_nonce):
# encode 32-bit nonce value
nonce_bin = struct.pack("<I", nonce)
# hash final 4b, the nonce value
hash1_o = static_hash.copy()
hash1_o.update(nonce_bin)
hash1 = hash1_o.digest()
# sha256 hash of sha256 hash
hash_o = hashlib.sha256()
hash_o.update(hash1)
hash = hash_o.digest()
# quick test for winning solution: high 32 bits zero?
if hash[-4:] != '\0\0\0\0':
continue
# convert binary hash to 256-bit Python long
hash = bufreverse(hash)
hash = wordreverse(hash)
hash_str = hash.encode('hex')
l = long(hash_str, 16)
# proof-of-work test: hash < target
if l < target:
print time.asctime(), "PROOF-OF-WORK found: %064x" % (l,)
return (nonce + 1, nonce_bin)
else:
print time.asctime(), "PROOF-OF-WORK false positive %064x" % (l,)
# return (nonce + 1, nonce_bin)
return (nonce + 1, None)
def submit_work(self, rpc, original_data, nonce_bin):
nonce_bin = bufreverse(nonce_bin)
nonce = nonce_bin.encode('hex')
solution = original_data[:152] + nonce + original_data[160:256]
param_arr = [ solution ]
result = rpc.getwork(param_arr)
print time.asctime(), "--> Upstream RPC result:", result
def iterate(self, rpc):
work = rpc.getwork()
if work is None:
time.sleep(ERR_SLEEP)
return
if 'data' not in work or 'target' not in work:
time.sleep(ERR_SLEEP)
return
time_start = time.time()
(hashes_done, nonce_bin) = self.work(work['data'],
work['target'])
time_end = time.time()
time_diff = time_end - time_start
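		# Scale the nonce search range so the next call to work() runs for
		# roughly settings['scantime'] seconds at the measured hash rate.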
self.max_nonce = long(
(hashes_done * settings['scantime']) / time_diff)
if self.max_nonce > 0xfffffffaL:
self.max_nonce = 0xfffffffaL
if settings['hashmeter']:
print "HashMeter(%d): %d hashes, %.2f Khash/sec" % (
self.id, hashes_done,
(hashes_done / 1000.0) / time_diff)
if nonce_bin is not None:
self.submit_work(rpc, work['data'], nonce_bin)
def loop(self):
rpc = BitcoinRPC(settings['host'], settings['port'],
settings['rpcuser'], settings['rpcpass'])
if rpc is None:
return
while True:
self.iterate(rpc)
def miner_thread(id):
miner = Miner(id)
miner.loop()
if __name__ == '__main__':
if len(sys.argv) != 2:
print "Usage: pyminer.py CONFIG-FILE"
sys.exit(1)
f = open(sys.argv[1])
for line in f:
# skip comment lines
m = re.search('^\s*#', line)
if m:
continue
# parse key=value lines
m = re.search('^(\w+)\s*=\s*(\S.*)$', line)
if m is None:
continue
settings[m.group(1)] = m.group(2)
f.close()
if 'host' not in settings:
settings['host'] = '127.0.0.1'
if 'port' not in settings:
settings['port'] = 4442
if 'threads' not in settings:
settings['threads'] = 1
if 'hashmeter' not in settings:
settings['hashmeter'] = 0
if 'scantime' not in settings:
settings['scantime'] = 30L
if 'rpcuser' not in settings or 'rpcpass' not in settings:
print "Missing username and/or password in cfg file"
sys.exit(1)
settings['port'] = int(settings['port'])
settings['threads'] = int(settings['threads'])
settings['hashmeter'] = int(settings['hashmeter'])
settings['scantime'] = long(settings['scantime'])
thr_list = []
for thr_id in range(settings['threads']):
p = Process(target=miner_thread, args=(thr_id,))
p.start()
thr_list.append(p)
time.sleep(1) # stagger threads
print settings['threads'], "mining threads started"
print time.asctime(), "Miner Starts - %s:%s" % (settings['host'], settings['port'])
try:
for thr_proc in thr_list:
thr_proc.join()
except KeyboardInterrupt:
pass
print time.asctime(), "Miner Stops - %s:%s" % (settings['host'], settings['port'])
|
mock_distribute.py
|
# Copyright 2019 The Keras Tuner Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Mock running KerasTuner in a distributed tuning setting."""
import os
import sys
import threading
import mock
import portpicker
import six
class ExceptionStoringThread(threading.Thread):
def run(self):
self.raised_exception = None
try:
super(ExceptionStoringThread, self).run()
except BaseException:
self.raised_exception = sys.exc_info()
class MockEnvVars(dict):
"""Allows setting different environment variables in threads."""
def __init__(self):
self.thread_local = threading.local()
self.initial_env_vars = os.environ.copy()
def _setup_thread(self):
if getattr(self.thread_local, "environ", None) is None:
self.thread_local.environ = self.initial_env_vars.copy()
def get(self, name, default=None):
self._setup_thread()
return self.thread_local.environ.get(name, default)
def __setitem__(self, name, value):
self._setup_thread()
self.thread_local.environ[name] = value
def __getitem__(self, name):
self._setup_thread()
return self.thread_local.environ[name]
def __contains__(self, name):
self._setup_thread()
return name in self.thread_local.environ
def mock_distribute(fn, num_workers=2):
"""Runs `fn` in multiple processes, setting appropriate env vars."""
port = str(portpicker.pick_unused_port())
with mock.patch.object(os, "environ", MockEnvVars()):
def chief_fn():
# The IP address of the chief Oracle. Run in distributed mode when
# present. Cloud oracle does not run in this mode because the Cloud
# API coordinates workers itself.
os.environ["KERASTUNER_ORACLE_IP"] = "127.0.0.1"
# The port of the chief Oracle.
os.environ["KERASTUNER_ORACLE_PORT"] = port
# The ID of this process. 'chief' will run the OracleServicer server.
os.environ["KERASTUNER_TUNER_ID"] = "chief"
fn()
chief_thread = ExceptionStoringThread(target=chief_fn)
chief_thread.daemon = True
chief_thread.start()
worker_threads = []
for i in range(num_workers):
            def worker_fn(i=i):  # bind the current value of i; the thread may run after the loop advances
os.environ["KERASTUNER_ORACLE_IP"] = "127.0.0.1"
os.environ["KERASTUNER_ORACLE_PORT"] = port
# Workers that are part of the same multi-worker
# DistributionStrategy should have the same TUNER_ID.
os.environ["KERASTUNER_TUNER_ID"] = "worker{}".format(i)
fn()
worker_thread = ExceptionStoringThread(target=worker_fn)
worker_thread.start()
worker_threads.append(worker_thread)
for worker_thread in worker_threads:
worker_thread.join()
if chief_thread.raised_exception:
six.reraise(*chief_thread.raised_exception)
for worker_thread in worker_threads:
if worker_thread.raised_exception is not None:
six.reraise(*worker_thread.raised_exception)
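# Minimal usage sketch (illustrative only; `run_tuner` is a hypothetical
# function that builds a tuner and calls search(), reading the KERASTUNER_*
# environment variables set above):
#
#     def run_tuner():
#         ...
#
#     mock_distribute(run_tuner, num_workers=2)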
|
dppo_cont_gae_dist_gpu.py
|
"""
Distributed Proximal Policy Optimization (Distributed PPO or DPPO) continuous
version implementation with distributed Tensorflow and Python’s multiprocessing
package. This implementation uses normalized running rewards with GAE. The code
is tested with Gym’s continuous action space environment, Pendulum-v0 on Colab.
"""
from __future__ import absolute_import, division, print_function, unicode_literals
#!pip install -q tf-nightly
import tensorflow as tf
tf.reset_default_graph()
import numpy as np
import matplotlib.pyplot as plt
import gym
import time
from multiprocessing import Process
# The following class is adapted from OpenAI's baseline:
# https://github.com/openai/baselines/blob/master/baselines/common/running_mean_std.py
# https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance#Parallel_algorithm
# This class is used for the normalization of rewards in this program before GAE computation.
class RunningStats(object):
def __init__(self, epsilon=1e-4, shape=()):
self.mean = np.zeros(shape, 'float64')
self.var = np.ones(shape, 'float64')
self.std = np.ones(shape, 'float64')
self.count = epsilon
def update(self, x):
batch_mean = np.mean(x, axis=0)
batch_var = np.var(x, axis=0)
batch_count = x.shape[0]
self.update_from_moments(batch_mean, batch_var, batch_count)
def update_from_moments(self, batch_mean, batch_var, batch_count):
delta = batch_mean - self.mean
new_mean = self.mean + delta * batch_count / (self.count + batch_count)
m_a = self.var * self.count
m_b = batch_var * batch_count
M2 = m_a + m_b + np.square(delta) * self.count * batch_count / (self.count + batch_count)
new_var = M2 / (self.count + batch_count)
self.mean = new_mean
self.var = new_var
self.std = np.maximum(np.sqrt(self.var), 1e-6)
self.count = batch_count + self.count
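# RunningStats usage (illustrative only): statistics accumulate across calls, e.g.
#   rs = RunningStats()
#   rs.update(np.array([1.0, 2.0, 3.0]))
#   rs.update(np.array([4.0, 5.0]))
#   # rs.mean is now ~3.0 (slightly biased toward 0 by the epsilon prior)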
class PPO(object):
def __init__(self, scope, sess, env, global_PPO=None):
self.sess = sess
self.env = env
#OPT_A = tf.train.AdamOptimizer(A_LR, beta1=0.99, beta2=0.999, name='OPT_A')
#OPT_C = tf.train.AdamOptimizer(C_LR, beta1=0.99, beta2=0.999, name='OPT_C')
OPT_A = tf.train.AdamOptimizer(A_LR, name='OPT_A')
OPT_C = tf.train.AdamOptimizer(C_LR, name='OPT_C')
with tf.variable_scope(scope): # scope is either global or wid
self.state = tf.placeholder(tf.float32, [None, S_DIM], 'state')
# critic
with tf.variable_scope('critic'):
h1 = tf.layers.dense(self.state, hidden, tf.nn.relu, name='hidden', trainable=True)
self.val = tf.layers.dense(h1, 1, name='val', trainable=True)
self.critic_params = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=scope + '/critic')
self.discounted_r = tf.placeholder(tf.float32, [None, 1], 'discounted_r')
self.advantage = self.discounted_r - self.val
self.closs = tf.reduce_mean(tf.square(self.advantage))
self.ctrain_op = OPT_C.minimize(self.closs)
with tf.variable_scope('cgrads'):
self.critic_grad_op = tf.gradients(self.closs, self.critic_params)
# actor
self.pi, self.pi_params = self._build_anet(scope, 'pi', self.env, trainable=True)
self.oldpi, self.oldpi_params = self._build_anet(scope, 'oldpi', self.env, trainable=True) # originally trainable=False
with tf.variable_scope('sample_action'):
self.sample_op = tf.squeeze(self.pi.sample(1), axis=0) # choosing action
with tf.variable_scope('update_oldpi'):
self.update_oldpi_op = [oldp.assign(p) for p, oldp in zip(self.pi_params, self.oldpi_params)]
self.act = tf.placeholder(tf.float32, [None, A_DIM], 'action')
self.adv = tf.placeholder(tf.float32, [None, 1], 'advantage')
with tf.variable_scope('loss'):
with tf.variable_scope('surrogate'):
ratio = self.pi.prob(self.act) / self.oldpi.prob(self.act)
surr = ratio * self.adv
self.aloss = -tf.reduce_mean(tf.minimum(surr, tf.clip_by_value(ratio, 1.-epsilon, 1.+epsilon)*self.adv))
with tf.variable_scope('atrain'):
self.atrain_op = OPT_A.minimize(self.aloss)
with tf.variable_scope('agrads'):
self.pi_grad_op = tf.gradients(self.aloss, self.pi_params)
if scope != net_scope: # not global
with tf.name_scope('params'): # push/pull from local/worker perspective
with tf.name_scope('push_to_global'):
self.push_actor_pi_params = OPT_A.apply_gradients(zip(self.pi_grad_op, global_PPO.pi_params))
self.push_critic_params = OPT_C.apply_gradients(zip(self.critic_grad_op, global_PPO.critic_params))
with tf.name_scope('pull_fr_global'):
self.pull_actor_pi_params = [local_params.assign(global_params) for local_params, global_params in zip(self.pi_params, global_PPO.pi_params)]
self.pull_critic_params = [local_params.assign(global_params) for local_params, global_params in zip(self.critic_params, global_PPO.critic_params)]
def update(self, s, a, r, adv):
self.sess.run(self.update_oldpi_op)
for _ in range(A_EPOCH): # train actor
self.sess.run(self.atrain_op, {self.state: s, self.act: a, self.adv: adv})
# update actor
self.sess.run([self.push_actor_pi_params,
self.pull_actor_pi_params],
{self.state: s, self.act: a, self.adv: adv})
for _ in range(C_EPOCH): # train critic
# update critic
self.sess.run(self.ctrain_op, {self.state: s, self.discounted_r: r})
self.sess.run([self.push_critic_params,
self.pull_critic_params],
{self.state: s, self.discounted_r: r})
def _build_anet(self, scope, name, env, trainable):
with tf.variable_scope(name):
h1 = tf.layers.dense(self.state, hidden, tf.nn.relu, name='hidden', trainable=trainable)
mu = self.env.action_space.high * tf.layers.dense(h1, A_DIM, tf.nn.tanh, name='mu', trainable=trainable)
sigma = tf.layers.dense(h1, A_DIM, tf.nn.softplus, name='sigma', trainable=trainable)
norm_dist = tf.distributions.Normal(loc=mu, scale=sigma)
params = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=scope + '/' + name)
return norm_dist, params
def choose_action(self, s):
s = s[None, :]
a = self.sess.run(self.sample_op, {self.state: s})[0]
return np.clip(a, self.env.action_space.low, self.env.action_space.high)
def get_val(self, s):
if s.ndim < 2: s = s[None, :]
return self.sess.run(self.val, {self.state: s})[0, 0]
# This function is adapted from OpenAI's Baseline
# GAE computation
# returns TD lamda return & advantage
def add_vtarg_and_adv(self, R, done, V, v_s_, gamma, lam):
# Compute target value using TD(lambda) estimator, and advantage with GAE(lambda)
        # v_s_ is appended so the last step's delta can bootstrap from the value of
        # the next state; the appended done=0 treats that last step as nonterminal
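        # The loop below implements the GAE recurrence (nonterminal_t = 1 - done_{t+1}):
        #   delta_t = R_t + gamma * V_{t+1} * nonterminal_t - V_t
        #   adv_t   = delta_t + gamma * lam * nonterminal_t * adv_{t+1}
        # and the TD(lambda) return used as the critic target is adv_t + V_t.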
done = np.append(done, 0)
V_plus = np.append(V, v_s_)
T = len(R)
adv = gaelam = np.empty(T, 'float32')
lastgaelam = 0
for t in reversed(range(T)):
nonterminal = 1-done[t+1]
delta = R[t] + gamma * V_plus[t+1] * nonterminal - V_plus[t]
gaelam[t] = lastgaelam = delta + gamma * lam * nonterminal * lastgaelam
#print("adv=", adv.shape)
#print("V=", V.shape)
#print("V_plus=", V_plus.shape)
tdlamret = np.vstack(adv) + V
#print("tdlamret=", tdlamret.shape)
return tdlamret, adv # tdlamret is critic_target or Qs
class Worker(object):
def __init__(self, wid, GLOBAL_PPO, GLOBAL_EP, GLOBAL_RUNNING_R, sess):
self.wid = wid
self.env = gym.make(GAME).unwrapped
self.g_ppo = GLOBAL_PPO
self.ppo = PPO(wid, sess, self.env, GLOBAL_PPO)
self.running_stats_r = RunningStats()
self.sess = sess
self.GLOBAL_EP = GLOBAL_EP
self.GLOBAL_RUNNING_R = GLOBAL_RUNNING_R
def work(self):
T = 0
t = 0
SESS = self.sess
GLOBAL_EP = self.GLOBAL_EP
GLOBAL_RUNNING_R = self.GLOBAL_RUNNING_R
while SESS.run(GLOBAL_EP) < EP_MAX:
s = self.env.reset()
buffer_s, buffer_a, buffer_r, buffer_done, buffer_V = [], [], [], [], []
ep_r = 0
for t in range(EP_LEN):
a = self.ppo.choose_action(s)
s_, r, done, _ = self.env.step(a)
buffer_s.append(s)
buffer_a.append(a)
buffer_r.append(r)
buffer_done.append(done)
v = self.ppo.get_val(s)
buffer_V.append(v)
s = s_
ep_r += r
# update ppo
if (t+1) % BATCH == 0 or t == EP_LEN-1:
self.running_stats_r.update(np.array(buffer_r))
buffer_r = np.clip( (np.array(buffer_r) - self.running_stats_r.mean) / self.running_stats_r.std, -stats_CLIP, stats_CLIP )
v_s_ = self.ppo.get_val(s_)
tdlamret, adv = self.ppo.add_vtarg_and_adv(np.vstack(buffer_r), np.vstack(buffer_done), np.vstack(buffer_V), v_s_, GAMMA, lamda)
bs, ba, br, b_adv = np.vstack(buffer_s), np.vstack(buffer_a), tdlamret, np.vstack(adv)
buffer_s, buffer_a, buffer_r, buffer_done, buffer_V = [], [], [], [], []
self.ppo.update(bs, ba, br, b_adv)
SESS.run(GLOBAL_EP.assign_add(1.0))
qe = GLOBAL_RUNNING_R.enqueue(ep_r)
SESS.run(qe)
GAME = 'Pendulum-v0'
env = gym.make(GAME).unwrapped
net_scope = 'global'
EP_MAX = 500 #500 # max number of episodes
EP_LEN = 200 # episode length
GAMMA = 0.9
lamda = 0.95 #0.95
hidden = 50 #100
A_LR = 0.0001 # actor's learning rate
C_LR = 0.0002 # critic's learning rate
BATCH = 32 # minibatch size
A_EPOCH = 10 # number of actor training epochs per update
C_EPOCH = 10 # number of critic training epochs per update
S_DIM, A_DIM = 3, 1 # state, action dimension
stats_CLIP = 10 # upper bound of RunningStats
epsilon=0.2
cluster = tf.train.ClusterSpec({
"worker": ["localhost:3331",
"localhost:3332",
"localhost:3333",
"localhost:3334"
],
"ps": ["localhost:3330"]
})
def parameter_server():
#tf.reset_default_graph()
server = tf.train.Server(cluster,
job_name="ps",
task_index=0)
sess = tf.Session(target=server.target)
with tf.device("/job:ps/task:0"):
GLOBAL_PPO = PPO(net_scope, sess, env, global_PPO=None) # only need its params
GLOBAL_EP = tf.Variable(0.0, name='GLOBAL_EP') # num of global episodes
# a queue of ep_r
GLOBAL_RUNNING_R = tf.FIFOQueue(EP_MAX, tf.float32, shared_name="GLOBAL_RUNNING_R")
print("Parameter server: waiting for cluster connection...")
sess.run(tf.report_uninitialized_variables())
print("Parameter server: cluster ready!")
print("Parameter server: initializing variables...")
sess.run(tf.global_variables_initializer())
print("Parameter server: variables initialized")
while True:
time.sleep(1.0)
        if sess.run(GLOBAL_RUNNING_R.size()) >= EP_MAX: # all EP_MAX episode rewards have been enqueued
time.sleep(10.0)
GLOBAL_RUNNING_R_list = []
ep_r_prev = 0.0
for i in range(sess.run(GLOBAL_RUNNING_R.size())):
ep_r = sess.run(GLOBAL_RUNNING_R.dequeue())
if i==0:
GLOBAL_RUNNING_R_list.append(ep_r) # for display
else:
GLOBAL_RUNNING_R_list.append(GLOBAL_RUNNING_R_list[-1]*0.9 + ep_r*0.1) # for display
break
# display
plt.plot(np.arange(len(GLOBAL_RUNNING_R_list)), GLOBAL_RUNNING_R_list)
plt.xlabel('episode')
plt.ylabel('reward')
plt.show()
#print("Parameter server: blocking...")
#server.join() # currently blocks forever
print("Parameter server: ended...")
def worker(worker_n):
#tf.reset_default_graph()
server = tf.train.Server(cluster,
job_name="worker",
task_index=worker_n)
sess = tf.Session(target=server.target)
with tf.device("/job:ps/task:0"):
GLOBAL_PPO = PPO(net_scope, sess, env, global_PPO=None) # only need its params
GLOBAL_EP = tf.Variable(0.0, name='GLOBAL_EP') # num of global episodes
# a queue of ep_r
GLOBAL_RUNNING_R = tf.FIFOQueue(EP_MAX, tf.float32, shared_name="GLOBAL_RUNNING_R")
"""
with tf.device(tf.train.replica_device_setter(
worker_device='/job:worker/task:' + str(worker_n),
cluster=cluster)):
"""
print("Worker %d: waiting for cluster connection..." % worker_n)
sess.run(tf.report_uninitialized_variables())
print("Worker %d: cluster ready!" % worker_n)
#while sess.run(tf.report_uninitialized_variables()):
    while (sess.run(tf.report_uninitialized_variables())).any(): # returns an array of variable names; loop while any remain uninitialized
print("Worker %d: waiting for variable initialization..." % worker_n)
time.sleep(1.0)
print("Worker %d: variables initialized" % worker_n)
w = Worker(str(worker_n), GLOBAL_PPO, GLOBAL_EP, GLOBAL_RUNNING_R, sess)
print("Worker %d: created" % worker_n)
sess.run(tf.global_variables_initializer()) # got to initialize after Worker creation
w.work()
print("Worker %d: w.work()" % worker_n)
#print("Worker %d: blocking..." % worker_n)
server.join() # currently blocks forever
print("Worker %d: ended..." % worker_n)
start_time = time.time()
ps_proc = Process(target=parameter_server, daemon=True)
w1_proc = Process(target=worker, args=(0, ), daemon=True)
w2_proc = Process(target=worker, args=(1, ), daemon=True)
w3_proc = Process(target=worker, args=(2, ), daemon=True)
w4_proc = Process(target=worker, args=(3, ), daemon=True)
ps_proc.start()
w1_proc.start()
w2_proc.start()
w3_proc.start()
w4_proc.start()
# join the parameter server; without it the parent would exit immediately
# and the daemon children would be terminated with it
ps_proc.join()
#w1_proc.join()
#w2_proc.join()
#w3_proc.join()
#w4_proc.join()
for proc in [w1_proc,
w2_proc,
w3_proc,
w4_proc,
ps_proc]:
    proc.terminate() # the only way to stop the server is to kill its process
print('All done.')
print("--- %s seconds ---" % (time.time() - start_time))
|
oauth.py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import os
import time
import uuid
import string
import codecs
import logging
import threading
# pylint: disable=import-error
from six.moves.urllib.parse import urlparse, parse_qs
from six.moves.BaseHTTPServer import BaseHTTPRequestHandler, HTTPServer
from . import docs
from .config import TEMPLATES
from .exceptions import InvalidRefreshToken
from .packages.praw.errors import HTTPException, OAuthException
_logger = logging.getLogger(__name__)
INDEX = os.path.join(TEMPLATES, 'index.html')
class OAuthHTTPServer(HTTPServer):
def handle_error(self, request, client_address):
"""
The default HTTPServer's error handler prints the request traceback
to stdout, which breaks the curses display.
Override it to log to a file instead.
"""
_logger.exception('Error processing request in OAuth HTTP Server')
class OAuthHandler(BaseHTTPRequestHandler):
# params are stored as a global because we don't have control over what
# gets passed into the handler __init__. These will be accessed by the
# OAuthHelper class.
params = {'state': None, 'code': None, 'error': None}
shutdown_on_request = True
def do_GET(self):
"""
Accepts GET requests to http://localhost:6500/, and stores the query
params in the global dict. If shutdown_on_request is true, stop the
server after the first successful request.
The http request may contain the following query params:
- state : unique identifier, should match what we passed to reddit
- code : code that can be exchanged for a refresh token
- error : if provided, the OAuth error that occurred
"""
parsed_path = urlparse(self.path)
if parsed_path.path != '/':
self.send_error(404)
qs = parse_qs(parsed_path.query)
self.params['state'] = qs['state'][0] if 'state' in qs else None
self.params['code'] = qs['code'][0] if 'code' in qs else None
self.params['error'] = qs['error'][0] if 'error' in qs else None
body = self.build_body()
# send_response also sets the Server and Date headers
self.send_response(200)
self.send_header('Content-Type', 'text/html; charset=UTF-8')
self.send_header('Content-Length', len(body))
self.end_headers()
self.wfile.write(body)
if self.shutdown_on_request:
# Shutdown the server after serving the request
# http://stackoverflow.com/a/22533929
thread = threading.Thread(target=self.server.shutdown)
thread.daemon = True
thread.start()
def log_message(self, fmt, *args):
"""
Redirect logging to our own handler instead of stdout
"""
_logger.debug(fmt, *args)
def build_body(self, template_file=INDEX):
"""
Params:
template_file (text): Path to an index.html template
Returns:
            body (bytes): The utf-8 encoded document body
"""
if self.params['error'] == 'access_denied':
message = docs.OAUTH_ACCESS_DENIED
elif self.params['error'] is not None:
message = docs.OAUTH_ERROR.format(error=self.params['error'])
elif self.params['state'] is None or self.params['code'] is None:
message = docs.OAUTH_INVALID
else:
message = docs.OAUTH_SUCCESS
with codecs.open(template_file, 'r', 'utf-8') as fp:
index_text = fp.read()
body = string.Template(index_text).substitute(message=message)
body = codecs.encode(body, 'utf-8')
return body
class OAuthHelper(object):
params = OAuthHandler.params
def __init__(self, reddit, term, config):
self.term = term
self.reddit = reddit
self.config = config
# Wait to initialize the server, we don't want to reserve the port
# unless we know that the server needs to be used.
self.server = None
self.reddit.set_oauth_app_info(
self.config['oauth_client_id'],
self.config['oauth_client_secret'],
self.config['oauth_redirect_uri'])
# Reddit's mobile website works better on terminal browsers
if not self.term.display:
if '.compact' not in self.reddit.config.API_PATHS['authorize']:
self.reddit.config.API_PATHS['authorize'] += '.compact'
def authorize(self):
self.params.update(state=None, code=None, error=None)
# If we already have a token, request new access credentials
if self.config.refresh_token:
with self.term.loader('Logging in'):
try:
self.reddit.refresh_access_information(
self.config.refresh_token)
except (HTTPException, OAuthException) as e:
# Reddit didn't accept the refresh-token
# This appears to throw a generic 400 error instead of the
# more specific invalid_token message that it used to send
if isinstance(e, HTTPException):
if e._raw.status_code != 400:
# No special handling if the error is something
# temporary like a 5XX.
raise e
# Otherwise we know the token is bad, so we can remove it.
_logger.exception(e)
self.clear_oauth_data()
raise InvalidRefreshToken(
' Invalid user credentials!\n'
'The cached refresh token has been removed')
return
state = uuid.uuid4().hex
authorize_url = self.reddit.get_authorize_url(
state, scope=self.config['oauth_scope'], refreshable=True)
if self.server is None:
address = ('', self.config['oauth_redirect_port'])
self.server = OAuthHTTPServer(address, OAuthHandler)
if self.term.display:
# Open a background browser (e.g. firefox) which is non-blocking.
# The server will block until it responds to its first request,
# at which point we can check the callback params.
OAuthHandler.shutdown_on_request = True
with self.term.loader('Opening browser for authorization'):
self.term.open_browser(authorize_url)
self.server.serve_forever()
if self.term.loader.exception:
# Don't need to call server.shutdown() because serve_forever()
                # is wrapped in a try-finally that does it for us.
return
else:
            # Run the OAuth server in a background thread and open the
            # terminal browser in the foreground. Once the user closes the
            # browser, the server is shut down and we can check whether the
            # callback URL was hit.
OAuthHandler.shutdown_on_request = False
with self.term.loader('Redirecting to reddit', delay=0):
# This load message exists to provide user feedback
time.sleep(1)
thread = threading.Thread(target=self.server.serve_forever)
thread.daemon = True
thread.start()
try:
self.term.open_browser(authorize_url)
except Exception as e:
# If an exception is raised it will be seen by the thread
# so we don't need to explicitly shutdown() the server
_logger.exception(e)
self.term.show_notification('Browser Error', style='Error')
else:
self.server.shutdown()
finally:
thread.join()
if self.params['error'] == 'access_denied':
self.term.show_notification('Denied access', style='Error')
return
elif self.params['error']:
self.term.show_notification('Authentication error', style='Error')
return
elif self.params['state'] is None:
# Something went wrong but it's not clear what happened
return
elif self.params['state'] != state:
self.term.show_notification('UUID mismatch', style='Error')
return
with self.term.loader('Logging in'):
info = self.reddit.get_access_information(self.params['code'])
if self.term.loader.exception:
return
message = 'Welcome {}!'.format(self.reddit.user.name)
self.term.show_notification(message)
self.config.refresh_token = info['refresh_token']
if self.config['persistent']:
self.config.save_refresh_token()
def clear_oauth_data(self):
self.reddit.clear_authentication()
self.config.delete_refresh_token()
|
modis_processing.py
|
# -*- coding: utf-8 -*-
import logging
import shutil
from datetime import datetime, timedelta
from multiprocessing import Process, Queue
from time import sleep
from configuration import (
data_dir,
download_file,
env_bin,
output_dir,
processed_file,
temporary_dir,
)
from wildfires.data.mosaic_modis_tiles import get_file_dates, mosaic_process_date
from wildfires.logging_config import enable_logging
gdal_translate = env_bin / "gdal_translate"
gdalwarp = env_bin / "gdalwarp"
enable_logging(level="INFO")
logger = logging.getLogger(__name__)
def run(queue, data_dir):
"""Process the MODIS data in `data_dir`.
    Puts None on `queue` if an error occurred, and the processed datetime otherwise.
"""
def handle_error():
queue.put(None)
file_map = get_file_dates(data_dir)
if len(file_map) != 1:
logger.error(f"File map had length '{len(file_map)}' for dir: {data_dir}.")
return handle_error()
date, date_files = next(iter(file_map.items()))
error = False
try:
mosaic_process_date(
date,
date_files,
temporary_dir,
output_dir,
memory=4000,
multi=True,
overwrite=True,
gdal_translate=gdal_translate,
gdalwarp=gdalwarp,
)
except:
logger.exception(f"Processing of '{date}' failed.")
error = True
finally:
# Clean temporary dir.
# NOTE: This makes this code single-threaded!!
if temporary_dir.is_dir():
shutil.rmtree(temporary_dir)
temporary_dir.mkdir()
if error:
return handle_error()
# Record this date as having been processed.
year = int(date[:4])
days = int(date[4:])
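    # e.g. the MODIS date string "2021032" (year 2021, day-of-year 32)
    # maps to datetime(2021, 2, 1)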
queue.put(datetime(year, 1, 1) + timedelta(days=days - 1))
if __name__ == "__main__":
# Continuously monitor the file recording any downloaded files and process any
# previously unprocessed files accordingly.
while True:
logger.info("Checking for downloaded and unprocessed files")
with download_file.open("r") as f:
downloaded = f.read().strip().split("\n")
with processed_file.open("r") as f:
processed = f.read().strip().split("\n")
outstanding = set(downloaded) - set(processed)
logger.info(f"Outstanding dates: {outstanding}")
for date_str in outstanding:
logger.info(f"Processing: {date_str}")
date_dir = data_dir / date_str
# Carry out processing using a new process to avoid potential memory leaks.
queue = Queue()
p = Process(target=run, args=(queue, date_dir))
p.start()
processed_date = queue.get()
p.join()
if processed_date is not None and (
f"{processed_date:%Y.%m.%d}" == date_str
):
logger.info(f"Processed date: {date_str}")
with processed_file.open("a") as f:
f.write(date_str + "\n")
# Remove the original data directory.
shutil.rmtree(date_dir)
else:
logger.error(f"Error during processing of date: {date_str}.")
sleep(100)
|
_parallelize.py
|
"""Module used to parallelize model fitting."""
from typing import Any, Union, Callable, Optional, Sequence
import joblib as jl
from threading import Thread
from multiprocessing import Manager
from cellrank.ul._utils import _get_n_cores
import numpy as np
from scipy.sparse import issparse, spmatrix
def parallelize(
callback: Callable[[Any], Any],
collection: Union[spmatrix, Sequence[Any]],
n_jobs: Optional[int] = None,
n_split: Optional[int] = None,
unit: str = "",
as_array: bool = True,
use_ixs: bool = False,
backend: str = "loky",
extractor: Optional[Callable[[Any], Any]] = None,
show_progress_bar: bool = True,
) -> Any:
"""
Parallelize function call over a collection of elements.
Parameters
----------
callback
Function to parallelize.
collection
        Sequence of items to split into chunks, or a sparse matrix that is split along its rows.
n_jobs
Number of parallel jobs.
n_split
Split ``collection`` into ``n_split`` chunks. If `None`, split into ``n_jobs`` chunks.
unit
Unit of the progress bar.
as_array
        Whether to convert the results to :class:`numpy.ndarray`.
use_ixs
Whether to pass indices to the callback.
backend
Which backend to use for multiprocessing. See :class:`joblib.Parallel` for valid options.
extractor
Function to apply to the result after all jobs have finished.
show_progress_bar
Whether to show a progress bar.
Returns
-------
    The result, depending on ``callback``, ``extractor`` and ``as_array``.
"""
if show_progress_bar:
try:
import ipywidgets
from tqdm.auto import tqdm
except ImportError:
try:
from tqdm.std import tqdm
except ImportError:
tqdm = None
else:
tqdm = None
def update(pbar, queue, n_total):
n_finished = 0
while n_finished < n_total:
try:
res = queue.get()
except EOFError as e:
                if n_finished != n_total:
                    # the queue closed before all chunks reported back
                    raise RuntimeError(
                        f"Finished only `{n_finished}` out of `{n_total}` tasks."
                    ) from e
break
            assert res in (None, (1, None), 1)  # (1, None): chunk finished (update bar); None: chunk finished; 1: progress tick only
if res == (1, None):
n_finished += 1
if pbar is not None:
pbar.update()
elif res is None:
n_finished += 1
elif pbar is not None:
pbar.update()
if pbar is not None:
pbar.close()
def wrapper(*args, **kwargs):
if pass_queue and show_progress_bar:
pbar = (
None
if tqdm is None
else tqdm(total=col_len, unit=unit, mininterval=0.125)
)
queue = Manager().Queue()
thread = Thread(target=update, args=(pbar, queue, len(collections)))
thread.start()
else:
pbar, queue, thread = None, None, None
res = jl.Parallel(n_jobs=n_jobs, backend=backend)(
jl.delayed(callback)(
*((i, cs) if use_ixs else (cs,)),
*args,
**kwargs,
queue=queue,
)
for i, cs in enumerate(collections)
)
res = np.array(res) if as_array else res
if thread is not None:
thread.join()
return res if extractor is None else extractor(res)
col_len = collection.shape[0] if issparse(collection) else len(collection)
n_jobs = _get_n_cores(n_jobs, col_len)
if n_split is None:
n_split = n_jobs
if issparse(collection):
n_split = max(1, min(n_split, collection.shape[0]))
if n_split == collection.shape[0]:
collections = [collection[[ix], :] for ix in range(collection.shape[0])]
else:
step = collection.shape[0] // n_split
ixs = [
np.arange(i * step, min((i + 1) * step, collection.shape[0]))
for i in range(n_split)
]
ixs[-1] = np.append(
ixs[-1], np.arange(ixs[-1][-1] + 1, collection.shape[0])
)
collections = [collection[ix, :] for ix in filter(len, ixs)]
else:
collections = list(filter(len, np.array_split(collection, n_split)))
n_split = len(collections)
n_jobs = min(n_jobs, n_split)
pass_queue = not hasattr(callback, "py_func") # we'd be inside a numba function
return wrapper
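# Minimal usage sketch (illustrative only; `square_chunk` is a hypothetical
# callback). The wrapper always passes a `queue` keyword, so the callback must
# accept it even when progress reporting is disabled:
#
#     def square_chunk(chunk, queue=None):
#         return [x ** 2 for x in chunk]
#
#     run = parallelize(square_chunk, list(range(100)), n_jobs=2,
#                       as_array=False, show_progress_bar=False)
#     results = run()  # list with one result per chunk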
|
gym_gazeboros.py
|
#!/usr/bin/env python
from datetime import datetime
import copy
import traceback
import os, subprocess, time, signal
#from cv_bridge import CvBridge
import gym
import math
import random
import numpy as np
import cv2 as cv
import rospy
# Brings in the SimpleActionClient
import actionlib
# Brings in the .action file and messages used by the move base action
from move_base_msgs.msg import MoveBaseAction, MoveBaseGoal
from squaternion import quat2euler
from squaternion import euler2quat
from sensor_msgs.msg import LaserScan
from sensor_msgs.msg import Image
from gazebo_msgs.msg import ModelState
from geometry_msgs.msg import Point
from geometry_msgs.msg import Point32
from geometry_msgs.msg import TransformStamped
from rosgraph_msgs.msg import Clock
from costmap_converter.msg import ObstacleArrayMsg
from costmap_converter.msg import ObstacleMsg
from gazebo_msgs.msg import ModelStates
from geometry_msgs.msg import Twist
from gazebo_msgs.srv import SetModelState
import threading
from gym.utils import seeding
import _thread
from simple_pid import PID
import pickle
import logging
logger = logging.getLogger(__name__)
class History():
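    """Ring buffer of recent robot state samples.

    Elements are added at up to ``save_rate`` Hz; ``get_elemets()`` returns the
    most recent ``window_size`` samples, skipping frames so that consecutive
    returned samples are spaced at roughly the ``update_rate``.
    """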
def __init__(self, window_size, update_rate, save_rate=10):
self.idx = 0
self.update_rate = update_rate
self.save_rate = save_rate
self.lock = threading.Lock()
self.memory_size = int(math.ceil(save_rate/update_rate*window_size)+1)
self.data = [None for x in range(self.memory_size)]
self.prev_add_time = rospy.Time.now().to_sec() - 1
self.window_size = window_size
self.avg_frame_rate = None
self.time_data_= []
def add_element(self, element):
"""
element: the data that we put inside the history data array
"""
if abs(rospy.Time.now().to_sec() - self.prev_add_time) < 1./self.save_rate:
return
with self.lock:
self.idx = (self.idx + 1) % self.window_size
self.prev_add_time = rospy.Time.now().to_sec()
if self.data[self.idx] is None:
for idx in range(self.memory_size):
self.data[idx] = element
self.data[self.idx] = element
            if len(self.time_data_) <= 50:
self.time_data_.append(self.prev_add_time)
if len(self.time_data_) > 3:
prev_t = self.time_data_[0]
time_intervals = []
for t in self.time_data_[1:]:
time_intervals.append(t - prev_t)
prev_t = t
self.avg_frame_rate = 1.0 / np.average(time_intervals)
def get_elemets(self):
return_data = []
while self.avg_frame_rate is None:
time.sleep(0.1)
skip_frames = -int(math.ceil(self.avg_frame_rate / self.update_rate))
with self.lock:
index = self.idx #(self.idx - 1)% self.window_size
if self.window_size * abs(skip_frames) >= self.memory_size:
rospy.logerr("error in get element memory not enough update rate{} avg_frame_rate{} mem_size {} skipf: {}".format(self.update_rate, self.avg_frame_rate, self.memory_size, skip_frames))
for i in range (self.window_size):
return_data.append(self.data[index])
index = (index + skip_frames) % self.window_size
return return_data
def get_latest(self):
with self.lock:
return self.data[self.idx]
class Robot():
def __init__(self, name, max_angular_speed=1, max_linear_speed=1, relative=None, agent_num=None, use_goal=False, use_movebase=False, use_jackal=False, window_size=10, is_testing=False):
self.name = name
self.use_jackal = use_jackal
self.init_node = False
self.alive = True
self.prev_call_gazeboros_ = None
if relative is None:
relative = self
self.relative = relative
self.is_testing = is_testing
if self.is_testing:
self.all_pose_ = []
self.last_time_added = rospy.Time.now().to_sec()
self.log_history = []
self.agent_num = agent_num
self.init_node = True
self.deleted = False
self.update_rate_states = 2.0
self.window_size_history = window_size
self.current_vel_ = Twist()
self.goal = {"pos": None, "orientation": None}
self.use_goal = use_goal
self.use_movebase = use_movebase
self.max_angular_vel = max_angular_speed
self.max_linear_vel = max_linear_speed
self.max_rel_pos_range = 5.0 # meter
self.width_laserelement_image = 100
self.height_laser_image = 50
self.state_ = {'position': (None, None),
'orientation': None}
if self.use_jackal:
self.cmd_vel_pub = rospy.Publisher('/{}/jackal_velocity_controller/cmd_vel'.format(name), Twist, queue_size=1)
else:
self.cmd_vel_pub = rospy.Publisher('/{}/cmd_vel'.format(name), Twist, queue_size=1)
if "tb3" in self.name and self.use_movebase:
# Create an action client called "move_base" with action definition file "MoveBaseAction"
self.action_client_ = actionlib.SimpleActionClient('/move_base_{}'.format(self.agent_num),MoveBaseAction)
# Waits until the action server has started up and started listening for goals.
self.action_client_.wait_for_server(rospy.rostime.Duration(0.4))
else:
self.action_client_ = None
if "person" is self.name:
self.angular_pid = PID(0.5, 0, 0.03, setpoint=0)
self.linear_pid = PID(1.0, 0, 0.05, setpoint=0)
else:
self.angular_pid = PID(2.5, 0, 0.03, setpoint=0)
self.linear_pid = PID(2.5, 0, 0.05, setpoint=0)
self.pos_history = History(self.window_size_history, self.update_rate_states)
self.orientation_history = History(self.window_size_history, self.update_rate_states)
self.velocity_history = History(self.window_size_history, self.update_rate_states)
self.is_collided = False
self.is_pause = False
self.reset = False
self.scan_image = None
def calculate_ahead(self, distance):
x = self.state_['position'][0] + math.cos(self.state_["orientation"]) * distance
y = self.state_['position'][1] + math.sin(self.state_["orientation"]) * distance
return (x,y)
def movebase_cancel_goals(self):
self.action_client_.cancel_all_goals()
self.stop_robot()
def movebase_client_goal(self, goal_pos, goal_orientation):
# Creates a new goal with the MoveBaseGoal constructor
move_base_goal = MoveBaseGoal()
move_base_goal.target_pose.header.frame_id = "tb3_{}/odom".format(self.agent_num)
move_base_goal.target_pose.header.stamp = rospy.Time.now()
move_base_goal.target_pose.pose.position.x = goal_pos[0]
move_base_goal.target_pose.pose.position.y = goal_pos[1]
quaternion_rotation = euler2quat(0, goal_orientation, 0)
move_base_goal.target_pose.pose.orientation.x = quaternion_rotation[3]
move_base_goal.target_pose.pose.orientation.y = quaternion_rotation[1]
move_base_goal.target_pose.pose.orientation.z = quaternion_rotation[2]
move_base_goal.target_pose.pose.orientation.w = quaternion_rotation[0]
# Sends the move_base_goal to the action server.
self.action_client_.send_goal(move_base_goal)
# Waits for the server to finish performing the action.
#wait = self.action_client_.wait_for_result(rospy.rostime.Duration(0.4))
# If the result doesn't arrive, assume the Server is not available
# if not wait:
# rospy.logerr("Action server not available!")
# else:
# # Result of executing the action
# return self.action_client_.get_result()
def get_pos(self):
counter_problem = 0
while self.state_['position'] is None:
if self.reset:
return (None, None)
if counter_problem > 20:
rospy.logdebug("waiting for pos to be available {}/{}".format(counter_problem/10, 20))
time.sleep(0.001)
counter_problem += 1
if counter_problem > 200:
                raise Exception('Probable shared memory issue happened')
return self.state_['position']
def get_orientation(self):
counter_problem = 0
while self.state_['orientation'] is None:
if self.reset:
return None
if counter_problem > 20:
rospy.logdebug("waiting for pos to be available {}/{}".format(counter_problem/10, 20))
time.sleep(0.001)
counter_problem += 1
if counter_problem > 200:
                raise Exception('Probable shared memory issue happened')
return self.state_['orientation']
def is_current_state_ready(self):
return (self.state_['position'][0] is not None)
def is_observation_ready(self):
return (self.pos_history.avg_frame_rate is not None and\
self.orientation_history.avg_frame_rate is not None and\
self.velocity_history.avg_frame_rate is not None)
def update(self, init_pose):
self.alive = True
self.goal = {"pos": None, "orientation": None}
if "person" is self.name:
self.angular_pid = PID(0.5, 0, 0.03, setpoint=0)
self.linear_pid = PID(1.0, 0, 0.05, setpoint=0)
else:
self.angular_pid = PID(2.5, 0, 0.03, setpoint=0)
self.linear_pid = PID(2.5, 0, 0.05, setpoint=0)
self.pos_history = History(self.window_size_history, self.update_rate_states)
self.orientation_history = History(self.window_size_history, self.update_rate_states)
self.velocity_history = History(self.window_size_history, self.update_rate_states)
self.velocity_history.add_element((0,0))
self.pos_history.add_element((init_pose["pos"][0],init_pose["pos"][1]))
self.orientation_history.add_element(init_pose["orientation"])
self.log_history = []
if self.is_testing:
self.all_pose_ = []
#self.prev_call_gazeboros_ = None
#self.is_collided = False
self.is_pause = False
self.reset = False
def add_log(self, log):
self.log_history.append(log)
def remove(self):
self.reset = True
def set_state(self, state):
self.state_["position"] = state["position"]
self.state_["orientation"] = state["orientation"]
self.state_["velocity"] = state["velocity"]
self.orientation_history.add_element(state["orientation"])
self.pos_history.add_element(state["position"])
self.velocity_history.add_element(state["velocity"])
if self.is_testing and abs (rospy.Time.now().to_sec()- self.last_time_added) > 0.01:
self.all_pose_.append(self.state_.copy())
self.last_time_added = rospy.Time.now().to_sec()
def get_velocity(self):
return self.velocity_history.get_latest()
def pause(self):
self.is_pause = True
self.stop_robot()
def resume(self):
self.is_pause = False
def take_action(self, action):
if self.is_pause:
return
if self.use_goal:
pos = GazeborosEnv.denormalize(action[0:2], self.max_rel_pos_range)
pos_global = GazeborosEnv.get_global_position(pos, self.relative)
self.goal["orientation"] = self.get_orientation()
self.goal["pos"] = pos_global
if self.use_movebase:
#orientation = GazeborosEnv.denormalize(action[2], math.pi)
self.movebase_client_goal(pos_global, self.goal["orientation"])
else:
linear_vel = max(min(action[0]*self.max_linear_vel, self.max_linear_vel), -self.max_linear_vel)
angular_vel = max(min(action[1]*self.max_angular_vel, self.max_angular_vel), -self.max_angular_vel)
cmd_vel = Twist()
cmd_vel.linear.x = linear_vel #float(self.current_vel_.linear.x -(self.current_vel_.linear.x - linear_vel)*0.9)
cmd_vel.angular.z = angular_vel #-float(self.current_vel_.angular.z - (self.current_vel_.angular.z - angular_vel)*0.9)
self.current_vel_ = cmd_vel
self.cmd_vel_pub.publish(cmd_vel)
def stop_robot(self):
self.cmd_vel_pub.publish(Twist())
def angle_distance_to_point(self, pos):
current_pos = self.get_pos()
if current_pos[0] is None:
return None, None
angle = math.atan2(pos[1] - current_pos[1], pos[0] - current_pos[0])
distance = math.hypot(pos[0] - current_pos[0], pos[1] - current_pos[1])
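        # wrap the bearing relative to the robot's heading into [-pi, pi)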
angle = (angle - self.state_["orientation"] + math.pi) % (math.pi * 2) - math.pi
return angle, distance
def publish_cmd_vel(self, linear, angular):
cmd_vel = Twist()
angular_vel = min(max(angular, -self.max_angular_vel),self.max_angular_vel)
linear_vel = min(max(linear, 0), self.max_linear_vel)
cmd_vel.linear.x = float(linear_vel)
cmd_vel.angular.z = float(angular_vel)
self.cmd_vel_pub.publish(cmd_vel)
def use_selected_person_mod(self, person_mode):
while person_mode<=6:
if self.is_pause:
self.stop_robot()
return
if self.reset:
self.stop_robot()
return
angular_vel = 0
linear_vel = 0
if person_mode == 0:
linear_vel = self.max_linear_vel
if person_mode == 1:
#linear_vel = self.max_linear_vel * random.random()
linear_vel = self.max_linear_vel * 0.35
elif person_mode == 2:
linear_vel = self.max_linear_vel/2
angular_vel = self.max_angular_vel/6
elif person_mode == 3:
linear_vel = self.max_linear_vel/2
angular_vel = -self.max_angular_vel/6
elif person_mode == 4:
linear_vel, angular_vel = self.get_velocity()
linear_vel = linear_vel - (linear_vel - (random.random()/2 + 0.5))/2.
angular_vel = -self.max_angular_vel/6
elif person_mode == 5:
linear_vel, angular_vel = self.get_velocity()
linear_vel = linear_vel - (linear_vel - (random.random()/2 + 0.5))/2.
angular_vel = self.max_angular_vel/6
elif person_mode == 6:
linear_vel, angular_vel = self.get_velocity()
linear_vel = linear_vel - (linear_vel - (random.random()/2 + 0.5))/2.
angular_vel = angular_vel - (angular_vel - (random.random()-0.5)*2)/2.
self.publish_cmd_vel(linear_vel, angular_vel)
time.sleep(0.002)
def go_to_goal(self):
while True:
if self.reset:
return
while self.goal["pos"] is None:
time.sleep(0.1)
continue
diff_angle, distance = self.angle_distance_to_point(self.goal["pos"])
time_prev = rospy.Time.now().to_sec()
while not distance < 0.1 and abs(rospy.Time.now().to_sec() - time_prev) < 5:
if self.is_pause:
self.stop_robot()
return
if self.reset:
self.stop_robot()
return
diff_angle, distance = self.angle_distance_to_point(self.goal["pos"])
if distance is None:
return
if self.reset:
return
angular_vel = -min(max(self.angular_pid(diff_angle), -self.max_angular_vel),self.max_angular_vel)
linear_vel = min(max(self.linear_pid(-distance), 0), self.max_linear_vel)
linear_vel = linear_vel * math.pow((abs(math.pi - abs(diff_angle))/math.pi), 1.5)
self.publish_cmd_vel(linear_vel, angular_vel)
time.sleep(0.01)
self.stop_robot()
def go_to_pos(self, pos, stop_after_getting=False):
if self.is_pause:
self.stop_robot()
return
if self.reset:
return
diff_angle, distance = self.angle_distance_to_point(pos)
if distance is None:
print (self.get_pos())
return
time_prev = rospy.Time.now().to_sec()
while not distance < 0.2 and abs(rospy.Time.now().to_sec() - time_prev) < 5:
if self.is_pause:
self.stop_robot()
return
if self.reset:
return
diff_angle, distance = self.angle_distance_to_point(pos)
if distance is None:
return
if self.reset:
return
angular_vel = -min(max(self.angular_pid(diff_angle), -self.max_angular_vel),self.max_angular_vel)
linear_vel = min(max(self.linear_pid(-distance), 0), self.max_linear_vel)
linear_vel = linear_vel * math.pow((abs(math.pi - abs(diff_angle))/math.pi), 2)
self.publish_cmd_vel(linear_vel, angular_vel)
time.sleep(0.01)
if stop_after_getting:
self.stop_robot()
def get_goal(self):
counter_problem = 0
while self.goal["pos"] is None:
if self.reset:
return (None, None)
if counter_problem > 20:
rospy.logwarn("waiting for goal to be available {}/{}".format(counter_problem/10, 20))
time.sleep(0.01)
counter_problem += 1
if counter_problem > 200:
                raise Exception('Probable shared memory issue happened')
# if not self.use_movebase:
# pos = GazeborosEnv.get_global_position(self.goal["pos"], self)
# goal = {"pos":pos, "orientation":None}
# else:
# goal = self.goal
return self.goal
def get_pos(self):
counter_problem = 0
while self.state_['position'] is None:
if self.reset:
return (None, None)
if counter_problem > 20:
rospy.logwarn("waiting for pos to be available {}/{}".format(counter_problem/10, 20))
time.sleep(0.01)
counter_problem += 1
if counter_problem > 200:
                raise Exception('Probable shared memory issue happened')
return self.state_['position']
def get_laser_image(self):
return np.expand_dims(self.scan_image, axis=2)
class GazeborosEnv(gym.Env):
def __init__(self, is_evaluation=False):
self.is_evaluation_ = is_evaluation
# self.bridge = CvBridge()
# self.image_pub = rospy.Publisher("image_observation", Image)
# self.image_pub_gt = rospy.Publisher("image_observation_gt", Image)
self.is_reseting = True
self.use_path = True
self.use_jackal = True
self.lock = _thread.allocate_lock()
self.path_follower_test_settings = {0:(0,0, "straight",False), 1:(2,0, "right", False), 2:(3,0, "left", False),\
3:(1,4, "straight_Behind", False), 4:(2,3, "right_behind", False), 5:(3,3, "left_behind", False), 6:(7,2, "traj_1", True, True),\
7:(7, 12, "traj_2", True, True), 8:(7, 43, "traj_3", True),\
9:(2,1, "right_left", False), 10:(2,2, "right_right", False),\
11:(3,1, "left_left", False), 12:(3,2, "left_right", False)\
}
#self.path_follower_test_settings = {0:(7, 43, "traj_3", True)#(7,2, "traj_1", True, True), 1:(7, 12, "traj_2", True, True)}
self.is_testing = False
self.small_window_size = False
self.use_predifined_mode_person = True
self.use_goal = True
self.use_orientation_in_observation = True
self.collision_distance = 0.3
self.best_distance = 1.5
self.robot_mode = 0
self.window_size = 10
self.use_movebase = True
self.use_reachability = False
self.path_follower_current_setting_idx = 0
self.use_supervise_action = False
self.mode_person = 0
self.use_noise = True
self.is_use_test_setting = False
self.use_reverse = True
if self.small_window_size:
self.window_size = 5
if self.is_testing:
self.use_noise = False
self.use_reverse = False
self.is_use_test_setting = True
self.fallen = False
self.is_max_distance = False
self.use_random_around_person_ = False
self.max_mod_person_ = 7
self.wait_observation_ = 0
# being use for observation visualization
self.center_pos_ = (0, 0)
self.colors_visualization = cv.cvtColor(cv.applyColorMap(np.arange(0, 255, dtype=np.uint8), cv.COLORMAP_WINTER), cv.COLOR_BGR2RGB).reshape(255,3).tolist()
self.color_index = 0
self.first_call_observation = True
self.test_simulation_ = False
        observation_dimension = 46
        if self.use_orientation_in_observation:
            observation_dimension += 1
        if self.small_window_size:
            observation_dimension -= 20
        self.observation_space = gym.spaces.Box(low=-1, high=1, shape=(observation_dimension,))
self.current_obsevation_image_ = np.zeros([2000,2000,3])
self.current_obsevation_image_.fill(255)
self.prev_action = (0, 0)
self.action_space = gym.spaces.Box(low=np.array([-1.0, -1.0]), high=np.array([1.0, 1.0]), dtype=np.float32)
self.min_distance = 1
self.max_distance = 2.5
if self.test_simulation_ or self.is_evaluation_:
self.max_numb_steps = 80
elif self.is_use_test_setting:
self.max_numb_steps = 100
else:
self.max_numb_steps = 80
self.reward_range = [-1, 1]
self.reachabilit_value = None
if self.use_reachability:
with open('data/reachability.pkl', 'rb') as f:
self.reachabilit_value = pickle.load(f)
def get_test_path_number(self):
rospy.loginfo("current path idx: {}".format(self.path_follower_current_setting_idx))
return self.path_follower_test_settings[self.path_follower_current_setting_idx][2]
def use_test_setting(self):
self.is_use_test_setting = True
def set_agent(self, agent_num):
try:
self.node = rospy.init_node('gym_gazeboros_{}'.format(agent_num))
except Exception as e:
rospy.logerr("probably already init in another node {}".format(e))
rospy.wait_for_service('/gazebo/set_model_state')
self.set_model_state_sp = rospy.ServiceProxy('/gazebo/set_model_state', SetModelState)
date_time = datetime.now().strftime("%m_%d_%Y_%H_%M_%S")
self.agent_num = agent_num
self.obstacle_pub_ = rospy.Publisher('/move_base_node_{}/TebLocalPlannerROS/obstacles'.format(self.agent_num), ObstacleArrayMsg, queue_size=1)
self.create_robots()
self.path = {}
self.paths = []
self.log_file = None
try:
with open('data/person_trajectories_rl.pkl', 'rb') as f:
paths = pickle.load(f)
for path in paths:
angle_person = path['start_person']['orientation']
for angle in [x for x in range(0, 360, 10)]:
for angle_robot_person in [x for x in range(0, 360, 90)]:
path_angle = path.copy()
angle_from_person = np.deg2rad(angle) + angle_person
angle_person_robot = np.deg2rad(angle_robot_person) + angle_person
path_angle['start_robot']['pos'] = (path_angle['start_person']['pos'][0] + math.cos(angle_from_person)*2, path_angle['start_person']['pos'][1] + math.sin(angle_from_person)*2)
path_angle['start_robot']['orientation'] = angle_person_robot
path_angle['name'] = path['name'] + " " + str(angle) +" " + str(angle_robot_person)
self.paths.append(path_angle)
self.path_idx = -1
self.path = self.paths[self.path_idx]
except Exception as e:
print("error happend in writing {}".format(e))
self.agent_num = agent_num
self.state_cb_prev_time = None
self.model_states_sub = rospy.Subscriber("/gazebo/model_states", ModelStates, self.model_states_cb)
with self.lock:
self.init_simulator()
def model_states_cb(self, states_msg):
for model_idx in range(len(states_msg.name)):
found = False
for robot in [self.robot, self.person]:
if states_msg.name[model_idx] == robot.name:
found = True
break
if not found:
continue
pos = states_msg.pose[model_idx]
euler = quat2euler(pos.orientation.x, pos.orientation.y, pos.orientation.z, pos.orientation.w)
orientation = euler[0]
fall_angle = np.deg2rad(90)
if abs(abs(euler[1]) - fall_angle)< 0.1 or abs(abs(euler[2]) - fall_angle)<0.1:
self.fallen = True
# get velocity
twist = states_msg.twist[model_idx]
linear_vel = twist.linear.x
angular_vel = twist.angular.z
pos_x = pos.position.x
pos_y = pos.position.y
state = {}
state["velocity"] = (linear_vel, angular_vel)
state["position"] = (pos_x, pos_y)
state["orientation"] = orientation
robot.set_state(state)
if self.use_movebase and robot.name == self.person.name:
obstacle_msg_array = ObstacleArrayMsg()
obstacle_msg_array.header.stamp = rospy.Time.now()
obstacle_msg_array.header.frame_id = "tb3_{}/odom".format(self.agent_num)
obstacle_msg = ObstacleMsg()
obstacle_msg.header = obstacle_msg_array.header
obstacle_msg.id = 0
for x in range (5):
for y in range (5):
point = Point32()
point.x = pos.position.x + (x-2)*0.1
point.y = pos.position.y + (y-2)*0.1
point.z = pos.position.z
obstacle_msg.polygon.points.append(point)
obstacle_msg.orientation.x = pos.orientation.x
obstacle_msg.orientation.y = pos.orientation.y
obstacle_msg.orientation.z = pos.orientation.z
obstacle_msg.orientation.w = pos.orientation.w
obstacle_msg.velocities.twist.linear.x = twist.linear.x
                obstacle_msg.velocities.twist.angular.z = twist.angular.z
obstacle_msg_array.obstacles.append(obstacle_msg)
self.obstacle_pub_.publish(obstacle_msg_array)
def create_robots(self):
self.person = Robot('person_{}'.format(self.agent_num),
max_angular_speed=1, max_linear_speed=.6, agent_num=self.agent_num, window_size=self.window_size, is_testing=self.is_testing)
relative = self.person
if self.use_goal:
relative = self.person
self.robot = Robot('tb3_{}'.format(self.agent_num),
max_angular_speed=1.8, max_linear_speed=0.8, relative=relative, agent_num=self.agent_num, use_goal=self.use_goal, use_movebase=self.use_movebase ,use_jackal=self.use_jackal, window_size=self.window_size, is_testing=self.is_testing)
def find_random_point_in_circle(self, radious, min_distance, around_point):
max_r = 2
r = (radious - min_distance) * math.sqrt(random.random()) + min_distance
theta = random.random() * 2 * math.pi
x = around_point[0] + r * math.cos(theta)
y = around_point[1] + r * math.sin(theta)
return (x, y)
def set_mode_person_based_on_episode_number(self, episode_number):
if episode_number < 500:
self.mode_person = 0
elif episode_number < 510:
self.mode_person = 1
elif episode_number < 700:
self.mode_person = 3
elif episode_number < 900:
self.mode_person = 5
elif episode_number < 1000:
self.mode_person = 6
else:
#self.mode_person = 7
if random.random()>0.5:
self.mode_person = 7
else:
self.mode_person = random.randint(0, 6)
def get_init_pos_robot_person(self):
if self.is_evaluation_:
idx_start = 0
elif self.is_use_test_setting:
idx_start = self.path_follower_test_settings[self.path_follower_current_setting_idx][1]
else:
idx_start = random.randint(0, len(self.path["points"]) - 20)
self.current_path_idx = idx_start
if not self.is_use_test_setting and self.use_reverse and random.random() > 0.5:
self.path["points"].reverse()
if self.is_evaluation_:
init_pos_person = self.path["start_person"]
init_pos_robot = self.path["start_robot"]
elif self.is_use_test_setting and not self.path_follower_test_settings[self.path_follower_current_setting_idx][3]:
init_pos_person = {"pos": (0, 0), "orientation":0}
mode = self.path_follower_test_settings[self.path_follower_current_setting_idx][1]
if mode == 0:
orinetation_person_rob = 0
elif mode == 1:
orinetation_person_rob = -math.pi /4.
elif mode == 2:
orinetation_person_rob = math.pi /4.
elif mode == 3:
orinetation_person_rob = -math.pi
else:
orinetation_person_rob = math.pi/8*7
pos_robot = (1.5*math.cos(orinetation_person_rob), 1.5*math.sin(orinetation_person_rob))
init_pos_robot = {"pos": pos_robot, "orientation":0}
elif not self.use_path:
init_pos_person = {"pos": (0, 0), "orientation": random.random()*2*math.pi - math.pi}
ahead_person = (init_pos_person['pos'][0] + math.cos(init_pos_person["orientation"]) * 2, init_pos_person['pos'][1] + math.sin(init_pos_person["orientation"]) * 2)
random_pos_robot = self.find_random_point_in_circle(1.5, 2.5, init_pos_person["pos"])
init_pos_robot = {"pos": random_pos_robot,\
"orientation": init_pos_person["orientation"]}#random.random()*2*math.pi - math.pi}#self.calculate_angle_using_path(idx_start)}
elif self.use_random_around_person_:
init_pos_person = {"pos": self.path["points"][idx_start], "orientation": self.calculate_angle_using_path(idx_start)}
init_pos_robot = {"pos": self.find_random_point_in_circle(1.5, 1, self.path["points"][idx_start]),\
"orientation": random.random()*2*math.pi - math.pi}#self.calculate_angle_using_path(idx_start)}
else:
init_pos_person = {"pos": self.path["points"][idx_start], "orientation": self.calculate_angle_using_path(idx_start)}
if self.is_use_test_setting and len(self.path_follower_test_settings[self.path_follower_current_setting_idx])>4 and self.path_follower_test_settings[self.path_follower_current_setting_idx][4] :
orinetation_person_rob = math.pi/2.2
pos_robot = (self.path["points"][idx_start][0] + 2*math.cos(orinetation_person_rob+init_pos_person["orientation"]), self.path["points"][idx_start][1] + 2*math.sin(orinetation_person_rob+init_pos_person["orientation"]))
init_pos_robot = {"pos": pos_robot, "orientation":self.calculate_angle_using_path(idx_start+5)}
else:
idx_robot = idx_start + 1
while (math.hypot(self.path["points"][idx_robot][1] - self.path["points"][idx_start][1],
self.path["points"][idx_robot][0] - self.path["points"][idx_start][0]) < 1.6):
idx_robot += 1
init_pos_robot = {"pos": self.path["points"][idx_robot],\
"orientation": self.calculate_angle_using_path(idx_robot)}
if not self.is_testing:
init_pos_robot["pos"] = (init_pos_robot["pos"][0]+ random.random()-0.5, \
init_pos_robot["pos"][1]+ random.random()-0.5)
init_pos_robot["orientation"] = GazeborosEnv.wrap_pi_to_pi(init_pos_robot["orientation"] + random.random()-0.5)
return init_pos_robot, init_pos_person
def set_pos(self, name, pose):
set_model_msg = ModelState()
set_model_msg.model_name = name
self.prev_action = (0,0)
quaternion_rotation = euler2quat(0, pose["orientation"], 0)
set_model_msg.pose.orientation.x = quaternion_rotation[3]
set_model_msg.pose.orientation.y = quaternion_rotation[1]
set_model_msg.pose.orientation.z = quaternion_rotation[2]
set_model_msg.pose.orientation.w = quaternion_rotation[0]
if self.use_jackal and "tb3" in name:
set_model_msg.pose.position.z = 2.6 * self.agent_num + 0.1635
else:
set_model_msg.pose.position.z = 2.6 * self.agent_num + 0.099
set_model_msg.pose.position.x = pose["pos"][0]
set_model_msg.pose.position.y = pose["pos"][1]
rospy.wait_for_service('/gazebo/set_model_state')
self.set_model_state_sp(set_model_msg)
def init_simulator(self):
self.number_of_steps = 0
rospy.loginfo("init simulation called")
self.is_pause = True
init_pos_robot, init_pos_person = self.get_init_pos_robot_person()
self.center_pos_ = init_pos_person["pos"]
self.color_index = 0
self.fallen = False
self.is_max_distance = False
self.first_call_observation = True
self.current_obsevation_image_.fill(255)
if self.use_movebase:
self.robot.movebase_cancel_goals()
rospy.sleep(0.5)
self.person.stop_robot()
self.robot.stop_robot()
# if self.use_movebase:
# self.prev_action = (0,0, 0)
# else:
self.prev_action = (0,0)
self.set_pos(self.robot.name, init_pos_robot)
self.set_pos(self.person.name, init_pos_person)
self.robot.update(init_pos_robot)
self.person.update(init_pos_person)
self.path_finished = False
self.position_thread = threading.Thread(target=self.path_follower, args=(self.current_path_idx, self.robot,))
self.position_thread.daemon = True
self.is_reseting = False
self.position_thread.start()
self.wait_observation_ = 0
self.is_reseting = False
self.robot.reset = False
self.person.reset = False
# self.resume_simulator()
rospy.loginfo("init simulation finished")
self.is_pause = False
def pause(self):
self.is_pause = True
self.person.pause()
self.robot.pause()
def resume_simulator(self):
rospy.loginfo("resume simulator")
self.is_pause = False
self.person.resume()
self.robot.resume()
rospy.loginfo("resumed simulator")
def calculate_angle_using_path(self, idx):
return math.atan2(self.path["points"][idx+1][1] - self.path["points"][idx][1], self.path["points"][idx+1][0] - self.path["points"][idx][0])
@staticmethod
def denormalize(value, max_val):
if type(value) == tuple or type(value) == list:
norm_val = [float(x) * max_val for x in value]
else:
norm_val = value * float(max_val)
return norm_val
@staticmethod
def normalize(value, max_val, zero_to_one=None):
if type(value) == tuple or type(value) == list:
norm_val = [x/float(max_val) for x in value]
else:
norm_val = value/float(max_val)
if zero_to_one is not None:
if type(value) == tuple or type(value) == list:
norm_val = [(x + 1)/2 for x in norm_val]
else:
norm_val = (norm_val + 1)/2.
return norm_val
@staticmethod
def get_global_position(pos_goal, center):
while not center.is_current_state_ready():
if center.reset:
rospy.logwarn("reseting so return none in rel pos rel: {} center".format(relative.is_current_state_ready(), center.is_current_state_ready()))
return (None, None)
time.sleep(0.01)
rospy.logwarn ("waiting for observation to be ready")
#relative_orientation = relative.state_['orientation']
center_pos = np.asarray(center.state_['position'])
center_orientation = center.state_['orientation']
#pos = [x * 5 for x in pos_goal]
relative_pos = np.asarray(pos_goal)
# transform the relative position into the center coordinate frame
rotation_matrix = np.asarray([[np.cos(center_orientation), np.sin(center_orientation)], [-np.sin(center_orientation), np.cos(center_orientation)]])
relative_pos = np.matmul(relative_pos, rotation_matrix)
global_pos = np.asarray(relative_pos + center_pos)
return global_pos
@staticmethod
def get_global_position_orientation(pos_goal, orientation_goal, center):
while not center.is_current_state_ready():
if center.reset:
rospy.logwarn("reseting so return none in rel pos rel: {} center".format(relative.is_current_state_ready(), center.is_current_state_ready()))
return (None, None)
time.sleep(0.01)
rospy.logwarn ("waiting for observation to be ready")
#relative_orientation = relative.state_['orientation']
center_pos = np.asarray(center.state_['position'])
center_orientation = center.state_['orientation']
#pos = [x * 5 for x in pos_goal]
relative_pos = np.asarray(pos_goal)
relative_pos2 = np.asarray((relative_pos[0] +math.cos(orientation_goal) , relative_pos[1] + math.sin(orientation_goal)))
# transform the relative position into the center coordinate frame
rotation_matrix = np.asarray([[np.cos(center_orientation), np.sin(center_orientation)], [-np.sin(center_orientation), np.cos(center_orientation)]])
relative_pos = np.matmul(relative_pos, rotation_matrix)
relative_pos2 = np.matmul(relative_pos2, rotation_matrix)
global_pos = np.asarray(relative_pos + center_pos)
global_pos2 = np.asarray(relative_pos2 + center_pos)
new_orientation = np.arctan2(global_pos2[1]-global_pos[1], global_pos2[0]-global_pos[0])
return global_pos, new_orientation
@staticmethod
def wrap_pi_to_pi(angle):
while angle > math.pi:
angle -= 2*math.pi
while angle < - math.pi:
angle += 2*math.pi
return angle
@staticmethod
def get_relative_heading_position(relative, center):
while not relative.is_current_state_ready() or not center.is_current_state_ready():
if relative.reset:
rospy.logwarn("reseting so return none in rel pos rel: {} center".format(relative.is_current_state_ready(), center.is_current_state_ready()))
return (None, None)
time.sleep(0.1)
rospy.loginfo ("waiting for observation to be ready heading pos")
relative_orientation = relative.state_['orientation']
center_pos = np.asarray(center.state_['position'])
center_orientation = center.state_['orientation']
# transform the relative position into the center coordinate frame
relative_pos = np.asarray(relative.state_['position'] - center_pos)
relative_pos2 = np.asarray((relative_pos[0] +math.cos(relative_orientation) , relative_pos[1] + math.sin(relative_orientation)))
rotation_matrix = np.asarray([[np.cos(-center_orientation), np.sin(-center_orientation)], [-np.sin(-center_orientation), np.cos(-center_orientation)]])
relative_pos = np.matmul(relative_pos, rotation_matrix)
relative_pos2 = np.matmul(relative_pos2, rotation_matrix)
angle_relative = np.arctan2(relative_pos2[1]-relative_pos[1], relative_pos2[0]-relative_pos[0])
return -angle_relative, relative_pos
@staticmethod
def get_relative_position(pos, center):
while not center.is_current_state_ready():
if center.reset:
rospy.loginfo("reseting so return none in rel pos rel: {} center".format(relative.is_current_state_ready(), center.is_current_state_ready()))
return (None, None)
time.sleep(0.01)
rospy.loginfo("waiting for observation to be ready relative pos")
#relative_orientation = relative.state_['orientation']
center_pos = np.asarray(center.state_['position'])
center_orientation = center.state_['orientation']
relative_pos = np.asarray(pos)
# transform the relative position into the center coordinate frame
relative_pos = np.asarray(relative_pos - center_pos)
rotation_matrix = np.asarray([[np.cos(-center_orientation), np.sin(-center_orientation)], [-np.sin(-center_orientation), np.cos(-center_orientation)]])
relative_pos = np.matmul(relative_pos, rotation_matrix)
return relative_pos
def set_robot_to_auto(self):
"""
Set self.robot_mode; other code checks this value:
0: the robot will not be moved
1: the robot will try to go to a point after the person
"""
self.robot_mode = 1
def path_follower(self, idx_start, robot):
counter = 0
while self.is_pause:
if self.is_reseting:
rospy.loginfo( "path follower return as reseting ")
return
time.sleep(0.001)
if counter > 10000:
rospy.loginfo( "path follower waiting for pause to be false")
counter = 0
counter += 1
rospy.loginfo( "path follower waiting for lock pause:{} reset:{}".format(self.is_pause, self.is_reseting))
if self.lock.acquire(timeout=10):
rospy.sleep(1.5)
rospy.loginfo("path follower got the lock")
if self.is_use_test_setting:
mode_person = self.path_follower_test_settings[self.path_follower_current_setting_idx][0]
elif self.test_simulation_:
mode_person = -1
elif self.is_evaluation_:
mode_person = 2
elif self.use_predifined_mode_person:
mode_person = self.mode_person
else:
mode_person = random.randint(0, 7)
#if self.agent_num == 2:
# mode_person = random.randint(1, self.max_mod_person_)
#else:
# mode_person = 0
# if self.agent_num == 0:
# mode_person = 5
# elif self.agent_num == 1:
# mode_person = 2
# elif self.agent_num == 2:
# mode_person = 3
# elif self.agent_num == 3:
# mode_person = 7
# else:
# mode_person = random.randint(1, self.max_mod_person_)
# if mode_person == 0:
# person_thread = threading.Thread(target=self.person.go_to_goal, args=())
# person_thread.start()
if self.use_goal and not self.use_movebase:
self.robot_thread = threading.Thread(target=self.robot.go_to_goal, args=())
self.robot_thread.start()
for idx in range (idx_start, len(self.path["points"])-3):
point = (self.path["points"][idx][0], self.path["points"][idx][1])
self.current_path_idx = idx
counter_pause = 0
while self.is_pause:
counter_pause+=1
rospy.loginfo("pause in path follower")
if self.is_reseting or counter_pause > 200:
# if mode_person == 0:
# person_thread.join()
self.lock.release()
return
time.sleep(0.001)
try:
if mode_person <= 6:
self.person.use_selected_person_mod(mode_person)
else:
self.person.go_to_pos(point, stop_after_getting=True)
time.sleep(0.001)
# person_thread.start()
# if self.robot_mode == 1:
# noisy_point = (self.path["points"][idx+3][0] +min(max(np.random.normal(),-0.5),0.5), self.path["points"][idx+3][1] +min(max(np.random.normal(),-0.5),0.5))
# robot_thread = threading.Thread(target=self.robot.go_to_pos, args=(noisy_point,True,))
# robot_thread.start()
# robot_thread.join()
# person_thread.join()
except Exception as e:
rospy.logerr("path follower {}, {}".format(self.is_reseting, e))
traceback.print_exc()
break
if self.is_reseting:
self.person.stop_robot()
break
self.lock.release()
rospy.loginfo("path follower release the lock")
self.path_finished = True
else:
rospy.loginfo("problem in getting the log in path follower")
# robot.stop_robot()
def get_laser_scan(self):
return self.robot.get_laser_image()
def get_laser_scan_all(self):
images = self.robot.scan_image_history.get_elemets()
counter = 0
while len(images)!=self.robot.scan_image_history.window_size and counter<250:
images = self.robot.scan_image_history.get_elemets()
time.sleep(0.005)
counter +=1
if counter > 100:
rospy.loginfo("waiting for laser scan history to fill: attempt {}/250".format(counter))
if counter>=250:
raise RuntimeError(
'timed out waiting for laser scan history to fill in get_laser_scan_all')
images = np.asarray(images)
return (images.reshape((images.shape[1], images.shape[2], images.shape[0])))
def get_observation(self):
# got_laser = False
# while not got_laser:
# try:
# laser_all = self.get_laser_scan_all()
# got_laser = True
# except Exception as e:
# rospy.logerr("laser_error reseting")
# # self.reset(reset_gazebo = True)
while self.robot.pos_history.avg_frame_rate is None or self.person.pos_history.avg_frame_rate is None or self.robot.velocity_history.avg_frame_rate is None or self.person.velocity_history.avg_frame_rate is None:
if self.is_reseting:
return None
time.sleep(0.001)
pos_his_robot = np.asarray(self.robot.pos_history.get_elemets())
heading_robot = self.robot.state_["orientation"]
pos_his_person = np.asarray(self.person.pos_history.get_elemets())
heading_person = self.person.state_["orientation"]
robot_vel = np.asarray(self.robot.get_velocity())
person_vel = np.asarray(self.person.get_velocity())
poses = np.concatenate((pos_his_robot, pos_his_person))
if self.use_noise:
poses += np.random.normal(loc=0, scale=0.1, size=poses.shape)
heading_robot += np.random.normal(loc=0, scale=0.2)
heading_person += np.random.normal(loc=0, scale=0.2)
robot_vel += np.random.normal(loc=0, scale=0.1, size=robot_vel.shape)
person_vel += np.random.normal(loc=0, scale=0.1, size=person_vel.shape)
heading_relative = GazeborosEnv.wrap_pi_to_pi(heading_robot-heading_person)/(math.pi)
pos_rel = []
for pos in (poses):
relative = GazeborosEnv.get_relative_position(pos, self.robot.relative)
pos_rel.append(relative)
pos_history = np.asarray(np.asarray(pos_rel)).flatten()/6.0
#TODO: make the velocity normalization better
velocities = np.concatenate((person_vel, robot_vel))/self.robot.max_angular_vel
if self.use_orientation_in_observation:
velocities_heading = np.append(velocities, heading_relative)
else:
velocities_heading = velocities
final_ob = np.append(np.append(pos_history, velocities_heading), self.prev_action)
return final_ob
def __del__(self):
# todo
return
def visualize_observation(self):
observation_image = np.zeros([2000,2000,3])
observation_image_gt = np.zeros([2000,2000,3])
observation_image = observation_image.astype(np.uint8)
observation_image_gt = observation_image_gt.astype(np.uint8)
observation_image.fill(255)
observation_image_gt.fill(255)
while self.robot.pos_history.avg_frame_rate is None or self.person.pos_history.avg_frame_rate is None or self.robot.velocity_history.avg_frame_rate is None or self.person.velocity_history.avg_frame_rate is None:
if self.is_reseting:
return None
time.sleep(0.001)
pos_his_robot = self.robot.pos_history.get_elemets()
heading_robot = self.robot.state_["orientation"]
pos_his_person = self.person.pos_history.get_elemets()
heading_person = self.person.state_["orientation"]
heading_relative = GazeborosEnv.wrap_pi_to_pi(heading_robot-heading_person)/(math.pi)
center_pos = pos_his_robot[-1]
for pos in pos_his_robot:
relative = GazeborosEnv.get_relative_position(pos, self.robot)
pos_rel = GazeborosEnv.to_image_coordinate(relative, (0, 0))
pos_gt = GazeborosEnv.to_image_coordinate(pos, center_pos)
observation_image = self.add_circle_observation_to_image(relative, (255, 0, 0), 10, center_pos=(0,0), image=observation_image)
observation_image_gt = self.add_circle_observation_to_image(pos, (255, 0, 0), 10, center_pos=center_pos, image=observation_image_gt)
for pos in pos_his_person:
relative = GazeborosEnv.get_relative_position(pos, self.robot)
pos_rel = GazeborosEnv.to_image_coordinate(relative, (0, 0))
pos_gt = GazeborosEnv.to_image_coordinate(pos, center_pos)
observation_image = self.add_circle_observation_to_image(relative, (0, 255, 0), 10, image = observation_image, center_pos=(0,0))
observation_image_gt = self.add_circle_observation_to_image(pos, (0, 255, 0), 10, image=observation_image_gt, center_pos=center_pos)
self.image_pub.publish(self.bridge.cv2_to_imgmsg(observation_image, encoding="bgr8"))
self.image_pub_gt.publish(self.bridge.cv2_to_imgmsg(observation_image_gt, encoding="bgr8"))
@staticmethod
def to_image_coordinate(pos, center_pos):
return (int((pos[0] - center_pos[0])*50+1000), int((pos[1] - center_pos[1])*50+1000))
def add_line_observation_to_image(self, pos, pos2):
color = self.colors_visualization[self.color_index]
pos_image = GazeborosEnv.to_image_coordinate(pos, self.center_pos_)
pos_image2 = GazeborosEnv.to_image_coordinate(pos2, self.center_pos_)
if pos_image[0] >2000 or pos_image[0] < 0 or pos_image[1] >2000 or pos_image[1] < 0:
rospy.logerr("problem with observation: {}".format(pos_image))
return
self.new_obsevation_image_ = cv.line(self.new_obsevation_image_, (pos_image[0], pos_image[1]), (pos_image2[0], pos_image2[1]), color, 1)
def add_triangle_observation_to_image(self, pos, orientation):
color = self.colors_visualization[self.color_index]
pos_image = GazeborosEnv.to_image_coordinate(pos, self.center_pos_)
pos_triangle1 = GazeborosEnv.to_image_coordinate((pos[0]+math.cos(orientation)*0.3, pos[1]+math.sin(orientation)*0.3), self.center_pos_)
pos_triangle2 = GazeborosEnv.to_image_coordinate((pos[0]+math.cos(orientation+math.pi/2)*0.1, pos[1]+math.sin(orientation+math.pi/2)*0.1), self.center_pos_)
pos_triangle3 = GazeborosEnv.to_image_coordinate((pos[0]+math.cos(orientation-math.pi/2)*0.1, pos[1]+math.sin(orientation-math.pi/2)*0.1), self.center_pos_)
poses = [pos_triangle1, pos_triangle2, pos_triangle3]
print(poses)
for pos in poses:
if pos[0] >2000 or pos[0] < 0 or pos[1] >2000 or pos[1] < 0:
rospy.logerr("problem with observation: {}".format(pos))
return
self.new_obsevation_image_ = cv.drawContours(self.new_obsevation_image_, [np.asarray(poses)], 0, color, -1)
def add_arrow_observation_to_image(self, pos, orientation):
color = self.colors_visualization[self.color_index]
pos_image = GazeborosEnv.to_image_coordinate(pos, self.center_pos_)
pos_image2 = GazeborosEnv.to_image_coordinate((pos[0]+math.cos(orientation)*0.3, pos[1]+math.sin(orientation)*0.3), self.center_pos_)
if pos_image[0] >2000 or pos_image[0] < 0 or pos_image[1] >2000 or pos_image[1] < 0:
rospy.logerr("problem with observation: {}".format(pos_image))
return
self.new_obsevation_image_ = cv.arrowedLine(self.new_obsevation_image_, (pos_image[0], pos_image[1]), (pos_image2[0], pos_image2[1]), color, 2, tipLength=0.5)
def add_circle_observation_to_image(self, pos, color, radious, center_pos=None, image=None):
if image is None:
image = self.new_obsevation_image_
if center_pos is None:
center_pos = self.center_pos_
pos_image = GazeborosEnv.to_image_coordinate(pos, center_pos)
if pos_image[0] >2000 or pos_image[0] < 0 or pos_image[1] >2000 or pos_image[1] < 0:
rospy.logerr("problem with observation: {}".format(pos_image))
return
return (cv.circle(image , (pos_image[0], pos_image[1]), radious, color, 2))
def get_supervised_action(self):
while not self.person.is_current_state_ready() and not self.is_reseting:
time.sleep(0.1)
if self.is_reseting:
return np.asarray([0,0])
self.use_supervise_action = True
pos = self.person.calculate_ahead(1.5)
pos_person = self.person.get_pos()
pos_relative = GazeborosEnv.get_relative_position(pos, self.robot.relative)
pos_person_relative = GazeborosEnv.get_relative_position(pos_person, self.robot.relative)
print (f"pos pos_person pos_relative [pos], [pos_person] [pos_relative]")
pos_norm = GazeborosEnv.normalize(pos_relative, self.robot.max_rel_pos_range)
orientation = GazeborosEnv.normalize(math.atan2(pos_relative[1] - pos_person_relative[1], pos_relative[0] - pos_person_relative[0]), math.pi)
return np.asarray((pos_norm[0], pos_norm[1], orientation))
def update_observation_image(self):
self.new_obsevation_image_ = np.copy(self.current_obsevation_image_)
robot_pos = self.robot.get_pos()
robot_orientation = self.robot.get_orientation()
person_pos = self.person.get_pos()
person_orientation = self.person.get_orientation()
if self.use_goal:
current_goal = self.robot.get_goal()
if person_orientation is None or robot_orientation is None:
rospy.logerr("person or robot orientation is None")
return
if self.first_call_observation:
# self.new_obsevation_image_ = self.add_circle_observation_to_image(robot_pos, [152,100,100], 10)
# self.new_obsevation_image_ = self.add_circle_observation_to_image(person_pos,[0,100,100], 10)
self.first_call_observation = False
if self.is_collided():
self.new_obsevation_image_ = self.add_circle_observation_to_image(robot_pos, [152,200,200], 10)
self.new_obsevation_image_ = self.add_circle_observation_to_image(person_pos,[200,100,100], 10)
self.add_arrow_observation_to_image(robot_pos, robot_orientation)
self.add_triangle_observation_to_image(person_pos, person_orientation)
if self.use_goal:
if self.use_movebase:
goal_orientation = current_goal["orientation"]
else:
goal_orientation = robot_orientation
self.add_circle_observation_to_image(current_goal["pos"], self.colors_visualization[self.color_index], 5)
#self.add_line_observation_to_image(robot_pos, current_goal["pos"])
else:
self.add_line_observation_to_image(robot_pos, person_pos)
alpha = 0.50
self.current_obsevation_image_ = cv.addWeighted(self.new_obsevation_image_, alpha, self.current_obsevation_image_, 1 - alpha, 0)
def get_current_observation_image(self):
image = self.current_obsevation_image_
image = image/255.
if self.is_testing:
self.save_current_path()
return image
def take_action(self, action):
self.prev_action = action[:2]
self.robot.take_action(action)
if self.wait_observation_ <= 0:
self.update_observation_image()
self.wait_observation_ = 7
self.color_index += 2
if self.color_index >= len(self.colors_visualization):
self.color_index = len(self.colors_visualization) - 1
self.wait_observation_ -= 1
return
def is_skip_run(self):
if self.fallen:
return True
else:
return False
def is_successful(self):
if self.is_collided() or self.is_max_distance or self.fallen:
return False
else:
return True
def step(self, action):
self.number_of_steps += 1
self.take_action(action)
# instead of one reward get all the reward during wait
# rospy.sleep(0.4)
sleep_time = 0.10
rewards = []
if sleep_time > 0.1:
for t in range (10):
rospy.sleep(sleep_time/10.)
rewards.append(self.get_reward())
reward = np.mean(rewards)
else:
rospy.sleep(sleep_time)
reward = self.get_reward()
ob = self.get_observation()
episode_over = False
rel_person = GazeborosEnv.get_relative_heading_position(self.robot, self.person)[1]
distance = math.hypot(rel_person[0], rel_person[1])
if self.path_finished:
rospy.loginfo("path finished")
episode_over = True
if self.is_collided():
self.update_observation_image()
episode_over = True
rospy.loginfo('collision happened episode over')
reward -= 0.5
elif distance > 5:
self.update_observation_image()
self.is_max_distance = True
episode_over = True
rospy.loginfo('max distance happened episode over')
elif self.number_of_steps > self.max_numb_steps:
self.update_observation_image()
episode_over = True
if self.fallen:
episode_over = True
rospy.loginfo('fallen')
reward = min(max(reward, -1), 1)
if self.agent_num == 0:
rospy.loginfo("action {} reward {}".format(action, reward))
if episode_over:
self.person.reset = True
#reward += 1
return ob, reward, episode_over, {}
def is_collided(self):
rel_person = GazeborosEnv.get_relative_heading_position(self.robot, self.person)[1]
distance = math.hypot(rel_person[0], rel_person[1])
if distance < self.collision_distance or self.robot.is_collided:
return True
return False
def get_distance(self):
_, pos_rel = GazeborosEnv.get_relative_heading_position(self.robot, self.person)
return math.hypot(pos_rel[0],pos_rel[1])
def get_angle_person_robot(self):
_, pos_rel = GazeborosEnv.get_relative_heading_position(self.robot, self.person)
angle_robot_person = math.atan2(pos_rel[1], pos_rel[0])
return (GazeborosEnv.wrap_pi_to_pi(angle_robot_person))
def get_reward(self):
reward = 0
angle_robot_person, pos_rel = GazeborosEnv.get_relative_heading_position(self.robot, self.person)
angle_robot_person = math.atan2(pos_rel[1], pos_rel[0])
angle_robot_person = np.rad2deg(GazeborosEnv.wrap_pi_to_pi(angle_robot_person))
distance = math.hypot(pos_rel[0], pos_rel[1])
# Negative reward for being behind the person
if self.is_collided():
reward -= 1
if distance < 0.5:
reward = -1.3
elif abs(distance - self.best_distance) < 0.5:
reward += 0.5 * (0.5 - abs(distance - self.best_distance))
elif distance >= self.best_distance+0.5:
reward -= 0.25 * (distance - (self.best_distance+0.5))
elif distance < self.best_distance-0.5:
reward -= (self.best_distance - 0.5 - distance)/(self.best_distance - 0.5)
if abs(angle_robot_person) < 25:
reward += 0.5 * (25 - abs(angle_robot_person)) / 25
else:
reward -= 0.25 * abs(angle_robot_person) / 180
if abs(distance - self.best_distance) < 0.5 and abs(angle_robot_person) < 25:
reward += 0.25
# if not 90 > angle_robot_person > 0:
# reward -= distance/6.0
# elif self.min_distance < distance < self.max_distance:
# reward += 0.1 + (90 - angle_robot_person) * 0.9 / 90
# elif distance < self.min_distance:
# reward -= 1 - distance / self.min_distance
# else:
# reward -= distance / 7.0
reward = min(max(reward, -1), 1)
# ToDO check for obstacle
return reward
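# Illustrative walk-through of the shaping above (not from the original code),
# assuming no collision and self.best_distance > 0.5: with distance equal to
# best_distance and angle_robot_person == 0 deg, the distance term adds
# 0.5 * 0.5 = 0.25, the angle term adds 0.5 * 25 / 25 = 0.5, and the combined
# bonus adds 0.25, giving a reward of 1.0 (already within the [-1, 1] clamp).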
def save_log(self):
pickle.dump({"person_history":self.person.log_history, "robot_history":self.robot.log_history}, self.log_file)
self.log_file.close()
def reset(self, reset_gazebo=False):
self.is_pause = True
self.is_reseting = True
self.robot.reset = True
self.person.reset = True
rospy.loginfo("trying to get the lock for reset")
# if reset_gazebo:
# self.reset_gazebo()
with self.lock:
rospy.loginfo("got the lock")
not_init = True
try:
if self.is_evaluation_:
if self.log_file is not None:
pickle.dump({"person_history":self.person.log_history, "robot_history":self.robot.log_history}, self.log_file)
self.log_file.close()
self.path_idx += 1
print ("start path_id: {}".format(self.path_idx))
if self.path_idx < len(self.paths)-1:
self.path = self.paths[self.path_idx]
self.log_file = open(self.path["name"], "wb")
else:
print ("all done")
self.person.stop_robot()
exit(0)
self.init_simulator()
not_init = False
except RuntimeError as e:
rospy.logerr("error happend reseting: {}".format(e))
if not_init:
rospy.loginfo("not init so run reset again")
return (self.reset())
else:
rospy.sleep(2)
return self.get_observation()
def save_current_path(self):
all_pos_robot = self.robot.all_pose_
all_pos_person = self.person.all_pose_
directory = "data/traj_simulations"
name = ""
if self.use_goal:
if self.use_supervise_action:
name += "base_"
else:
name += "planner_"
else:
name += "cmd_"
name += self.path_follower_test_settings[self.path_follower_current_setting_idx][2]
if not os.path.exists(directory):
os.makedirs(directory)
with open(os.path.join(directory, name + ".pkl") , "wb") as f:
pickle.dump({"robot":all_pos_robot, "person":all_pos_person, "name":name}, f)
self.robot.all_pose_ = []
self.person.all_pose_ = []
def next_setting(self):
self.path_follower_current_setting_idx += 1
def is_finish(self):
if self.path_follower_current_setting_idx >= len(self.path_follower_test_settings)-1:
return True
return False
def render(self, mode='human', close=False):
""" Viewer only supports human mode currently. """
return
def calculate_rechability_derivite(self, x, y, v, theta):
get_idx = lambda x: int(math.floor(x))
pos_norm = GazeborosEnv.normalize((x, y), self.robot.max_rel_pos_range, True)
orientation_norm = GazeborosEnv.normalize(theta, math.pi, True)
velocity_norm = GazeborosEnv.normalize(v, self.robot.max_linear_vel, True)
x_idx = get_idx(pos_norm[0]*(self.reachabilit_value.shape[0]-1))
y_idx = get_idx(pos_norm[1]*(self.reachabilit_value.shape[1]-1))
orientation_idx = get_idx(orientation_norm * (self.reachabilit_value.shape[3] -1))
v_idx = get_idx(velocity_norm * (self.reachabilit_value.shape[2]-1))
rospy.loginfo("x: {} y: {} theta {}".format(x_idx, y_idx, orientation_idx))
v_idx = max(min(v_idx, self.reachabilit_value.shape[2]-2), 0)
orientation_idx = max(min(orientation_idx, self.reachabilit_value.shape[3]-2), 0)
x_idx = max(min(x_idx, self.reachabilit_value.shape[0]-1), 0)
y_idx = max(min(y_idx, self.reachabilit_value.shape[1]-1), 0)
derivative_v = (self.reachabilit_value[x_idx, y_idx, v_idx+1, orientation_idx] -\
self.reachabilit_value[x_idx, y_idx, v_idx, orientation_idx])/2
derivative_theta = (self.reachabilit_value[x_idx, y_idx, v_idx, orientation_idx+1] -\
self.reachabilit_value[x_idx, y_idx, v_idx, orientation_idx])/2
rospy.loginfo("x: {} y: {} theta {}".format(x_idx, y_idx, orientation_idx))
return derivative_v, derivative_theta, self.reachabilit_value[x_idx, y_idx, v_idx, orientation_idx]
def reachability_action(self):
relative = GazeborosEnv.get_relative_position(self.robot.get_pos(), self.person)
orientation = GazeborosEnv.wrap_pi_to_pi(self.robot.get_orientation() - self.person.get_orientation())
print (np.rad2deg(orientation), np.rad2deg(self.person.get_orientation()), np.rad2deg(self.robot.get_orientation()) )
velocity = self.robot.get_velocity()[0]
derivative_v, derivative_theta, v = self.calculate_rechability_derivite(relative[0], relative[1], velocity, orientation)
rospy.loginfo("d_v: {:0.5f} W: {:0.5f} v {:0.1f}".format(derivative_v, derivative_theta, v))
action = [0,0]
if v<1:
if derivative_v > 0:
action[0] = 1
else:
action[0] = -1
if derivative_theta > 0:
action[1] = 1
else:
action[1] = -1
return action
#def read_bag():
# gazeboros_n = GazeborosEnv()
# gazeboros_n.set_agent(0)
#
# while gazeboros_n.robot.prev_call_gazeboros_ is None or rospy.Time.now().to_sec() - gazeboros_n.robot.prev_call_gazeboros_ < 5:
# rospy.sleep(0.1)
# gazeboros_n.save_log()
# print("done")
#read_bag()
def test():
gazeboros_env = GazeborosEnv()
gazeboros_env.set_agent(0)
step = 0
while (True):
step +=1
#action = gazeboros_env.get_supervised_action()
#action = gazeboros_env.reachability_action()
#gazeboros_env.step(action)
rel_person = GazeborosEnv.get_relative_heading_position(gazeboros_env.robot, gazeboros_env.person)[1]
relative_pos2 = GazeborosEnv.get_relative_position(gazeboros_env.robot.get_pos(), gazeboros_env.robot.relative)
orientation1 = np.rad2deg(np.arctan2(rel_person[1], rel_person[0]))
distance = math.hypot(relative_pos2[0], relative_pos2[1])
heading_robot = gazeboros_env.robot.state_["orientation"]
heading_person = gazeboros_env.person.state_["orientation"]
heading_relative = GazeborosEnv.wrap_pi_to_pi(heading_robot-heading_person)
orientation_heading = np.rad2deg(heading_relative)
#print (f"ob: {gazeboros_env.get_observation()}")
print (f"reward: {gazeboros_env.get_reward()}")
print (f"pos: {rel_person} vs {relative_pos2}")
print (f"orientation_h: {orientation_heading} dist: {distance} orin: {orientation1}")
print (f"orientation_robo: {np.rad2deg(heading_robot)} orintation pers: {np.rad2deg(heading_person)}")
print ("\n\n")
#if step % 50==0:
# print("reseting")
# gazeboros_env.reset()
#gazeboros_env.visualize_observation()
rospy.sleep(1)
#test()
|
TransformServer.py
|
#!/usr/bin/env python
from wsgiref.simple_server import make_server
import sys
import json
import traceback
from multiprocessing import Process
from getopt import getopt, GetoptError
from jsonrpcbase import JSONRPCService, InvalidParamsError, KeywordError,\
JSONRPCError, ServerError, InvalidRequestError
from os import environ
from ConfigParser import ConfigParser
from biokbase import log
import biokbase.nexus
DEPLOY = 'KB_DEPLOYMENT_CONFIG'
SERVICE = 'KB_SERVICE_NAME'
# Note that the error fields do not match the 2.0 JSONRPC spec
def get_config_file():
return environ.get(DEPLOY, None)
def get_service_name():
return environ.get(SERVICE, None)
def get_config():
if not get_config_file() or not get_service_name():
return None
retconfig = {}
config = ConfigParser()
config.read(get_config_file())
for nameval in config.items(get_service_name()):
retconfig[nameval[0]] = nameval[1]
return retconfig
config = get_config()
from TransformImpl import Transform
impl_Transform = Transform(config)
class JSONObjectEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, set):
return list(obj)
if isinstance(obj, frozenset):
return list(obj)
if hasattr(obj, 'toJSONable'):
return obj.toJSONable()
return json.JSONEncoder.default(self, obj)
class JSONRPCServiceCustom(JSONRPCService):
def call(self, ctx, jsondata):
"""
Calls jsonrpc service's method and returns its return value in a JSON
string or None if there is none.
Arguments:
jsondata -- remote method call in jsonrpc format
"""
result = self.call_py(ctx, jsondata)
if result is not None:
return json.dumps(result, cls=JSONObjectEncoder)
return None
def _call_method(self, ctx, request):
"""Calls given method with given params and returns it value."""
method = self.method_data[request['method']]['method']
params = request['params']
result = None
try:
if isinstance(params, list):
# Does it have enough arguments?
if len(params) < self._man_args(method) - 1:
raise InvalidParamsError('not enough arguments')
# Does it have too many arguments?
if(not self._vargs(method) and len(params) >
self._max_args(method) - 1):
raise InvalidParamsError('too many arguments')
result = method(ctx, *params)
elif isinstance(params, dict):
# Do not accept keyword arguments if the jsonrpc version is
# not >=1.1.
if request['jsonrpc'] < 11:
raise KeywordError
result = method(ctx, **params)
else: # No params
result = method(ctx)
except JSONRPCError:
raise
except Exception as e:
# log.exception('method %s threw an exception' % request['method'])
# Exception was raised inside the method.
newerr = ServerError()
newerr.trace = traceback.format_exc()
newerr.data = e.message
raise newerr
return result
def call_py(self, ctx, jsondata):
"""
Calls jsonrpc service's method and returns its return value in python
object format or None if there is none.
This method is same as call() except the return value is a python
object instead of JSON string. This method is mainly only useful for
debugging purposes.
"""
rdata = jsondata
# we already deserialize the json string earlier in the server code, no
# need to do it again
# try:
# rdata = json.loads(jsondata)
# except ValueError:
# raise ParseError
# set some default values for error handling
request = self._get_default_vals()
if isinstance(rdata, dict) and rdata:
# It's a single request.
self._fill_request(request, rdata)
respond = self._handle_request(ctx, request)
# Don't respond to notifications
if respond is None:
return None
return respond
elif isinstance(rdata, list) and rdata:
# It's a batch.
requests = []
responds = []
for rdata_ in rdata:
# set some default values for error handling
request_ = self._get_default_vals()
self._fill_request(request_, rdata_)
requests.append(request_)
for request_ in requests:
respond = self._handle_request(ctx, request_)
# Don't respond to notifications
if respond is not None:
responds.append(respond)
if responds:
return responds
# Nothing to respond.
return None
else:
# empty dict, list or wrong type
raise InvalidRequestError
def _handle_request(self, ctx, request):
"""Handles given request and returns its response."""
if self.method_data[request['method']].has_key('types'): # @IgnorePep8
self._validate_params_types(request['method'], request['params'])
result = self._call_method(ctx, request)
# Do not respond to notifications.
if request['id'] is None:
return None
respond = {}
self._fill_ver(request['jsonrpc'], respond)
respond['result'] = result
respond['id'] = request['id']
return respond
class MethodContext(dict):
def __init__(self, logger):
self['client_ip'] = None
self['user_id'] = None
self['authenticated'] = None
self['token'] = None
self['module'] = None
self['method'] = None
self['call_id'] = None
self._debug_levels = set([7, 8, 9, 'DEBUG', 'DEBUG2', 'DEBUG3'])
self._logger = logger
def log_err(self, message):
self._log(log.ERR, message)
def log_info(self, message):
self._log(log.INFO, message)
def log_debug(self, message, level=1):
if level in self._debug_levels:
pass
else:
level = int(level)
if level < 1 or level > 3:
raise ValueError("Illegal log level: " + str(level))
level = level + 6
self._log(level, message)
def set_log_level(self, level):
self._logger.set_log_level(level)
def get_log_level(self):
return self._logger.get_log_level()
def clear_log_level(self):
self._logger.clear_user_log_level()
def _log(self, level, message):
self._logger.log_message(level, message, self['client_ip'],
self['user_id'], self['module'],
self['method'], self['call_id'])
def getIPAddress(environ):
xFF = environ.get('HTTP_X_FORWARDED_FOR')
realIP = environ.get('HTTP_X_REAL_IP')
trustXHeaders = config is None or \
config.get('dont_trust_x_ip_headers') != 'true'
if (trustXHeaders):
if (xFF):
return xFF.split(',')[0].strip()
if (realIP):
return realIP.strip()
return environ.get('REMOTE_ADDR')
class Application(object):
# Wrap the wsgi handler in a class definition so that we can
# do some initialization and avoid regenerating stuff over
# and over
def logcallback(self):
self.serverlog.set_log_file(self.userlog.get_log_file())
def log(self, level, context, message):
self.serverlog.log_message(level, message, context['client_ip'],
context['user_id'], context['module'],
context['method'], context['call_id'])
def __init__(self):
submod = get_service_name() or 'Transform'
self.userlog = log.log(
submod, ip_address=True, authuser=True, module=True, method=True,
call_id=True, changecallback=self.logcallback,
config=get_config_file())
self.serverlog = log.log(
submod, ip_address=True, authuser=True, module=True, method=True,
call_id=True, logfile=self.userlog.get_log_file())
self.serverlog.set_log_level(6)
self.rpc_service = JSONRPCServiceCustom()
self.method_authentication = dict()
self.rpc_service.add(impl_Transform.import_data,
name='Transform.import_data',
types=[dict])
self.method_authentication['Transform.import_data'] = 'required'
self.rpc_service.add(impl_Transform.validate,
name='Transform.validate',
types=[dict])
self.method_authentication['Transform.validate'] = 'required'
self.rpc_service.add(impl_Transform.upload,
name='Transform.upload',
types=[dict])
self.method_authentication['Transform.upload'] = 'required'
self.rpc_service.add(impl_Transform.download,
name='Transform.download',
types=[dict])
self.method_authentication['Transform.download'] = 'required'
self.auth_client = biokbase.nexus.Client(
config={'server': 'nexus.api.globusonline.org',
'verify_ssl': True,
'client': None,
'client_secret': None})
def __call__(self, environ, start_response):
# Context object, equivalent to the perl impl CallContext
ctx = MethodContext(self.userlog)
ctx['client_ip'] = getIPAddress(environ)
status = '500 Internal Server Error'
try:
body_size = int(environ.get('CONTENT_LENGTH', 0))
except (ValueError):
body_size = 0
if environ['REQUEST_METHOD'] == 'OPTIONS':
# we basically do nothing and just return headers
status = '200 OK'
rpc_result = ""
else:
request_body = environ['wsgi.input'].read(body_size)
try:
req = json.loads(request_body)
except ValueError as ve:
err = {'error': {'code': -32700,
'name': "Parse error",
'message': str(ve),
}
}
rpc_result = self.process_error(err, ctx, {'version': '1.1'})
else:
ctx['module'], ctx['method'] = req['method'].split('.')
ctx['call_id'] = req['id']
try:
token = environ.get('HTTP_AUTHORIZATION')
# parse out the method being requested and check if it
# has an authentication requirement
auth_req = self.method_authentication.get(req['method'],
"none")
if auth_req != "none":
if token is None and auth_req == 'required':
err = ServerError()
err.data = "Authentication required for " + \
"Transform but no authentication header was passed"
raise err
elif token is None and auth_req == 'optional':
pass
else:
try:
user, _, _ = \
self.auth_client.validate_token(token)
ctx['user_id'] = user
ctx['authenticated'] = 1
ctx['token'] = token
except Exception, e:
if auth_req == 'required':
err = ServerError()
err.data = \
"Token validation failed: %s" % e
raise err
if (environ.get('HTTP_X_FORWARDED_FOR')):
self.log(log.INFO, ctx, 'X-Forwarded-For: ' +
environ.get('HTTP_X_FORWARDED_FOR'))
self.log(log.INFO, ctx, 'start method')
rpc_result = self.rpc_service.call(ctx, req)
self.log(log.INFO, ctx, 'end method')
except JSONRPCError as jre:
err = {'error': {'code': jre.code,
'name': jre.message,
'message': jre.data
}
}
trace = jre.trace if hasattr(jre, 'trace') else None
rpc_result = self.process_error(err, ctx, req, trace)
except Exception, e:
err = {'error': {'code': 0,
'name': 'Unexpected Server Error',
'message': 'An unexpected server error ' +
'occurred',
}
}
rpc_result = self.process_error(err, ctx, req,
traceback.format_exc())
else:
status = '200 OK'
# print 'The request method was %s\n' % environ['REQUEST_METHOD']
# print 'The environment dictionary is:\n%s\n' % pprint.pformat(environ) @IgnorePep8
# print 'The request body was: %s' % request_body
# print 'The result from the method call is:\n%s\n' % \
# pprint.pformat(rpc_result)
if rpc_result:
response_body = rpc_result
else:
response_body = ''
response_headers = [
('Access-Control-Allow-Origin', '*'),
('Access-Control-Allow-Headers', environ.get(
'HTTP_ACCESS_CONTROL_REQUEST_HEADERS', 'authorization')),
('content-type', 'application/json'),
('content-length', str(len(response_body)))]
start_response(status, response_headers)
return [response_body]
def process_error(self, error, context, request, trace=None):
if trace:
self.log(log.ERR, context, trace.split('\n')[0:-1])
if 'id' in request:
error['id'] = request['id']
if 'version' in request:
error['version'] = request['version']
error['error']['error'] = trace
elif 'jsonrpc' in request:
error['jsonrpc'] = request['jsonrpc']
error['error']['data'] = trace
else:
error['version'] = '1.0'
error['error']['error'] = trace
return json.dumps(error)
application = Application()
# This is the uwsgi application dictionary. On startup uwsgi will look
# for this dict and pull its configuration from here.
# This simply lists where to "mount" the application in the URL path
#
# This uwsgi module "magically" appears when running the app within
# uwsgi and is not available otherwise, so wrap an exception handler
# around it
#
# To run this server in uwsgi with 4 workers listening on port 9999 use:
# uwsgi -M -p 4 --http :9999 --wsgi-file _this_file_
# To run using the single-threaded python BaseHTTP server
# (listening on port 9999 by default), execute this file directly
#
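# A minimal direct invocation using the --host/--port options handled in the
# __main__ block below (the host value here is only a placeholder):
#
#   python TransformServer.py --host 0.0.0.0 --port 9999
#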
try:
import uwsgi
# Before we do anything with the application, see if the
# configs specify patching all std routines to be asynch
# *ONLY* use this if you are going to wrap the service in
# a wsgi container that has enabled gevent, such as
# uwsgi with the --gevent option
if config is not None and config.get('gevent_monkeypatch_all', False):
print "Monkeypatching std libraries for async"
from gevent import monkey
monkey.patch_all()
uwsgi.applications = {
'': application
}
except ImportError:
# Not available outside of wsgi, ignore
pass
_proc = None
def start_server(host='localhost', port=0, newprocess=False):
'''
By default, will start the server on localhost on a system-assigned port
in the main thread. Execution of the main thread will stay in the server
main loop until interrupted. To run the server in a separate process, and
thus allow the stop_server method to be called, set newprocess = True. This
will also allow the port number to be returned.'''
global _proc
if _proc:
raise RuntimeError('server is already running')
httpd = make_server(host, port, application)
port = httpd.server_address[1]
print "Listening on port %s" % port
if newprocess:
_proc = Process(target=httpd.serve_forever)
_proc.daemon = True
_proc.start()
else:
httpd.serve_forever()
return port
def stop_server():
global _proc
_proc.terminate()
_proc = None
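# Example sketch (not part of the original service code): starting the server in
# a child process, e.g. from a test, and shutting it down afterwards. The
# surrounding test code is hypothetical; only start_server/stop_server are real.
#
#   port = start_server(host='localhost', port=0, newprocess=True)
#   print "test server listening on port %s" % port
#   # ... issue JSON-RPC calls against http://localhost:<port> here ...
#   stop_server()
#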
if __name__ == "__main__":
try:
opts, args = getopt(sys.argv[1:], "", ["port=", "host="])
except GetoptError as err:
# print help information and exit:
print str(err) # will print something like "option -a not recognized"
sys.exit(2)
port = 9999
host = 'localhost'
for o, a in opts:
if o == '--port':
port = int(a)
elif o == '--host':
host = a
print "Host set to %s" % host
else:
assert False, "unhandled option"
start_server(host=host, port=port)
# print "Listening on port %s" % port
# httpd = make_server( host, port, application)
#
# httpd.serve_forever()
|
PeerToPeer.py
|
import socket
import threading
from .Worker import Worker
from ..Diagnostics.Debugging import Console
from .Commands import Commands
import time
class PeerToPeer:
def __init__(self, listenerSock):
self.TalkerAddr = (0,0)
self.ListenerAddr = (0,0)
self.TalkerSock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) # The socket
self.ListenerSock = listenerSock
self.ImageSenderSock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) # The socket
self.TalkerIsConnected = True
self.ListenerIsConnected = False
self.__Connections = []
Console.info("Initializing p2p connection")
def _Connect(self, peerAddr, onError, onConnection,wait):
time.sleep(wait)
print(peerAddr)
try:
self.TalkerSock.connect(peerAddr)
self.TalkerIsConnected = True
self.TalkerAddr = self.TalkerSock.getsockname()
self.ImageSenderSock.connect(peerAddr)
Console.info("Connected to peer succesfully")
onConnection()
except Exception as e:
print(e)
Console.info("Could not connect to peer")
self.Disconnect()
onError()
def __Listen(self,conn,callback):
try:
while 1:
msg = Worker.GetMessage(conn,cancel = lambda:not self.TalkerIsConnected)
if msg == '!Error':break
if callback(msg):break
except ConnectionResetError:
Console.info("Connection has been resetted")
except Exception as e:
Console.error(errorDesc = e)
conn.close()
self.Disconnect()
Console.info("Stopped Listening")
def _Listen(self, listeningAddr,callback):
Console.info("Setting up p2p listener")
retryCount = 0
while True:
try:
self.ListenerIsConnected = True
self.ListenerSock.listen()
Console.info("Waiting for peer")
connT,addrT = self.ListenerSock.accept()
connI,addrI = self.ListenerSock.accept()
break
except Exception as e:
retryCount += 1
Console.error(e)
if retryCount > 20:
self.Disconnect()
return
time.sleep(0.5)
Console.info("Peer has connected")
self.__Connections = [connI,connT]
threading.Thread(target = self.__Listen, args = (connT, callback),daemon = True).start()
threading.Thread(target = self.__Listen, args = (connI, callback),daemon = True).start()
def Listen(self,listeningAddr,callback):
listeningAddr = tuple(listeningAddr)
threading.Thread(target = self._Listen, args = (listeningAddr,callback),daemon = True).start()
def Connect(self, peerAddr, onError = lambda:0, onConnection = lambda:0, wait = 0):
'''Connects to the peer (asynchronously)'''
peerAddr = tuple(peerAddr)
threading.Thread(target=self._Connect, args = (peerAddr,onError,onConnection,wait),daemon=True).start()
def Disconnect(self):
Console.info("Disconnecting")
for conn in self.__Connections:
try:
conn.shutdown(socket.SHUT_WR)
conn.close()
except:pass
self.TalkerIsConnected = False
self.ListenerIsConnected = False
try:self.TalkerSock.shutdown(socket.SHUT_WR)
except:pass
try:self.ListenerSock.shutdown(socket.SHUT_RDWR) # Shut down the listening socket
except:pass
try:self.ListenerSock.close() # Close the listening socket
except:pass
def Close(self):
Console.info("Closing Client")
try:
Worker.SendMessage(self.TalkerSock,{'command':Commands.DISCONNECT})
except:
pass
finally:
self.Disconnect()
def RequestRoundStart(self):
Worker.SendMessage(self.TalkerSock, {
'command' : Commands.RequestRoundStart
})
def UpdateImage(self, img):
Worker.SendMessage(self.TalkerSock, {
'command' : Commands.UpdateImage,
'data' : img
})
def ValidateResult(self, res):
Worker.SendMessage(self.TalkerSock, {
'command' : Commands.UpdateScore,
'data' : res
})
def RaiseInconsistency(self):
Worker.SendMessage(self.TalkerSock, {
'command' : Commands.RaiseInconsistency
})
def StartRound(self):
Worker.SendMessage(self.TalkerSock, {
'command' : Commands.StartRound
})
def _SendImages(self, cancel, data, delay):
Console.info("Sending images")
try:
while 1:
time.sleep(delay)
if cancel() or (not self.TalkerIsConnected):return
Worker.SendMessage(self.ImageSenderSock, {
'command' : Commands.UpdateImage,
'data' : data()
})
except ConnectionResetError:
Console.info("Connection has been resetted 124rqwe")
except Exception as e:
Console.error(errorDesc = e)
Console.info("Stopped sending images")
def SendImages(self, cancel = lambda:False,data = lambda:{}, delay = 0):
threading.Thread(target=self._SendImages, args = (cancel, data, delay), daemon = True).start()
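# Example sketch (illustrative only, not part of the original module): wiring up
# a PeerToPeer endpoint. The bound listener socket and the peer address are
# placeholders; the callback just consumes messages and never stops listening.
#
#   import socket
#   listener = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
#   listener.bind(('0.0.0.0', 5000))
#   p2p = PeerToPeer(listener)
#   p2p.Listen(('0.0.0.0', 5000), callback=lambda msg: False)
#   p2p.Connect(('192.168.1.10', 5000), wait=1)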
|
amqp.py
|
# --coding:utf-8--
# Copyright (c) 2017 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# -----------------------------
# Copyright by Intel
# -----------------------------
import sys
import os
import time
import ssl
import threading
import amqpstorm
import json
import datetime
from amqpstorm import Message
class AMQPConn(object):
def __init__(self, host, username, password, routing, lock, tls_config=None):
"""
:param host: RabbitMQ server, e.g. 127.0.0.1
:param username: RabbitMQ username, e.g. guest
:param password: RabbitMQ password, e.g. guest
:param routing: name of the queue this connection consumes requests from
:param lock: lock used to serialize request/response round trips
:param tls_config: optional dict; a truthy 'tls' entry enables SSL using the
    'keyfile', 'cerfile' and 'cafile' entries
:return:
"""
# SSL/TLS is enabled via tls_config (see setup())
self.host = host
self.username = username
self.password = password
self.connection = None
self.channel = None
self.resp_queue = None
self.response = None
self.correlation_id = None
self.on_request = routing
self.thread_main = None
self.request_callback = None
self.notif_callback = None
self.tls_config = tls_config
self.use_ssl = False
if self.tls_config:
if self.tls_config.get('tls', None):
self.use_ssl = True
self.lock = lock
self._stopped = threading.Event()
self.setup()
def _on_request(self, message):
json_in = json.loads(message.body)
# print(json_str)
if message.reply_to:
if self.request_callback:
result = self.request_callback(json_in)
properties = {
'correlation_id': message.correlation_id
}
response = Message.create(
message.channel, json.dumps(
result, ensure_ascii=False), properties)
response.content_type = 'application/json'
response.publish(message.reply_to)
else:
if self.notif_callback:
self.notif_callback(json_in)
message.ack()
def _on_response(self, message):
if self.correlation_id != message.correlation_id:
return
self.response = message.body
def setup(self):
if self.use_ssl:
self.connection = amqpstorm.Connection(
self.host,
self.username,
self.password,
port=5671,
ssl=True,
ssl_options={
'ssl_version': ssl.PROTOCOL_TLSv1_2,
'cert_reqs': ssl.CERT_REQUIRED,
'keyfile': self.tls_config.get('keyfile'),
'certfile': self.tls_config.get('cerfile'),
'ca_certs': self.tls_config.get('cafile'),
}
)
else:
self.connection = amqpstorm.Connection(self.host,
self.username,
self.password)
self.channel = self.connection.channel()
result = self.channel.queue.declare(exclusive=True)
self.resp_queue = result['queue']
self.channel.basic.consume(self._on_response, no_ack=True,
queue=self.resp_queue)
self.channel.queue.declare(queue=self.on_request)
self.channel.queue.purge(queue=self.on_request)
self.channel.basic.qos(prefetch_count=100)
self.channel.basic.consume(self._on_request, queue=self.on_request)
def request(self, routing_key, req_json, timeout=0):
self.lock.acquire()
self.response = None
message = Message.create(
self.channel, body=json.dumps(
req_json, ensure_ascii=False))
message.reply_to = self.resp_queue
message.content_type = 'application/json'
self.correlation_id = message.correlation_id
message.publish(routing_key=routing_key)
start = datetime.datetime.now()
while not self.response:
self.channel.process_data_events()
time.sleep(0.01)
now = datetime.datetime.now()
if timeout > 0 and (now - start) >= datetime.timedelta(0, timeout):
break
response = self.response
self.lock.release()
return response
def publish(self, routing_key, req_json):
message = Message.create(
self.channel, body=json.dumps(
req_json, ensure_ascii=False))
message.content_type = 'application/json'
message.publish(routing_key=routing_key)
def start(self, daemon):
self._stopped.clear()
if daemon is True:
self.thread_main = threading.Thread(
target=self._thread_main, args=(None,))
self.thread_main.setDaemon(True)
self.thread_main.start()
else:
self.channel.start_consuming()
def stop(self):
self._stopped.set()
self.channel.stop_consuming()
if self.thread_main:
self.thread_main.join()
self.channel.close()
self.connection.close()
def set_callback(self, request_callback, notif_callback):
self.request_callback = request_callback
self.notif_callback = notif_callback
def _thread_main(self, *args, **kwargs):
need_reconnect = False
while self._stopped.is_set() is not True:
try:
self.channel.start_consuming()
except amqpstorm.AMQPError:
if self._stopped.is_set() is True:
break
need_reconnect = True
pass
if need_reconnect is True:
self.channel.stop_consuming()
self.channel.close()
self.channel = None
self.connection.close()
self.connection = None
while True:
try:
self.setup()
break
except BaseException:
time.sleep(1)
need_reconnect = False
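# Example sketch (not part of the original module): using AMQPConn as a simple
# RPC responder against a local broker. Host, credentials and queue names are
# placeholders.
#
#   import threading
#   conn = AMQPConn('127.0.0.1', 'guest', 'guest', routing='demo_requests',
#                   lock=threading.Lock())
#   conn.set_callback(request_callback=lambda req: {'echo': req},
#                     notif_callback=lambda req: None)
#   conn.start(daemon=True)          # consume 'demo_requests' in a background thread
#   conn.publish('some_other_queue', {'event': 'started'})  # fire-and-forget notification
#   conn.stop()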
|
main_window.py
|
#!/usr/bin/env python
#
# Electrum - lightweight Bitcoin client
# Copyright (C) 2012 thomasv@gitorious
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import sys
import time
import threading
import os
import traceback
import json
import shutil
import weakref
import csv
from decimal import Decimal
import base64
from functools import partial
import queue
import asyncio
from typing import Optional, TYPE_CHECKING, Sequence, List, Union
from PyQt5.QtGui import QPixmap, QKeySequence, QIcon, QCursor, QFont
from PyQt5.QtCore import Qt, QRect, QStringListModel, QSize, pyqtSignal
from PyQt5.QtCore import QTimer
from PyQt5.QtWidgets import (QMessageBox, QComboBox, QSystemTrayIcon, QTabWidget,
QMenuBar, QFileDialog, QCheckBox, QLabel,
QVBoxLayout, QGridLayout, QLineEdit,
QHBoxLayout, QPushButton, QScrollArea, QTextEdit,
QShortcut, QMainWindow, QCompleter, QInputDialog,
QWidget, QSizePolicy, QStatusBar, QToolTip, QDialog,
QMenu, QAction, QStackedWidget, QToolButton)
import electrum
from electrum import (keystore, ecc, constants, util, bitcoin, commands,
paymentrequest, lnutil)
from electrum.bitcoin import COIN, is_address
from electrum.plugin import run_hook, BasePlugin
from electrum.i18n import _
from electrum.util import (format_time,
UserCancelled, profiler,
bh2u, bfh, InvalidPassword,
UserFacingException,
get_new_wallet_name, send_exception_to_crash_reporter,
InvalidBitcoinURI, maybe_extract_bolt11_invoice, NotEnoughFunds,
NoDynamicFeeEstimates, MultipleSpendMaxTxOutputs,
AddTransactionException, BITCOIN_BIP21_URI_SCHEME)
from electrum.invoices import PR_TYPE_ONCHAIN, PR_TYPE_LN, PR_DEFAULT_EXPIRATION_WHEN_CREATING, Invoice
from electrum.invoices import PR_PAID, PR_FAILED, pr_expiration_values, LNInvoice, OnchainInvoice
from electrum.transaction import (Transaction, PartialTxInput,
PartialTransaction, PartialTxOutput)
from electrum.wallet import (Multisig_Wallet, CannotBumpFee, Abstract_Wallet,
sweep_preparations, InternalAddressCorruption,
CannotDoubleSpendTx, CannotCPFP)
from electrum.version import ELECTRUM_VERSION
from electrum.network import (Network, TxBroadcastError, BestEffortRequestFailed,
UntrustedServerReturnedError, NetworkException)
from electrum.exchange_rate import FxThread
from electrum.simple_config import SimpleConfig
from electrum.logging import Logger
from electrum.lnutil import ln_dummy_address, extract_nodeid, ConnStringFormatError
from electrum.lnaddr import lndecode, LnDecodeException
from .exception_window import Exception_Hook
from .amountedit import AmountEdit, BTCAmountEdit, FreezableLineEdit, FeerateEdit
from .qrcodewidget import QRCodeWidget, QRDialog
from .qrtextedit import ShowQRTextEdit, ScanQRTextEdit
from .transaction_dialog import show_transaction
from .fee_slider import FeeSlider, FeeComboBox
from .util import (read_QIcon, ColorScheme, text_dialog, icon_path, WaitingDialog,
WindowModalDialog, ChoicesLayout, HelpLabel, Buttons,
OkButton, InfoButton, WWLabel, TaskThread, CancelButton,
CloseButton, HelpButton, MessageBoxMixin, EnterButton,
import_meta_gui, export_meta_gui,
filename_field, address_field, char_width_in_lineedit, webopen,
TRANSACTION_FILE_EXTENSION_FILTER_ANY, MONOSPACE_FONT,
getOpenFileName, getSaveFileName, BlockingWaitingDialog)
from .util import ButtonsTextEdit, ButtonsLineEdit
from .installwizard import WIF_HELP_TEXT
from .history_list import HistoryList, HistoryModel
from .update_checker import UpdateCheck, UpdateCheckThread
from .channels_list import ChannelsList
from .confirm_tx_dialog import ConfirmTxDialog
from .transaction_dialog import PreviewTxDialog
if TYPE_CHECKING:
from . import ElectrumGui
LN_NUM_PAYMENT_ATTEMPTS = 10
class StatusBarButton(QToolButton):
# note: this class has a custom stylesheet applied in stylesheet_patcher.py
def __init__(self, icon, tooltip, func):
QToolButton.__init__(self)
self.setText('')
self.setIcon(icon)
self.setToolTip(tooltip)
self.setToolButtonStyle(Qt.ToolButtonTextBesideIcon)
self.setAutoRaise(True)
self.setMaximumWidth(25)
self.clicked.connect(self.onPress)
self.func = func
self.setIconSize(QSize(25,25))
self.setCursor(QCursor(Qt.PointingHandCursor))
def onPress(self, checked=False):
'''Drops the unwanted PyQt5 "checked" argument'''
self.func()
def keyPressEvent(self, e):
if e.key() in [ Qt.Key_Return, Qt.Key_Enter ]:
self.func()
def protected(func):
'''Password request wrapper. The password is passed to the function
as the 'password' named argument. "None" indicates either an
unencrypted wallet, or the user cancelled the password request.
An empty input is passed as the empty string.'''
def request_password(self, *args, **kwargs):
parent = self.top_level_window()
password = None
while self.wallet.has_keystore_encryption():
password = self.password_dialog(parent=parent)
if password is None:
# User cancelled password input
return
try:
self.wallet.check_password(password)
break
except Exception as e:
self.show_error(str(e), parent=parent)
continue
kwargs['password'] = password
return func(self, *args, **kwargs)
return request_password
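# Illustrative usage of the @protected decorator above -- a reader's sketch, not part of
# the original module; the method name below is hypothetical. A wrapped method simply
# declares a 'password' parameter and the decorator prompts for and injects it:
#
#     @protected
#     def do_something_sensitive(self, password):
#         self.wallet.sign_transaction(tx, password)
#
# 'password' is None for an unencrypted wallet; if the user cancels the prompt, the
# wrapped method is not called at all.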
class ElectrumWindow(QMainWindow, MessageBoxMixin, Logger):
payment_request_ok_signal = pyqtSignal()
payment_request_error_signal = pyqtSignal()
network_signal = pyqtSignal(str, object)
#ln_payment_attempt_signal = pyqtSignal(str)
alias_received_signal = pyqtSignal()
computing_privkeys_signal = pyqtSignal()
show_privkeys_signal = pyqtSignal()
show_error_signal = pyqtSignal(str)
payment_request: Optional[paymentrequest.PaymentRequest]
def __init__(self, gui_object: 'ElectrumGui', wallet: Abstract_Wallet):
QMainWindow.__init__(self)
self.gui_object = gui_object
self.config = config = gui_object.config # type: SimpleConfig
self.gui_thread = gui_object.gui_thread
assert wallet, "no wallet"
self.wallet = wallet
if wallet.has_lightning():
self.wallet.config.set_key('show_channels_tab', True)
self.setup_exception_hook()
self.network = gui_object.daemon.network # type: Network
self.fx = gui_object.daemon.fx # type: FxThread
self.contacts = wallet.contacts
self.tray = gui_object.tray
self.app = gui_object.app
self.cleaned_up = False
self.payment_request = None # type: Optional[paymentrequest.PaymentRequest]
self.payto_URI = None
self.checking_accounts = False
self.qr_window = None
self.pluginsdialog = None
self.showing_cert_mismatch_error = False
self.tl_windows = []
self.pending_invoice = None
Logger.__init__(self)
self.tx_notification_queue = queue.Queue()
self.tx_notification_last_time = 0
self.create_status_bar()
self.need_update = threading.Event()
self.completions = QStringListModel()
coincontrol_sb = self.create_coincontrol_statusbar()
self.tabs = tabs = QTabWidget(self)
self.send_tab = self.create_send_tab()
self.receive_tab = self.create_receive_tab()
self.addresses_tab = self.create_addresses_tab()
self.utxo_tab = self.create_utxo_tab()
self.console_tab = self.create_console_tab()
self.contacts_tab = self.create_contacts_tab()
self.channels_tab = self.create_channels_tab()
tabs.addTab(self.create_history_tab(), read_QIcon("tab_history.png"), _('History'))
tabs.addTab(self.send_tab, read_QIcon("tab_send.png"), _('Send'))
tabs.addTab(self.receive_tab, read_QIcon("tab_receive.png"), _('Receive'))
def add_optional_tab(tabs, tab, icon, description, name):
tab.tab_icon = icon
tab.tab_description = description
tab.tab_pos = len(tabs)
tab.tab_name = name
if self.config.get('show_{}_tab'.format(name), False):
tabs.addTab(tab, icon, description.replace("&", ""))
add_optional_tab(tabs, self.addresses_tab, read_QIcon("tab_addresses.png"), _("&Addresses"), "addresses")
add_optional_tab(tabs, self.channels_tab, read_QIcon("lightning.png"), _("Channels"), "channels")
add_optional_tab(tabs, self.utxo_tab, read_QIcon("tab_coins.png"), _("Co&ins"), "utxo")
add_optional_tab(tabs, self.contacts_tab, read_QIcon("tab_contacts.png"), _("Con&tacts"), "contacts")
add_optional_tab(tabs, self.console_tab, read_QIcon("tab_console.png"), _("Con&sole"), "console")
tabs.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
central_widget = QWidget()
vbox = QVBoxLayout(central_widget)
vbox.setContentsMargins(0, 0, 0, 0)
vbox.addWidget(tabs)
vbox.addWidget(coincontrol_sb)
self.setCentralWidget(central_widget)
if self.config.get("is_maximized"):
self.showMaximized()
self.setWindowIcon(read_QIcon("electrum.png"))
self.init_menubar()
wrtabs = weakref.proxy(tabs)
QShortcut(QKeySequence("Ctrl+W"), self, self.close)
QShortcut(QKeySequence("Ctrl+Q"), self, self.close)
QShortcut(QKeySequence("Ctrl+R"), self, self.update_wallet)
QShortcut(QKeySequence("F5"), self, self.update_wallet)
QShortcut(QKeySequence("Ctrl+PgUp"), self, lambda: wrtabs.setCurrentIndex((wrtabs.currentIndex() - 1)%wrtabs.count()))
QShortcut(QKeySequence("Ctrl+PgDown"), self, lambda: wrtabs.setCurrentIndex((wrtabs.currentIndex() + 1)%wrtabs.count()))
for i in range(wrtabs.count()):
QShortcut(QKeySequence("Alt+" + str(i + 1)), self, lambda i=i: wrtabs.setCurrentIndex(i))
self.payment_request_ok_signal.connect(self.payment_request_ok)
self.payment_request_error_signal.connect(self.payment_request_error)
self.show_error_signal.connect(self.show_error)
self.history_list.setFocus(True)
# network callbacks
if self.network:
self.network_signal.connect(self.on_network_qt)
interests = ['wallet_updated', 'network_updated', 'blockchain_updated',
'new_transaction', 'status',
'banner', 'verified', 'fee', 'fee_histogram', 'on_quotes',
'on_history', 'channel', 'channels_updated',
'payment_failed', 'payment_succeeded',
'invoice_status', 'request_status', 'ln_gossip_sync_progress',
'cert_mismatch', 'gossip_db_loaded']
# To avoid leaking references to "self" that prevent the
# window from being GC-ed when closed, callbacks should be
# methods of this class only, and specifically not be
# partials, lambdas or methods of subobjects. Hence...
util.register_callback(self.on_network, interests)
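            # (Illustrative note, added commentary -- not upstream code.) Registering the
            # bound method self.on_network keeps the callback GC-friendly; something like
            #     util.register_callback(lambda *a: self.on_network(*a), interests)
            # would store a closure over `self` in the callback registry and keep this
            # window alive after it is closed.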
# set initial message
self.console.showMessage(self.network.banner)
# update fee slider in case we missed the callback
#self.fee_slider.update()
self.load_wallet(wallet)
gui_object.timer.timeout.connect(self.timer_actions)
self.fetch_alias()
# If the option hasn't been set yet
if config.get('check_updates') is None:
choice = self.question(title="Electrum-ECC - " + _("Enable update check"),
msg=_("For security reasons we advise that you always use the latest version of Electrum-ECC.") + " " +
_("Would you like to be notified when there is a newer version of Electrum-ECC available?"))
config.set_key('check_updates', bool(choice), save=True)
if config.get('check_updates', False):
# The references to both the thread and the window need to be stored somewhere
# to prevent GC from getting in our way.
def on_version_received(v):
if UpdateCheck.is_newer(v):
self.update_check_button.setText(_("Update to Electrum-ECC {} is available").format(v))
self.update_check_button.clicked.connect(lambda: self.show_update_check(v))
self.update_check_button.show()
self._update_check_thread = UpdateCheckThread()
self._update_check_thread.checked.connect(on_version_received)
self._update_check_thread.start()
def setup_exception_hook(self):
Exception_Hook.maybe_setup(config=self.config,
wallet=self.wallet)
def run_coroutine_from_thread(self, coro, on_result=None):
def task():
try:
f = asyncio.run_coroutine_threadsafe(coro, self.network.asyncio_loop)
r = f.result()
if on_result:
on_result(r)
except Exception as e:
self.logger.exception("exception in coro scheduled via window.wallet")
self.show_error_signal.emit(str(e))
self.wallet.thread.add(task)
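    # Usage sketch for run_coroutine_from_thread (illustrative; 'some_coro' is a
    # placeholder, not a real API):
    #     self.run_coroutine_from_thread(some_coro(), on_result=self.show_message)
    # The coroutine is scheduled on the network's asyncio loop; on_result, if given, is
    # invoked with its return value from the wallet's TaskThread, and exceptions are
    # surfaced to the user via show_error_signal.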
def on_fx_history(self):
self.history_model.refresh('fx_history')
self.address_list.update()
def on_fx_quotes(self):
self.update_status()
# Refresh edits with the new rate
edit = self.fiat_send_e if self.fiat_send_e.is_last_edited else self.amount_e
edit.textEdited.emit(edit.text())
edit = self.fiat_receive_e if self.fiat_receive_e.is_last_edited else self.receive_amount_e
edit.textEdited.emit(edit.text())
# History tab needs updating if it used spot
if self.fx.history_used_spot:
self.history_model.refresh('fx_quotes')
self.address_list.update()
def toggle_tab(self, tab):
show = not self.config.get('show_{}_tab'.format(tab.tab_name), False)
self.config.set_key('show_{}_tab'.format(tab.tab_name), show)
item_text = (_("Hide {}") if show else _("Show {}")).format(tab.tab_description)
tab.menu_action.setText(item_text)
if show:
# Find out where to place the tab
index = len(self.tabs)
for i in range(len(self.tabs)):
try:
if tab.tab_pos < self.tabs.widget(i).tab_pos:
index = i
break
except AttributeError:
pass
self.tabs.insertTab(index, tab, tab.tab_icon, tab.tab_description.replace("&", ""))
else:
i = self.tabs.indexOf(tab)
self.tabs.removeTab(i)
def push_top_level_window(self, window):
'''Used for e.g. tx dialog box to ensure new dialogs are appropriately
parented. This used to be done by explicitly providing the parent
window, but that isn't something hardware wallet prompts know.'''
self.tl_windows.append(window)
def pop_top_level_window(self, window):
self.tl_windows.remove(window)
def top_level_window(self, test_func=None):
'''Do the right thing in the presence of tx dialog windows'''
override = self.tl_windows[-1] if self.tl_windows else None
if override and test_func and not test_func(override):
override = None # only override if ok for test_func
return self.top_level_window_recurse(override, test_func)
def diagnostic_name(self):
#return '{}:{}'.format(self.__class__.__name__, self.wallet.diagnostic_name())
return self.wallet.diagnostic_name()
def is_hidden(self):
return self.isMinimized() or self.isHidden()
def show_or_hide(self):
if self.is_hidden():
self.bring_to_top()
else:
self.hide()
def bring_to_top(self):
self.show()
self.raise_()
def on_error(self, exc_info):
e = exc_info[1]
if isinstance(e, UserCancelled):
pass
elif isinstance(e, UserFacingException):
self.show_error(str(e))
else:
# TODO would be nice if we just sent these to the crash reporter...
# anything we don't want to send there, we should explicitly catch
# send_exception_to_crash_reporter(e)
try:
self.logger.error("on_error", exc_info=exc_info)
except OSError:
pass # see #4418
self.show_error(repr(e))
def on_network(self, event, *args):
# Handle in GUI thread
self.network_signal.emit(event, args)
def on_network_qt(self, event, args=None):
# Handle a network message in the GUI thread
# note: all windows get events from all wallets!
if event == 'wallet_updated':
wallet = args[0]
if wallet == self.wallet:
self.need_update.set()
elif event == 'network_updated':
self.gui_object.network_updated_signal_obj.network_updated_signal \
.emit(event, args)
self.network_signal.emit('status', None)
elif event == 'blockchain_updated':
# to update number of confirmations in history
self.need_update.set()
elif event == 'new_transaction':
wallet, tx = args
if wallet == self.wallet:
self.tx_notification_queue.put(tx)
elif event == 'on_quotes':
self.on_fx_quotes()
elif event == 'on_history':
self.on_fx_history()
elif event == 'gossip_db_loaded':
self.channels_list.gossip_db_loaded.emit(*args)
elif event == 'channels_updated':
wallet = args[0]
if wallet == self.wallet:
self.channels_list.update_rows.emit(*args)
elif event == 'channel':
wallet = args[0]
if wallet == self.wallet:
self.channels_list.update_single_row.emit(*args)
self.update_status()
elif event == 'request_status':
self.on_request_status(*args)
elif event == 'invoice_status':
self.on_invoice_status(*args)
elif event == 'payment_succeeded':
wallet = args[0]
if wallet == self.wallet:
self.on_payment_succeeded(*args)
elif event == 'payment_failed':
wallet = args[0]
if wallet == self.wallet:
self.on_payment_failed(*args)
elif event == 'status':
self.update_status()
elif event == 'banner':
self.console.showMessage(args[0])
elif event == 'verified':
wallet, tx_hash, tx_mined_status = args
if wallet == self.wallet:
self.history_model.update_tx_mined_status(tx_hash, tx_mined_status)
elif event == 'fee':
pass
elif event == 'fee_histogram':
self.history_model.on_fee_histogram()
elif event == 'ln_gossip_sync_progress':
self.update_lightning_icon()
elif event == 'cert_mismatch':
self.show_cert_mismatch_error()
else:
self.logger.info(f"unexpected network event: {event} {args}")
def fetch_alias(self):
self.alias_info = None
alias = self.config.get('alias')
if alias:
alias = str(alias)
def f():
self.alias_info = self.contacts.resolve_openalias(alias)
self.alias_received_signal.emit()
t = threading.Thread(target=f)
t.setDaemon(True)
t.start()
def close_wallet(self):
if self.wallet:
self.logger.info(f'close_wallet {self.wallet.storage.path}')
self.wallet.thread = None
run_hook('close_wallet', self.wallet)
@profiler
def load_wallet(self, wallet: Abstract_Wallet):
wallet.thread = TaskThread(self, self.on_error)
self.update_recently_visited(wallet.storage.path)
if wallet.has_lightning():
util.trigger_callback('channels_updated', wallet)
self.need_update.set()
        # Once the GUI has been initialized, check whether we want to announce anything,
        # since the callback has already been called (before the GUI was initialized).

# update menus
self.seed_menu.setEnabled(self.wallet.has_seed())
self.update_lock_icon()
self.update_buttons_on_seed()
self.update_console()
self.clear_receive_tab()
self.request_list.update()
self.channels_list.update()
self.tabs.show()
self.init_geometry()
if self.config.get('hide_gui') and self.gui_object.tray.isVisible():
self.hide()
else:
self.show()
self.watching_only_changed()
run_hook('load_wallet', wallet, self)
try:
wallet.try_detecting_internal_addresses_corruption()
except InternalAddressCorruption as e:
self.show_error(str(e))
send_exception_to_crash_reporter(e)
def init_geometry(self):
winpos = self.wallet.db.get("winpos-qt")
try:
screen = self.app.desktop().screenGeometry()
assert screen.contains(QRect(*winpos))
self.setGeometry(*winpos)
except:
self.logger.info("using default geometry")
self.setGeometry(100, 100, 840, 400)
def watching_only_changed(self):
name = "Electrum-ECC Testnet" if constants.net.TESTNET else "Electrum-ECC"
title = '%s %s - %s' % (name, ELECTRUM_VERSION,
self.wallet.basename())
extra = [self.wallet.db.get('wallet_type', '?')]
if self.wallet.is_watching_only():
extra.append(_('watching only'))
title += ' [%s]'% ', '.join(extra)
self.setWindowTitle(title)
self.password_menu.setEnabled(self.wallet.may_have_password())
self.import_privkey_menu.setVisible(self.wallet.can_import_privkey())
self.import_address_menu.setVisible(self.wallet.can_import_address())
self.export_menu.setEnabled(self.wallet.can_export())
def warn_if_watching_only(self):
if self.wallet.is_watching_only():
msg = ' '.join([
_("This wallet is watching-only."),
_("This means you will not be able to spend ECCoins with it."),
_("Make sure you own the seed phrase or the private keys, before you request ECCoins to be sent to this wallet.")
])
self.show_warning(msg, title=_('Watch-only wallet'))
def warn_if_testnet(self):
if not constants.net.TESTNET:
return
# user might have opted out already
if self.config.get('dont_show_testnet_warning', False):
return
# only show once per process lifecycle
if getattr(self.gui_object, '_warned_testnet', False):
return
self.gui_object._warned_testnet = True
msg = ''.join([
_("You are in testnet mode."), ' ',
_("Testnet coins are worthless."), '\n',
_("Testnet is separate from the main Bitcoin network. It is used for testing.")
])
cb = QCheckBox(_("Don't show this again."))
cb_checked = False
def on_cb(x):
nonlocal cb_checked
cb_checked = x == Qt.Checked
cb.stateChanged.connect(on_cb)
self.show_warning(msg, title=_('Testnet'), checkbox=cb)
if cb_checked:
self.config.set_key('dont_show_testnet_warning', True)
def open_wallet(self):
try:
wallet_folder = self.get_wallet_folder()
except FileNotFoundError as e:
self.show_error(str(e))
return
filename, __ = QFileDialog.getOpenFileName(self, "Select your wallet file", wallet_folder)
if not filename:
return
self.gui_object.new_window(filename)
def select_backup_dir(self, b):
name = self.config.get('backup_dir', '')
dirname = QFileDialog.getExistingDirectory(self, "Select your wallet backup directory", name)
if dirname:
self.config.set_key('backup_dir', dirname)
self.backup_dir_e.setText(dirname)
def backup_wallet(self):
d = WindowModalDialog(self, _("File Backup"))
vbox = QVBoxLayout(d)
grid = QGridLayout()
backup_help = ""
backup_dir = self.config.get('backup_dir')
backup_dir_label = HelpLabel(_('Backup directory') + ':', backup_help)
msg = _('Please select a backup directory')
if self.wallet.has_lightning() and self.wallet.lnworker.channels:
msg += '\n\n' + ' '.join([
_("Note that lightning channels will be converted to channel backups."),
_("You cannot use channel backups to perform lightning payments."),
_("Channel backups can only be used to request your channels to be closed.")
])
self.backup_dir_e = QPushButton(backup_dir)
self.backup_dir_e.clicked.connect(self.select_backup_dir)
grid.addWidget(backup_dir_label, 1, 0)
grid.addWidget(self.backup_dir_e, 1, 1)
vbox.addLayout(grid)
vbox.addWidget(WWLabel(msg))
vbox.addLayout(Buttons(CancelButton(d), OkButton(d)))
if not d.exec_():
return
try:
new_path = self.wallet.save_backup()
except BaseException as reason:
self.show_critical(_("Electrum was unable to copy your wallet file to the specified location.") + "\n" + str(reason), title=_("Unable to create backup"))
return
if new_path:
msg = _("A copy of your wallet file was created in")+" '%s'" % str(new_path)
self.show_message(msg, title=_("Wallet backup created"))
else:
self.show_message(_("You need to configure a backup directory in your preferences"), title=_("Backup not created"))
def update_recently_visited(self, filename):
recent = self.config.get('recently_open', [])
try:
sorted(recent)
except:
recent = []
if filename in recent:
recent.remove(filename)
recent.insert(0, filename)
recent = [path for path in recent if os.path.exists(path)]
recent = recent[:5]
self.config.set_key('recently_open', recent)
self.recently_visited_menu.clear()
for i, k in enumerate(sorted(recent)):
b = os.path.basename(k)
def loader(k):
return lambda: self.gui_object.new_window(k)
self.recently_visited_menu.addAction(b, loader(k)).setShortcut(QKeySequence("Ctrl+%d"%(i+1)))
self.recently_visited_menu.setEnabled(len(recent))
def get_wallet_folder(self):
return os.path.dirname(os.path.abspath(self.wallet.storage.path))
def new_wallet(self):
try:
wallet_folder = self.get_wallet_folder()
except FileNotFoundError as e:
self.show_error(str(e))
return
filename = get_new_wallet_name(wallet_folder)
full_path = os.path.join(wallet_folder, filename)
self.gui_object.start_new_window(full_path, None)
def init_menubar(self):
menubar = QMenuBar()
file_menu = menubar.addMenu(_("&File"))
self.recently_visited_menu = file_menu.addMenu(_("&Recently open"))
file_menu.addAction(_("&Open"), self.open_wallet).setShortcut(QKeySequence.Open)
file_menu.addAction(_("&New/Restore"), self.new_wallet).setShortcut(QKeySequence.New)
file_menu.addAction(_("&Save backup"), self.backup_wallet).setShortcut(QKeySequence.SaveAs)
file_menu.addAction(_("Delete"), self.remove_wallet)
file_menu.addSeparator()
file_menu.addAction(_("&Quit"), self.close)
wallet_menu = menubar.addMenu(_("&Wallet"))
wallet_menu.addAction(_("&Information"), self.show_wallet_info)
wallet_menu.addSeparator()
self.password_menu = wallet_menu.addAction(_("&Password"), self.change_password_dialog)
self.seed_menu = wallet_menu.addAction(_("&Seed"), self.show_seed_dialog)
self.private_keys_menu = wallet_menu.addMenu(_("&Private keys"))
self.private_keys_menu.addAction(_("&Sweep"), self.sweep_key_dialog)
self.import_privkey_menu = self.private_keys_menu.addAction(_("&Import"), self.do_import_privkey)
self.export_menu = self.private_keys_menu.addAction(_("&Export"), self.export_privkeys_dialog)
self.import_address_menu = wallet_menu.addAction(_("Import addresses"), self.import_addresses)
wallet_menu.addSeparator()
addresses_menu = wallet_menu.addMenu(_("&Addresses"))
addresses_menu.addAction(_("&Filter"), lambda: self.address_list.toggle_toolbar(self.config))
labels_menu = wallet_menu.addMenu(_("&Labels"))
labels_menu.addAction(_("&Import"), self.do_import_labels)
labels_menu.addAction(_("&Export"), self.do_export_labels)
history_menu = wallet_menu.addMenu(_("&History"))
history_menu.addAction(_("&Filter"), lambda: self.history_list.toggle_toolbar(self.config))
history_menu.addAction(_("&Summary"), self.history_list.show_summary)
history_menu.addAction(_("&Plot"), self.history_list.plot_history_dialog)
history_menu.addAction(_("&Export"), self.history_list.export_history_dialog)
contacts_menu = wallet_menu.addMenu(_("Contacts"))
contacts_menu.addAction(_("&New"), self.new_contact_dialog)
contacts_menu.addAction(_("Import"), lambda: self.import_contacts())
contacts_menu.addAction(_("Export"), lambda: self.export_contacts())
invoices_menu = wallet_menu.addMenu(_("Invoices"))
invoices_menu.addAction(_("Import"), lambda: self.import_invoices())
invoices_menu.addAction(_("Export"), lambda: self.export_invoices())
requests_menu = wallet_menu.addMenu(_("Requests"))
requests_menu.addAction(_("Import"), lambda: self.import_requests())
requests_menu.addAction(_("Export"), lambda: self.export_requests())
wallet_menu.addSeparator()
wallet_menu.addAction(_("Find"), self.toggle_search).setShortcut(QKeySequence("Ctrl+F"))
def add_toggle_action(view_menu, tab):
is_shown = self.config.get('show_{}_tab'.format(tab.tab_name), False)
item_name = (_("Hide") if is_shown else _("Show")) + " " + tab.tab_description
tab.menu_action = view_menu.addAction(item_name, lambda: self.toggle_tab(tab))
view_menu = menubar.addMenu(_("&View"))
add_toggle_action(view_menu, self.addresses_tab)
add_toggle_action(view_menu, self.utxo_tab)
add_toggle_action(view_menu, self.channels_tab)
add_toggle_action(view_menu, self.contacts_tab)
add_toggle_action(view_menu, self.console_tab)
tools_menu = menubar.addMenu(_("&Tools")) # type: QMenu
preferences_action = tools_menu.addAction(_("Preferences"), self.settings_dialog) # type: QAction
if sys.platform == 'darwin':
# "Settings"/"Preferences" are all reserved keywords in macOS.
# preferences_action will get picked up based on name (and put into a standardized location,
# and given a standard reserved hotkey)
# Hence, this menu item will be at a "uniform location re macOS processes"
preferences_action.setMenuRole(QAction.PreferencesRole) # make sure OS recognizes it as preferences
# Add another preferences item, to also have a "uniform location for Electrum between different OSes"
tools_menu.addAction(_("Electrum preferences"), self.settings_dialog)
tools_menu.addAction(_("&Network"), self.gui_object.show_network_dialog).setEnabled(bool(self.network))
tools_menu.addAction(_("&Lightning Network"), self.gui_object.show_lightning_dialog).setEnabled(bool(self.wallet.has_lightning() and self.network))
tools_menu.addAction(_("Local &Watchtower"), self.gui_object.show_watchtower_dialog).setEnabled(bool(self.network and self.network.local_watchtower))
tools_menu.addAction(_("&Plugins"), self.plugins_dialog)
tools_menu.addSeparator()
tools_menu.addAction(_("&Sign/verify message"), self.sign_verify_message)
tools_menu.addAction(_("&Encrypt/decrypt message"), self.encrypt_message)
tools_menu.addSeparator()
paytomany_menu = tools_menu.addAction(_("&Pay to many"), self.paytomany)
raw_transaction_menu = tools_menu.addMenu(_("&Load transaction"))
raw_transaction_menu.addAction(_("&From file"), self.do_process_from_file)
raw_transaction_menu.addAction(_("&From text"), self.do_process_from_text)
raw_transaction_menu.addAction(_("&From the blockchain"), self.do_process_from_txid)
raw_transaction_menu.addAction(_("&From QR code"), self.read_tx_from_qrcode)
self.raw_transaction_menu = raw_transaction_menu
run_hook('init_menubar_tools', self, tools_menu)
help_menu = menubar.addMenu(_("&Help"))
help_menu.addAction(_("&About"), self.show_about)
help_menu.addAction(_("&Check for updates"), self.show_update_check)
help_menu.addAction(_("&Official website"), lambda: webopen("https://electrum.org"))
help_menu.addSeparator()
help_menu.addAction(_("&Documentation"), lambda: webopen("http://docs.electrum.org/")).setShortcut(QKeySequence.HelpContents)
if not constants.net.TESTNET:
help_menu.addAction(_("&Bitcoin Paper"), self.show_bitcoin_paper)
help_menu.addAction(_("&Report Bug"), self.show_report_bug)
help_menu.addSeparator()
help_menu.addAction(_("&Donate to server"), self.donate_to_server)
self.setMenuBar(menubar)
def donate_to_server(self):
d = self.network.get_donation_address()
if d:
host = self.network.get_parameters().server.host
self.pay_to_URI('eccoin:%s?message=donation for %s'%(d, host))
else:
self.show_error(_('No donation address for this server'))
def show_about(self):
QMessageBox.about(self, "Electrum-ECC",
(_("Version")+" %s" % ELECTRUM_VERSION + "\n\n" +
_("Electrum-ECC's focus is speed, with low resource usage and simplifying ECCoin.") + " " +
_("You do not need to perform regular backups, because your wallet can be "
"recovered from a secret phrase that you can memorize or write on paper.") + " " +
_("Startup times are instant because it operates in conjunction with high-performance "
"servers that handle the most complicated parts of the ECCoin system.") + "\n\n" +
_("Uses icons from the Icons8 icon pack (icons8.com).")))
def show_bitcoin_paper(self):
filename = os.path.join(self.config.path, 'bitcoin.pdf')
if not os.path.exists(filename):
s = self._fetch_tx_from_network("54e48e5f5c656b26c3bca14a8c95aa583d07ebe84dde3b7dd4a78f4e4186e713")
if not s:
return
s = s.split("0100000000000000")[1:-1]
out = ''.join(x[6:136] + x[138:268] + x[270:400] if len(x) > 136 else x[6:] for x in s)[16:-20]
with open(filename, 'wb') as f:
f.write(bytes.fromhex(out))
webopen('file:///' + filename)
def show_update_check(self, version=None):
self.gui_object._update_check = UpdateCheck(latest_version=version)
def show_report_bug(self):
msg = ' '.join([
_("Please report any bugs as issues on github:<br/>"),
f'''<a href="{constants.GIT_REPO_ISSUES_URL}">{constants.GIT_REPO_ISSUES_URL}</a><br/><br/>''',
_("Before reporting a bug, upgrade to the most recent version of Electrum (latest release or git HEAD), and include the version number in your report."),
_("Try to explain not only what the bug is, but how it occurs.")
])
self.show_message(msg, title="Electrum-ECC - " + _("Reporting Bugs"), rich_text=True)
def notify_transactions(self):
if self.tx_notification_queue.qsize() == 0:
return
if not self.wallet.up_to_date:
return # no notifications while syncing
now = time.time()
rate_limit = 20 # seconds
if self.tx_notification_last_time + rate_limit > now:
return
self.tx_notification_last_time = now
self.logger.info("Notifying GUI about new transactions")
txns = []
while True:
try:
txns.append(self.tx_notification_queue.get_nowait())
except queue.Empty:
break
# Combine the transactions if there are at least three
if len(txns) >= 3:
total_amount = 0
for tx in txns:
tx_wallet_delta = self.wallet.get_wallet_delta(tx)
if not tx_wallet_delta.is_relevant:
continue
total_amount += tx_wallet_delta.delta
self.notify(_("{} new transactions: Total amount received in the new transactions {}")
.format(len(txns), self.format_amount_and_units(total_amount)))
else:
for tx in txns:
tx_wallet_delta = self.wallet.get_wallet_delta(tx)
if not tx_wallet_delta.is_relevant:
continue
self.notify(_("New transaction: {}").format(self.format_amount_and_units(tx_wallet_delta.delta)))
def notify(self, message):
if self.tray:
try:
# this requires Qt 5.9
self.tray.showMessage("Electrum-ECC", message, read_QIcon("electrum_dark_icon"), 20000)
except TypeError:
self.tray.showMessage("Electrum-ECC", message, QSystemTrayIcon.Information, 20000)
def timer_actions(self):
self.request_list.refresh_status()
# Note this runs in the GUI thread
if self.need_update.is_set():
self.need_update.clear()
self.update_wallet()
elif not self.wallet.up_to_date:
# this updates "synchronizing" progress
self.update_status()
# resolve aliases
# FIXME this is a blocking network call that has a timeout of 5 sec
self.payto_e.resolve()
self.notify_transactions()
def format_amount(self, x, is_diff=False, whitespaces=False):
# x is in sats
return self.config.format_amount(x, is_diff=is_diff, whitespaces=whitespaces)
def format_amount_and_units(self, amount):
# amount is in sats
text = self.config.format_amount_and_units(amount)
x = self.fx.format_amount_and_units(amount) if self.fx else None
if text and x:
text += ' (%s)'%x
return text
def format_fee_rate(self, fee_rate):
return self.config.format_fee_rate(fee_rate)
def get_decimal_point(self):
return self.config.get_decimal_point()
def base_unit(self):
return self.config.get_base_unit()
def connect_fields(self, window, btc_e, fiat_e, fee_e):
def edit_changed(edit):
if edit.follows:
return
edit.setStyleSheet(ColorScheme.DEFAULT.as_stylesheet())
fiat_e.is_last_edited = (edit == fiat_e)
amount = edit.get_amount()
rate = self.fx.exchange_rate() if self.fx else Decimal('NaN')
if rate.is_nan() or amount is None:
if edit is fiat_e:
btc_e.setText("")
if fee_e:
fee_e.setText("")
else:
fiat_e.setText("")
else:
if edit is fiat_e:
btc_e.follows = True
btc_e.setAmount(int(amount / Decimal(rate) * COIN))
btc_e.setStyleSheet(ColorScheme.BLUE.as_stylesheet())
btc_e.follows = False
if fee_e:
window.update_fee()
else:
fiat_e.follows = True
fiat_e.setText(self.fx.ccy_amount_str(
amount * Decimal(rate) / COIN, False))
fiat_e.setStyleSheet(ColorScheme.BLUE.as_stylesheet())
fiat_e.follows = False
btc_e.follows = False
fiat_e.follows = False
fiat_e.textChanged.connect(partial(edit_changed, fiat_e))
btc_e.textChanged.connect(partial(edit_changed, btc_e))
fiat_e.is_last_edited = False
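    # Added commentary: connect_fields keeps a BTC amount edit and its fiat counterpart in
    # sync in both directions. The `follows` flag prevents the two textChanged handlers
    # from re-triggering each other in a loop when one edit programmatically updates the
    # other.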
def update_status(self):
if not self.wallet:
return
if self.network is None:
text = _("Offline")
icon = read_QIcon("status_disconnected.png")
elif self.network.is_connected():
server_height = self.network.get_server_height()
server_lag = self.network.get_local_height() - server_height
fork_str = "_fork" if len(self.network.get_blockchains())>1 else ""
# Server height can be 0 after switching to a new server
# until we get a headers subscription request response.
# Display the synchronizing message in that case.
if not self.wallet.up_to_date or server_height == 0:
num_sent, num_answered = self.wallet.get_history_sync_state_details()
text = ("{} ({}/{})"
.format(_("Synchronizing..."), num_answered, num_sent))
icon = read_QIcon("status_waiting.png")
elif server_lag > 1:
text = _("Server is lagging ({} blocks)").format(server_lag)
icon = read_QIcon("status_lagging%s.png"%fork_str)
else:
c, u, x = self.wallet.get_balance()
text = _("Balance" ) + ": %s "%(self.format_amount_and_units(c))
if u:
text += " [%s unconfirmed]"%(self.format_amount(u, is_diff=True).strip())
if x:
text += " [%s unmatured]"%(self.format_amount(x, is_diff=True).strip())
if self.wallet.has_lightning():
l = self.wallet.lnworker.get_balance()
text += u' \U000026a1 %s'%(self.format_amount_and_units(l).strip())
# append fiat balance and price
if self.fx.is_enabled():
text += self.fx.get_fiat_status_text(c + u + x,
self.base_unit(), self.get_decimal_point()) or ''
if not self.network.proxy:
icon = read_QIcon("status_connected%s.png"%fork_str)
else:
icon = read_QIcon("status_connected_proxy%s.png"%fork_str)
else:
if self.network.proxy:
text = "{} ({})".format(_("Not connected"), _("proxy enabled"))
else:
text = _("Not connected")
icon = read_QIcon("status_disconnected.png")
self.tray.setToolTip("%s (%s)" % (text, self.wallet.basename()))
self.balance_label.setText(text)
if self.status_button:
self.status_button.setIcon( icon )
def update_wallet(self):
self.update_status()
if self.wallet.up_to_date or not self.network or not self.network.is_connected():
self.update_tabs()
def update_tabs(self, wallet=None):
if wallet is None:
wallet = self.wallet
if wallet != self.wallet:
return
self.history_model.refresh('update_tabs')
self.request_list.update()
self.address_list.update()
self.utxo_list.update()
self.contact_list.update()
self.invoice_list.update()
self.channels_list.update_rows.emit(wallet)
self.update_completions()
def create_channels_tab(self):
self.channels_list = ChannelsList(self)
t = self.channels_list.get_toolbar()
return self.create_list_tab(self.channels_list, t)
def create_history_tab(self):
self.history_model = HistoryModel(self)
self.history_list = l = HistoryList(self, self.history_model)
self.history_model.set_view(self.history_list)
l.searchable_list = l
toolbar = l.create_toolbar(self.config)
toolbar_shown = bool(self.config.get('show_toolbar_history', False))
l.show_toolbar(toolbar_shown)
return self.create_list_tab(l, toolbar)
def show_address(self, addr):
from . import address_dialog
d = address_dialog.AddressDialog(self, addr)
d.exec_()
def show_channel(self, channel_id):
from . import channel_details
channel_details.ChannelDetailsDialog(self, channel_id).show()
def show_transaction(self, tx, *, tx_desc=None):
'''tx_desc is set only for txs created in the Send tab'''
show_transaction(tx, parent=self, desc=tx_desc)
def show_lightning_transaction(self, tx_item):
from .lightning_tx_dialog import LightningTxDialog
d = LightningTxDialog(self, tx_item)
d.show()
def create_receive_tab(self):
# A 4-column grid layout. All the stretch is in the last column.
# The exchange rate plugin adds a fiat widget in column 2
self.receive_grid = grid = QGridLayout()
grid.setSpacing(8)
grid.setColumnStretch(3, 1)
self.receive_message_e = QLineEdit()
grid.addWidget(QLabel(_('Description')), 0, 0)
grid.addWidget(self.receive_message_e, 0, 1, 1, 4)
self.receive_message_e.textChanged.connect(self.update_receive_qr)
self.receive_amount_e = BTCAmountEdit(self.get_decimal_point)
grid.addWidget(QLabel(_('Requested amount')), 1, 0)
grid.addWidget(self.receive_amount_e, 1, 1)
self.receive_amount_e.textChanged.connect(self.update_receive_qr)
self.fiat_receive_e = AmountEdit(self.fx.get_currency if self.fx else '')
if not self.fx or not self.fx.is_enabled():
self.fiat_receive_e.setVisible(False)
grid.addWidget(self.fiat_receive_e, 1, 2, Qt.AlignLeft)
self.connect_fields(self, self.receive_amount_e, self.fiat_receive_e, None)
self.connect_fields(self, self.amount_e, self.fiat_send_e, None)
self.expires_combo = QComboBox()
evl = sorted(pr_expiration_values.items())
evl_keys = [i[0] for i in evl]
evl_values = [i[1] for i in evl]
default_expiry = self.config.get('request_expiry', PR_DEFAULT_EXPIRATION_WHEN_CREATING)
try:
i = evl_keys.index(default_expiry)
except ValueError:
i = 0
self.expires_combo.addItems(evl_values)
self.expires_combo.setCurrentIndex(i)
self.expires_combo.setFixedWidth(self.receive_amount_e.width())
def on_expiry(i):
self.config.set_key('request_expiry', evl_keys[i])
self.expires_combo.currentIndexChanged.connect(on_expiry)
msg = ' '.join([
_('Expiration date of your request.'),
_('This information is seen by the recipient if you send them a signed payment request.'),
_('Expired requests have to be deleted manually from your list, in order to free the corresponding ECCoin addresses.'),
_('The ECCoin address never expires and will always be part of this electrum wallet.'),
])
grid.addWidget(HelpLabel(_('Expires after'), msg), 2, 0)
grid.addWidget(self.expires_combo, 2, 1)
self.expires_label = QLineEdit('')
self.expires_label.setReadOnly(1)
self.expires_label.setFocusPolicy(Qt.NoFocus)
self.expires_label.hide()
grid.addWidget(self.expires_label, 2, 1)
self.clear_invoice_button = QPushButton(_('Clear'))
self.clear_invoice_button.clicked.connect(self.clear_receive_tab)
self.create_invoice_button = QPushButton(_('New Address'))
self.create_invoice_button.setIcon(read_QIcon("bitcoin.png"))
self.create_invoice_button.setToolTip('Create on-chain request')
self.create_invoice_button.clicked.connect(lambda: self.create_invoice(False))
self.receive_buttons = buttons = QHBoxLayout()
buttons.addStretch(1)
buttons.addWidget(self.clear_invoice_button)
buttons.addWidget(self.create_invoice_button)
if self.wallet.has_lightning():
self.create_invoice_button.setText(_('New Address'))
self.create_lightning_invoice_button = QPushButton(_('Lightning'))
self.create_lightning_invoice_button.setToolTip('Create lightning request')
self.create_lightning_invoice_button.setIcon(read_QIcon("lightning.png"))
self.create_lightning_invoice_button.clicked.connect(lambda: self.create_invoice(True))
buttons.addWidget(self.create_lightning_invoice_button)
grid.addLayout(buttons, 4, 3, 1, 2)
self.receive_payreq_e = ButtonsTextEdit()
self.receive_payreq_e.setFont(QFont(MONOSPACE_FONT))
self.receive_payreq_e.addCopyButton(self.app)
self.receive_payreq_e.setReadOnly(True)
self.receive_payreq_e.textChanged.connect(self.update_receive_qr)
self.receive_payreq_e.setFocusPolicy(Qt.ClickFocus)
self.receive_qr = QRCodeWidget(fixedSize=220)
self.receive_qr.mouseReleaseEvent = lambda x: self.toggle_qr_window()
self.receive_qr.enterEvent = lambda x: self.app.setOverrideCursor(QCursor(Qt.PointingHandCursor))
self.receive_qr.leaveEvent = lambda x: self.app.setOverrideCursor(QCursor(Qt.ArrowCursor))
self.receive_address_e = ButtonsTextEdit()
self.receive_address_e.setFont(QFont(MONOSPACE_FONT))
self.receive_address_e.addCopyButton(self.app)
self.receive_address_e.setReadOnly(True)
self.receive_address_e.textChanged.connect(self.update_receive_address_styling)
qr_show = lambda: self.show_qrcode(str(self.receive_address_e.text()), _('Receiving address'), parent=self)
qr_icon = "qrcode_white.png" if ColorScheme.dark_scheme else "qrcode.png"
self.receive_address_e.addButton(qr_icon, qr_show, _("Show as QR code"))
self.receive_requests_label = QLabel(_('Receive queue'))
from .request_list import RequestList
self.request_list = RequestList(self)
receive_tabs = QTabWidget()
receive_tabs.addTab(self.receive_address_e, _('Address'))
receive_tabs.addTab(self.receive_payreq_e, _('Request'))
receive_tabs.addTab(self.receive_qr, _('QR Code'))
receive_tabs.setCurrentIndex(self.config.get('receive_tabs_index', 0))
receive_tabs.currentChanged.connect(lambda i: self.config.set_key('receive_tabs_index', i))
receive_tabs_sp = receive_tabs.sizePolicy()
receive_tabs_sp.setRetainSizeWhenHidden(True)
receive_tabs.setSizePolicy(receive_tabs_sp)
def maybe_hide_receive_tabs():
receive_tabs.setVisible(bool(self.receive_payreq_e.text()))
self.receive_payreq_e.textChanged.connect(maybe_hide_receive_tabs)
maybe_hide_receive_tabs()
# layout
vbox_g = QVBoxLayout()
vbox_g.addLayout(grid)
vbox_g.addStretch()
hbox = QHBoxLayout()
hbox.addLayout(vbox_g)
hbox.addStretch()
hbox.addWidget(receive_tabs)
w = QWidget()
w.searchable_list = self.request_list
vbox = QVBoxLayout(w)
vbox.addLayout(hbox)
vbox.addStretch(1)
vbox.addWidget(self.receive_requests_label)
vbox.addWidget(self.request_list)
vbox.setStretchFactor(self.request_list, 1000)
return w
def delete_requests(self, keys):
for key in keys:
self.wallet.delete_request(key)
self.request_list.update()
self.clear_receive_tab()
def delete_lightning_payreq(self, payreq_key):
self.wallet.lnworker.delete_invoice(payreq_key)
self.request_list.update()
self.invoice_list.update()
self.clear_receive_tab()
def sign_payment_request(self, addr):
alias = self.config.get('alias')
if alias and self.alias_info:
alias_addr, alias_name, validated = self.alias_info
if alias_addr:
if self.wallet.is_mine(alias_addr):
msg = _('This payment request will be signed.') + '\n' + _('Please enter your password')
password = None
if self.wallet.has_keystore_encryption():
password = self.password_dialog(msg)
if not password:
return
try:
self.wallet.sign_payment_request(addr, alias, alias_addr, password)
except Exception as e:
self.show_error(repr(e))
return
else:
return
def create_invoice(self, is_lightning):
amount = self.receive_amount_e.get_amount()
message = self.receive_message_e.text()
expiry = self.config.get('request_expiry', PR_DEFAULT_EXPIRATION_WHEN_CREATING)
if is_lightning:
if not self.wallet.lnworker.channels:
self.show_error(_("You need to open a Lightning channel first."))
return
# TODO maybe show a warning if amount exceeds lnworker.num_sats_can_receive (as in kivy)
key = self.wallet.lnworker.add_request(amount, message, expiry)
else:
key = self.create_bitcoin_request(amount, message, expiry)
if not key:
return
self.address_list.update()
assert key is not None
self.request_list.update()
self.request_list.select_key(key)
# clear request fields
self.receive_amount_e.setText('')
self.receive_message_e.setText('')
# copy to clipboard
r = self.wallet.get_request(key)
content = r.invoice if r.is_lightning() else r.get_address()
title = _('Invoice') if is_lightning else _('Address')
self.do_copy(content, title=title)
def create_bitcoin_request(self, amount, message, expiration) -> Optional[str]:
addr = self.wallet.get_unused_address()
if addr is None:
if not self.wallet.is_deterministic(): # imported wallet
msg = [
_('No more addresses in your wallet.'), ' ',
_('You are using a non-deterministic wallet, which cannot create new addresses.'), ' ',
_('If you want to create new addresses, use a deterministic wallet instead.'), '\n\n',
_('Creating a new payment request will reuse one of your addresses and overwrite an existing request. Continue anyway?'),
]
if not self.question(''.join(msg)):
return
addr = self.wallet.get_receiving_address()
else: # deterministic wallet
if not self.question(_("Warning: The next address will not be recovered automatically if you restore your wallet from seed; you may need to add it manually.\n\nThis occurs because you have too many unused addresses in your wallet. To avoid this situation, use the existing addresses first.\n\nCreate anyway?")):
return
addr = self.wallet.create_new_address(False)
req = self.wallet.make_payment_request(addr, amount, message, expiration)
try:
self.wallet.add_payment_request(req)
except Exception as e:
self.logger.exception('Error adding payment request')
self.show_error(_('Error adding payment request') + ':\n' + repr(e))
else:
self.sign_payment_request(addr)
return addr
def do_copy(self, content: str, *, title: str = None) -> None:
self.app.clipboard().setText(content)
if title is None:
tooltip_text = _("Text copied to clipboard").format(title)
else:
tooltip_text = _("{} copied to clipboard").format(title)
QToolTip.showText(QCursor.pos(), tooltip_text, self)
def clear_receive_tab(self):
self.receive_payreq_e.setText('')
self.receive_address_e.setText('')
self.receive_message_e.setText('')
self.receive_amount_e.setAmount(None)
self.expires_label.hide()
self.expires_combo.show()
self.request_list.clearSelection()
def toggle_qr_window(self):
from . import qrwindow
if not self.qr_window:
self.qr_window = qrwindow.QR_Window(self)
self.qr_window.setVisible(True)
self.qr_window_geometry = self.qr_window.geometry()
else:
if not self.qr_window.isVisible():
self.qr_window.setVisible(True)
self.qr_window.setGeometry(self.qr_window_geometry)
else:
self.qr_window_geometry = self.qr_window.geometry()
self.qr_window.setVisible(False)
self.update_receive_qr()
def show_send_tab(self):
self.tabs.setCurrentIndex(self.tabs.indexOf(self.send_tab))
def show_receive_tab(self):
self.tabs.setCurrentIndex(self.tabs.indexOf(self.receive_tab))
def update_receive_qr(self):
uri = str(self.receive_payreq_e.text())
if maybe_extract_bolt11_invoice(uri):
# encode lightning invoices as uppercase so QR encoding can use
# alphanumeric mode; resulting in smaller QR codes
uri = uri.upper()
self.receive_qr.setData(uri)
if self.qr_window and self.qr_window.isVisible():
self.qr_window.qrw.setData(uri)
def update_receive_address_styling(self):
addr = str(self.receive_address_e.text())
if is_address(addr) and self.wallet.is_used(addr):
self.receive_address_e.setStyleSheet(ColorScheme.RED.as_stylesheet(True))
self.receive_address_e.setToolTip(_("This address has already been used. "
"For better privacy, do not reuse it for new payments."))
else:
self.receive_address_e.setStyleSheet("")
self.receive_address_e.setToolTip("")
def create_send_tab(self):
# A 4-column grid layout. All the stretch is in the last column.
# The exchange rate plugin adds a fiat widget in column 2
self.send_grid = grid = QGridLayout()
grid.setSpacing(8)
grid.setColumnStretch(3, 1)
from .paytoedit import PayToEdit
self.amount_e = BTCAmountEdit(self.get_decimal_point)
self.payto_e = PayToEdit(self)
self.payto_e.addPasteButton(self.app)
msg = _('Recipient of the funds.') + '\n\n'\
+ _('You may enter a ECCoin address, a label from your list of contacts (a list of completions will be proposed), or an alias (email-like address that forwards to a ECCoin address)')
payto_label = HelpLabel(_('Pay to'), msg)
grid.addWidget(payto_label, 1, 0)
grid.addWidget(self.payto_e, 1, 1, 1, -1)
completer = QCompleter()
completer.setCaseSensitivity(False)
self.payto_e.set_completer(completer)
completer.setModel(self.completions)
msg = _('Description of the transaction (not mandatory).') + '\n\n'\
+ _('The description is not sent to the recipient of the funds. It is stored in your wallet file, and displayed in the \'History\' tab.')
description_label = HelpLabel(_('Description'), msg)
grid.addWidget(description_label, 2, 0)
self.message_e = FreezableLineEdit()
self.message_e.setMinimumWidth(700)
grid.addWidget(self.message_e, 2, 1, 1, -1)
msg = _('Amount to be sent.') + '\n\n' \
+ _('The amount will be displayed in red if you do not have enough funds in your wallet.') + ' ' \
+ _('Note that if you have frozen some of your addresses, the available funds will be lower than your total balance.') + '\n\n' \
+ _('Keyboard shortcut: type "!" to send all your coins.')
amount_label = HelpLabel(_('Amount'), msg)
grid.addWidget(amount_label, 3, 0)
grid.addWidget(self.amount_e, 3, 1)
self.fiat_send_e = AmountEdit(self.fx.get_currency if self.fx else '')
if not self.fx or not self.fx.is_enabled():
self.fiat_send_e.setVisible(False)
grid.addWidget(self.fiat_send_e, 3, 2)
self.amount_e.frozen.connect(
lambda: self.fiat_send_e.setFrozen(self.amount_e.isReadOnly()))
self.max_button = EnterButton(_("Max"), self.spend_max)
self.max_button.setFixedWidth(100)
self.max_button.setCheckable(True)
grid.addWidget(self.max_button, 3, 3)
self.save_button = EnterButton(_("Save"), self.do_save_invoice)
self.send_button = EnterButton(_("Pay") + "...", self.do_pay)
self.clear_button = EnterButton(_("Clear"), self.do_clear)
buttons = QHBoxLayout()
buttons.addStretch(1)
buttons.addWidget(self.clear_button)
buttons.addWidget(self.save_button)
buttons.addWidget(self.send_button)
grid.addLayout(buttons, 6, 1, 1, 4)
self.amount_e.shortcut.connect(self.spend_max)
def reset_max(text):
self.max_button.setChecked(False)
enable = not bool(text) and not self.amount_e.isReadOnly()
#self.max_button.setEnabled(enable)
self.amount_e.textEdited.connect(reset_max)
self.fiat_send_e.textEdited.connect(reset_max)
self.set_onchain(False)
self.invoices_label = QLabel(_('Send queue'))
from .invoice_list import InvoiceList
self.invoice_list = InvoiceList(self)
vbox0 = QVBoxLayout()
vbox0.addLayout(grid)
hbox = QHBoxLayout()
hbox.addLayout(vbox0)
hbox.addStretch(1)
w = QWidget()
vbox = QVBoxLayout(w)
vbox.addLayout(hbox)
vbox.addStretch(1)
vbox.addWidget(self.invoices_label)
vbox.addWidget(self.invoice_list)
vbox.setStretchFactor(self.invoice_list, 1000)
w.searchable_list = self.invoice_list
run_hook('create_send_tab', grid)
return w
def spend_max(self):
if run_hook('abort_send', self):
return
outputs = self.payto_e.get_outputs(True)
if not outputs:
return
make_tx = lambda fee_est: self.wallet.make_unsigned_transaction(
coins=self.get_coins(),
outputs=outputs,
fee=fee_est,
is_sweep=False)
try:
try:
tx = make_tx(None)
except (NotEnoughFunds, NoDynamicFeeEstimates) as e:
# Check if we had enough funds excluding fees,
# if so, still provide opportunity to set lower fees.
tx = make_tx(0)
except MultipleSpendMaxTxOutputs as e:
self.max_button.setChecked(False)
self.show_error(str(e))
return
except NotEnoughFunds as e:
self.max_button.setChecked(False)
text = self.get_text_not_enough_funds_mentioning_frozen()
self.show_error(text)
return
self.max_button.setChecked(True)
amount = tx.output_value()
__, x_fee_amount = run_hook('get_tx_extra_fee', self.wallet, tx) or (None, 0)
amount_after_all_fees = amount - x_fee_amount
self.amount_e.setAmount(amount_after_all_fees)
def get_contact_payto(self, key):
_type, label = self.contacts.get(key)
return label + ' <' + key + '>' if _type == 'address' else key
def update_completions(self):
l = [self.get_contact_payto(key) for key in self.contacts.keys()]
self.completions.setStringList(l)
@protected
def protect(self, func, args, password):
return func(*args, password)
def read_outputs(self) -> List[PartialTxOutput]:
if self.payment_request:
outputs = self.payment_request.get_outputs()
else:
outputs = self.payto_e.get_outputs(self.max_button.isChecked())
return outputs
def check_send_tab_onchain_outputs_and_show_errors(self, outputs: List[PartialTxOutput]) -> bool:
"""Returns whether there are errors with outputs.
Also shows error dialog to user if so.
"""
if not outputs:
self.show_error(_('No outputs'))
return True
for o in outputs:
if o.scriptpubkey is None:
self.show_error(_('Bitcoin Address is None'))
return True
if o.value is None:
self.show_error(_('Invalid Amount'))
return True
return False # no errors
def check_send_tab_payto_line_and_show_errors(self) -> bool:
"""Returns whether there are errors.
Also shows error dialog to user if so.
"""
pr = self.payment_request
if pr:
if pr.has_expired():
self.show_error(_('Payment request has expired'))
return True
if not pr:
errors = self.payto_e.get_errors()
if errors:
if len(errors) == 1 and not errors[0].is_multiline:
err = errors[0]
self.show_warning(_("Failed to parse 'Pay to' line") + ":\n" +
f"{err.line_content[:40]}...\n\n"
f"{err.exc!r}")
else:
self.show_warning(_("Invalid Lines found:") + "\n\n" +
'\n'.join([_("Line #") +
f"{err.idx+1}: {err.line_content[:40]}... ({err.exc!r})"
for err in errors]))
return True
if self.payto_e.is_alias and self.payto_e.validated is False:
alias = self.payto_e.toPlainText()
msg = _('WARNING: the alias "{}" could not be validated via an additional '
'security check, DNSSEC, and thus may not be correct.').format(alias) + '\n'
msg += _('Do you wish to continue?')
if not self.question(msg):
return True
return False # no errors
def pay_lightning_invoice(self, invoice: str, *, amount_msat: Optional[int]):
if amount_msat is None:
raise Exception("missing amount for LN invoice")
amount_sat = Decimal(amount_msat) / 1000
# FIXME this is currently lying to user as we truncate to satoshis
msg = _("Pay lightning invoice?") + '\n\n' + _("This will send {}?").format(self.format_amount_and_units(amount_sat))
if not self.question(msg):
return
self.save_pending_invoice()
attempts = LN_NUM_PAYMENT_ATTEMPTS
def task():
self.wallet.lnworker.pay(invoice, amount_msat=amount_msat, attempts=attempts)
self.wallet.thread.add(task)
def on_request_status(self, wallet, key, status):
if wallet != self.wallet:
return
req = self.wallet.receive_requests.get(key)
if req is None:
return
if status == PR_PAID:
self.notify(_('Payment received') + '\n' + key)
self.need_update.set()
else:
self.request_list.update_item(key, req)
def on_invoice_status(self, wallet, key):
if wallet != self.wallet:
return
invoice = self.wallet.get_invoice(key)
if invoice is None:
return
status = self.wallet.get_invoice_status(invoice)
if status == PR_PAID:
self.invoice_list.update()
else:
self.invoice_list.update_item(key, invoice)
def on_payment_succeeded(self, wallet, key):
description = self.wallet.get_label(key)
self.notify(_('Payment succeeded') + '\n\n' + description)
self.need_update.set()
def on_payment_failed(self, wallet, key, reason):
self.show_error(_('Payment failed') + '\n\n' + reason)
def read_invoice(self):
if self.check_send_tab_payto_line_and_show_errors():
return
if not self._is_onchain:
invoice_str = self.payto_e.lightning_invoice
if not invoice_str:
return
if not self.wallet.has_lightning():
self.show_error(_('Lightning is disabled'))
return
invoice = LNInvoice.from_bech32(invoice_str)
if invoice.get_amount_msat() is None:
amount_sat = self.amount_e.get_amount()
if amount_sat:
invoice.amount_msat = int(amount_sat * 1000)
else:
self.show_error(_('No amount'))
return
return invoice
else:
outputs = self.read_outputs()
if self.check_send_tab_onchain_outputs_and_show_errors(outputs):
return
message = self.message_e.text()
return self.wallet.create_invoice(
outputs=outputs,
message=message,
pr=self.payment_request,
URI=self.payto_URI)
def do_save_invoice(self):
self.pending_invoice = self.read_invoice()
if not self.pending_invoice:
return
self.save_pending_invoice()
def save_pending_invoice(self):
if not self.pending_invoice:
return
self.do_clear()
self.wallet.save_invoice(self.pending_invoice)
self.invoice_list.update()
self.pending_invoice = None
def do_pay(self):
self.pending_invoice = self.read_invoice()
if not self.pending_invoice:
return
self.do_pay_invoice(self.pending_invoice)
def pay_multiple_invoices(self, invoices):
outputs = []
for invoice in invoices:
outputs += invoice.outputs
self.pay_onchain_dialog(self.get_coins(), outputs)
def do_pay_invoice(self, invoice: 'Invoice'):
if invoice.type == PR_TYPE_LN:
assert isinstance(invoice, LNInvoice)
self.pay_lightning_invoice(invoice.invoice, amount_msat=invoice.get_amount_msat())
elif invoice.type == PR_TYPE_ONCHAIN:
assert isinstance(invoice, OnchainInvoice)
self.pay_onchain_dialog(self.get_coins(), invoice.outputs)
else:
raise Exception('unknown invoice type')
def get_coins(self, *, nonlocal_only=False) -> Sequence[PartialTxInput]:
coins = self.get_manually_selected_coins()
if coins is not None:
return coins
else:
return self.wallet.get_spendable_coins(None, nonlocal_only=nonlocal_only)
def get_manually_selected_coins(self) -> Optional[Sequence[PartialTxInput]]:
"""Return a list of selected coins or None.
Note: None means selection is not being used,
while an empty sequence means the user specifically selected that.
"""
return self.utxo_list.get_spend_list()
def get_text_not_enough_funds_mentioning_frozen(self) -> str:
text = _("Not enough funds")
frozen_bal = sum(self.wallet.get_frozen_balance())
if frozen_bal:
text += " ({} {} {})".format(
self.format_amount(frozen_bal).strip(), self.base_unit(), _("are frozen")
)
return text
def pay_onchain_dialog(
self, inputs: Sequence[PartialTxInput],
outputs: List[PartialTxOutput], *,
external_keypairs=None) -> None:
# trustedcoin requires this
if run_hook('abort_send', self):
return
is_sweep = bool(external_keypairs)
make_tx = lambda fee_est: self.wallet.make_unsigned_transaction(
coins=inputs,
outputs=outputs,
fee=fee_est,
is_sweep=is_sweep)
output_values = [x.value for x in outputs]
if output_values.count('!') > 1:
self.show_error(_("More than one output set to spend max"))
return
output_value = '!' if '!' in output_values else sum(output_values)
d = ConfirmTxDialog(window=self, make_tx=make_tx, output_value=output_value, is_sweep=is_sweep)
if d.not_enough_funds:
# Check if we had enough funds excluding fees,
# if so, still provide opportunity to set lower fees.
if not d.have_enough_funds_assuming_zero_fees():
text = self.get_text_not_enough_funds_mentioning_frozen()
self.show_message(text)
return
# shortcut to advanced preview (after "enough funds" check!)
if self.config.get('advanced_preview'):
self.preview_tx_dialog(make_tx=make_tx,
external_keypairs=external_keypairs)
return
cancelled, is_send, password, tx = d.run()
if cancelled:
return
if is_send:
self.save_pending_invoice()
def sign_done(success):
if success:
self.broadcast_or_show(tx)
self.sign_tx_with_password(tx, callback=sign_done, password=password,
external_keypairs=external_keypairs)
else:
self.preview_tx_dialog(make_tx=make_tx,
external_keypairs=external_keypairs)
def preview_tx_dialog(self, *, make_tx, external_keypairs=None):
d = PreviewTxDialog(make_tx=make_tx, external_keypairs=external_keypairs,
window=self)
d.show()
def broadcast_or_show(self, tx: Transaction):
if not tx.is_complete():
self.show_transaction(tx)
return
if not self.network:
self.show_error(_("You can't broadcast a transaction without a live network connection."))
self.show_transaction(tx)
return
self.broadcast_transaction(tx)
@protected
def sign_tx(self, tx, *, callback, external_keypairs, password):
self.sign_tx_with_password(tx, callback=callback, password=password, external_keypairs=external_keypairs)
def sign_tx_with_password(self, tx: PartialTransaction, *, callback, password, external_keypairs=None):
'''Sign the transaction in a separate thread. When done, calls
the callback with a success code of True or False.
'''
def on_success(result):
callback(True)
def on_failure(exc_info):
self.on_error(exc_info)
callback(False)
on_success = run_hook('tc_sign_wrapper', self.wallet, tx, on_success, on_failure) or on_success
if external_keypairs:
# can sign directly
task = partial(tx.sign, external_keypairs)
else:
task = partial(self.wallet.sign_transaction, tx, password)
msg = _('Signing transaction...')
WaitingDialog(self, msg, task, on_success, on_failure)
def broadcast_transaction(self, tx: Transaction):
def broadcast_thread():
# non-GUI thread
pr = self.payment_request
if pr and pr.has_expired():
self.payment_request = None
return False, _("Invoice has expired")
try:
self.network.run_from_another_thread(self.network.broadcast_transaction(tx))
except TxBroadcastError as e:
return False, e.get_message_for_gui()
except BestEffortRequestFailed as e:
return False, repr(e)
# success
txid = tx.txid()
if pr:
self.payment_request = None
refund_address = self.wallet.get_receiving_address()
coro = pr.send_payment_and_receive_paymentack(tx.serialize(), refund_address)
fut = asyncio.run_coroutine_threadsafe(coro, self.network.asyncio_loop)
ack_status, ack_msg = fut.result(timeout=20)
self.logger.info(f"Payment ACK: {ack_status}. Ack message: {ack_msg}")
return True, txid
# Capture current TL window; override might be removed on return
parent = self.top_level_window(lambda win: isinstance(win, MessageBoxMixin))
def broadcast_done(result):
# GUI thread
if result:
success, msg = result
if success:
parent.show_message(_('Payment sent.') + '\n' + msg)
self.invoice_list.update()
else:
msg = msg or ''
parent.show_error(msg)
WaitingDialog(self, _('Broadcasting transaction...'),
broadcast_thread, broadcast_done, self.on_error)
def mktx_for_open_channel(self, funding_sat):
coins = self.get_coins(nonlocal_only=True)
make_tx = lambda fee_est: self.wallet.lnworker.mktx_for_open_channel(coins=coins,
funding_sat=funding_sat,
fee_est=fee_est)
return make_tx
def open_channel(self, connect_str, funding_sat, push_amt):
try:
extract_nodeid(connect_str)
except ConnStringFormatError as e:
self.show_error(str(e))
return
# use ConfirmTxDialog
# we need to know the fee before we broadcast, because the txid is required
make_tx = self.mktx_for_open_channel(funding_sat)
d = ConfirmTxDialog(window=self, make_tx=make_tx, output_value=funding_sat, is_sweep=False)
# disable the preview button: the user must not broadcast the tx before the channel establishment flow completes
d.preview_button.setEnabled(False)
cancelled, is_send, password, funding_tx = d.run()
if not is_send:
return
if cancelled:
return
# read funding_sat from tx; converts '!' to int value
funding_sat = funding_tx.output_value_for_address(ln_dummy_address())
def task():
return self.wallet.lnworker.open_channel(connect_str=connect_str,
funding_tx=funding_tx,
funding_sat=funding_sat,
push_amt_sat=push_amt,
password=password)
def on_success(args):
chan, funding_tx = args
n = chan.constraints.funding_txn_minimum_depth
message = '\n'.join([
_('Channel established.'),
_('Remote peer ID') + ':' + chan.node_id.hex(),
_('This channel will be usable after {} confirmations').format(n)
])
if not funding_tx.is_complete():
message += '\n\n' + _('Please sign and broadcast the funding transaction')
self.show_message(message)
if not funding_tx.is_complete():
self.show_transaction(funding_tx)
def on_failure(exc_info):
type_, e, traceback = exc_info
self.show_error(_('Could not open channel: {}').format(repr(e)))
WaitingDialog(self, _('Opening channel...'), task, on_success, on_failure)
def query_choice(self, msg, choices):
# Needed by QtHandler for hardware wallets
dialog = WindowModalDialog(self.top_level_window())
clayout = ChoicesLayout(msg, choices)
vbox = QVBoxLayout(dialog)
vbox.addLayout(clayout.layout())
vbox.addLayout(Buttons(OkButton(dialog)))
if not dialog.exec_():
return None
return clayout.selected_index()
def lock_amount(self, b: bool) -> None:
self.amount_e.setFrozen(b)
self.max_button.setEnabled(not b)
def prepare_for_payment_request(self):
self.show_send_tab()
self.payto_e.is_pr = True
for e in [self.payto_e, self.message_e]:
e.setFrozen(True)
self.lock_amount(True)
self.payto_e.setText(_("please wait..."))
return True
def delete_invoices(self, keys):
for key in keys:
self.wallet.delete_invoice(key)
self.invoice_list.update()
def payment_request_ok(self):
pr = self.payment_request
if not pr:
return
key = pr.get_id()
invoice = self.wallet.get_invoice(key)
if invoice and self.wallet.get_invoice_status(invoice) == PR_PAID:
self.show_message("invoice already paid")
self.do_clear()
self.payment_request = None
return
self.payto_e.is_pr = True
if not pr.has_expired():
self.payto_e.setGreen()
else:
self.payto_e.setExpired()
self.payto_e.setText(pr.get_requestor())
self.amount_e.setAmount(pr.get_amount())
self.message_e.setText(pr.get_memo())
# signal to set fee
self.amount_e.textEdited.emit("")
def payment_request_error(self):
pr = self.payment_request
if not pr:
return
self.show_message(pr.error)
self.payment_request = None
self.do_clear()
def on_pr(self, request: 'paymentrequest.PaymentRequest'):
self.set_onchain(True)
self.payment_request = request
if self.payment_request.verify(self.contacts):
self.payment_request_ok_signal.emit()
else:
self.payment_request_error_signal.emit()
def parse_lightning_invoice(self, invoice):
"""Parse ln invoice, and prepare the send tab for it."""
try:
lnaddr = lndecode(invoice, expected_hrp=constants.net.SEGWIT_HRP)
except Exception as e:
raise LnDecodeException(e) from e
pubkey = bh2u(lnaddr.pubkey.serialize())
for k,v in lnaddr.tags:
if k == 'd':
description = v
break
else:
description = ''
self.payto_e.setFrozen(True)
self.payto_e.setText(pubkey)
self.message_e.setText(description)
if lnaddr.get_amount_sat() is not None:
self.amount_e.setAmount(lnaddr.get_amount_sat())
#self.amount_e.textEdited.emit("")
self.set_onchain(False)
def set_onchain(self, b):
self._is_onchain = b
self.max_button.setEnabled(b)
def pay_to_URI(self, URI):
if not URI:
return
try:
out = util.parse_URI(URI, self.on_pr)
except InvalidBitcoinURI as e:
self.show_error(_("Error parsing URI") + f":\n{e}")
return
self.show_send_tab()
self.payto_URI = out
r = out.get('r')
sig = out.get('sig')
name = out.get('name')
if r or (name and sig):
self.prepare_for_payment_request()
return
address = out.get('address')
amount = out.get('amount')
label = out.get('label')
message = out.get('message')
# use label as description (not BIP21 compliant)
if label and not message:
message = label
if address:
self.payto_e.setText(address)
if message:
self.message_e.setText(message)
if amount:
self.amount_e.setAmount(amount)
self.amount_e.textEdited.emit("")
def do_clear(self):
self.max_button.setChecked(False)
self.payment_request = None
self.payto_URI = None
self.payto_e.is_pr = False
self.set_onchain(False)
for e in [self.payto_e, self.message_e, self.amount_e]:
e.setText('')
e.setFrozen(False)
self.update_status()
run_hook('do_clear', self)
def set_frozen_state_of_addresses(self, addrs, freeze: bool):
self.wallet.set_frozen_state_of_addresses(addrs, freeze)
self.address_list.update()
self.utxo_list.update()
def set_frozen_state_of_coins(self, utxos: Sequence[PartialTxInput], freeze: bool):
utxos_str = {utxo.prevout.to_str() for utxo in utxos}
self.wallet.set_frozen_state_of_coins(utxos_str, freeze)
self.utxo_list.update()
def create_list_tab(self, l, toolbar=None):
w = QWidget()
w.searchable_list = l
vbox = QVBoxLayout()
w.setLayout(vbox)
#vbox.setContentsMargins(0, 0, 0, 0)
#vbox.setSpacing(0)
if toolbar:
vbox.addLayout(toolbar)
vbox.addWidget(l)
return w
def create_addresses_tab(self):
from .address_list import AddressList
self.address_list = l = AddressList(self)
toolbar = l.create_toolbar(self.config)
toolbar_shown = bool(self.config.get('show_toolbar_addresses', False))
l.show_toolbar(toolbar_shown)
return self.create_list_tab(l, toolbar)
def create_utxo_tab(self):
from .utxo_list import UTXOList
self.utxo_list = UTXOList(self)
return self.create_list_tab(self.utxo_list)
def create_contacts_tab(self):
from .contact_list import ContactList
self.contact_list = l = ContactList(self)
return self.create_list_tab(l)
def remove_address(self, addr):
if not self.question(_("Do you want to remove {} from your wallet?").format(addr)):
return
try:
self.wallet.delete_address(addr)
except UserFacingException as e:
self.show_error(str(e))
else:
self.need_update.set() # history, addresses, coins
self.clear_receive_tab()
def paytomany(self):
self.show_send_tab()
self.payto_e.paytomany()
msg = '\n'.join([
_('Enter a list of outputs in the \'Pay to\' field.'),
_('One output per line.'),
_('Format: address, amount'),
_('You may load a CSV file using the file icon.')
])
self.show_message(msg, title=_('Pay to many'))
def payto_contacts(self, labels):
paytos = [self.get_contact_payto(label) for label in labels]
self.show_send_tab()
if len(paytos) == 1:
self.payto_e.setText(paytos[0])
self.amount_e.setFocus()
else:
text = "\n".join([payto + ", 0" for payto in paytos])
self.payto_e.setText(text)
self.payto_e.setFocus()
def set_contact(self, label, address):
if not is_address(address):
self.show_error(_('Invalid Address'))
self.contact_list.update() # Displays original unchanged value
return False
self.contacts[address] = ('address', label)
self.contact_list.update()
self.history_list.update()
self.update_completions()
return True
def delete_contacts(self, labels):
if not self.question(_("Remove {} from your list of contacts?")
.format(" + ".join(labels))):
return
for label in labels:
self.contacts.pop(label)
self.history_list.update()
self.contact_list.update()
self.update_completions()
def show_onchain_invoice(self, invoice: OnchainInvoice):
amount_str = self.format_amount(invoice.amount_sat) + ' ' + self.base_unit()
d = WindowModalDialog(self, _("Onchain Invoice"))
vbox = QVBoxLayout(d)
grid = QGridLayout()
grid.addWidget(QLabel(_("Amount") + ':'), 1, 0)
grid.addWidget(QLabel(amount_str), 1, 1)
if len(invoice.outputs) == 1:
grid.addWidget(QLabel(_("Address") + ':'), 2, 0)
grid.addWidget(QLabel(invoice.get_address()), 2, 1)
else:
outputs_str = '\n'.join(map(lambda x: x.address + ' : ' + self.format_amount(x.value) + ' ' + self.base_unit(), invoice.outputs))
grid.addWidget(QLabel(_("Outputs") + ':'), 2, 0)
grid.addWidget(QLabel(outputs_str), 2, 1)
grid.addWidget(QLabel(_("Description") + ':'), 3, 0)
grid.addWidget(QLabel(invoice.message), 3, 1)
if invoice.exp:
grid.addWidget(QLabel(_("Expires") + ':'), 4, 0)
grid.addWidget(QLabel(format_time(invoice.exp + invoice.time)), 4, 1)
if invoice.bip70:
pr = paymentrequest.PaymentRequest(bytes.fromhex(invoice.bip70))
pr.verify(self.contacts)
grid.addWidget(QLabel(_("Requestor") + ':'), 5, 0)
grid.addWidget(QLabel(pr.get_requestor()), 5, 1)
grid.addWidget(QLabel(_("Signature") + ':'), 6, 0)
grid.addWidget(QLabel(pr.get_verify_status()), 6, 1)
def do_export():
key = pr.get_id()
name = str(key) + '.bip70'
fn = getSaveFileName(
parent=self,
title=_("Save invoice to file"),
filename=name,
filter="*.bip70",
config=self.config,
)
if not fn:
return
with open(fn, 'wb') as f:
f.write(pr.raw)
self.show_message(_('BIP70 invoice saved as {}').format(fn))
exportButton = EnterButton(_('Export'), do_export)
buttons = Buttons(exportButton, CloseButton(d))
else:
buttons = Buttons(CloseButton(d))
vbox.addLayout(grid)
vbox.addLayout(buttons)
d.exec_()
def show_lightning_invoice(self, invoice: LNInvoice):
lnaddr = lndecode(invoice.invoice, expected_hrp=constants.net.SEGWIT_HRP)
d = WindowModalDialog(self, _("Lightning Invoice"))
vbox = QVBoxLayout(d)
grid = QGridLayout()
grid.addWidget(QLabel(_("Node ID") + ':'), 0, 0)
grid.addWidget(QLabel(lnaddr.pubkey.serialize().hex()), 0, 1)
grid.addWidget(QLabel(_("Amount") + ':'), 1, 0)
amount_str = self.format_amount(invoice.get_amount_sat()) + ' ' + self.base_unit()
grid.addWidget(QLabel(amount_str), 1, 1)
grid.addWidget(QLabel(_("Description") + ':'), 2, 0)
grid.addWidget(QLabel(invoice.message), 2, 1)
grid.addWidget(QLabel(_("Hash") + ':'), 3, 0)
payhash_e = ButtonsLineEdit(lnaddr.paymenthash.hex())
payhash_e.addCopyButton(self.app)
payhash_e.setReadOnly(True)
grid.addWidget(payhash_e, 3, 1)
if invoice.exp:
grid.addWidget(QLabel(_("Expires") + ':'), 4, 0)
grid.addWidget(QLabel(format_time(invoice.time + invoice.exp)), 4, 1)
vbox.addLayout(grid)
invoice_e = ShowQRTextEdit(config=self.config)
invoice_e.addCopyButton(self.app)
invoice_e.setText(invoice.invoice)
vbox.addWidget(invoice_e)
vbox.addLayout(Buttons(CloseButton(d),))
d.exec_()
def create_console_tab(self):
from .console import Console
self.console = console = Console()
return console
def update_console(self):
console = self.console
console.history = self.wallet.db.get("qt-console-history", [])
console.history_index = len(console.history)
console.updateNamespace({
'wallet': self.wallet,
'network': self.network,
'plugins': self.gui_object.plugins,
'window': self,
'config': self.config,
'electrum': electrum,
'daemon': self.gui_object.daemon,
'util': util,
'bitcoin': bitcoin,
'lnutil': lnutil,
})
c = commands.Commands(
config=self.config,
daemon=self.gui_object.daemon,
network=self.network,
callback=lambda: self.console.set_json(True))
methods = {}
def mkfunc(f, method):
return lambda *args, **kwargs: f(method,
args,
self.password_dialog,
**{**kwargs, 'wallet': self.wallet})
for m in dir(c):
if m[0]=='_' or m in ['network','wallet','config','daemon']: continue
methods[m] = mkfunc(c._run, m)
console.updateNamespace(methods)
def create_status_bar(self):
sb = QStatusBar()
sb.setFixedHeight(35)
self.balance_label = QLabel("Loading wallet...")
self.balance_label.setTextInteractionFlags(Qt.TextSelectableByMouse)
self.balance_label.setStyleSheet("""QLabel { padding: 0 }""")
sb.addWidget(self.balance_label)
self.search_box = QLineEdit()
self.search_box.textChanged.connect(self.do_search)
self.search_box.hide()
sb.addPermanentWidget(self.search_box)
self.update_check_button = QPushButton("")
self.update_check_button.setFlat(True)
self.update_check_button.setCursor(QCursor(Qt.PointingHandCursor))
self.update_check_button.setIcon(read_QIcon("update.png"))
self.update_check_button.hide()
sb.addPermanentWidget(self.update_check_button)
self.password_button = StatusBarButton(QIcon(), _("Password"), self.change_password_dialog )
sb.addPermanentWidget(self.password_button)
sb.addPermanentWidget(StatusBarButton(read_QIcon("preferences.png"), _("Preferences"), self.settings_dialog ) )
self.seed_button = StatusBarButton(read_QIcon("seed.png"), _("Seed"), self.show_seed_dialog )
sb.addPermanentWidget(self.seed_button)
self.lightning_button = None
if self.wallet.has_lightning() and self.network:
self.lightning_button = StatusBarButton(read_QIcon("lightning_disconnected.png"), _("Lightning Network"), self.gui_object.show_lightning_dialog)
self.update_lightning_icon()
sb.addPermanentWidget(self.lightning_button)
self.status_button = None
if self.network:
self.status_button = StatusBarButton(read_QIcon("status_disconnected.png"), _("Network"), self.gui_object.show_network_dialog)
sb.addPermanentWidget(self.status_button)
run_hook('create_status_bar', sb)
self.setStatusBar(sb)
def create_coincontrol_statusbar(self):
self.coincontrol_sb = sb = QStatusBar()
sb.setSizeGripEnabled(False)
#sb.setFixedHeight(3 * char_width_in_lineedit())
sb.setStyleSheet('QStatusBar::item {border: None;} '
+ ColorScheme.GREEN.as_stylesheet(True))
self.coincontrol_label = QLabel()
self.coincontrol_label.setSizePolicy(QSizePolicy.Preferred, QSizePolicy.Preferred)
self.coincontrol_label.setTextInteractionFlags(Qt.TextSelectableByMouse)
sb.addWidget(self.coincontrol_label)
clear_cc_button = EnterButton(_('Reset'), lambda: self.utxo_list.set_spend_list(None))
clear_cc_button.setStyleSheet("margin-right: 5px;")
sb.addPermanentWidget(clear_cc_button)
sb.setVisible(False)
return sb
def set_coincontrol_msg(self, msg: Optional[str]) -> None:
if not msg:
self.coincontrol_label.setText("")
self.coincontrol_sb.setVisible(False)
return
self.coincontrol_label.setText(msg)
self.coincontrol_sb.setVisible(True)
def update_lightning_icon(self):
if self.lightning_button is None:
return
if self.network.lngossip is None:
return
# display colorful lightning icon to signal connection
self.lightning_button.setIcon(read_QIcon("lightning.png"))
cur, total, progress_percent = self.network.lngossip.get_sync_progress_estimate()
# self.logger.debug(f"updating lngossip sync progress estimate: cur={cur}, total={total}")
progress_str = "??%"
if progress_percent is not None:
progress_str = f"{progress_percent}%"
if progress_percent and progress_percent >= 100:
self.lightning_button.setMaximumWidth(25)
self.lightning_button.setText('')
self.lightning_button.setToolTip(_("The Lightning Network graph is fully synced."))
else:
self.lightning_button.setMaximumWidth(25 + 5 * char_width_in_lineedit())
self.lightning_button.setText(progress_str)
self.lightning_button.setToolTip(_("The Lightning Network graph is syncing...\n"
"Payments are more likely to succeed with a more complete graph."))
def update_lock_icon(self):
icon = read_QIcon("lock.png") if self.wallet.has_password() else read_QIcon("unlock.png")
self.password_button.setIcon(icon)
def update_buttons_on_seed(self):
self.seed_button.setVisible(self.wallet.has_seed())
self.password_button.setVisible(self.wallet.may_have_password())
def change_password_dialog(self):
from electrum.storage import StorageEncryptionVersion
if self.wallet.get_available_storage_encryption_version() == StorageEncryptionVersion.XPUB_PASSWORD:
from .password_dialog import ChangePasswordDialogForHW
d = ChangePasswordDialogForHW(self, self.wallet)
ok, encrypt_file = d.run()
if not ok:
return
try:
hw_dev_pw = self.wallet.keystore.get_password_for_storage_encryption()
except UserCancelled:
return
except BaseException as e:
self.logger.exception('')
self.show_error(repr(e))
return
old_password = hw_dev_pw if self.wallet.has_password() else None
new_password = hw_dev_pw if encrypt_file else None
else:
from .password_dialog import ChangePasswordDialogForSW
d = ChangePasswordDialogForSW(self, self.wallet)
ok, old_password, new_password, encrypt_file = d.run()
if not ok:
return
try:
self.wallet.update_password(old_password, new_password, encrypt_storage=encrypt_file)
except InvalidPassword as e:
self.show_error(str(e))
return
except BaseException:
self.logger.exception('Failed to update password')
self.show_error(_('Failed to update password'))
return
msg = _('Password was updated successfully') if self.wallet.has_password() else _('Password is disabled, this wallet is not protected')
self.show_message(msg, title=_("Success"))
self.update_lock_icon()
def toggle_search(self):
self.search_box.setHidden(not self.search_box.isHidden())
if not self.search_box.isHidden():
self.search_box.setFocus(1)
else:
self.do_search('')
def do_search(self, t):
tab = self.tabs.currentWidget()
if hasattr(tab, 'searchable_list'):
tab.searchable_list.filter(t)
def new_contact_dialog(self):
d = WindowModalDialog(self, _("New Contact"))
vbox = QVBoxLayout(d)
vbox.addWidget(QLabel(_('New Contact') + ':'))
grid = QGridLayout()
line1 = QLineEdit()
line1.setFixedWidth(32 * char_width_in_lineedit())
line2 = QLineEdit()
line2.setFixedWidth(32 * char_width_in_lineedit())
grid.addWidget(QLabel(_("Address")), 1, 0)
grid.addWidget(line1, 1, 1)
grid.addWidget(QLabel(_("Name")), 2, 0)
grid.addWidget(line2, 2, 1)
vbox.addLayout(grid)
vbox.addLayout(Buttons(CancelButton(d), OkButton(d)))
if d.exec_():
self.set_contact(line2.text(), line1.text())
def show_wallet_info(self):
dialog = WindowModalDialog(self, _("Wallet Information"))
dialog.setMinimumSize(500, 100)
vbox = QVBoxLayout()
wallet_type = self.wallet.db.get('wallet_type', '')
if self.wallet.is_watching_only():
wallet_type += ' [{}]'.format(_('watching-only'))
seed_available = _('True') if self.wallet.has_seed() else _('False')
keystore_types = [k.get_type_text() for k in self.wallet.get_keystores()]
grid = QGridLayout()
basename = os.path.basename(self.wallet.storage.path)
grid.addWidget(QLabel(_("Wallet name")+ ':'), 0, 0)
grid.addWidget(QLabel(basename), 0, 1)
grid.addWidget(QLabel(_("Wallet type")+ ':'), 1, 0)
grid.addWidget(QLabel(wallet_type), 1, 1)
grid.addWidget(QLabel(_("Script type")+ ':'), 2, 0)
grid.addWidget(QLabel(self.wallet.txin_type), 2, 1)
grid.addWidget(QLabel(_("Seed available") + ':'), 3, 0)
grid.addWidget(QLabel(str(seed_available)), 3, 1)
if len(keystore_types) <= 1:
grid.addWidget(QLabel(_("Keystore type") + ':'), 4, 0)
ks_type = str(keystore_types[0]) if keystore_types else _('No keystore')
grid.addWidget(QLabel(ks_type), 4, 1)
# lightning
grid.addWidget(QLabel(_('Lightning') + ':'), 5, 0)
if self.wallet.can_have_lightning():
grid.addWidget(QLabel(_('Enabled')), 5, 1)
local_nodeid = QLabel(bh2u(self.wallet.lnworker.node_keypair.pubkey))
local_nodeid.setTextInteractionFlags(Qt.TextSelectableByMouse)
grid.addWidget(QLabel(_('Lightning Node ID:')), 6, 0)
grid.addWidget(local_nodeid, 6, 1, 1, 3)
else:
grid.addWidget(QLabel(_("Not available for this wallet.")), 5, 1)
grid.addWidget(HelpButton(_("Lightning is currently restricted to HD wallets with p2wpkh addresses.")), 5, 2)
vbox.addLayout(grid)
labels_clayout = None
if self.wallet.is_deterministic():
keystores = self.wallet.get_keystores()
ks_stack = QStackedWidget()
def select_ks(index):
ks_stack.setCurrentIndex(index)
# only show the combobox in case multiple accounts are available
if len(keystores) > 1:
def label(idx, ks):
if isinstance(self.wallet, Multisig_Wallet) and hasattr(ks, 'label'):
return _("cosigner") + f' {idx+1}: {ks.get_type_text()} {ks.label}'
else:
return _("keystore") + f' {idx+1}'
labels = [label(idx, ks) for idx, ks in enumerate(self.wallet.get_keystores())]
on_click = lambda clayout: select_ks(clayout.selected_index())
labels_clayout = ChoicesLayout(_("Select keystore"), labels, on_click)
vbox.addLayout(labels_clayout.layout())
for ks in keystores:
ks_w = QWidget()
ks_vbox = QVBoxLayout()
ks_vbox.setContentsMargins(0, 0, 0, 0)
ks_w.setLayout(ks_vbox)
mpk_text = ShowQRTextEdit(ks.get_master_public_key(), config=self.config)
mpk_text.setMaximumHeight(150)
mpk_text.addCopyButton(self.app)
run_hook('show_xpub_button', mpk_text, ks)
der_path_hbox = QHBoxLayout()
der_path_hbox.setContentsMargins(0, 0, 0, 0)
der_path_hbox.addWidget(QLabel(_("Derivation path") + ':'))
der_path_text = QLabel(ks.get_derivation_prefix() or _("unknown"))
der_path_text.setTextInteractionFlags(Qt.TextSelectableByMouse)
der_path_hbox.addWidget(der_path_text)
der_path_hbox.addStretch()
ks_vbox.addWidget(QLabel(_("Master Public Key")))
ks_vbox.addWidget(mpk_text)
ks_vbox.addLayout(der_path_hbox)
ks_stack.addWidget(ks_w)
select_ks(0)
vbox.addWidget(ks_stack)
vbox.addStretch(1)
btn_export_info = run_hook('wallet_info_buttons', self, dialog)
btn_close = CloseButton(dialog)
btns = Buttons(btn_export_info, btn_close)
vbox.addLayout(btns)
dialog.setLayout(vbox)
dialog.exec_()
def remove_wallet(self):
if self.question('\n'.join([
_('Delete wallet file?'),
"%s"%self.wallet.storage.path,
_('If your wallet contains funds, make sure you have saved its seed.')])):
self._delete_wallet()
@protected
def _delete_wallet(self, password):
wallet_path = self.wallet.storage.path
basename = os.path.basename(wallet_path)
r = self.gui_object.daemon.delete_wallet(wallet_path)
self.close()
if r:
self.show_error(_("Wallet removed: {}").format(basename))
else:
self.show_error(_("Wallet file not found: {}").format(basename))
@protected
def show_seed_dialog(self, password):
if not self.wallet.has_seed():
self.show_message(_('This wallet has no seed'))
return
keystore = self.wallet.get_keystore()
try:
seed = keystore.get_seed(password)
passphrase = keystore.get_passphrase(password)
except BaseException as e:
self.show_error(repr(e))
return
from .seed_dialog import SeedDialog
d = SeedDialog(self, seed, passphrase, config=self.config)
d.exec_()
def show_qrcode(self, data, title = _("QR code"), parent=None, *,
help_text=None, show_copy_text_btn=False):
if not data:
return
d = QRDialog(
data=data,
parent=parent or self,
title=title,
help_text=help_text,
show_copy_text_btn=show_copy_text_btn,
config=self.config,
)
d.exec_()
@protected
def show_private_key(self, address, password):
if not address:
return
try:
pk = self.wallet.export_private_key(address, password)
except Exception as e:
self.logger.exception('')
self.show_message(repr(e))
return
xtype = bitcoin.deserialize_privkey(pk)[0]
d = WindowModalDialog(self, _("Private key"))
d.setMinimumSize(600, 150)
vbox = QVBoxLayout()
vbox.addWidget(QLabel(_("Address") + ': ' + address))
vbox.addWidget(QLabel(_("Script type") + ': ' + xtype))
vbox.addWidget(QLabel(_("Private key") + ':'))
keys_e = ShowQRTextEdit(text=pk, config=self.config)
keys_e.addCopyButton(self.app)
vbox.addWidget(keys_e)
vbox.addLayout(Buttons(CloseButton(d)))
d.setLayout(vbox)
d.exec_()
msg_sign = _("Signing with an address actually means signing with the corresponding "
"private key, and verifying with the corresponding public key. The "
"address you have entered does not have a unique public key, so these "
"operations cannot be performed.") + '\n\n' + \
_('The operation is undefined. Not just in Electrum, but in general.')
@protected
def do_sign(self, address, message, signature, password):
address = address.text().strip()
message = message.toPlainText().strip()
if not bitcoin.is_address(address):
self.show_message(_('Invalid ECCoin address.'))
return
if self.wallet.is_watching_only():
self.show_message(_('This is a watching-only wallet.'))
return
if not self.wallet.is_mine(address):
self.show_message(_('Address not in wallet.'))
return
txin_type = self.wallet.get_txin_type(address)
if txin_type not in ['p2pkh', 'p2wpkh', 'p2wpkh-p2sh']:
self.show_message(_('Cannot sign messages with this type of address:') + \
' ' + txin_type + '\n\n' + self.msg_sign)
return
task = partial(self.wallet.sign_message, address, message, password)
def show_signed_message(sig):
try:
signature.setText(base64.b64encode(sig).decode('ascii'))
except RuntimeError:
# (signature) wrapped C/C++ object has been deleted
pass
self.wallet.thread.add(task, on_success=show_signed_message)
def do_verify(self, address, message, signature):
address = address.text().strip()
message = message.toPlainText().strip().encode('utf-8')
if not bitcoin.is_address(address):
self.show_message(_('Invalid ECCoin address.'))
return
try:
# This can throw on invalid base64
sig = base64.b64decode(str(signature.toPlainText()))
verified = ecc.verify_message_with_address(address, sig, message)
except Exception as e:
verified = False
if verified:
self.show_message(_("Signature verified"))
else:
self.show_error(_("Wrong signature"))
def sign_verify_message(self, address=''):
d = WindowModalDialog(self, _('Sign/verify Message'))
d.setMinimumSize(610, 290)
layout = QGridLayout(d)
message_e = QTextEdit()
message_e.setAcceptRichText(False)
layout.addWidget(QLabel(_('Message')), 1, 0)
layout.addWidget(message_e, 1, 1)
layout.setRowStretch(2,3)
address_e = QLineEdit()
address_e.setText(address)
layout.addWidget(QLabel(_('Address')), 2, 0)
layout.addWidget(address_e, 2, 1)
signature_e = QTextEdit()
signature_e.setAcceptRichText(False)
layout.addWidget(QLabel(_('Signature')), 3, 0)
layout.addWidget(signature_e, 3, 1)
layout.setRowStretch(3,1)
hbox = QHBoxLayout()
b = QPushButton(_("Sign"))
b.clicked.connect(lambda: self.do_sign(address_e, message_e, signature_e))
hbox.addWidget(b)
b = QPushButton(_("Verify"))
b.clicked.connect(lambda: self.do_verify(address_e, message_e, signature_e))
hbox.addWidget(b)
b = QPushButton(_("Close"))
b.clicked.connect(d.accept)
hbox.addWidget(b)
layout.addLayout(hbox, 4, 1)
d.exec_()
@protected
def do_decrypt(self, message_e, pubkey_e, encrypted_e, password):
if self.wallet.is_watching_only():
self.show_message(_('This is a watching-only wallet.'))
return
cyphertext = encrypted_e.toPlainText()
task = partial(self.wallet.decrypt_message, pubkey_e.text(), cyphertext, password)
def setText(text):
try:
message_e.setText(text.decode('utf-8'))
except RuntimeError:
# (message_e) wrapped C/C++ object has been deleted
pass
self.wallet.thread.add(task, on_success=setText)
def do_encrypt(self, message_e, pubkey_e, encrypted_e):
message = message_e.toPlainText()
message = message.encode('utf-8')
try:
public_key = ecc.ECPubkey(bfh(pubkey_e.text()))
except BaseException as e:
self.logger.exception('Invalid Public key')
self.show_warning(_('Invalid Public key'))
return
encrypted = public_key.encrypt_message(message)
encrypted_e.setText(encrypted.decode('ascii'))
def encrypt_message(self, address=''):
d = WindowModalDialog(self, _('Encrypt/decrypt Message'))
d.setMinimumSize(610, 490)
layout = QGridLayout(d)
message_e = QTextEdit()
message_e.setAcceptRichText(False)
layout.addWidget(QLabel(_('Message')), 1, 0)
layout.addWidget(message_e, 1, 1)
layout.setRowStretch(2,3)
pubkey_e = QLineEdit()
if address:
pubkey = self.wallet.get_public_key(address)
pubkey_e.setText(pubkey)
layout.addWidget(QLabel(_('Public key')), 2, 0)
layout.addWidget(pubkey_e, 2, 1)
encrypted_e = QTextEdit()
encrypted_e.setAcceptRichText(False)
layout.addWidget(QLabel(_('Encrypted')), 3, 0)
layout.addWidget(encrypted_e, 3, 1)
layout.setRowStretch(3,1)
hbox = QHBoxLayout()
b = QPushButton(_("Encrypt"))
b.clicked.connect(lambda: self.do_encrypt(message_e, pubkey_e, encrypted_e))
hbox.addWidget(b)
b = QPushButton(_("Decrypt"))
b.clicked.connect(lambda: self.do_decrypt(message_e, pubkey_e, encrypted_e))
hbox.addWidget(b)
b = QPushButton(_("Close"))
b.clicked.connect(d.accept)
hbox.addWidget(b)
layout.addLayout(hbox, 4, 1)
d.exec_()
def password_dialog(self, msg=None, parent=None):
from .password_dialog import PasswordDialog
parent = parent or self
d = PasswordDialog(parent, msg)
return d.run()
def tx_from_text(self, data: Union[str, bytes]) -> Union[None, 'PartialTransaction', 'Transaction']:
from electrum.transaction import tx_from_any
try:
return tx_from_any(data)
except BaseException as e:
self.show_critical(_("Electrum-ECC was unable to parse your transaction") + ":\n" + repr(e))
return
def import_channel_backup(self, encrypted: str):
if not self.question('Import channel backup?'):
return
try:
self.wallet.lnbackups.import_channel_backup(encrypted)
except Exception as e:
self.show_error("failed to import backup" + '\n' + str(e))
return
def read_tx_from_qrcode(self):
from electrum import qrscanner
try:
data = qrscanner.scan_barcode(self.config.get_video_device())
except BaseException as e:
self.show_error(repr(e))
return
if not data:
return
# if the user scanned a bitcoin URI
if data.lower().startswith(BITCOIN_BIP21_URI_SCHEME + ':'):
self.pay_to_URI(data)
return
if data.lower().startswith('channel_backup:'):
self.import_channel_backup(data)
return
# else if the user scanned an offline signed tx
tx = self.tx_from_text(data)
if not tx:
return
self.show_transaction(tx)
def read_tx_from_file(self) -> Optional[Transaction]:
fileName = getOpenFileName(
parent=self,
title=_("Select your transaction file"),
filter=TRANSACTION_FILE_EXTENSION_FILTER_ANY,
config=self.config,
)
if not fileName:
return
try:
with open(fileName, "rb") as f:
file_content = f.read() # type: Union[str, bytes]
except (ValueError, IOError, os.error) as reason:
self.show_critical(_("Electrum-ECC was unable to open your transaction file") + "\n" + str(reason),
title=_("Unable to read file or no transaction found"))
return
return self.tx_from_text(file_content)
def do_process_from_text(self):
text = text_dialog(
parent=self,
title=_('Input raw transaction'),
header_layout=_("Transaction:"),
ok_label=_("Load transaction"),
config=self.config,
)
if not text:
return
tx = self.tx_from_text(text)
if tx:
self.show_transaction(tx)
def do_process_from_text_channel_backup(self):
text = text_dialog(
parent=self,
title=_('Input channel backup'),
header_layout=_("Channel Backup:"),
ok_label=_("Load backup"),
config=self.config,
)
if not text:
return
if text.startswith('channel_backup:'):
self.import_channel_backup(text)
def do_process_from_file(self):
tx = self.read_tx_from_file()
if tx:
self.show_transaction(tx)
def do_process_from_txid(self):
from electrum import transaction
txid, ok = QInputDialog.getText(self, _('Lookup transaction'), _('Transaction ID') + ':')
if ok and txid:
txid = str(txid).strip()
raw_tx = self._fetch_tx_from_network(txid)
if not raw_tx:
return
tx = transaction.Transaction(raw_tx)
self.show_transaction(tx)
def _fetch_tx_from_network(self, txid: str) -> Optional[str]:
if not self.network:
self.show_message(_("You are offline."))
return
try:
raw_tx = self.network.run_from_another_thread(
self.network.get_transaction(txid, timeout=10))
except UntrustedServerReturnedError as e:
self.logger.info(f"Error getting transaction from network: {repr(e)}")
self.show_message(_("Error getting transaction from network") + ":\n" + e.get_message_for_gui())
return
except Exception as e:
self.show_message(_("Error getting transaction from network") + ":\n" + repr(e))
return
return raw_tx
@protected
def export_privkeys_dialog(self, password):
if self.wallet.is_watching_only():
self.show_message(_("This is a watching-only wallet"))
return
if isinstance(self.wallet, Multisig_Wallet):
self.show_message(_('WARNING: This is a multi-signature wallet.') + '\n' +
_('It cannot be "backed up" by simply exporting these private keys.'))
d = WindowModalDialog(self, _('Private keys'))
d.setMinimumSize(980, 300)
vbox = QVBoxLayout(d)
msg = "%s\n%s\n%s" % (_("WARNING: ALL your private keys are secret."),
_("Exposing a single private key can compromise your entire wallet!"),
_("In particular, DO NOT use 'redeem private key' services proposed by third parties."))
vbox.addWidget(QLabel(msg))
e = QTextEdit()
e.setReadOnly(True)
vbox.addWidget(e)
defaultname = 'electrum-private-keys.csv'
select_msg = _('Select file to export your private keys to')
hbox, filename_e, csv_button = filename_field(self, self.config, defaultname, select_msg)
vbox.addLayout(hbox)
b = OkButton(d, _('Export'))
b.setEnabled(False)
vbox.addLayout(Buttons(CancelButton(d), b))
private_keys = {}
addresses = self.wallet.get_addresses()
done = False
cancelled = False
def privkeys_thread():
for addr in addresses:
time.sleep(0.1)
if done or cancelled:
break
privkey = self.wallet.export_private_key(addr, password)
private_keys[addr] = privkey
self.computing_privkeys_signal.emit()
if not cancelled:
self.computing_privkeys_signal.disconnect()
self.show_privkeys_signal.emit()
def show_privkeys():
s = "\n".join( map( lambda x: x[0] + "\t"+ x[1], private_keys.items()))
e.setText(s)
b.setEnabled(True)
self.show_privkeys_signal.disconnect()
nonlocal done
done = True
def on_dialog_closed(*args):
nonlocal done
nonlocal cancelled
if not done:
cancelled = True
self.computing_privkeys_signal.disconnect()
self.show_privkeys_signal.disconnect()
self.computing_privkeys_signal.connect(lambda: e.setText("Please wait... %d/%d"%(len(private_keys),len(addresses))))
self.show_privkeys_signal.connect(show_privkeys)
d.finished.connect(on_dialog_closed)
threading.Thread(target=privkeys_thread).start()
if not d.exec_():
done = True
return
filename = filename_e.text()
if not filename:
return
try:
self.do_export_privkeys(filename, private_keys, csv_button.isChecked())
except (IOError, os.error) as reason:
txt = "\n".join([
_("Electrum-ECC was unable to produce a private key-export."),
str(reason)
])
self.show_critical(txt, title=_("Unable to create csv"))
except Exception as e:
self.show_message(repr(e))
return
self.show_message(_("Private keys exported."))
def do_export_privkeys(self, fileName, pklist, is_csv):
with open(fileName, "w+") as f:
os.chmod(fileName, 0o600)
if is_csv:
transaction = csv.writer(f)
transaction.writerow(["address", "private_key"])
for addr, pk in pklist.items():
transaction.writerow(["%34s"%addr,pk])
else:
f.write(json.dumps(pklist, indent = 4))
def do_import_labels(self):
def on_import():
self.need_update.set()
import_meta_gui(self, _('labels'), self.wallet.import_labels, on_import)
def do_export_labels(self):
export_meta_gui(self, _('labels'), self.wallet.export_labels)
def import_invoices(self):
import_meta_gui(self, _('invoices'), self.wallet.import_invoices, self.invoice_list.update)
def export_invoices(self):
export_meta_gui(self, _('invoices'), self.wallet.export_invoices)
def import_requests(self):
import_meta_gui(self, _('requests'), self.wallet.import_requests, self.request_list.update)
def export_requests(self):
export_meta_gui(self, _('requests'), self.wallet.export_requests)
def import_contacts(self):
import_meta_gui(self, _('contacts'), self.contacts.import_file, self.contact_list.update)
def export_contacts(self):
export_meta_gui(self, _('contacts'), self.contacts.export_file)
def sweep_key_dialog(self):
d = WindowModalDialog(self, title=_('Sweep private keys'))
d.setMinimumSize(600, 300)
vbox = QVBoxLayout(d)
hbox_top = QHBoxLayout()
hbox_top.addWidget(QLabel(_("Enter private keys:")))
hbox_top.addWidget(InfoButton(WIF_HELP_TEXT), alignment=Qt.AlignRight)
vbox.addLayout(hbox_top)
keys_e = ScanQRTextEdit(allow_multi=True, config=self.config)
keys_e.setTabChangesFocus(True)
vbox.addWidget(keys_e)
addresses = self.wallet.get_unused_addresses()
if not addresses:
try:
addresses = self.wallet.get_receiving_addresses()
except AttributeError:
addresses = self.wallet.get_addresses()
h, address_e = address_field(addresses)
vbox.addLayout(h)
vbox.addStretch(1)
button = OkButton(d, _('Sweep'))
vbox.addLayout(Buttons(CancelButton(d), button))
button.setEnabled(False)
def get_address():
addr = str(address_e.text()).strip()
if bitcoin.is_address(addr):
return addr
def get_pk(*, raise_on_error=False):
text = str(keys_e.toPlainText())
return keystore.get_private_keys(text, raise_on_error=raise_on_error)
def on_edit():
valid_privkeys = False
try:
valid_privkeys = get_pk(raise_on_error=True) is not None
except Exception as e:
button.setToolTip(f'{_("Error")}: {repr(e)}')
else:
button.setToolTip('')
button.setEnabled(get_address() is not None and valid_privkeys)
on_address = lambda text: address_e.setStyleSheet((ColorScheme.DEFAULT if get_address() else ColorScheme.RED).as_stylesheet())
keys_e.textChanged.connect(on_edit)
address_e.textChanged.connect(on_edit)
address_e.textChanged.connect(on_address)
on_address(str(address_e.text()))
if not d.exec_():
return
# user pressed "sweep"
addr = get_address()
try:
self.wallet.check_address_for_corruption(addr)
except InternalAddressCorruption as e:
self.show_error(str(e))
raise
privkeys = get_pk()
def on_success(result):
coins, keypairs = result
outputs = [PartialTxOutput.from_address_and_value(addr, value='!')]
self.warn_if_watching_only()
self.pay_onchain_dialog(coins, outputs, external_keypairs=keypairs)
def on_failure(exc_info):
self.on_error(exc_info)
msg = _('Preparing sweep transaction...')
task = lambda: self.network.run_from_another_thread(
sweep_preparations(privkeys, self.network))
WaitingDialog(self, msg, task, on_success, on_failure)
def _do_import(self, title, header_layout, func):
text = text_dialog(
parent=self,
title=title,
header_layout=header_layout,
ok_label=_('Import'),
allow_multi=True,
config=self.config,
)
if not text:
return
keys = str(text).split()
good_inputs, bad_inputs = func(keys)
if good_inputs:
msg = '\n'.join(good_inputs[:10])
if len(good_inputs) > 10: msg += '\n...'
self.show_message(_("The following addresses were added")
+ f' ({len(good_inputs)}):\n' + msg)
if bad_inputs:
msg = "\n".join(f"{key[:10]}... ({msg})" for key, msg in bad_inputs[:10])
if len(bad_inputs) > 10: msg += '\n...'
self.show_error(_("The following inputs could not be imported")
+ f' ({len(bad_inputs)}):\n' + msg)
self.address_list.update()
self.history_list.update()
def import_addresses(self):
if not self.wallet.can_import_address():
return
title, msg = _('Import addresses'), _("Enter addresses")+':'
self._do_import(title, msg, self.wallet.import_addresses)
@protected
def do_import_privkey(self, password):
if not self.wallet.can_import_privkey():
return
title = _('Import private keys')
header_layout = QHBoxLayout()
header_layout.addWidget(QLabel(_("Enter private keys")+':'))
header_layout.addWidget(InfoButton(WIF_HELP_TEXT), alignment=Qt.AlignRight)
self._do_import(title, header_layout, lambda x: self.wallet.import_private_keys(x, password))
def update_fiat(self):
b = self.fx and self.fx.is_enabled()
self.fiat_send_e.setVisible(b)
self.fiat_receive_e.setVisible(b)
self.history_list.update()
self.address_list.refresh_headers()
self.address_list.update()
self.update_status()
def settings_dialog(self):
from .settings_dialog import SettingsDialog
d = SettingsDialog(self, self.config)
self.alias_received_signal.connect(d.set_alias_color)
d.exec_()
self.alias_received_signal.disconnect(d.set_alias_color)
if self.fx:
self.fx.trigger_update()
run_hook('close_settings_dialog')
if d.need_restart:
self.show_warning(_('Please restart Electrum-ECC to activate the new GUI settings'), title=_('Success'))
def closeEvent(self, event):
# It seems in some rare cases this closeEvent() is called twice
if not self.cleaned_up:
self.cleaned_up = True
self.clean_up()
event.accept()
def clean_up(self):
self.wallet.thread.stop()
util.unregister_callback(self.on_network)
self.config.set_key("is_maximized", self.isMaximized())
if not self.isMaximized():
g = self.geometry()
self.wallet.db.put("winpos-qt", [g.left(),g.top(),
g.width(),g.height()])
self.wallet.db.put("qt-console-history", self.console.history[-50:])
if self.qr_window:
self.qr_window.close()
self.close_wallet()
self.gui_object.timer.timeout.disconnect(self.timer_actions)
self.gui_object.close_window(self)
def plugins_dialog(self):
self.pluginsdialog = d = WindowModalDialog(self, _('Electrum-ECC Plugins'))
plugins = self.gui_object.plugins
vbox = QVBoxLayout(d)
# plugins
scroll = QScrollArea()
scroll.setEnabled(True)
scroll.setWidgetResizable(True)
scroll.setMinimumSize(400,250)
vbox.addWidget(scroll)
w = QWidget()
scroll.setWidget(w)
w.setMinimumHeight(plugins.count() * 35)
grid = QGridLayout()
grid.setColumnStretch(0,1)
w.setLayout(grid)
settings_widgets = {}
def enable_settings_widget(p: Optional['BasePlugin'], name: str, i: int):
widget = settings_widgets.get(name) # type: Optional[QWidget]
if widget and not p:
# plugin got disabled, rm widget
grid.removeWidget(widget)
widget.setParent(None)
settings_widgets.pop(name)
elif widget is None and p and p.requires_settings() and p.is_enabled():
# plugin got enabled, add widget
widget = settings_widgets[name] = p.settings_widget(d)
grid.addWidget(widget, i, 1)
def do_toggle(cb, name, i):
p = plugins.toggle(name)
cb.setChecked(bool(p))
enable_settings_widget(p, name, i)
# note: all enabled plugins will receive this hook:
run_hook('init_qt', self.gui_object)
for i, descr in enumerate(plugins.descriptions.values()):
full_name = descr['__name__']
prefix, _separator, name = full_name.rpartition('.')
p = plugins.get(name)
if descr.get('registers_keystore'):
continue
try:
cb = QCheckBox(descr['fullname'])
plugin_is_loaded = p is not None
cb_enabled = (not plugin_is_loaded and plugins.is_available(name, self.wallet)
or plugin_is_loaded and p.can_user_disable())
cb.setEnabled(cb_enabled)
cb.setChecked(plugin_is_loaded and p.is_enabled())
grid.addWidget(cb, i, 0)
enable_settings_widget(p, name, i)
cb.clicked.connect(partial(do_toggle, cb, name, i))
msg = descr['description']
if descr.get('requires'):
msg += '\n\n' + _('Requires') + ':\n' + '\n'.join(map(lambda x: x[1], descr.get('requires')))
grid.addWidget(HelpButton(msg), i, 2)
except Exception:
self.logger.exception(f"cannot display plugin {name}")
grid.setRowStretch(len(plugins.descriptions.values()), 1)
vbox.addLayout(Buttons(CloseButton(d)))
d.exec_()
def cpfp_dialog(self, parent_tx: Transaction) -> None:
new_tx = self.wallet.cpfp(parent_tx, 0)
total_size = parent_tx.estimated_size() + new_tx.estimated_size()
parent_txid = parent_tx.txid()
assert parent_txid
parent_fee = self.wallet.get_tx_fee(parent_txid)
if parent_fee is None:
self.show_error(_("Can't CPFP: unknown fee for parent transaction."))
return
d = WindowModalDialog(self, _('Child Pays for Parent'))
vbox = QVBoxLayout(d)
msg = (
"A CPFP is a transaction that sends an unconfirmed output back to "
"yourself, with a high fee. The goal is to have miners confirm "
"the parent transaction in order to get the fee attached to the "
"child transaction.")
vbox.addWidget(WWLabel(_(msg)))
msg2 = ("The proposed fee is computed using your "
"fee/kB settings, applied to the total size of both child and "
"parent transactions. After you broadcast a CPFP transaction, "
"it is normal to see a new unconfirmed transaction in your history.")
vbox.addWidget(WWLabel(_(msg2)))
grid = QGridLayout()
grid.addWidget(QLabel(_('Total size') + ':'), 0, 0)
grid.addWidget(QLabel('%d bytes'% total_size), 0, 1)
max_fee = new_tx.output_value()
grid.addWidget(QLabel(_('Input amount') + ':'), 1, 0)
grid.addWidget(QLabel(self.format_amount(max_fee) + ' ' + self.base_unit()), 1, 1)
output_amount = QLabel('')
grid.addWidget(QLabel(_('Output amount') + ':'), 2, 0)
grid.addWidget(output_amount, 2, 1)
fee_e = BTCAmountEdit(self.get_decimal_point)
# FIXME with dyn fees, without estimates, there are all kinds of crashes here
combined_fee = QLabel('')
combined_feerate = QLabel('')
def on_fee_edit(x):
fee_for_child = fee_e.get_amount()
if fee_for_child is None:
return
out_amt = max_fee - fee_for_child
out_amt_str = (self.format_amount(out_amt) + ' ' + self.base_unit()) if out_amt else ''
output_amount.setText(out_amt_str)
comb_fee = parent_fee + fee_for_child
comb_fee_str = (self.format_amount(comb_fee) + ' ' + self.base_unit()) if comb_fee else ''
combined_fee.setText(comb_fee_str)
comb_feerate = comb_fee / total_size * 1000
comb_feerate_str = self.format_fee_rate(comb_feerate) if comb_feerate else ''
combined_feerate.setText(comb_feerate_str)
fee_e.textChanged.connect(on_fee_edit)
def get_child_fee_from_total_feerate(fee_per_kb):
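# Desired combined fee is fee_per_kb * total_size / 1000; the child pays the part the
# parent does not already cover, capped at max_fee and floored at ~1 sat/byte of the combined size.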
fee = fee_per_kb * total_size / 1000 - parent_fee
fee = min(max_fee, fee)
fee = max(total_size, fee) # pay at least 1 sat/byte for combined size
return fee
suggested_feerate = self.config.fee_per_kb()
if suggested_feerate is None:
self.show_error(f'''{_("Can't CPFP'")}: {_('Dynamic fee estimates not available')}''')
return
fee = get_child_fee_from_total_feerate(suggested_feerate)
fee_e.setAmount(fee)
grid.addWidget(QLabel(_('Fee for child') + ':'), 3, 0)
grid.addWidget(fee_e, 3, 1)
def on_rate(dyn, pos, fee_rate):
fee = get_child_fee_from_total_feerate(fee_rate)
fee_e.setAmount(fee)
fee_slider = FeeSlider(self, self.config, on_rate)
fee_combo = FeeComboBox(fee_slider)
fee_slider.update()
grid.addWidget(fee_slider, 4, 1)
grid.addWidget(fee_combo, 4, 2)
grid.addWidget(QLabel(_('Total fee') + ':'), 5, 0)
grid.addWidget(combined_fee, 5, 1)
grid.addWidget(QLabel(_('Total feerate') + ':'), 6, 0)
grid.addWidget(combined_feerate, 6, 1)
vbox.addLayout(grid)
vbox.addLayout(Buttons(CancelButton(d), OkButton(d)))
if not d.exec_():
return
fee = fee_e.get_amount()
if fee is None:
return # fee left empty, treat it as "cancel"
if fee > max_fee:
self.show_error(_('Max fee exceeded'))
return
try:
new_tx = self.wallet.cpfp(parent_tx, fee)
except CannotCPFP as e:
self.show_error(str(e))
return
new_tx.set_rbf(True)
self.show_transaction(new_tx)
def _add_info_to_tx_from_wallet_and_network(self, tx: PartialTransaction) -> bool:
"""Returns whether successful."""
# note side-effect: tx is being mutated
assert isinstance(tx, PartialTransaction)
try:
# note: this might download input utxos over network
BlockingWaitingDialog(
self,
_("Adding info to tx, from wallet and network..."),
lambda: tx.add_info_from_wallet(self.wallet, ignore_network_issues=False),
)
except NetworkException as e:
self.show_error(repr(e))
return False
return True
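# Shared dialog behind "Bump Fee" and "Cancel transaction" below: `func` builds a
# replacement transaction for a candidate fee rate chosen via the slider or the manual feerate field.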
def _rbf_dialog(self, tx: Transaction, func, title, help_text):
txid = tx.txid()
assert txid
if not isinstance(tx, PartialTransaction):
tx = PartialTransaction.from_tx(tx)
if not self._add_info_to_tx_from_wallet_and_network(tx):
return
fee = tx.get_fee()
assert fee is not None
tx_label = self.wallet.get_label_for_txid(txid)
tx_size = tx.estimated_size()
old_fee_rate = fee / tx_size # sat/vbyte
d = WindowModalDialog(self, title)
vbox = QVBoxLayout(d)
vbox.addWidget(WWLabel(help_text))
ok_button = OkButton(d)
warning_label = WWLabel('\n')
warning_label.setStyleSheet(ColorScheme.RED.as_stylesheet())
feerate_e = FeerateEdit(lambda: 0)
feerate_e.setAmount(max(old_fee_rate * 1.5, old_fee_rate + 1))
def on_feerate():
fee_rate = feerate_e.get_amount()
warning_text = '\n'
if fee_rate is not None:
try:
new_tx = func(fee_rate)
except Exception as e:
new_tx = None
warning_text = str(e).replace('\n',' ')
else:
new_tx = None
ok_button.setEnabled(new_tx is not None)
warning_label.setText(warning_text)
feerate_e.textChanged.connect(on_feerate)
def on_slider(dyn, pos, fee_rate):
fee_slider.activate()
if fee_rate is not None:
feerate_e.setAmount(fee_rate / 1000)
fee_slider = FeeSlider(self, self.config, on_slider)
fee_combo = FeeComboBox(fee_slider)
fee_slider.deactivate()
feerate_e.textEdited.connect(fee_slider.deactivate)
grid = QGridLayout()
grid.addWidget(QLabel(_('Current Fee') + ':'), 0, 0)
grid.addWidget(QLabel(self.format_amount(fee) + ' ' + self.base_unit()), 0, 1)
grid.addWidget(QLabel(_('Current Fee rate') + ':'), 1, 0)
grid.addWidget(QLabel(self.format_fee_rate(1000 * old_fee_rate)), 1, 1)
grid.addWidget(QLabel(_('New Fee rate') + ':'), 2, 0)
grid.addWidget(feerate_e, 2, 1)
grid.addWidget(fee_slider, 3, 1)
grid.addWidget(fee_combo, 3, 2)
vbox.addLayout(grid)
cb = QCheckBox(_('Final'))
vbox.addWidget(cb)
vbox.addWidget(warning_label)
vbox.addLayout(Buttons(CancelButton(d), ok_button))
if not d.exec_():
return
is_final = cb.isChecked()
new_fee_rate = feerate_e.get_amount()
try:
new_tx = func(new_fee_rate)
except Exception as e:
self.show_error(str(e))
return
if is_final:
new_tx.set_rbf(False)
self.show_transaction(new_tx, tx_desc=tx_label)
def bump_fee_dialog(self, tx: Transaction):
title = _('Bump Fee')
help_text = _("Increase your transaction's fee to improve its position in mempool.")
def func(new_fee_rate):
return self.wallet.bump_fee(
tx=tx,
txid=tx.txid(),
new_fee_rate=new_fee_rate,
coins=self.get_coins())
self._rbf_dialog(tx, func, title, help_text)
def dscancel_dialog(self, tx: Transaction):
title = _('Cancel transaction')
help_text = _(
"Cancel an unconfirmed RBF transaction by double-spending "
"its inputs back to your wallet with a higher fee.")
def func(new_fee_rate):
return self.wallet.dscancel(tx=tx, new_fee_rate=new_fee_rate)
self._rbf_dialog(tx, func, title, help_text)
def save_transaction_into_wallet(self, tx: Transaction):
win = self.top_level_window()
try:
if not self.wallet.add_transaction(tx):
win.show_error(_("Transaction could not be saved.") + "\n" +
_("It conflicts with current history."))
return False
except AddTransactionException as e:
win.show_error(e)
return False
else:
self.wallet.save_db()
# need to update at least: history_list, utxo_list, address_list
self.need_update.set()
msg = (_("Transaction added to wallet history.") + '\n\n' +
_("Note: this is an offline transaction, if you want the network "
"to see it, you need to broadcast it."))
win.msg_box(QPixmap(icon_path("offline_tx.png")), None, _('Success'), msg)
return True
def show_cert_mismatch_error(self):
if self.showing_cert_mismatch_error:
return
self.showing_cert_mismatch_error = True
self.show_critical(title=_("Certificate mismatch"),
msg=_("The SSL certificate provided by the main server did not match the fingerprint passed in with the --serverfingerprint option.") + "\n\n" +
_("Electrum will now exit."))
self.showing_cert_mismatch_error = False
self.close()
cross_barrier.py
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from byteps.torch.compression import Compression
from byteps.torch.ops import push_pull_async_inplace as byteps_push_pull
from byteps.torch.ops import poll, synchronize
from byteps.torch.ops import init, shutdown
from byteps.torch.ops import size, local_size, rank, local_rank
import threading
import logging
try:
import queue
except ImportError:
import Queue as queue
import time
import math
import torch
import byteps.torch as bps
_DistributedOptimizer = bps._DistributedOptimizer
_bps_DistributedOptimizer = bps.DistributedOptimizer
broadcast_parameters = bps.broadcast_parameters
broadcast_optimizer_state = bps.broadcast_optimizer_state
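# These aliases re-export the stock byteps.torch optimizer wrapper and broadcast helpers,
# so _CrossBarrier can build on them while user code keeps the familiar entry points.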
class _CrossBarrier(_DistributedOptimizer):
"""An optimizer that wraps a _DistributedOptimizer, intercepting push-pull operations.
This class enables overlapping gradient push-pull with both backward and forward propagation while maintaining
correct dependencies. It can achieve even higher training performance than the default BytePS with proper system
parameters. To understand the principles behind barrier crossing, check the paper
https://dl.acm.org/citation.cfm?id=3359642
"""
def __init__(self, model, byteps_opt, num_steps=10**6):
"""Construct a new ScheduledOptimizer, which uses byteps optimizer under the hood for averaging gradients
across all workers.
Args:
model: The training model. BytePS uses the model object to register hooks.
byteps_opt: Optimizer to use for averaging gradients and applying updates.
num_steps: The maximum number of training steps. BytePS needs to know when to stop cross-iteration
scheduling.
"""
self._model = model
self._opt = byteps_opt
self._logger = logging.getLogger("CrossBarrier")
self._logger.info("CrossBarrier is enabled.")
self._logger.debug("byteps size {}, rank {}".format(size(), rank()))
self._desc = "rank {}".format(rank())
# Track training steps
self._step = 0
self._final_step = num_steps
# Use lock to block the forward propagation of each parameter.
self._locks = {}
for param_group in self.param_groups:
for p in param_group['params']:
self._locks[p] = threading.Lock()
if size() > 1:
self._register_forward_hooks()
self._register_hooks()
# Poll whether the tensor push-pull is finished.
self._event_queue = queue.Queue()
self._poller = threading.Thread(target=self._poll, args=())
self._poller.start()
def __getattr__(self, item):
return getattr(self._opt, item)
def step(self, closure=None):
"""Override the default step function."""
self._logger.debug("{} calls step() {}".format(self._desc, self._step))
# Step 0 is called for parameter initialization after parameter broadcast
if size() > 1 and self._step > 0:
self._synchronize()
# if it is the final training step, wait for the completion of all tensors
if self._step == self._final_step:
self._logger.debug("final step {}, waiting for push-pull completion.".format(self._final_step))
while not self._event_queue.empty():
time.sleep(0.001)
self._event_queue.put((None, None, None))
self._poller.join()
self._logger.info("training finished!")
loss = None
if closure is not None:
loss = closure()
self._step += 1
return loss
else:
# Optimizer.step() will be triggered when the user calls byteps.broadcast_optimizer_state()
super(self._opt.__class__, self._opt).step()
self._step += 1
def zero_grad(self):
"""Override the default zero_grad function.
Clears the gradients of all optimized tensors.
"""
self._logger.debug("{} calls zero_grad() of step {}".format(self._desc, self._step))
if size() > 1 and self._step > 0:
return
else:
self._opt.zero_grad()
def _get_parameter_name(self, p):
if self._is_tensor_instance:
name = self._parameter_names.get(p.__hash__())
else:
name = self._parameter_names.get(p)
return name
def _register_hooks(self):
for param_group in self.param_groups:
for p in param_group['params']:
if p.requires_grad:
p.grad = p.data.new(p.size()).zero_()
self._requires_update.add(p)
p_tmp = p.expand_as(p)
grad_acc = p_tmp.grad_fn.next_functions[0][0]
grad_acc.register_hook(self._make_hook(p))
self._grad_accs.append(grad_acc)
def _synchronize(self):
"""Push pull missing parameters"""
missing_p = self._requires_update - set(self._handles.keys())
for p in missing_p:
handle, ctx = self._push_pull_grad_async(p)
self._handles[p] = (handle, ctx)
for p, value in self._handles.items():
handle, ctx = value
if handle is None:
handle, ctx = self._push_pull_grad_async(p)
self._handles[p] = (handle, ctx)
def _push_pull_grad_async(self, p):
"""Call byteps API to push-pull gradient asynchronously
        Arguments:
            p: the parameter whose gradient will be push-pulled.
        Returns:
            a push-pull handle and its compression context
"""
name = self._get_parameter_name(p)
tensor = p.grad
tensor_compressed, ctx = self._compression.compress(tensor)
self._locks[p].acquire()
handle = byteps_push_pull(tensor_compressed, average=True, name="Gradient."+name)
self._logger.debug("{} calls byteps_push_pull for {}".format(self._desc, self._get_parameter_name(p)))
# Add to queue to poll completion
self._event_queue.put((p, handle, ctx))
return handle, ctx
def _poll(self):
"""Poll the completion of the tensor's backward or push-pull from a FIFO event_queue"""
while True:
p, handle, ctx = self._event_queue.get()
if p is None:
self._logger.debug("poller exits.")
break
# Check whether the push-pull is finished. If so, start updating parameters.
if handle is not None and poll(handle):
output = synchronize(handle)
p.grad.set_(self._compression.decompress(output, ctx))
self._logger.debug("{} {} finished push-pull".format(self._desc, self._get_parameter_name(p)))
self._push_pull_delay[p] = self.backward_passes_per_step
                # Only torch's SGD, Adam and RMSprop optimizers are supported here
if isinstance(self._opt, torch.optim.SGD):
self._sgd(p)
elif isinstance(self._opt, torch.optim.Adam):
self._adam(p)
elif isinstance(self._opt, torch.optim.RMSprop):
self._rmsprop(p)
else:
raise ValueError("Invalid optimizer! Only support SGD, Adam and RMSprop.")
self._zero_one_grad(p)
# notify update completion and parameter is ready for forward propagation
if p in self._locks:
self._locks[p].release()
else:
self._event_queue.put((p, handle, ctx))
def _register_forward_hooks(self):
"""Add hook before forward propagation of each layer to block forward computation until the push-pull and
parameter update is finished. The blocking is implemented using a lock."""
# Recursively find all submodules
submodules = []
q = queue.LifoQueue()
for mod in self._model.children():
q.put(mod)
while not q.empty():
mod = q.get()
if len(list(mod.children())) == 0:
submodules.append(mod)
else:
for m in mod.children():
q.put(m)
def pre_forward_hook(mod, input):
for p in mod.parameters():
if p in self._handles:
del self._handles[p]
if p not in self._locks:
continue
with self._locks[p]:
self._logger.debug("{} {} is ready.".format(self._desc, self._get_parameter_name(p)))
self._logger.debug("{} starts forward {}.".format(self._desc, mod))
def after_forward_hook(mod, input, result):
self._logger.debug("{} finished forward {}.".format(self._desc, mod))
# Register pre-hook and hook for each module
for mod in reversed(submodules):
self._logger.debug("{} registers forward hook on module {}".format(self._desc, mod))
mod.register_forward_pre_hook(pre_forward_hook)
mod.register_forward_hook(after_forward_hook)
def _zero_one_grad(self, p):
"""Clears the gradient of one variable as torch accumulates gradients by default.
Arguments:
p: the parameter.
"""
if p.grad is not None:
p.grad.detach_()
p.grad.zero_()
"""Below are the implementations of optimizers, e.g., SGD, Adam, RMSprop.
The implementation is derived from Torch's code, except that we update one parameter each time."""
def _sgd(self, p):
"""Performs a single optimization step using SGD optimizer on a parameter.
Arguments:
p: The parameter to be updated.
"""
for group in self.param_groups:
weight_decay = group['weight_decay']
momentum = group['momentum']
dampening = group['dampening']
nesterov = group['nesterov']
for gp in group['params']:
if self._get_parameter_name(p) != self._get_parameter_name(gp) or gp.shape != p.shape:
continue
self._logger.debug("{} is updating {}".format(self._desc, self._get_parameter_name(p)))
if p.grad is None:
continue
d_p = p.grad.data
if weight_decay != 0:
d_p.add_(weight_decay, p.data)
if momentum != 0:
param_state = self.state[p]
if 'momentum_buffer' not in param_state:
buf = param_state['momentum_buffer'] = torch.zeros_like(p.data)
buf.mul_(momentum).add_(d_p)
else:
buf = param_state['momentum_buffer']
buf.mul_(momentum).add_(1 - dampening, d_p)
if nesterov:
d_p = d_p.add(momentum, buf)
else:
d_p = buf
p.data.add_(-group['lr'], d_p)
break
def _adam(self, p):
"""Performs a single optimization step using Adam optimizer on a parameter.
Arguments:
p: The parameter to be updated.
"""
for group in self.param_groups:
for gp in group['params']:
if self._get_parameter_name(p) != self._get_parameter_name(gp) or gp.shape != p.shape:
continue
self._logger.debug("{} is updating {}".format(self._desc, self._get_parameter_name(p)))
if p.grad is None:
continue
grad = p.grad.data
if grad.is_sparse:
raise RuntimeError('Adam does not support sparse gradients, please consider SparseAdam instead')
amsgrad = group['amsgrad']
state = self.state[p]
# State initialization
if len(state) == 0:
state['step'] = 0
# Exponential moving average of gradient values
state['exp_avg'] = torch.zeros_like(p.data)
# Exponential moving average of squared gradient values
state['exp_avg_sq'] = torch.zeros_like(p.data)
if amsgrad:
# Maintains max of all exp. moving avg. of sq. grad. values
state['max_exp_avg_sq'] = torch.zeros_like(p.data)
exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']
if amsgrad:
max_exp_avg_sq = state['max_exp_avg_sq']
beta1, beta2 = group['betas']
state['step'] += 1
if group['weight_decay'] != 0:
grad.add_(group['weight_decay'], p.data)
# Decay the first and second moment running average coefficient
exp_avg.mul_(beta1).add_(1 - beta1, grad)
exp_avg_sq.mul_(beta2).addcmul_(1 - beta2, grad, grad)
if amsgrad:
# Maintains the maximum of all 2nd moment running avg. till now
torch.max(max_exp_avg_sq, exp_avg_sq, out=max_exp_avg_sq)
# Use the max. for normalizing running avg. of gradient
denom = max_exp_avg_sq.sqrt().add_(group['eps'])
else:
denom = exp_avg_sq.sqrt().add_(group['eps'])
bias_correction1 = 1 - beta1 ** state['step']
bias_correction2 = 1 - beta2 ** state['step']
step_size = group['lr'] * math.sqrt(bias_correction2) / bias_correction1
p.data.addcdiv_(-step_size, exp_avg, denom)
break
def _rmsprop(self, p):
"""Performs a single optimization step using RMSprop optimizer on a parameter.
Arguments:
p: The parameter to be updated.
"""
for group in self.param_groups:
for gp in group['params']:
if self._get_parameter_name(p) != self._get_parameter_name(gp) or gp.shape != p.shape:
continue
self._logger.debug("{} is updating {}".format(self._desc, self._get_parameter_name(p)))
if p.grad is None:
continue
grad = p.grad.data
if grad.is_sparse:
raise RuntimeError('RMSprop does not support sparse gradients')
state = self.state[p]
# State initialization
if len(state) == 0:
state['step'] = 0
state['square_avg'] = torch.zeros_like(p.data)
if group['momentum'] > 0:
state['momentum_buffer'] = torch.zeros_like(p.data)
if group['centered']:
state['grad_avg'] = torch.zeros_like(p.data)
square_avg = state['square_avg']
alpha = group['alpha']
state['step'] += 1
if group['weight_decay'] != 0:
grad = grad.add(group['weight_decay'], p.data)
square_avg.mul_(alpha).addcmul_(1 - alpha, grad, grad)
if group['centered']:
grad_avg = state['grad_avg']
grad_avg.mul_(alpha).add_(1 - alpha, grad)
avg = square_avg.addcmul(-1, grad_avg, grad_avg).sqrt().add_(group['eps'])
else:
avg = square_avg.sqrt().add_(group['eps'])
if group['momentum'] > 0:
buf = state['momentum_buffer']
buf.mul_(group['momentum']).addcdiv_(grad, avg)
p.data.add_(-group['lr'], buf)
else:
p.data.addcdiv_(-group['lr'], grad, avg)
break
def _init_bsc():
"""Replace _register_hook() function in _DistributedOptimizer with empty function."""
def hijack(obj, func_name):
orig_func = getattr(obj, func_name)
# print("hijack function {}".format(orig_func))
def wrapped_func(*args, **kwargs):
# print("function {} is hijacked to do nothing.".format(orig_func))
return
setattr(obj, func_name, wrapped_func)
hijack(_DistributedOptimizer, '_register_hooks')
def _init_logger():
logger = logging.getLogger("CrossBarrier")
formatter = logging.Formatter('%(asctime)s.%(msecs)03d %(filename)s:%(lineno)s %(levelname)s: %(message)s',
'%H:%M:%S')
sh = logging.StreamHandler()
sh.setFormatter(formatter)
logger.addHandler(sh)
fh = logging.FileHandler('cross_barrier.log', 'w')
fh.setFormatter(formatter)
logger.addHandler(fh)
logger.propagate = False
logger.setLevel(logging.INFO)
def CrossBarrier(model,
optimizer,
named_parameters=None,
compression=Compression.none,
backward_passes_per_step=1,
num_steps=10**6):
"""Wrap Torch optimizer using BytePS DistributedOptimizer and _CrossBarrier."""
bps_opt = _bps_DistributedOptimizer(optimizer, named_parameters, compression, backward_passes_per_step)
return _CrossBarrier(model, bps_opt, num_steps)
_init_bsc()
_init_logger()
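# Hedged usage sketch (not part of the original module): how a training script
# might wrap a model and optimizer with CrossBarrier. The model, learning rate
# and step count below are placeholders; a working BytePS launch environment
# (DMLC_* environment variables, bpslaunch) is assumed.
def _example_cross_barrier_setup():
    init()  # byteps init, imported from byteps.torch.ops above
    model = torch.nn.Linear(10, 1)
    optimizer = torch.optim.SGD(model.parameters(), lr=0.01)
    optimizer = CrossBarrier(model, optimizer,
                             named_parameters=model.named_parameters(),
                             num_steps=1000)
    broadcast_parameters(model.state_dict(), root_rank=0)
    broadcast_optimizer_state(optimizer, root_rank=0)
    return model, optimizer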
|
CAM_CANoe.py
|
# -*- coding:utf-8 -*-
import threading
from time import sleep
import cv2
import numpy as np
import traceback
import socketserver
from datetime import datetime
import os
from pylab import array, plot, show, axis, arange, figure, uint8
def image_process(image, level=12):
# image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
# # image = cv2.erode(image, kernel, iterations=1)
# image = cv2.medianBlur(image, 3)
maxIntensity = 255.0 # depends on dtype of image data
x = arange(maxIntensity)
# Parameters for manipulating image data
phi = 1
theta = 1
image = (maxIntensity / phi) * (image / (maxIntensity / theta))**level
image = array(image, dtype=uint8)
# ret1, image = cv2.threshold(image, 10, 255, cv2.THRESH_BINARY)
# image = cv2.threshold(image, 0, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)[1]
image = cv2.medianBlur(image, 3)
# image = cv2.Canny(image, 50, 150)
# convert the bottom image to grayscale, then apply a blackhat
# morphological operator to find dark regions against a light
# background (i.e., the routing and account numbers)
# rectKernel = cv2.getStructuringElement(cv2.MORPH_RECT, (17, 7))
# gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
# blackhat = cv2.morphologyEx(gray, cv2.MORPH_BLACKHAT, rectKernel)
# kernel = np.ones((3, 3), np.uint8)
# image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
# ret1, image = cv2.threshold(image, 196, 255, cv2.THRESH_BINARY)
# # image = cv2.threshold(image, 0, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)[1]
# # image = cv2.erode(image, kernel, iterations=1)
# image = cv2.medianBlur(image, 3)
# # image = cv2.Canny(image, 50, 150)
return image
def inverse_color(image):
height,width = image.shape
img2 = image.copy()
for i in range(height):
for j in range(width):
img2[i,j] = (255-image[i,j]) # For GRAY_SCALE image ;
# for R.G.B image: img2[i,j] = (255-image[i,j][0],255-image[i,j][1],255-image[i,j][2])
return img2
def detect_black(Img):
    # Img = cv2.imread(imagename)  # read an image from disk
I_h, I_w = Img.shape[:2]
I_h2,I_w2 = int(I_h * 0.8),int(I_w*0.8)
I_h1,I_w1 = int((I_h-I_h2)/2),int((I_w-I_w2)/2)
Img = Img[I_h1:I_h2, I_w1:I_w2]
    kernel_4 = np.ones((2,2),np.uint8)  # 2x2 kernel for erosion/dilation
    if Img is not None:  # make sure the image was actually loaded
        HSV = cv2.cvtColor(Img, cv2.COLOR_BGR2HSV)  # convert the BGR image to HSV
        '''
        In the HSV model the channels are hue (H), saturation (S) and value (V).
        The two arrays below define the color range to detect.
        '''
        Lower = np.array([0, 0, 0])  # lower bound of the color to detect
        Upper = np.array([255, 255, 90])  # upper bound of the color to detect
        # mask turns the in-range regions of the HSV image white and everything else black
mask = cv2.inRange(HSV, Lower, Upper)
# mask = inverse_color(mask)
        # the next few lines clean up the mask with erosion and dilation
erosion = cv2.erode(mask,kernel_4,iterations = 1)
# erosion = cv2.erode(erosion,kernel_4,iterations = 1)
dilation = cv2.dilate(erosion,kernel_4,iterations = 1)
# dilation = cv2.dilate(dilation,kernel_4,iterations = 1)
        # # target would keep only the in-range color regions of the original image
        # target = cv2.bitwise_and(Img, Img, mask=dilation)
        # threshold the filtered mask into a binary image stored in binary
ret, binary = cv2.threshold(dilation,127,255,cv2.THRESH_BINARY)
        # find the contours in the binary image
binary, contours, hierarchy = cv2.findContours(binary,cv2.RETR_LIST,cv2.CHAIN_APPROX_SIMPLE)
if len(contours) > 3:
# print("not found")
return False
else:
# print("black")
return True
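# Hedged sketch (not part of the original script): running the black-screen check
# on a single image loaded from disk, mirroring how camera_start() feeds frames
# through image_process() before detect_black(). The file name is a placeholder.
def _example_black_check(path="sample_frame.png"):
    img = cv2.imread(path)  # BGR image, or None if the file is missing
    if img is None:
        return None
    return detect_black(image_process(img))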
# initialize the camera and run the capture loop
def camera_start():
global capture_val,quit_value,black_value,black_result,camerano,filename
# create folder
try:
isExists1 = os.path.exists('../Capture')
if not isExists1:
os.mkdir('../Capture')
except:
print("folder create error")
    # wait until the TCP handler selects a camera number (camerano != 9999)
    while camerano == 9999:
        sleep(0.1)
    # initialize the camera
try:
cap = cv2.VideoCapture(camerano)
# cap.set(cv2.CAP_PROP_AUTOFOCUS, 0)
# cap.set(cv2.CAP_PROP_FOCUS, 118)
# cap.set(cv2.CAP_PROP_FRAME_WIDTH, 1280)
# cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 720)
# sleep(1)
# cap.release()
# cv2.destroyAllWindows()
except:
cap = cv2.VideoCapture(0)
traceback.print_exc()
print("[ERROR]: Camera Connect Error!")
while (1):
# get a frame
ret, frame = cap.read()
# show a frame
if ret == 1:
cv2.imshow("CANoe Camera", frame)
            # counter1 = counter.get()
            # capture_val == 1 means capture, black_value == 1 means compare, quit_value == 1 means exit
if capture_val == 1:
try:
# filename = datetime.now().strftime("%Y%m%d_%H%M%S_%f") + ".png"
cv2.imwrite("../Capture/"+filename, frame)
capture_val = 0
print(filename + " capture ok")
except:
print("capture error")
if quit_value == 1:
try:
break
except:
print("capture error")
if black_value == 1:
frame = image_process(frame)
if detect_black(frame):
black_result = 1
black_value = 0
print("Black SCREEN")
else:
black_result = 0
black_value = 0
print("WHITE SCREEN")
ch = cv2.waitKey(1)
# print(ch)
# sleep(1)
if ch == ord('q'):
quit_value = 1
sleep(1)
print("service quit")
break
elif ch == ord(' '):
cv2.imwrite('./Standard.png', frame)
else:
pass
else:
print("Camera Error!!!")
try:
cap.release()
cv2.destroyAllWindows()
except:
print("Camera Close Error!!!")
class MyTCPhandler(socketserver.BaseRequestHandler):  # must inherit from BaseRequestHandler
# timeout = 3
# def handle_timeout(self):
# print ("No message received in {0} seconds".format(self.timeout))
def handle(self):
global capture_val,camerano,quit_value,black_value,black_result,server_timeout,server,filename
        print(self.request)  # this is the connection object (conn)
        print(self.client_address)  # this is the client address (addr)
# self.request.timeout = 2
# print(self.server.timeout)
while True:
try:
# if quit_value == 1:
# break
# print("service quit")
                data = self.request.recv(1024).decode('utf-8', 'ignore')
                if not data: break
if "CAP" in data:
capture_val = 1
filename = datetime.now().strftime("%Y%m%d_%H%M%S_%f") + ".png"
                    self.request.send(filename.encode('utf-8'))
# print("capture")
elif "CAMS" in data:
camerano = int(data.split("=")[1])
print("Select Camera no:",camerano)
elif "BLACK" in data:
black_value = 1
sleep(0.3)
if black_result == 1:
self.request.send("BLACK_ONN")
# print("SCREEN IS: black")
else:
self.request.send("BLACK_OFF")
# print("SCREEN IS: light")
elif "QUIT" in data:
quit_value = 1
sleep(1)
print("quit")
break
else:
print(data.strip())
# self.request.send(data.upper())
except Exception:
break
quit_value = 0
self.request.close()
server.shutdown()
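# Hedged client sketch (not part of the original script): the text commands the
# handler above understands are CAP, CAMS=<n>, BLACK and QUIT. Host/port match
# the values set in __main__ below; everything else is illustrative only.
def _example_canoe_client(host="127.0.0.1", port=17778):
    import socket
    with socket.create_connection((host, port)) as sock:
        sock.sendall(b"CAMS=0")          # select camera 0
        sleep(0.5)
        sock.sendall(b"CAP")             # request a capture
        capture_name = sock.recv(1024)   # handler replies with the capture file name
        sock.sendall(b"BLACK")           # ask for a black-screen check
        black_state = sock.recv(1024)    # b"BLACK_ONN" or b"BLACK_OFF"
        sock.sendall(b"QUIT")
    return capture_name, black_state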
if __name__ == "__main__":
global capture_val,host,port,quit_value,black_value,black_result,server_timeout,server,filename
capture_val = 0
quit_value = 0
black_result = 0
black_value = 0
camerano = 9999
server_timeout = 3
filename = ""
threads = []
host,port ='127.0.0.1',17778
#camera start thread
th1 = threading.Thread(target=camera_start, args=())
# th1.setDaemon(True)
th1.start()
threads.append(th1)
# th2 = threading.Thread(target=service_start, args=(host,port,))
# # th2.setDaemon(True)
# th2.start()
# threads.append(th2)
# tcp ip server start
print("server start")
    server = socketserver.ThreadingTCPServer((host,port),MyTCPhandler)  # threaded TCP server, one handler per connection
server.allow_reuse_address=True
# server.handle_timeout()
# server.timeout = 10
server.serve_forever()
print("server quit")
th1.join()
print "All threads finish"
|
common.py
|
# -*- coding: utf-8 -*-
import json, subprocess, threading, sys, platform, os
PY3 = sys.version_info[0] == 3
JsonLoads = PY3 and json.loads or (lambda s: encJson(json.loads(s)))
JsonDumps = json.dumps
def STR2BYTES(s):
return s.encode('utf8') if PY3 else s
def BYTES2STR(b):
return b.decode('utf8') if PY3 else b
def BYTE2SYSTEMSTR(b):
return b.decode('utf8') if PY3 else \
b.decode('utf8').encode(sys.stdin.encoding)
if not PY3:
def encJson(obj):
if hasattr(obj, 'encode'):
return obj.encode('utf8')
elif isinstance(obj, list):
return [encJson(e) for e in obj]
elif isinstance(obj, dict):
return dict((encJson(k), encJson(v)) for k,v in obj.items())
else:
return obj
def Partition(msg, n):
if n >= len(msg):
return msg, ''
else:
            # UTF-8 lead bytes start with '0xxx-xxxx' or '11xx-xxxx'; continuation bytes start with '10xx-xxxx'
while n > 0 and ord(msg[n]) >> 6 == 2:
n -= 1
return msg[:n], msg[n:]
else:
def Partition(msg, n):
return msg[:n], msg[n:]
#_p = re.compile(r'[0-9]+|[a-zA-Z][a-z]*')
#
#def SplitWords(s):
# return _p.findall(s)
#
#def MinusSeperate(s):
# return '-'.join(SplitWords(s)).lower()
def HasCommand(procName):
return subprocess.call(['which', procName], stdout=subprocess.PIPE) == 0
#def StartThread(target, *args, **kwargs):
# threading.Thread(target=target, args=args, kwargs=kwargs).start()
def StartDaemonThread(target, *args, **kwargs):
t = threading.Thread(target=target, args=args, kwargs=kwargs)
t.daemon = True
t.start()
class LockedValue(object):
def __init__(self, initialVal=None):
self.val = initialVal
self.lock = threading.Lock()
def setVal(self, val):
with self.lock:
self.val = val
def getVal(self):
with self.lock:
val = self.val
return val
# usage: CallInNewConsole(['python', 'qterm.py'])
def CallInNewConsole(args=None):
args = sys.argv[1:] if args is None else args
if not args:
return 1
osName = platform.system()
if osName == 'Windows':
return subprocess.call(['start'] + list(args), shell=True)
elif osName == 'Linux':
cmd = subprocess.list2cmdline(args)
if HasCommand('mate-terminal'):
args = ['mate-terminal', '-e', cmd]
elif HasCommand('gnome-terminal'):
args = ['gnome-terminal', '-e', cmd]
elif HasCommand('xterm'):
args = ['sh', '-c', 'xterm -e %s &' % cmd]
else:
return 1
# args = ['sh', '-c', 'nohup %s >/dev/null 2>&1 &' % cmd]
return subprocess.call(args, preexec_fn=os.setpgrp)
elif osName == 'Darwin':
return subprocess.call(['open','-W','-a','Terminal.app'] + list(args))
else:
return 1
# return subprocess.Popen(list(args) + ['&'])
if PY3:
import queue as Queue
else:
import Queue
class DotDict(object):
def __init__(self, **kw):
self.__dict__.update(**kw)
Pass = lambda *arg, **kwargs: None
def LeftTrim(s, head):
if s.startswith(head):
        return s[len(head):]
else:
return s
def AutoTest():
with open(sys.argv[1], 'rb') as f:
for line in f.read().split(b'\n'):
line = BYTE2SYSTEMSTR(line.strip())
if not line:
continue
elif line.startswith('#'):
print(line)
else:
print('>>> '+line)
os.system(line)
sys.stdout.write('\npress enter to continue...')
if PY3:
input()
else:
raw_input()
sys.stdout.write('\n')
|
data_interface.py
|
import importlib
import os
import shutil
import yaml
from abc import ABC, abstractmethod
from pathlib import Path
from queue import Queue
from threading import Thread
from jigsaw import models
class LabeledImage(ABC):
def __init__(self, image_id):
self.image_id = image_id
@classmethod
@abstractmethod
def construct(cls, image_id, **kwargs):
raise NotImplementedError
@classmethod
@abstractmethod
def filter_and_load(cls, data_source, **kwargs):
raise NotImplementedError
@classmethod
@abstractmethod
def transform(cls, image_ids, **kwargs):
raise NotImplementedError
# file extensions of any file relevant to this model
@property
@classmethod
@abstractmethod
def associated_files(cls):
raise NotImplementedError
    # prefixes of any files actually needed as validation/testing data for this model
@property
@classmethod
@abstractmethod
def related_data_prefixes(cls):
raise NotImplementedError
@property
@classmethod
@abstractmethod
def temp_dir(cls):
raise NotImplementedError
@property
@classmethod
def training_type(cls):
raise NotImplementedError
@abstractmethod
def export_as_TFExample(self, **kwargs):
raise NotImplementedError
@classmethod
@abstractmethod
def write_additional_files(cls, dataset_name, **kwargs):
raise NotImplementedError
@classmethod
def construct_all(cls, image_ids, num_threads=20):
"""Loads a set of LabeledImage objects from the filesystem
NOTE: this is done concurrently to limit I/O costs.
Args:
image_ids (list): the list of image IDs that should be loaded
num_threads (int, optional): Defaults to 20. The number of threads that
should be used for concurrent loading.
Returns:
dict: a dict where the keys are image IDs and the values are the
object for each image ID
"""
labeled_images = {}
# pulls image_ids from a queue, loads the relevant object
# NOTE: this is the function being performed concurrently
def worker_load_func(queue):
while True:
image_id = queue.get()
if image_id is None:
break
labeled_images[image_id] = cls.construct(image_id)
queue.task_done()
# create a queue for images that need to be loaded
image_id_queue = Queue(maxsize=0)
workers = []
for worker in range(num_threads):
worker = Thread(target=worker_load_func, args=(image_id_queue, ))
            worker.daemon = True
worker.start()
workers.append(worker)
for image_id in image_ids:
image_id_queue.put(image_id)
# gracefully finish all threaded processes
image_id_queue.join()
for _ in range(num_threads):
image_id_queue.put(None)
for worker in workers:
worker.join()
return labeled_images
def copy_associated_files(self, destination, **kwargs):
if self.temp_dir is None:
data_dir = Path.cwd() / "data"
else:
data_dir = self.temp_dir
for suffix in self.associated_files.values():
for prefix in self.related_data_prefixes.values():
filepath = data_dir / f'{prefix}{self.image_id}{suffix}'
if filepath.exists():
shutil.copy(
str(filepath.absolute()), str(destination.absolute()))
def load_models():
parent_dir = os.path.dirname(os.path.realpath(__file__))
data_models_yml = Path(parent_dir) / "data_models.yml"
with open(data_models_yml) as f:
data_models = yaml.safe_load(f)
model_list = []
for data_model in data_models:
module = importlib.import_module(data_model["parent_module"])
model_list.append(getattr(module, data_model["model_class"]))
return model_list
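# Hedged illustration (not part of the original module): the data_models.yml
# layout that load_models() above expects, expressed as the equivalent Python
# structure. The module and class names are placeholders, not taken from the
# source tree.
_EXAMPLE_DATA_MODELS = [
    {"parent_module": "jigsaw.models.example_model", "model_class": "ExampleLabeledImage"},
]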
|
twisterlib.py
|
#!/usr/bin/env python3
# vim: set syntax=python ts=4 :
#
# Copyright (c) 2018 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
import os
import contextlib
import string
import mmap
import sys
import re
import subprocess
import select
import shutil
import shlex
import signal
import threading
from collections import OrderedDict
import queue
import time
import csv
import glob
import xml.etree.ElementTree as ET
import logging
from pathlib import Path
from distutils.spawn import find_executable
from colorama import Fore
import pickle
import platform
import yaml
import json
from multiprocessing import Lock, Process, Value
from typing import List
try:
# Use the C LibYAML parser if available, rather than the Python parser.
# It's much faster.
from yaml import CSafeLoader as SafeLoader
from yaml import CDumper as Dumper
except ImportError:
from yaml import SafeLoader, Dumper
try:
import serial
except ImportError:
print("Install pyserial python module with pip to use --device-testing option.")
try:
from tabulate import tabulate
except ImportError:
print("Install tabulate python module with pip to use --device-testing option.")
try:
import psutil
except ImportError:
print("Install psutil python module with pip to run in Qemu.")
try:
import pty
except ImportError as capture_error:
if os.name == "nt": # "nt" means that program is running on Windows OS
pass # "--device-serial-pty" option is not supported on Windows OS
else:
raise capture_error
ZEPHYR_BASE = os.getenv("ZEPHYR_BASE")
if not ZEPHYR_BASE:
sys.exit("$ZEPHYR_BASE environment variable undefined")
# This is needed to load edt.pickle files.
sys.path.insert(0, os.path.join(ZEPHYR_BASE, "scripts", "dts",
"python-devicetree", "src"))
from devicetree import edtlib # pylint: disable=unused-import
# Use this for internal comparisons; that's what canonicalization is
# for. Don't use it when invoking other components of the build system
# to avoid confusing and hard to trace inconsistencies in error messages
# and logs, generated Makefiles, etc. compared to when users invoke these
# components directly.
# Note "normalization" is different from canonicalization, see os.path.
canonical_zephyr_base = os.path.realpath(ZEPHYR_BASE)
sys.path.insert(0, os.path.join(ZEPHYR_BASE, "scripts/"))
import scl
import expr_parser
logger = logging.getLogger('twister')
logger.setLevel(logging.DEBUG)
class ExecutionCounter(object):
def __init__(self, total=0):
self._done = Value('i', 0)
self._passed = Value('i', 0)
self._skipped_configs = Value('i', 0)
self._skipped_runtime = Value('i', 0)
self._skipped_filter = Value('i', 0)
self._skipped_cases = Value('i', 0)
self._error = Value('i', 0)
self._failed = Value('i', 0)
self._total = Value('i', total)
self._cases = Value('i', 0)
self.lock = Lock()
def summary(self):
logger.debug("--------------------------------")
logger.debug(f"Total Test suites: {self.total}")
logger.debug(f"Total Test cases: {self.cases}")
logger.debug(f"Skipped test cases: {self.skipped_cases}")
logger.debug(f"Completed Testsuites: {self.done}")
logger.debug(f"Passing Testsuites: {self.passed}")
logger.debug(f"Failing Testsuites: {self.failed}")
logger.debug(f"Skipped Testsuites: {self.skipped_configs}")
logger.debug(f"Skipped Testsuites (runtime): {self.skipped_runtime}")
logger.debug(f"Skipped Testsuites (filter): {self.skipped_filter}")
logger.debug(f"Errors: {self.error}")
logger.debug("--------------------------------")
@property
def cases(self):
with self._cases.get_lock():
return self._cases.value
@cases.setter
def cases(self, value):
with self._cases.get_lock():
self._cases.value = value
@property
def skipped_cases(self):
with self._skipped_cases.get_lock():
return self._skipped_cases.value
@skipped_cases.setter
def skipped_cases(self, value):
with self._skipped_cases.get_lock():
self._skipped_cases.value = value
@property
def error(self):
with self._error.get_lock():
return self._error.value
@error.setter
def error(self, value):
with self._error.get_lock():
self._error.value = value
@property
def done(self):
with self._done.get_lock():
return self._done.value
@done.setter
def done(self, value):
with self._done.get_lock():
self._done.value = value
@property
def passed(self):
with self._passed.get_lock():
return self._passed.value
@passed.setter
def passed(self, value):
with self._passed.get_lock():
self._passed.value = value
@property
def skipped_configs(self):
with self._skipped_configs.get_lock():
return self._skipped_configs.value
@skipped_configs.setter
def skipped_configs(self, value):
with self._skipped_configs.get_lock():
self._skipped_configs.value = value
@property
def skipped_filter(self):
with self._skipped_filter.get_lock():
return self._skipped_filter.value
@skipped_filter.setter
def skipped_filter(self, value):
with self._skipped_filter.get_lock():
self._skipped_filter.value = value
@property
def skipped_runtime(self):
with self._skipped_runtime.get_lock():
return self._skipped_runtime.value
@skipped_runtime.setter
def skipped_runtime(self, value):
with self._skipped_runtime.get_lock():
self._skipped_runtime.value = value
@property
def failed(self):
with self._failed.get_lock():
return self._failed.value
@failed.setter
def failed(self, value):
with self._failed.get_lock():
self._failed.value = value
@property
def total(self):
with self._total.get_lock():
return self._total.value
class CMakeCacheEntry:
'''Represents a CMake cache entry.
This class understands the type system in a CMakeCache.txt, and
converts the following cache types to Python types:
Cache Type Python type
---------- -------------------------------------------
FILEPATH str
PATH str
STRING str OR list of str (if ';' is in the value)
BOOL bool
INTERNAL str OR list of str (if ';' is in the value)
---------- -------------------------------------------
'''
# Regular expression for a cache entry.
#
# CMake variable names can include escape characters, allowing a
# wider set of names than is easy to match with a regular
# expression. To be permissive here, use a non-greedy match up to
# the first colon (':'). This breaks if the variable name has a
# colon inside, but it's good enough.
CACHE_ENTRY = re.compile(
r'''(?P<name>.*?) # name
:(?P<type>FILEPATH|PATH|STRING|BOOL|INTERNAL) # type
=(?P<value>.*) # value
''', re.X)
@classmethod
def _to_bool(cls, val):
# Convert a CMake BOOL string into a Python bool.
#
# "True if the constant is 1, ON, YES, TRUE, Y, or a
# non-zero number. False if the constant is 0, OFF, NO,
# FALSE, N, IGNORE, NOTFOUND, the empty string, or ends in
# the suffix -NOTFOUND. Named boolean constants are
# case-insensitive. If the argument is not one of these
# constants, it is treated as a variable."
#
# https://cmake.org/cmake/help/v3.0/command/if.html
val = val.upper()
if val in ('ON', 'YES', 'TRUE', 'Y'):
return 1
elif val in ('OFF', 'NO', 'FALSE', 'N', 'IGNORE', 'NOTFOUND', ''):
return 0
elif val.endswith('-NOTFOUND'):
return 0
else:
try:
v = int(val)
return v != 0
except ValueError as exc:
raise ValueError('invalid bool {}'.format(val)) from exc
@classmethod
def from_line(cls, line, line_no):
# Comments can only occur at the beginning of a line.
# (The value of an entry could contain a comment character).
if line.startswith('//') or line.startswith('#'):
return None
# Whitespace-only lines do not contain cache entries.
if not line.strip():
return None
m = cls.CACHE_ENTRY.match(line)
if not m:
return None
name, type_, value = (m.group(g) for g in ('name', 'type', 'value'))
if type_ == 'BOOL':
try:
value = cls._to_bool(value)
except ValueError as exc:
args = exc.args + ('on line {}: {}'.format(line_no, line),)
raise ValueError(args) from exc
elif type_ in ['STRING', 'INTERNAL']:
# If the value is a CMake list (i.e. is a string which
# contains a ';'), convert to a Python list.
if ';' in value:
value = value.split(';')
return CMakeCacheEntry(name, value)
def __init__(self, name, value):
self.name = name
self.value = value
def __str__(self):
fmt = 'CMakeCacheEntry(name={}, value={})'
return fmt.format(self.name, self.value)
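# Hedged sketch (not part of the original file): how CMakeCacheEntry.from_line()
# maps the cache types documented above onto Python values. The sample lines are
# made up; line numbers are only used for error reporting.
def _example_cache_entries():
    samples = [
        'ZEPHYR_TOOLCHAIN_VARIANT:STRING=zephyr',   # STRING -> str
        'CONFIG_DEBUG:BOOL=ON',                     # BOOL   -> truthy value
        'EXTRA_FLAGS:INTERNAL=-Wall;-Wextra',       # ';' list -> list of str
        '// a comment line',                        # ignored -> None
    ]
    return [CMakeCacheEntry.from_line(line, line_no)
            for line_no, line in enumerate(samples)]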
class CMakeCache:
'''Parses and represents a CMake cache file.'''
@staticmethod
def from_file(cache_file):
return CMakeCache(cache_file)
def __init__(self, cache_file):
self.cache_file = cache_file
self.load(cache_file)
def load(self, cache_file):
entries = []
with open(cache_file, 'r') as cache:
for line_no, line in enumerate(cache):
entry = CMakeCacheEntry.from_line(line, line_no)
if entry:
entries.append(entry)
self._entries = OrderedDict((e.name, e) for e in entries)
def get(self, name, default=None):
entry = self._entries.get(name)
if entry is not None:
return entry.value
else:
return default
def get_list(self, name, default=None):
if default is None:
default = []
entry = self._entries.get(name)
if entry is not None:
value = entry.value
if isinstance(value, list):
return value
elif isinstance(value, str):
return [value] if value else []
else:
msg = 'invalid value {} type {}'
raise RuntimeError(msg.format(value, type(value)))
else:
return default
def __contains__(self, name):
return name in self._entries
def __getitem__(self, name):
return self._entries[name].value
def __setitem__(self, name, entry):
if not isinstance(entry, CMakeCacheEntry):
msg = 'improper type {} for value {}, expecting CMakeCacheEntry'
raise TypeError(msg.format(type(entry), entry))
self._entries[name] = entry
def __delitem__(self, name):
del self._entries[name]
def __iter__(self):
return iter(self._entries.values())
class TwisterException(Exception):
pass
class TwisterRuntimeError(TwisterException):
pass
class ConfigurationError(TwisterException):
def __init__(self, cfile, message):
TwisterException.__init__(self, cfile + ": " + message)
class BuildError(TwisterException):
pass
class ExecutionError(TwisterException):
pass
class HarnessImporter:
def __init__(self, name):
sys.path.insert(0, os.path.join(ZEPHYR_BASE, "scripts/pylib/twister"))
module = __import__("harness")
if name:
my_class = getattr(module, name)
else:
my_class = getattr(module, "Test")
self.instance = my_class()
class Handler:
def __init__(self, instance, type_str="build"):
"""Constructor
"""
self.state = "waiting"
self.run = False
self.duration = 0
self.type_str = type_str
self.binary = None
self.pid_fn = None
self.call_make_run = False
self.name = instance.name
self.instance = instance
self.timeout = instance.testcase.timeout
self.sourcedir = instance.testcase.source_dir
self.build_dir = instance.build_dir
self.log = os.path.join(self.build_dir, "handler.log")
self.returncode = 0
self.set_state("running", self.duration)
self.generator = None
self.generator_cmd = None
self.args = []
self.terminated = False
def set_state(self, state, duration):
self.state = state
self.duration = duration
def get_state(self):
ret = (self.state, self.duration)
return ret
def record(self, harness):
if harness.recording:
filename = os.path.join(self.build_dir, "recording.csv")
with open(filename, "at") as csvfile:
cw = csv.writer(csvfile, harness.fieldnames, lineterminator=os.linesep)
cw.writerow(harness.fieldnames)
for instance in harness.recording:
cw.writerow(instance)
def terminate(self, proc):
        # encapsulate terminate functionality so we do it consistently wherever
# we might want to terminate the proc. We need try_kill_process_by_pid
# because of both how newer ninja (1.6.0 or greater) and .NET / renode
# work. Newer ninja's don't seem to pass SIGTERM down to the children
# so we need to use try_kill_process_by_pid.
for child in psutil.Process(proc.pid).children(recursive=True):
try:
os.kill(child.pid, signal.SIGTERM)
except ProcessLookupError:
pass
proc.terminate()
# sleep for a while before attempting to kill
time.sleep(0.5)
proc.kill()
self.terminated = True
def add_missing_testscases(self, harness):
"""
        If the testsuite was interrupted by some error (e.g. a timeout), add
        information about the remaining testcases, which were not performed
        due to this error.
"""
for c in self.instance.testcase.cases:
if c not in harness.tests:
harness.tests[c] = "BLOCK"
def _set_skip_reason(self, harness_state):
"""
        If a testcase written with the ztest framework is skipped by the
        "ztest_test_skip()" function, it is marked as "SKIP" in the
        instance.results dict, but the reason for the skip is still "Unknown".
        This method detects that situation and fills in instance.reason properly.
"""
harness_state_pass = "passed"
harness_testcase_result_skip = "SKIP"
instance_reason_unknown = "Unknown"
if harness_state == harness_state_pass and \
self.instance.reason == instance_reason_unknown and \
harness_testcase_result_skip in self.instance.results.values():
self.instance.reason = "ztest skip"
class BinaryHandler(Handler):
def __init__(self, instance, type_str):
"""Constructor
@param instance Test Instance
"""
super().__init__(instance, type_str)
self.call_west_flash = False
# Tool options
self.valgrind = False
self.lsan = False
self.asan = False
self.ubsan = False
self.coverage = False
def try_kill_process_by_pid(self):
if self.pid_fn:
pid = int(open(self.pid_fn).read())
os.unlink(self.pid_fn)
self.pid_fn = None # clear so we don't try to kill the binary twice
try:
os.kill(pid, signal.SIGTERM)
except ProcessLookupError:
pass
def _output_reader(self, proc):
self.line = proc.stdout.readline()
def _output_handler(self, proc, harness):
if harness.is_pytest:
harness.handle(None)
return
log_out_fp = open(self.log, "wt")
timeout_extended = False
timeout_time = time.time() + self.timeout
while True:
this_timeout = timeout_time - time.time()
if this_timeout < 0:
break
reader_t = threading.Thread(target=self._output_reader, args=(proc,), daemon=True)
reader_t.start()
reader_t.join(this_timeout)
if not reader_t.is_alive():
line = self.line
logger.debug("OUTPUT: {0}".format(line.decode('utf-8').rstrip()))
log_out_fp.write(line.decode('utf-8'))
log_out_fp.flush()
harness.handle(line.decode('utf-8').rstrip())
if harness.state:
if not timeout_extended or harness.capture_coverage:
timeout_extended = True
if harness.capture_coverage:
timeout_time = time.time() + 30
else:
timeout_time = time.time() + 2
else:
reader_t.join(0)
break
try:
# POSIX arch based ztests end on their own,
# so let's give it up to 100ms to do so
proc.wait(0.1)
except subprocess.TimeoutExpired:
self.terminate(proc)
log_out_fp.close()
def handle(self):
harness_name = self.instance.testcase.harness.capitalize()
harness_import = HarnessImporter(harness_name)
harness = harness_import.instance
harness.configure(self.instance)
if self.call_make_run:
command = [self.generator_cmd, "run"]
elif self.call_west_flash:
command = ["west", "flash", "--skip-rebuild", "-d", self.build_dir]
else:
command = [self.binary]
run_valgrind = False
if self.valgrind:
command = ["valgrind", "--error-exitcode=2",
"--leak-check=full",
"--suppressions=" + ZEPHYR_BASE + "/scripts/valgrind.supp",
"--log-file=" + self.build_dir + "/valgrind.log",
"--track-origins=yes",
] + command
run_valgrind = True
logger.debug("Spawning process: " +
" ".join(shlex.quote(word) for word in command) + os.linesep +
"in directory: " + self.build_dir)
start_time = time.time()
env = os.environ.copy()
if self.asan:
env["ASAN_OPTIONS"] = "log_path=stdout:" + \
env.get("ASAN_OPTIONS", "")
if not self.lsan:
env["ASAN_OPTIONS"] += "detect_leaks=0"
if self.ubsan:
env["UBSAN_OPTIONS"] = "log_path=stdout:halt_on_error=1:" + \
env.get("UBSAN_OPTIONS", "")
with subprocess.Popen(command, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, cwd=self.build_dir, env=env) as proc:
logger.debug("Spawning BinaryHandler Thread for %s" % self.name)
t = threading.Thread(target=self._output_handler, args=(proc, harness,), daemon=True)
t.start()
t.join()
if t.is_alive():
self.terminate(proc)
t.join()
proc.wait()
self.returncode = proc.returncode
self.try_kill_process_by_pid()
handler_time = time.time() - start_time
if self.coverage:
subprocess.call(["GCOV_PREFIX=" + self.build_dir,
"gcov", self.sourcedir, "-b", "-s", self.build_dir], shell=True)
# FIXME: This is needed when killing the simulator, the console is
# garbled and needs to be reset. Did not find a better way to do that.
if sys.stdout.isatty():
subprocess.call(["stty", "sane"])
if harness.is_pytest:
harness.pytest_run(self.log)
self.instance.results = harness.tests
if not self.terminated and self.returncode != 0:
if run_valgrind and self.returncode == 2:
self.set_state("failed", handler_time)
self.instance.reason = "Valgrind error"
else:
# When a process is killed, the default handler returns 128 + SIGTERM
# so in that case the return code itself is not meaningful
self.set_state("failed", handler_time)
self.instance.reason = "Failed"
elif harness.state:
self.set_state(harness.state, handler_time)
if harness.state == "failed":
self.instance.reason = "Failed"
else:
self.set_state("timeout", handler_time)
self.instance.reason = "Timeout"
self.add_missing_testscases(harness)
self._set_skip_reason(harness.state)
self.record(harness)
class DeviceHandler(Handler):
def __init__(self, instance, type_str):
"""Constructor
@param instance Test Instance
"""
super().__init__(instance, type_str)
self.suite = None
def monitor_serial(self, ser, halt_fileno, harness):
if harness.is_pytest:
harness.handle(None)
return
log_out_fp = open(self.log, "wt")
ser_fileno = ser.fileno()
readlist = [halt_fileno, ser_fileno]
if self.coverage:
# Set capture_coverage to True to indicate that right after
# test results we should get coverage data, otherwise we exit
# from the test.
harness.capture_coverage = True
ser.flush()
while ser.isOpen():
readable, _, _ = select.select(readlist, [], [], self.timeout)
if halt_fileno in readable:
logger.debug('halted')
ser.close()
break
if ser_fileno not in readable:
continue # Timeout.
serial_line = None
try:
serial_line = ser.readline()
except TypeError:
pass
except serial.SerialException:
ser.close()
break
# Just because ser_fileno has data doesn't mean an entire line
# is available yet.
if serial_line:
sl = serial_line.decode('utf-8', 'ignore').lstrip()
logger.debug("DEVICE: {0}".format(sl.rstrip()))
log_out_fp.write(sl)
log_out_fp.flush()
harness.handle(sl.rstrip())
if harness.state:
if not harness.capture_coverage:
ser.close()
break
log_out_fp.close()
def device_is_available(self, instance):
device = instance.platform.name
fixture = instance.testcase.harness_config.get("fixture")
for d in self.suite.duts:
if fixture and fixture not in d.fixtures:
continue
if d.platform != device or not (d.serial or d.serial_pty):
continue
d.lock.acquire()
avail = False
if d.available:
d.available = 0
d.counter += 1
avail = True
d.lock.release()
if avail:
return d
return None
def make_device_available(self, serial):
for d in self.suite.duts:
if d.serial == serial or d.serial_pty:
d.available = 1
@staticmethod
def run_custom_script(script, timeout):
with subprocess.Popen(script, stderr=subprocess.PIPE, stdout=subprocess.PIPE) as proc:
try:
stdout, stderr = proc.communicate(timeout=timeout)
logger.debug(stdout.decode())
if proc.returncode != 0:
logger.error(f"Custom script failure: {stderr.decode(errors='ignore')}")
except subprocess.TimeoutExpired:
proc.kill()
proc.communicate()
logger.error("{} timed out".format(script))
def handle(self):
out_state = "failed"
runner = None
hardware = self.device_is_available(self.instance)
while not hardware:
logger.debug("Waiting for device {} to become available".format(self.instance.platform.name))
time.sleep(1)
hardware = self.device_is_available(self.instance)
runner = hardware.runner or self.suite.west_runner
serial_pty = hardware.serial_pty
ser_pty_process = None
if serial_pty:
master, slave = pty.openpty()
try:
ser_pty_process = subprocess.Popen(re.split(',| ', serial_pty), stdout=master, stdin=master, stderr=master)
except subprocess.CalledProcessError as error:
logger.error("Failed to run subprocess {}, error {}".format(serial_pty, error.output))
return
serial_device = os.ttyname(slave)
else:
serial_device = hardware.serial
logger.debug(f"Using serial device {serial_device} @ {hardware.baud} baud")
if (self.suite.west_flash is not None) or runner:
command = ["west", "flash", "--skip-rebuild", "-d", self.build_dir]
command_extra_args = []
# There are three ways this option is used.
# 1) bare: --west-flash
# This results in options.west_flash == []
# 2) with a value: --west-flash="--board-id=42"
# This results in options.west_flash == "--board-id=42"
# 3) Multiple values: --west-flash="--board-id=42,--erase"
# This results in options.west_flash == "--board-id=42 --erase"
if self.suite.west_flash and self.suite.west_flash != []:
command_extra_args.extend(self.suite.west_flash.split(','))
if runner:
command.append("--runner")
command.append(runner)
board_id = hardware.probe_id or hardware.id
product = hardware.product
if board_id is not None:
if runner == "pyocd":
command_extra_args.append("--board-id")
command_extra_args.append(board_id)
elif runner == "nrfjprog":
command_extra_args.append("--dev-id")
command_extra_args.append(board_id)
elif runner == "openocd" and product == "STM32 STLink":
command_extra_args.append("--cmd-pre-init")
command_extra_args.append("hla_serial %s" % (board_id))
elif runner == "openocd" and product == "STLINK-V3":
command_extra_args.append("--cmd-pre-init")
command_extra_args.append("hla_serial %s" % (board_id))
elif runner == "openocd" and product == "EDBG CMSIS-DAP":
command_extra_args.append("--cmd-pre-init")
command_extra_args.append("cmsis_dap_serial %s" % (board_id))
elif runner == "jlink":
command.append("--tool-opt=-SelectEmuBySN %s" % (board_id))
elif runner == "stm32cubeprogrammer":
command.append("--tool-opt=sn=%s" % (board_id))
if command_extra_args != []:
command.append('--')
command.extend(command_extra_args)
else:
command = [self.generator_cmd, "-C", self.build_dir, "flash"]
pre_script = hardware.pre_script
post_flash_script = hardware.post_flash_script
post_script = hardware.post_script
if pre_script:
self.run_custom_script(pre_script, 30)
try:
ser = serial.Serial(
serial_device,
baudrate=hardware.baud,
parity=serial.PARITY_NONE,
stopbits=serial.STOPBITS_ONE,
bytesize=serial.EIGHTBITS,
timeout=self.timeout
)
except serial.SerialException as e:
self.set_state("failed", 0)
self.instance.reason = "Failed"
logger.error("Serial device error: %s" % (str(e)))
if serial_pty and ser_pty_process:
ser_pty_process.terminate()
outs, errs = ser_pty_process.communicate()
logger.debug("Process {} terminated outs: {} errs {}".format(serial_pty, outs, errs))
self.make_device_available(serial_device)
return
ser.flush()
harness_name = self.instance.testcase.harness.capitalize()
harness_import = HarnessImporter(harness_name)
harness = harness_import.instance
harness.configure(self.instance)
read_pipe, write_pipe = os.pipe()
start_time = time.time()
t = threading.Thread(target=self.monitor_serial, daemon=True,
args=(ser, read_pipe, harness))
t.start()
d_log = "{}/device.log".format(self.instance.build_dir)
logger.debug('Flash command: %s', command)
try:
stdout = stderr = None
with subprocess.Popen(command, stderr=subprocess.PIPE, stdout=subprocess.PIPE) as proc:
try:
(stdout, stderr) = proc.communicate(timeout=30)
# ignore unencodable unicode chars
logger.debug(stdout.decode(errors = "ignore"))
if proc.returncode != 0:
self.instance.reason = "Device issue (Flash?)"
with open(d_log, "w") as dlog_fp:
dlog_fp.write(stderr.decode())
os.write(write_pipe, b'x') # halt the thread
out_state = "flash_error"
except subprocess.TimeoutExpired:
proc.kill()
(stdout, stderr) = proc.communicate()
self.instance.reason = "Device issue (Timeout)"
with open(d_log, "w") as dlog_fp:
dlog_fp.write(stderr.decode())
except subprocess.CalledProcessError:
os.write(write_pipe, b'x') # halt the thread
if post_flash_script:
self.run_custom_script(post_flash_script, 30)
t.join(self.timeout)
if t.is_alive():
logger.debug("Timed out while monitoring serial output on {}".format(self.instance.platform.name))
out_state = "timeout"
if ser.isOpen():
ser.close()
if serial_pty:
ser_pty_process.terminate()
outs, errs = ser_pty_process.communicate()
logger.debug("Process {} terminated outs: {} errs {}".format(serial_pty, outs, errs))
os.close(write_pipe)
os.close(read_pipe)
handler_time = time.time() - start_time
if out_state in ["timeout", "flash_error"]:
self.add_missing_testscases(harness)
if out_state == "timeout":
self.instance.reason = "Timeout"
elif out_state == "flash_error":
self.instance.reason = "Flash error"
if harness.is_pytest:
harness.pytest_run(self.log)
self.instance.results = harness.tests
        # Sometimes a test instance was not executed successfully and has an
        # empty results dictionary; fill its results as BLOCK so it is still
        # included in the final report.
if self.instance.results == {}:
for k in self.instance.testcase.cases:
self.instance.results[k] = 'BLOCK'
if harness.state:
self.set_state(harness.state, handler_time)
if harness.state == "failed":
self.instance.reason = "Failed"
else:
self.set_state(out_state, handler_time)
self._set_skip_reason(harness.state)
if post_script:
self.run_custom_script(post_script, 30)
self.make_device_available(serial_device)
self.record(harness)
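# Hedged illustration (not part of the original file): how the three --west-flash
# forms described in DeviceHandler.handle() above become extra arguments for the
# "west flash" command. The option values and build directory are made up.
def _example_west_flash_command(west_flash_option, build_dir="build"):
    command = ["west", "flash", "--skip-rebuild", "-d", build_dir]
    extra_args = []
    if west_flash_option and west_flash_option != []:
        # e.g. "--board-id=42,--erase" -> ["--board-id=42", "--erase"]
        extra_args.extend(west_flash_option.split(','))
    if extra_args:
        command.append('--')
        command.extend(extra_args)
    return command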
class QEMUHandler(Handler):
"""Spawns a thread to monitor QEMU output from pipes
We pass QEMU_PIPE to 'make run' and monitor the pipes for output.
We need to do this as once qemu starts, it runs forever until killed.
Test cases emit special messages to the console as they run, we check
for these to collect whether the test passed or failed.
"""
def __init__(self, instance, type_str):
"""Constructor
@param instance Test instance
"""
super().__init__(instance, type_str)
self.fifo_fn = os.path.join(instance.build_dir, "qemu-fifo")
self.pid_fn = os.path.join(instance.build_dir, "qemu.pid")
if "ignore_qemu_crash" in instance.testcase.tags:
self.ignore_qemu_crash = True
self.ignore_unexpected_eof = True
else:
self.ignore_qemu_crash = False
self.ignore_unexpected_eof = False
@staticmethod
def _get_cpu_time(pid):
"""get process CPU time.
The guest virtual time in QEMU icount mode isn't host time and
it's maintained by counting guest instructions, so we use QEMU
process execution time to mostly simulate the time of guest OS.
"""
proc = psutil.Process(pid)
cpu_time = proc.cpu_times()
return cpu_time.user + cpu_time.system
@staticmethod
def _thread(handler, timeout, outdir, logfile, fifo_fn, pid_fn, results, harness,
ignore_unexpected_eof=False):
fifo_in = fifo_fn + ".in"
fifo_out = fifo_fn + ".out"
# These in/out nodes are named from QEMU's perspective, not ours
if os.path.exists(fifo_in):
os.unlink(fifo_in)
os.mkfifo(fifo_in)
if os.path.exists(fifo_out):
os.unlink(fifo_out)
os.mkfifo(fifo_out)
# We don't do anything with out_fp but we need to open it for
# writing so that QEMU doesn't block, due to the way pipes work
out_fp = open(fifo_in, "wb")
# Disable internal buffering, we don't
# want read() or poll() to ever block if there is data in there
in_fp = open(fifo_out, "rb", buffering=0)
log_out_fp = open(logfile, "wt")
start_time = time.time()
timeout_time = start_time + timeout
p = select.poll()
p.register(in_fp, select.POLLIN)
out_state = None
line = ""
timeout_extended = False
pid = 0
if os.path.exists(pid_fn):
pid = int(open(pid_fn).read())
while True:
this_timeout = int((timeout_time - time.time()) * 1000)
if this_timeout < 0 or not p.poll(this_timeout):
try:
if pid and this_timeout > 0:
                        # it's possible we polled nothing because the host did
                        # not schedule enough CPU time for the QEMU process
                        # during p.poll(this_timeout)
cpu_time = QEMUHandler._get_cpu_time(pid)
if cpu_time < timeout and not out_state:
timeout_time = time.time() + (timeout - cpu_time)
continue
except ProcessLookupError:
out_state = "failed"
break
if not out_state:
out_state = "timeout"
break
if pid == 0 and os.path.exists(pid_fn):
pid = int(open(pid_fn).read())
if harness.is_pytest:
harness.handle(None)
out_state = harness.state
break
try:
c = in_fp.read(1).decode("utf-8")
except UnicodeDecodeError:
# Test is writing something weird, fail
out_state = "unexpected byte"
break
if c == "":
# EOF, this shouldn't happen unless QEMU crashes
if not ignore_unexpected_eof:
out_state = "unexpected eof"
break
line = line + c
if c != "\n":
continue
# line contains a full line of data output from QEMU
log_out_fp.write(line)
log_out_fp.flush()
line = line.strip()
logger.debug(f"QEMU ({pid}): {line}")
harness.handle(line)
if harness.state:
# if we have registered a fail make sure the state is not
# overridden by a false success message coming from the
# testsuite
if out_state not in ['failed', 'unexpected eof', 'unexpected byte']:
out_state = harness.state
# if we get some state, that means test is doing well, we reset
# the timeout and wait for 2 more seconds to catch anything
# printed late. We wait much longer if code
# coverage is enabled since dumping this information can
# take some time.
if not timeout_extended or harness.capture_coverage:
timeout_extended = True
if harness.capture_coverage:
timeout_time = time.time() + 30
else:
timeout_time = time.time() + 2
line = ""
if harness.is_pytest:
harness.pytest_run(logfile)
out_state = harness.state
handler.record(harness)
handler_time = time.time() - start_time
logger.debug(f"QEMU ({pid}) complete ({out_state}) after {handler_time} seconds")
if out_state == "timeout":
handler.instance.reason = "Timeout"
handler.set_state("failed", handler_time)
elif out_state == "failed":
handler.instance.reason = "Failed"
handler.set_state("failed", handler_time)
elif out_state in ['unexpected eof', 'unexpected byte']:
handler.instance.reason = out_state
handler.set_state("failed", handler_time)
else:
handler.set_state(out_state, handler_time)
log_out_fp.close()
out_fp.close()
in_fp.close()
        if pid:
            try:
                os.kill(pid, signal.SIGTERM)
            except ProcessLookupError:
                # Oh well, as long as it's dead! User probably sent Ctrl-C
                pass
os.unlink(fifo_in)
os.unlink(fifo_out)
def handle(self):
self.results = {}
self.run = True
# We pass this to QEMU which looks for fifos with .in and .out
# suffixes.
self.fifo_fn = os.path.join(self.instance.build_dir, "qemu-fifo")
self.pid_fn = os.path.join(self.instance.build_dir, "qemu.pid")
if os.path.exists(self.pid_fn):
os.unlink(self.pid_fn)
self.log_fn = self.log
harness_import = HarnessImporter(self.instance.testcase.harness.capitalize())
harness = harness_import.instance
harness.configure(self.instance)
self.thread = threading.Thread(name=self.name, target=QEMUHandler._thread,
args=(self, self.timeout, self.build_dir,
self.log_fn, self.fifo_fn,
self.pid_fn, self.results, harness,
self.ignore_unexpected_eof))
self.instance.results = harness.tests
self.thread.daemon = True
logger.debug("Spawning QEMUHandler Thread for %s" % self.name)
self.thread.start()
if sys.stdout.isatty():
subprocess.call(["stty", "sane"])
logger.debug("Running %s (%s)" % (self.name, self.type_str))
command = [self.generator_cmd]
command += ["-C", self.build_dir, "run"]
is_timeout = False
qemu_pid = None
with subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=self.build_dir) as proc:
logger.debug("Spawning QEMUHandler Thread for %s" % self.name)
try:
proc.wait(self.timeout)
except subprocess.TimeoutExpired:
# sometimes QEMU can't handle SIGTERM signal correctly
# in that case kill -9 QEMU process directly and leave
# twister to judge testing result by console output
is_timeout = True
self.terminate(proc)
if harness.state == "passed":
self.returncode = 0
else:
self.returncode = proc.returncode
else:
if os.path.exists(self.pid_fn):
qemu_pid = int(open(self.pid_fn).read())
logger.debug(f"No timeout, return code from QEMU ({qemu_pid}): {proc.returncode}")
self.returncode = proc.returncode
# Need to wait for harness to finish processing
# output from QEMU. Otherwise it might miss some
# error messages.
self.thread.join(0)
if self.thread.is_alive():
logger.debug("Timed out while monitoring QEMU output")
if os.path.exists(self.pid_fn):
qemu_pid = int(open(self.pid_fn).read())
os.unlink(self.pid_fn)
logger.debug(f"return code from QEMU ({qemu_pid}): {self.returncode}")
if (self.returncode != 0 and not self.ignore_qemu_crash) or not harness.state:
self.set_state("failed", 0)
if is_timeout:
self.instance.reason = "Timeout"
else:
self.instance.reason = "Exited with {}".format(self.returncode)
self.add_missing_testscases(harness)
self._set_skip_reason(harness.state)
def get_fifo(self):
return self.fifo_fn
class SizeCalculator:
alloc_sections = [
"bss",
"noinit",
"app_bss",
"app_noinit",
"ccm_bss",
"ccm_noinit"
]
rw_sections = [
"datas",
"initlevel",
"exceptions",
"initshell",
"_static_thread_data_area",
"k_timer_area",
"k_mem_slab_area",
"k_mem_pool_area",
"sw_isr_table",
"k_sem_area",
"k_mutex_area",
"app_shmem_regions",
"_k_fifo_area",
"_k_lifo_area",
"k_stack_area",
"k_msgq_area",
"k_mbox_area",
"k_pipe_area",
"net_if_area",
"net_if_dev_area",
"net_l2_area",
"net_l2_data",
"k_queue_area",
"_net_buf_pool_area",
"app_datas",
"kobject_data",
"mmu_tables",
"app_pad",
"priv_stacks",
"ccm_data",
"usb_descriptor",
"usb_data", "usb_bos_desc",
"uart_mux",
'log_backends_sections',
'log_dynamic_sections',
'log_const_sections',
"app_smem",
'shell_root_cmds_sections',
'log_const_sections',
"font_entry_sections",
"priv_stacks_noinit",
"_GCOV_BSS_SECTION_NAME",
"gcov",
"nocache",
"devices",
"k_heap_area",
]
# These get copied into RAM only on non-XIP
ro_sections = [
"rom_start",
"text",
"ctors",
"init_array",
"reset",
"z_object_assignment_area",
"rodata",
"net_l2",
"vector",
"sw_isr_table",
"settings_handler_static_area",
"bt_l2cap_fixed_chan_area",
"bt_l2cap_br_fixed_chan_area",
"bt_gatt_service_static_area",
"vectors",
"net_socket_register_area",
"net_ppp_proto",
"shell_area",
"tracing_backend_area",
"ppp_protocol_handler_area",
]
def __init__(self, filename, extra_sections):
"""Constructor
@param filename Path to the output binary
The <filename> is parsed by objdump to determine section sizes
"""
# Make sure this is an ELF binary
with open(filename, "rb") as f:
magic = f.read(4)
try:
if magic != b'\x7fELF':
raise TwisterRuntimeError("%s is not an ELF binary" % filename)
except Exception as e:
print(str(e))
sys.exit(2)
# Search for CONFIG_XIP in the ELF's list of symbols using nm and awk.
# grep cannot be used as it returns a non-zero exit status if the
# symbol is not found.
is_xip_command = "nm " + filename + \
" | awk '/CONFIG_XIP/ { print $3 }'"
is_xip_output = subprocess.check_output(
is_xip_command, shell=True, stderr=subprocess.STDOUT).decode(
"utf-8").strip()
try:
if is_xip_output.endswith("no symbols"):
raise TwisterRuntimeError("%s has no symbol information" % filename)
except Exception as e:
print(str(e))
sys.exit(2)
self.is_xip = (len(is_xip_output) != 0)
self.filename = filename
self.sections = []
self.rom_size = 0
self.ram_size = 0
self.extra_sections = extra_sections
self._calculate_sizes()
def get_ram_size(self):
"""Get the amount of RAM the application will use up on the device
@return amount of RAM, in bytes
"""
return self.ram_size
def get_rom_size(self):
"""Get the size of the data that this application uses on device's flash
@return amount of ROM, in bytes
"""
return self.rom_size
def unrecognized_sections(self):
"""Get a list of sections inside the binary that weren't recognized
@return list of unrecognized section names
"""
slist = []
for v in self.sections:
if not v["recognized"]:
slist.append(v["name"])
return slist
def _calculate_sizes(self):
""" Calculate RAM and ROM usage by section """
objdump_command = "objdump -h " + self.filename
objdump_output = subprocess.check_output(
objdump_command, shell=True).decode("utf-8").splitlines()
for line in objdump_output:
words = line.split()
if not words:  # Skip blank lines
continue
index = words[0]
if not index[0].isdigit():  # Skip lines that do not start with a digit
continue
name = words[1]
if name[0] == '.':  # Skip section names starting with '.'
continue
# TODO this doesn't actually reflect the size in flash or RAM as
# it doesn't include linker-imposed padding between sections.
# It is close though.
size = int(words[2], 16)
if size == 0:
continue
load_addr = int(words[4], 16)
virt_addr = int(words[3], 16)
# Add section to memory use totals (for both non-XIP and XIP scenarios)
# Unrecognized section names are not included in the calculations.
recognized = True
if name in SizeCalculator.alloc_sections:
self.ram_size += size
stype = "alloc"
elif name in SizeCalculator.rw_sections:
self.ram_size += size
self.rom_size += size
stype = "rw"
elif name in SizeCalculator.ro_sections:
self.rom_size += size
if not self.is_xip:
self.ram_size += size
stype = "ro"
else:
stype = "unknown"
if name not in self.extra_sections:
recognized = False
self.sections.append({"name": name, "load_addr": load_addr,
"size": size, "virt_addr": virt_addr,
"type": stype, "recognized": recognized})
class TwisterConfigParser:
"""Class to read test case files with semantic checking
"""
def __init__(self, filename, schema):
"""Instantiate a new TwisterConfigParser object
@param filename Source .yaml file to read
"""
self.data = {}
self.schema = schema
self.filename = filename
self.tests = {}
self.common = {}
def load(self):
self.data = scl.yaml_load_verify(self.filename, self.schema)
if 'tests' in self.data:
self.tests = self.data['tests']
if 'common' in self.data:
self.common = self.data['common']
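# _cast_value() implements the small type language used by get_test() below:
# "str", "int", "float" and "bool" map to the corresponding Python types,
# "list"/"set" split a whitespace-separated string, "list:<type>"/"set:<type>"
# additionally cast each element, and "map" is passed through unchanged.
# For example, given a string input, _cast_value("a b c", "set:str") yields
# {"a", "b", "c"}.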
def _cast_value(self, value, typestr):
if isinstance(value, str):
v = value.strip()
if typestr == "str":
return v
elif typestr == "float":
return float(value)
elif typestr == "int":
return int(value)
elif typestr == "bool":
return value
elif typestr.startswith("list") and isinstance(value, list):
return value
elif typestr.startswith("list") and isinstance(value, str):
vs = v.split()
if len(typestr) > 4 and typestr[4] == ":":
return [self._cast_value(vsi, typestr[5:]) for vsi in vs]
else:
return vs
elif typestr.startswith("set"):
vs = v.split()
if len(typestr) > 3 and typestr[3] == ":":
return {self._cast_value(vsi, typestr[4:]) for vsi in vs}
else:
return set(vs)
elif typestr.startswith("map"):
return value
else:
raise ConfigurationError(
self.filename, "unknown type '%s'" % typestr)
def get_test(self, name, valid_keys):
"""Get a dictionary representing the keys/values within a test
@param name The test in the .yaml file to retrieve data from
@param valid_keys A dictionary representing the intended semantics
for this test. Each key in this dictionary is a key that could
be specified, if a key is given in the .yaml file which isn't in
here, it will generate an error. Each value in this dictionary
is another dictionary containing metadata:
"default" - Default value if not given
"type" - Data type to convert the text value to. Simple types
supported are "str", "float", "int", "bool" which will get
converted to respective Python data types. "set" and "list"
may also be specified which will split the value by
whitespace (but keep the elements as strings). finally,
"list:<type>" and "set:<type>" may be given which will
perform a type conversion after splitting the value up.
"required" - If true, raise an error if not defined. If false
and "default" isn't specified, a type conversion will be
done on an empty string
@return A dictionary containing the test key-value pairs with
type conversion and default values filled in per valid_keys
"""
d = {}
for k, v in self.common.items():
d[k] = v
for k, v in self.tests[name].items():
if k in d:
if isinstance(d[k], str):
# By default, we just concatenate string values of keys
# which appear both in "common" and per-test sections,
# but some keys are handled in adhoc way based on their
# semantics.
if k == "filter":
d[k] = "(%s) and (%s)" % (d[k], v)
else:
d[k] += " " + v
else:
d[k] = v
for k, kinfo in valid_keys.items():
if k not in d:
if "required" in kinfo:
required = kinfo["required"]
else:
required = False
if required:
raise ConfigurationError(
self.filename,
"missing required value for '%s' in test '%s'" %
(k, name))
else:
if "default" in kinfo:
default = kinfo["default"]
else:
default = self._cast_value("", kinfo["type"])
d[k] = default
else:
try:
d[k] = self._cast_value(d[k], kinfo["type"])
except ValueError:
raise ConfigurationError(
self.filename, "bad %s value '%s' for key '%s' in name '%s'" %
(kinfo["type"], d[k], k, name))
return d
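# Platform.load() below reads the board's twister yaml file. A minimal,
# hypothetical example of the keys it consumes:
#
#   identifier: some_board
#   arch: arm
#   ram: 256
#   flash: 1024
#   toolchain: [zephyr]
#   testing:
#     default: true
#     ignore_tags: [net]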
class Platform:
"""Class representing metadata for a particular platform
Maps directly to BOARD when building"""
platform_schema = scl.yaml_load(os.path.join(ZEPHYR_BASE,
"scripts", "schemas", "twister", "platform-schema.yaml"))
def __init__(self):
"""Constructor.
"""
self.name = ""
self.twister = True
# if no RAM size is specified by the board, take a default of 128K
self.ram = 128
self.ignore_tags = []
self.only_tags = []
self.default = False
# if no flash size is specified by the board, take a default of 512K
self.flash = 512
self.supported = set()
self.arch = ""
self.type = "na"
self.simulation = "na"
self.supported_toolchains = []
self.env = []
self.env_satisfied = True
self.filter_data = dict()
def load(self, platform_file):
scp = TwisterConfigParser(platform_file, self.platform_schema)
scp.load()
data = scp.data
self.name = data['identifier']
self.twister = data.get("twister", True)
# if no RAM size is specified by the board, take a default of 128K
self.ram = data.get("ram", 128)
testing = data.get("testing", {})
self.ignore_tags = testing.get("ignore_tags", [])
self.only_tags = testing.get("only_tags", [])
self.default = testing.get("default", False)
# if no flash size is specified by the board, take a default of 512K
self.flash = data.get("flash", 512)
self.supported = set()
for supp_feature in data.get("supported", []):
for item in supp_feature.split(":"):
self.supported.add(item)
self.arch = data['arch']
self.type = data.get('type', "na")
self.simulation = data.get('simulation', "na")
self.supported_toolchains = data.get("toolchain", [])
self.env = data.get("env", [])
self.env_satisfied = True
for env in self.env:
if not os.environ.get(env, None):
self.env_satisfied = False
def __repr__(self):
return "<%s on %s>" % (self.name, self.arch)
class DisablePyTestCollectionMixin(object):
__test__ = False
class ScanPathResult:
"""Result of the TestCase.scan_path function call.
Attributes:
matches A list of test cases
warnings A string containing one or more
warnings to display
has_registered_test_suites Whether or not the path contained any
calls to the ztest_register_test_suite
macro.
has_run_registered_test_suites Whether or not the path contained at
least one call to
ztest_run_registered_test_suites.
has_test_main Whether or not the path contains a
definition of test_main(void)
"""
def __init__(self,
matches: List[str] = None,
warnings: str = None,
has_registered_test_suites: bool = False,
has_run_registered_test_suites: bool = False,
has_test_main: bool = False):
self.matches = matches
self.warnings = warnings
self.has_registered_test_suites = has_registered_test_suites
self.has_run_registered_test_suites = has_run_registered_test_suites
self.has_test_main = has_test_main
def __eq__(self, other):
if not isinstance(other, ScanPathResult):
return False
return (sorted(self.matches) == sorted(other.matches) and
self.warnings == other.warnings and
(self.has_registered_test_suites ==
other.has_registered_test_suites) and
(self.has_run_registered_test_suites ==
other.has_run_registered_test_suites) and
self.has_test_main == other.has_test_main)
class TestCase(DisablePyTestCollectionMixin):
"""Class representing a test application
"""
def __init__(self, testcase_root, workdir, name):
"""TestCase constructor.
This gets called by TestSuite as it finds and reads test yaml files.
Multiple TestCase instances may be generated from a single testcase.yaml,
each one corresponding to an entry within that file.
We need to have a unique name for every single test case. Since
a testcase.yaml can define multiple tests, the canonical name for
the test case is <workdir>/<name>.
@param testcase_root os.path.abspath() of one of the --testcase-root
@param workdir Sub-directory of testcase_root where the
.yaml test configuration file was found
@param name Name of this test case, corresponding to the entry name
in the test case configuration file. For many test cases that just
define one test, this can be anything and is usually "test". This is
really only used to distinguish between different cases when
the testcase.yaml defines multiple tests
"""
self.source_dir = ""
self.yamlfile = ""
self.cases = []
self.name = self.get_unique(testcase_root, workdir, name)
self.id = name
self.type = None
self.tags = set()
self.extra_args = None
self.extra_configs = None
self.arch_allow = None
self.arch_exclude = None
self.skip = False
self.platform_exclude = None
self.platform_allow = None
self.toolchain_exclude = None
self.toolchain_allow = None
self.tc_filter = None
self.timeout = 60
self.harness = ""
self.harness_config = {}
self.build_only = True
self.build_on_all = False
self.slow = False
self.min_ram = -1
self.depends_on = None
self.min_flash = -1
self.extra_sections = None
self.integration_platforms = []
@staticmethod
def get_unique(testcase_root, workdir, name):
canonical_testcase_root = os.path.realpath(testcase_root)
if Path(canonical_zephyr_base) in Path(canonical_testcase_root).parents:
# This is in ZEPHYR_BASE, so include path in name for uniqueness
# FIXME: We should not depend on path of test for unique names.
relative_tc_root = os.path.relpath(canonical_testcase_root,
start=canonical_zephyr_base)
else:
relative_tc_root = ""
# workdir can be "."
unique = os.path.normpath(os.path.join(relative_tc_root, workdir, name))
check = name.split(".")
if len(check) < 2:
raise TwisterException(f"""bad test name '{name}' in {testcase_root}/{workdir}. \
Tests should reference the category and subsystem with a dot as a separator.
"""
)
return unique
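# scan_file() inspects a single C source file for ztest suite and test case
# declarations using the regexes below and returns a ScanPathResult
# describing what it found.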
@staticmethod
def scan_file(inf_name):
suite_regex = re.compile(
# do not match until end-of-line, otherwise we won't allow
# stc_regex below to catch the ones that are declared on the same
# line -- as we only search starting at the end of this match
br"^\s*ztest_test_suite\(\s*(?P<suite_name>[a-zA-Z0-9_]+)\s*,",
re.MULTILINE)
registered_suite_regex = re.compile(
br"^\s*ztest_register_test_suite"
br"\(\s*(?P<suite_name>[a-zA-Z0-9_]+)\s*,",
re.MULTILINE)
# Checks if the file contains a definition of "void test_main(void)"
# Since ztest provides a plain test_main implementation it is OK to:
# 1. register test suites and not call the run function iff the test
# doesn't have a custom test_main.
# 2. register test suites and a custom test_main definition iff the test
# also calls ztest_run_registered_test_suites.
test_main_regex = re.compile(
br"^\s*void\s+test_main\(void\)",
re.MULTILINE)
stc_regex = re.compile(
br"""^\s* # empty space at the beginning is ok
# catch the case where it is declared in the same sentence, e.g:
#
# ztest_test_suite(mutex_complex, ztest_user_unit_test(TESTNAME));
# ztest_register_test_suite(n, p, ztest_user_unit_test(TESTNAME),
(?:ztest_
(?:test_suite\(|register_test_suite\([a-zA-Z0-9_]+\s*,\s*)
[a-zA-Z0-9_]+\s*,\s*
)?
# Catch ztest[_user]_unit_test-[_setup_teardown](TESTNAME)
ztest_(?:1cpu_)?(?:user_)?unit_test(?:_setup_teardown)?
# Consume the argument that becomes the extra testcase
\(\s*(?P<stc_name>[a-zA-Z0-9_]+)
# _setup_teardown() variant has two extra arguments that we ignore
(?:\s*,\s*[a-zA-Z0-9_]+\s*,\s*[a-zA-Z0-9_]+)?
\s*\)""",
# We don't check how it finishes; we don't care
re.MULTILINE | re.VERBOSE)
suite_run_regex = re.compile(
br"^\s*ztest_run_test_suite\((?P<suite_name>[a-zA-Z0-9_]+)\)",
re.MULTILINE)
registered_suite_run_regex = re.compile(
br"^\s*ztest_run_registered_test_suites\("
br"(\*+|&)?(?P<state_identifier>[a-zA-Z0-9_]+)\)",
re.MULTILINE)
achtung_regex = re.compile(
br"(#ifdef|#endif)",
re.MULTILINE)
warnings = None
has_registered_test_suites = False
has_run_registered_test_suites = False
has_test_main = False
with open(inf_name) as inf:
if os.name == 'nt':
mmap_args = {'fileno': inf.fileno(), 'length': 0, 'access': mmap.ACCESS_READ}
else:
mmap_args = {'fileno': inf.fileno(), 'length': 0, 'flags': mmap.MAP_PRIVATE, 'prot': mmap.PROT_READ,
'offset': 0}
with contextlib.closing(mmap.mmap(**mmap_args)) as main_c:
suite_regex_match = suite_regex.search(main_c)
registered_suite_regex_match = registered_suite_regex.search(
main_c)
if registered_suite_regex_match:
has_registered_test_suites = True
if registered_suite_run_regex.search(main_c):
has_run_registered_test_suites = True
if test_main_regex.search(main_c):
has_test_main = True
if not suite_regex_match and not has_registered_test_suites:
# can't find ztest_test_suite or a registered suite; this is probably
# just a client that includes ztest.h without defining tests
return ScanPathResult(
matches=None,
warnings=None,
has_registered_test_suites=has_registered_test_suites,
has_run_registered_test_suites=has_run_registered_test_suites,
has_test_main=has_test_main)
suite_run_match = suite_run_regex.search(main_c)
if suite_regex_match and not suite_run_match:
raise ValueError("can't find ztest_run_test_suite")
if suite_regex_match:
search_start = suite_regex_match.end()
else:
search_start = registered_suite_regex_match.end()
if suite_run_match:
search_end = suite_run_match.start()
else:
search_end = re.compile(br"\);", re.MULTILINE) \
.search(main_c, search_start) \
.end()
achtung_matches = re.findall(
achtung_regex,
main_c[search_start:search_end])
if achtung_matches:
warnings = "found invalid %s in ztest_test_suite()" \
% ", ".join(sorted({match.decode() for match in achtung_matches},reverse = True))
_matches = re.findall(
stc_regex,
main_c[search_start:search_end])
for match in _matches:
if not match.decode().startswith("test_"):
warnings = "Found a test that does not start with test_"
matches = [match.decode().replace("test_", "", 1) for match in _matches]
return ScanPathResult(
matches=matches,
warnings=warnings,
has_registered_test_suites=has_registered_test_suites,
has_run_registered_test_suites=has_run_registered_test_suites,
has_test_main=has_test_main)
def scan_path(self, path):
subcases = []
has_registered_test_suites = False
has_run_registered_test_suites = False
has_test_main = False
for filename in glob.glob(os.path.join(path, "src", "*.c*")):
try:
result: ScanPathResult = self.scan_file(filename)
if result.warnings:
logger.error("%s: %s" % (filename, result.warnings))
raise TwisterRuntimeError(
"%s: %s" % (filename, result.warnings))
if result.matches:
subcases += result.matches
if result.has_registered_test_suites:
has_registered_test_suites = True
if result.has_run_registered_test_suites:
has_run_registered_test_suites = True
if result.has_test_main:
has_test_main = True
except ValueError as e:
logger.error("%s: can't find: %s" % (filename, e))
for filename in glob.glob(os.path.join(path, "*.c")):
try:
result: ScanPathResult = self.scan_file(filename)
if result.warnings:
logger.error("%s: %s" % (filename, result.warnings))
if result.matches:
subcases += result.matches
except ValueError as e:
logger.error("%s: can't find: %s" % (filename, e))
if (has_registered_test_suites and has_test_main and
not has_run_registered_test_suites):
warning = \
"Found call to 'ztest_register_test_suite()' but no "\
"call to 'ztest_run_registered_test_suites()'"
logger.error(warning)
raise TwisterRuntimeError(warning)
return subcases
def parse_subcases(self, test_path):
results = self.scan_path(test_path)
for sub in results:
name = "{}.{}".format(self.id, sub)
self.cases.append(name)
if not results:
self.cases.append(self.id)
def __str__(self):
return self.name
class TestInstance(DisablePyTestCollectionMixin):
"""Class representing the execution of a particular TestCase on a platform
@param testcase The TestCase object we want to build/execute
@param platform Platform object that we want to build and run against
@param outdir Base directory for all test results. The actual
out directory used is <outdir>/<platform>/<test case name>
"""
def __init__(self, testcase, platform, outdir):
self.testcase = testcase
self.platform = platform
self.status = None
self.reason = "Unknown"
self.metrics = dict()
self.handler = None
self.outdir = outdir
self.name = os.path.join(platform.name, testcase.name)
self.build_dir = os.path.join(outdir, platform.name, testcase.name)
self.run = False
self.results = {}
def __getstate__(self):
d = self.__dict__.copy()
return d
def __setstate__(self, d):
self.__dict__.update(d)
def __lt__(self, other):
return self.name < other.name
@staticmethod
def testcase_runnable(testcase, fixtures):
can_run = False
# console harness allows us to run the test and capture data.
if testcase.harness in [ 'console', 'ztest', 'pytest']:
can_run = True
# if we have a fixture that is also being supplied on the
# command-line, then we need to run the test, not just build it.
fixture = testcase.harness_config.get('fixture')
if fixture:
can_run = (fixture in fixtures)
elif testcase.harness:
can_run = False
else:
can_run = True
return can_run
# Global testsuite parameters
def check_runnable(self, enable_slow=False, filter='buildable', fixtures=[]):
# Right now we only support building on Windows; running tests there
# is still a work in progress.
if os.name == 'nt':
return False
# we asked for build-only on the command line
if self.testcase.build_only:
return False
# Do not run slow tests:
skip_slow = self.testcase.slow and not enable_slow
if skip_slow:
return False
target_ready = bool(self.testcase.type == "unit" or \
self.platform.type == "native" or \
self.platform.simulation in ["mdb-nsim", "nsim", "renode", "qemu", "tsim", "armfvp", "xt-sim"] or \
filter == 'runnable')
if self.platform.simulation == "nsim":
if not find_executable("nsimdrv"):
target_ready = False
if self.platform.simulation == "mdb-nsim":
if not find_executable("mdb"):
target_ready = False
if self.platform.simulation == "renode":
if not find_executable("renode"):
target_ready = False
if self.platform.simulation == "tsim":
if not find_executable("tsim-leon3"):
target_ready = False
testcase_runnable = self.testcase_runnable(self.testcase, fixtures)
return testcase_runnable and target_ready
def create_overlay(self, platform, enable_asan=False, enable_ubsan=False, enable_coverage=False, coverage_platform=[]):
# Create this in a "twister/" subdirectory otherwise this
# will pass this overlay to kconfig.py *twice* and kconfig.cmake
# will silently give that second time precedence over any
# --extra-args=CONFIG_*
subdir = os.path.join(self.build_dir, "twister")
content = ""
if self.testcase.extra_configs:
content = "\n".join(self.testcase.extra_configs)
if enable_coverage:
if platform.name in coverage_platform:
content = content + "\nCONFIG_COVERAGE=y"
content = content + "\nCONFIG_COVERAGE_DUMP=y"
if enable_asan:
if platform.type == "native":
content = content + "\nCONFIG_ASAN=y"
if enable_ubsan:
if platform.type == "native":
content = content + "\nCONFIG_UBSAN=y"
if content:
os.makedirs(subdir, exist_ok=True)
file = os.path.join(subdir, "testcase_extra.conf")
with open(file, "w") as f:
f.write(content)
return content
def calculate_sizes(self):
"""Get the RAM/ROM sizes of a test case.
This can only be run after the instance has been built,
otherwise there won't be any binaries to measure.
@return A SizeCalculator object
"""
fns = glob.glob(os.path.join(self.build_dir, "zephyr", "*.elf"))
fns.extend(glob.glob(os.path.join(self.build_dir, "zephyr", "*.exe")))
fns = [x for x in fns if '_pre' not in x]
if len(fns) != 1:
raise BuildError("Missing/multiple output ELF binary")
return SizeCalculator(fns[0], self.testcase.extra_sections)
def fill_results_by_status(self):
"""Fills results according to self.status
The method is used to propagate the instance level status
to the test cases inside. Useful when the whole instance is skipped
and the info is required also at the test cases level for reporting.
Should be used with caution, e.g. should not be used
to fill all results with passes
"""
status_to_verdict = {
'skipped': 'SKIP',
'error': 'BLOCK',
'failure': 'FAILED'
}
for k in self.results:
self.results[k] = status_to_verdict[self.status]
def __repr__(self):
return "<TestCase %s on %s>" % (self.testcase.name, self.platform.name)
class CMake():
config_re = re.compile('(CONFIG_[A-Za-z0-9_]+)[=]\"?([^\"]*)\"?$')
dt_re = re.compile('([A-Za-z0-9_]+)[=]\"?([^\"]*)\"?$')
def __init__(self, testcase, platform, source_dir, build_dir):
self.cwd = None
self.capture_output = True
self.defconfig = {}
self.cmake_cache = {}
self.instance = None
self.testcase = testcase
self.platform = platform
self.source_dir = source_dir
self.build_dir = build_dir
self.log = "build.log"
self.generator = None
self.generator_cmd = None
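# Overridden by FilterBuilder below to parse the generated .config and
# CMake cache; the base implementation returns nothing useful.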
def parse_generated(self):
self.defconfig = {}
return {}
def run_build(self, args=[]):
logger.debug("Building %s for %s" % (self.source_dir, self.platform.name))
cmake_args = []
cmake_args.extend(args)
cmake = shutil.which('cmake')
cmd = [cmake] + cmake_args
kwargs = dict()
if self.capture_output:
kwargs['stdout'] = subprocess.PIPE
# CMake sends the output of message() to stderr unless it's STATUS
kwargs['stderr'] = subprocess.STDOUT
if self.cwd:
kwargs['cwd'] = self.cwd
p = subprocess.Popen(cmd, **kwargs)
out, _ = p.communicate()
results = {}
if p.returncode == 0:
msg = "Finished building %s for %s" % (self.source_dir, self.platform.name)
self.instance.status = "passed"
results = {'msg': msg, "returncode": p.returncode, "instance": self.instance}
if out:
log_msg = out.decode(sys.getdefaultencoding())
with open(os.path.join(self.build_dir, self.log), "a") as log:
log.write(log_msg)
else:
return None
else:
# A real error occurred, raise an exception
log_msg = ""
if out:
log_msg = out.decode(sys.getdefaultencoding())
with open(os.path.join(self.build_dir, self.log), "a") as log:
log.write(log_msg)
if log_msg:
res = re.findall("region `(FLASH|ROM|RAM|ICCM|DCCM|SRAM)' overflowed by", log_msg)
if res and not self.overflow_as_errors:
logger.debug("Test skipped due to {} Overflow".format(res[0]))
self.instance.status = "skipped"
self.instance.reason = "{} overflow".format(res[0])
else:
self.instance.status = "error"
self.instance.reason = "Build failure"
results = {
"returncode": p.returncode,
"instance": self.instance,
}
return results
def run_cmake(self, args=[]):
if self.warnings_as_errors:
ldflags = "-Wl,--fatal-warnings"
cflags = "-Werror"
aflags = "-Werror -Wa,--fatal-warnings"
gen_defines_args = "--edtlib-Werror"
else:
ldflags = cflags = aflags = ""
gen_defines_args = ""
logger.debug("Running cmake on %s for %s" % (self.source_dir, self.platform.name))
cmake_args = [
f'-B{self.build_dir}',
f'-S{self.source_dir}',
f'-DEXTRA_CFLAGS={cflags}',
f'-DEXTRA_AFLAGS={aflags}',
f'-DEXTRA_LDFLAGS={ldflags}',
f'-DEXTRA_GEN_DEFINES_ARGS={gen_defines_args}',
f'-G{self.generator}'
]
args = ["-D{}".format(a.replace('"', '')) for a in args]
cmake_args.extend(args)
cmake_opts = ['-DBOARD={}'.format(self.platform.name)]
cmake_args.extend(cmake_opts)
logger.debug("Calling cmake with arguments: {}".format(cmake_args))
cmake = shutil.which('cmake')
cmd = [cmake] + cmake_args
kwargs = dict()
if self.capture_output:
kwargs['stdout'] = subprocess.PIPE
# CMake sends the output of message() to stderr unless it's STATUS
kwargs['stderr'] = subprocess.STDOUT
if self.cwd:
kwargs['cwd'] = self.cwd
p = subprocess.Popen(cmd, **kwargs)
out, _ = p.communicate()
if p.returncode == 0:
filter_results = self.parse_generated()
msg = "Finished building %s for %s" % (self.source_dir, self.platform.name)
logger.debug(msg)
results = {'msg': msg, 'filter': filter_results}
else:
self.instance.status = "error"
self.instance.reason = "Cmake build failure"
self.instance.fill_results_by_status()
logger.error("Cmake build failure: %s for %s" % (self.source_dir, self.platform.name))
results = {"returncode": p.returncode}
if out:
with open(os.path.join(self.build_dir, self.log), "a") as log:
log_msg = out.decode(sys.getdefaultencoding())
log.write(log_msg)
return results
@staticmethod
def run_cmake_script(args=[]):
logger.debug("Running cmake script %s" % (args[0]))
cmake_args = ["-D{}".format(a.replace('"', '')) for a in args[1:]]
cmake_args.extend(['-P', args[0]])
logger.debug("Calling cmake with arguments: {}".format(cmake_args))
cmake = shutil.which('cmake')
if not cmake:
msg = "Unable to find `cmake` in path"
logger.error(msg)
raise Exception(msg)
cmd = [cmake] + cmake_args
kwargs = dict()
kwargs['stdout'] = subprocess.PIPE
# CMake sends the output of message() to stderr unless it's STATUS
kwargs['stderr'] = subprocess.STDOUT
p = subprocess.Popen(cmd, **kwargs)
out, _ = p.communicate()
# It might happen that the environment adds ANSI escape codes like \x1b[0m,
# for instance if twister is executed from inside a makefile. In such a
# scenario it is then necessary to remove them, as otherwise the JSON decoding
# will fail.
ansi_escape = re.compile(r'\x1B(?:[@-Z\\-_]|\[[0-?]*[ -/]*[@-~])')
out = ansi_escape.sub('', out.decode())
if p.returncode == 0:
msg = "Finished running %s" % (args[0])
logger.debug(msg)
results = {"returncode": p.returncode, "msg": msg, "stdout": out}
else:
logger.error("Cmake script failure: %s" % (args[0]))
results = {"returncode": p.returncode, "returnmsg": out}
return results
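# FilterBuilder runs the cmake stage and then evaluates a test case's
# "filter:" expression against the generated Kconfig (.config), the CMake
# cache and, when available, the devicetree (edt.pickle).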
class FilterBuilder(CMake):
def __init__(self, testcase, platform, source_dir, build_dir):
super().__init__(testcase, platform, source_dir, build_dir)
self.log = "config-twister.log"
def parse_generated(self):
if self.platform.name == "unit_testing":
return {}
cmake_cache_path = os.path.join(self.build_dir, "CMakeCache.txt")
defconfig_path = os.path.join(self.build_dir, "zephyr", ".config")
with open(defconfig_path, "r") as fp:
defconfig = {}
for line in fp.readlines():
m = self.config_re.match(line)
if not m:
if line.strip() and not line.startswith("#"):
sys.stderr.write("Unrecognized line %s\n" % line)
continue
defconfig[m.group(1)] = m.group(2).strip()
self.defconfig = defconfig
cmake_conf = {}
try:
cache = CMakeCache.from_file(cmake_cache_path)
except FileNotFoundError:
cache = {}
for k in iter(cache):
cmake_conf[k.name] = k.value
self.cmake_cache = cmake_conf
filter_data = {
"ARCH": self.platform.arch,
"PLATFORM": self.platform.name
}
filter_data.update(os.environ)
filter_data.update(self.defconfig)
filter_data.update(self.cmake_cache)
edt_pickle = os.path.join(self.build_dir, "zephyr", "edt.pickle")
if self.testcase and self.testcase.tc_filter:
try:
if os.path.exists(edt_pickle):
with open(edt_pickle, 'rb') as f:
edt = pickle.load(f)
else:
edt = None
res = expr_parser.parse(self.testcase.tc_filter, filter_data, edt)
except (ValueError, SyntaxError) as se:
sys.stderr.write(
"Failed processing %s\n" % self.testcase.yamlfile)
raise se
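# The returned mapping uses True to mean "filtered out": the caller
# (ProjectBuilder.process) skips the instance when its entry is True.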
if not res:
return {os.path.join(self.platform.name, self.testcase.name): True}
else:
return {os.path.join(self.platform.name, self.testcase.name): False}
else:
self.platform.filter_data = filter_data
return filter_data
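# ProjectBuilder drives one test instance through the work pipeline used by
# the runner: "cmake" -> "build" -> "gather_metrics" -> optional "run" ->
# "report" -> optional "cleanup" (see process() below).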
class ProjectBuilder(FilterBuilder):
def __init__(self, suite, instance, **kwargs):
super().__init__(instance.testcase, instance.platform, instance.testcase.source_dir, instance.build_dir)
self.log = "build.log"
self.instance = instance
self.suite = suite
self.filtered_tests = 0
self.lsan = kwargs.get('lsan', False)
self.asan = kwargs.get('asan', False)
self.ubsan = kwargs.get('ubsan', False)
self.valgrind = kwargs.get('valgrind', False)
self.extra_args = kwargs.get('extra_args', [])
self.device_testing = kwargs.get('device_testing', False)
self.cmake_only = kwargs.get('cmake_only', False)
self.cleanup = kwargs.get('cleanup', False)
self.coverage = kwargs.get('coverage', False)
self.inline_logs = kwargs.get('inline_logs', False)
self.generator = kwargs.get('generator', None)
self.generator_cmd = kwargs.get('generator_cmd', None)
self.verbose = kwargs.get('verbose', None)
self.warnings_as_errors = kwargs.get('warnings_as_errors', True)
self.overflow_as_errors = kwargs.get('overflow_as_errors', False)
@staticmethod
def log_info(filename, inline_logs):
filename = os.path.abspath(os.path.realpath(filename))
if inline_logs:
logger.info("{:-^100}".format(filename))
try:
with open(filename) as fp:
data = fp.read()
except Exception as e:
data = "Unable to read log data (%s)\n" % (str(e))
logger.error(data)
logger.info("{:-^100}".format(filename))
else:
logger.error("see: " + Fore.YELLOW + filename + Fore.RESET)
def log_info_file(self, inline_logs):
build_dir = self.instance.build_dir
h_log = "{}/handler.log".format(build_dir)
b_log = "{}/build.log".format(build_dir)
v_log = "{}/valgrind.log".format(build_dir)
d_log = "{}/device.log".format(build_dir)
if os.path.exists(v_log) and "Valgrind" in self.instance.reason:
self.log_info("{}".format(v_log), inline_logs)
elif os.path.exists(h_log) and os.path.getsize(h_log) > 0:
self.log_info("{}".format(h_log), inline_logs)
elif os.path.exists(d_log) and os.path.getsize(d_log) > 0:
self.log_info("{}".format(d_log), inline_logs)
else:
self.log_info("{}".format(b_log), inline_logs)
def setup_handler(self):
instance = self.instance
args = []
# FIXME: Needs simplification
if instance.platform.simulation == "qemu":
instance.handler = QEMUHandler(instance, "qemu")
args.append("QEMU_PIPE=%s" % instance.handler.get_fifo())
instance.handler.call_make_run = True
elif instance.testcase.type == "unit":
instance.handler = BinaryHandler(instance, "unit")
instance.handler.binary = os.path.join(instance.build_dir, "testbinary")
if self.coverage:
args.append("COVERAGE=1")
elif instance.platform.type == "native":
handler = BinaryHandler(instance, "native")
handler.asan = self.asan
handler.valgrind = self.valgrind
handler.lsan = self.lsan
handler.ubsan = self.ubsan
handler.coverage = self.coverage
handler.binary = os.path.join(instance.build_dir, "zephyr", "zephyr.exe")
instance.handler = handler
elif instance.platform.simulation == "renode":
if find_executable("renode"):
instance.handler = BinaryHandler(instance, "renode")
instance.handler.pid_fn = os.path.join(instance.build_dir, "renode.pid")
instance.handler.call_make_run = True
elif instance.platform.simulation == "tsim":
instance.handler = BinaryHandler(instance, "tsim")
instance.handler.call_make_run = True
elif self.device_testing:
instance.handler = DeviceHandler(instance, "device")
instance.handler.coverage = self.coverage
elif instance.platform.simulation == "nsim":
if find_executable("nsimdrv"):
instance.handler = BinaryHandler(instance, "nsim")
instance.handler.call_make_run = True
elif instance.platform.simulation == "mdb-nsim":
if find_executable("mdb"):
instance.handler = BinaryHandler(instance, "nsim")
instance.handler.call_make_run = True
elif instance.platform.simulation == "armfvp":
instance.handler = BinaryHandler(instance, "armfvp")
instance.handler.call_make_run = True
elif instance.platform.simulation == "xt-sim":
instance.handler = BinaryHandler(instance, "xt-sim")
instance.handler.call_make_run = True
if instance.handler:
instance.handler.args = args
instance.handler.generator_cmd = self.generator_cmd
instance.handler.generator = self.generator
def process(self, pipeline, done, message, lock, results):
op = message.get('op')
if not self.instance.handler:
self.setup_handler()
# The build process, call cmake and build with configured generator
if op == "cmake":
res = self.cmake()
if self.instance.status in ["failed", "error"]:
pipeline.put({"op": "report", "test": self.instance})
elif self.cmake_only:
if self.instance.status is None:
self.instance.status = "passed"
pipeline.put({"op": "report", "test": self.instance})
else:
if self.instance.name in res['filter'] and res['filter'][self.instance.name]:
logger.debug("filtering %s" % self.instance.name)
self.instance.status = "skipped"
self.instance.reason = "filter"
results.skipped_runtime += 1
for case in self.instance.testcase.cases:
self.instance.results.update({case: 'SKIP'})
pipeline.put({"op": "report", "test": self.instance})
else:
pipeline.put({"op": "build", "test": self.instance})
elif op == "build":
logger.debug("build test: %s" % self.instance.name)
res = self.build()
if not res:
self.instance.status = "error"
self.instance.reason = "Build Failure"
pipeline.put({"op": "report", "test": self.instance})
else:
# Count skipped cases during build, for example
# due to ram/rom overflow.
inst = res.get("instance", None)
if inst and inst.status == "skipped":
results.skipped_runtime += 1
if res.get('returncode', 1) > 0:
pipeline.put({"op": "report", "test": self.instance})
else:
pipeline.put({"op": "gather_metrics", "test": self.instance})
elif op == "gather_metrics":
self.gather_metrics(self.instance)
if self.instance.run and self.instance.handler:
pipeline.put({"op": "run", "test": self.instance})
else:
pipeline.put({"op": "report", "test": self.instance})
# Run the generated binary using one of the supported handlers
elif op == "run":
logger.debug("run test: %s" % self.instance.name)
self.run()
self.instance.status, _ = self.instance.handler.get_state()
logger.debug(f"run status: {self.instance.name} {self.instance.status}")
# to make it work with pickle
self.instance.handler.thread = None
self.instance.handler.suite = None
pipeline.put({
"op": "report",
"test": self.instance,
"status": self.instance.status,
"reason": self.instance.reason
}
)
# Report results and output progress to screen
elif op == "report":
with lock:
done.put(self.instance)
self.report_out(results)
if self.cleanup and not self.coverage and self.instance.status == "passed":
pipeline.put({
"op": "cleanup",
"test": self.instance
})
elif op == "cleanup":
if self.device_testing:
self.cleanup_device_testing_artifacts()
else:
self.cleanup_artifacts()
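# cleanup_artifacts() prunes the build directory down to a small allow-list
# of files (plus any additional_keep entries) and removes empty directories
# and directory symlinks left behind.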
def cleanup_artifacts(self, additional_keep=[]):
logger.debug("Cleaning up {}".format(self.instance.build_dir))
allow = [
'zephyr/.config',
'handler.log',
'build.log',
'device.log',
'recording.csv',
]
allow += additional_keep
allow = [os.path.join(self.instance.build_dir, file) for file in allow]
for dirpath, dirnames, filenames in os.walk(self.instance.build_dir, topdown=False):
for name in filenames:
path = os.path.join(dirpath, name)
if path not in allow:
os.remove(path)
# Remove empty directories and symbolic links to directories
for dir in dirnames:
path = os.path.join(dirpath, dir)
if os.path.islink(path):
os.remove(path)
elif not os.listdir(path):
os.rmdir(path)
def cleanup_device_testing_artifacts(self):
logger.debug("Cleaning up for Device Testing {}".format(self.instance.build_dir))
sanitizelist = [
'CMakeCache.txt',
'zephyr/runners.yaml',
]
keep = [
'zephyr/zephyr.hex',
'zephyr/zephyr.bin',
'zephyr/zephyr.elf',
]
keep += sanitizelist
self.cleanup_artifacts(keep)
# sanitize paths so files are relocatable
for file in sanitizelist:
file = os.path.join(self.instance.build_dir, file)
with open(file, "rt") as fin:
data = fin.read()
data = data.replace(canonical_zephyr_base+"/", "")
with open(file, "wt") as fin:
fin.write(data)
def report_out(self, results):
total_to_do = results.total
total_tests_width = len(str(total_to_do))
results.done += 1
instance = self.instance
if instance.status in ["error", "failed", "timeout", "flash_error"]:
if instance.status == "error":
results.error += 1
results.failed += 1
if self.verbose:
status = Fore.RED + "FAILED " + Fore.RESET + instance.reason
else:
print("")
logger.error(
"{:<25} {:<50} {}FAILED{}: {}".format(
instance.platform.name,
instance.testcase.name,
Fore.RED,
Fore.RESET,
instance.reason))
if not self.verbose:
self.log_info_file(self.inline_logs)
elif instance.status == "skipped":
status = Fore.YELLOW + "SKIPPED" + Fore.RESET
results.skipped_configs += 1
results.skipped_cases += len(instance.testcase.cases)
elif instance.status == "passed":
status = Fore.GREEN + "PASSED" + Fore.RESET
results.passed += 1
for res in instance.results.values():
if res == 'SKIP':
results.skipped_cases += 1
else:
logger.debug(f"Unknown status = {instance.status}")
status = Fore.YELLOW + "UNKNOWN" + Fore.RESET
if self.verbose:
if self.cmake_only:
more_info = "cmake"
elif instance.status == "skipped":
more_info = instance.reason
else:
if instance.handler and instance.run:
more_info = instance.handler.type_str
htime = instance.handler.duration
if htime:
more_info += " {:.3f}s".format(htime)
else:
more_info = "build"
logger.info("{:>{}}/{} {:<25} {:<50} {} ({})".format(
results.done + results.skipped_filter, total_tests_width, total_to_do, instance.platform.name,
instance.testcase.name, status, more_info))
if instance.status in ["error", "failed", "timeout"]:
self.log_info_file(self.inline_logs)
else:
completed_perc = 0
if total_to_do > 0:
completed_perc = int((float(results.done + results.skipped_filter) / total_to_do) * 100)
sys.stdout.write("\rINFO - Total complete: %s%4d/%4d%s %2d%% skipped: %s%4d%s, failed: %s%4d%s" % (
Fore.GREEN,
results.done + results.skipped_filter,
total_to_do,
Fore.RESET,
completed_perc,
Fore.YELLOW if results.skipped_configs > 0 else Fore.RESET,
results.skipped_filter + results.skipped_runtime,
Fore.RESET,
Fore.RED if results.failed > 0 else Fore.RESET,
results.failed,
Fore.RESET
)
)
sys.stdout.flush()
def cmake(self):
instance = self.instance
args = self.testcase.extra_args[:]
args += self.extra_args
if instance.handler:
args += instance.handler.args
# merge overlay files into one variable
def extract_overlays(args):
re_overlay = re.compile('OVERLAY_CONFIG=(.*)')
other_args = []
overlays = []
for arg in args:
match = re_overlay.search(arg)
if match:
overlays.append(match.group(1).strip('\'"'))
else:
other_args.append(arg)
args[:] = other_args
return overlays
overlays = extract_overlays(args)
if os.path.exists(os.path.join(instance.build_dir,
"twister", "testcase_extra.conf")):
overlays.append(os.path.join(instance.build_dir,
"twister", "testcase_extra.conf"))
if overlays:
args.append("OVERLAY_CONFIG=\"%s\"" % (" ".join(overlays)))
res = self.run_cmake(args)
return res
def build(self):
res = self.run_build(['--build', self.build_dir])
return res
def run(self):
instance = self.instance
if instance.handler:
if instance.handler.type_str == "device":
instance.handler.suite = self.suite
instance.handler.handle()
sys.stdout.flush()
def gather_metrics(self, instance):
if self.suite.enable_size_report and not self.suite.cmake_only:
self.calc_one_elf_size(instance)
else:
instance.metrics["ram_size"] = 0
instance.metrics["rom_size"] = 0
instance.metrics["unrecognized"] = []
@staticmethod
def calc_one_elf_size(instance):
if instance.status not in ["error", "failed", "skipped"]:
if instance.platform.type != "native":
size_calc = instance.calculate_sizes()
instance.metrics["ram_size"] = size_calc.get_ram_size()
instance.metrics["rom_size"] = size_calc.get_rom_size()
instance.metrics["unrecognized"] = size_calc.unrecognized_sections()
else:
instance.metrics["ram_size"] = 0
instance.metrics["rom_size"] = 0
instance.metrics["unrecognized"] = []
instance.metrics["handler_time"] = instance.handler.duration if instance.handler else 0
class TestSuite(DisablePyTestCollectionMixin):
config_re = re.compile('(CONFIG_[A-Za-z0-9_]+)[=]\"?([^\"]*)\"?$')
dt_re = re.compile('([A-Za-z0-9_]+)[=]\"?([^\"]*)\"?$')
tc_schema = scl.yaml_load(
os.path.join(ZEPHYR_BASE,
"scripts", "schemas", "twister", "testcase-schema.yaml"))
quarantine_schema = scl.yaml_load(
os.path.join(ZEPHYR_BASE,
"scripts", "schemas", "twister", "quarantine-schema.yaml"))
testcase_valid_keys = {"tags": {"type": "set", "required": False},
"type": {"type": "str", "default": "integration"},
"extra_args": {"type": "list"},
"extra_configs": {"type": "list"},
"build_only": {"type": "bool", "default": False},
"build_on_all": {"type": "bool", "default": False},
"skip": {"type": "bool", "default": False},
"slow": {"type": "bool", "default": False},
"timeout": {"type": "int", "default": 60},
"min_ram": {"type": "int", "default": 8},
"depends_on": {"type": "set"},
"min_flash": {"type": "int", "default": 32},
"arch_allow": {"type": "set"},
"arch_exclude": {"type": "set"},
"extra_sections": {"type": "list", "default": []},
"integration_platforms": {"type": "list", "default": []},
"platform_exclude": {"type": "set"},
"platform_allow": {"type": "set"},
"toolchain_exclude": {"type": "set"},
"toolchain_allow": {"type": "set"},
"filter": {"type": "str"},
"harness": {"type": "str"},
"harness_config": {"type": "map", "default": {}}
}
RELEASE_DATA = os.path.join(ZEPHYR_BASE, "scripts", "release",
"twister_last_release.csv")
SAMPLE_FILENAME = 'sample.yaml'
TESTCASE_FILENAME = 'testcase.yaml'
def __init__(self, board_root_list=[], testcase_roots=[], outdir=None):
self.roots = testcase_roots
if not isinstance(board_root_list, list):
self.board_roots = [board_root_list]
else:
self.board_roots = board_root_list
# Testsuite Options
self.coverage_platform = []
self.build_only = False
self.cmake_only = False
self.cleanup = False
self.enable_slow = False
self.device_testing = False
self.fixtures = []
self.enable_coverage = False
self.enable_ubsan = False
self.enable_lsan = False
self.enable_asan = False
self.enable_valgrind = False
self.extra_args = []
self.inline_logs = False
self.enable_sizes_report = False
self.west_flash = None
self.west_runner = None
self.generator = None
self.generator_cmd = None
self.warnings_as_errors = True
self.overflow_as_errors = False
self.quarantine_verify = False
self.retry_build_errors = False
# Keep track of which test cases we've filtered out and why
self.testcases = {}
self.quarantine = {}
self.platforms = []
self.platform_names = []
self.selected_platforms = []
self.filtered_platforms = []
self.default_platforms = []
self.outdir = os.path.abspath(outdir)
self.discards = {}
self.load_errors = 0
self.instances = dict()
self.total_platforms = 0
self.start_time = 0
self.duration = 0
self.warnings = 0
# hardcoded for now
self.duts = []
# run integration tests only
self.integration = False
# used during creating shorter build paths
self.link_dir_counter = 0
self.pipeline = None
self.version = "NA"
def check_zephyr_version(self):
try:
subproc = subprocess.run(["git", "describe", "--abbrev=12", "--always"],
stdout=subprocess.PIPE,
universal_newlines=True,
cwd=ZEPHYR_BASE)
if subproc.returncode == 0:
self.version = subproc.stdout.strip()
logger.info(f"Zephyr version: {self.version}")
except OSError:
logger.info("Cannot read zephyr version.")
def get_platform_instances(self, platform):
filtered_dict = {k:v for k,v in self.instances.items() if k.startswith(platform + os.sep)}
return filtered_dict
def config(self):
logger.info("coverage platform: {}".format(self.coverage_platform))
# Debug Functions
@staticmethod
def info(what):
sys.stdout.write(what + "\n")
sys.stdout.flush()
def update_counting(self, results=None):
for instance in self.instances.values():
results.cases += len(instance.testcase.cases)
if instance.status == 'skipped':
results.skipped_filter += 1
results.skipped_configs += 1
results.skipped_cases += len(instance.testcase.cases)
def compare_metrics(self, filename):
# name, datatype, lower results better
interesting_metrics = [("ram_size", int, True),
("rom_size", int, True)]
if not os.path.exists(filename):
logger.error("Cannot compare metrics, %s not found" % filename)
return []
results = []
saved_metrics = {}
with open(filename) as fp:
cr = csv.DictReader(fp)
for row in cr:
d = {}
for m, _, _ in interesting_metrics:
d[m] = row[m]
saved_metrics[(row["test"], row["platform"])] = d
for instance in self.instances.values():
mkey = (instance.testcase.name, instance.platform.name)
if mkey not in saved_metrics:
continue
sm = saved_metrics[mkey]
for metric, mtype, lower_better in interesting_metrics:
if metric not in instance.metrics:
continue
if sm[metric] == "":
continue
delta = instance.metrics.get(metric, 0) - mtype(sm[metric])
if delta == 0:
continue
results.append((instance, metric, instance.metrics.get(metric, 0), delta,
lower_better))
return results
def footprint_reports(self, report, show_footprint, all_deltas,
footprint_threshold, last_metrics):
if not report:
return
logger.debug("running footprint_reports")
deltas = self.compare_metrics(report)
warnings = 0
if deltas and show_footprint:
for i, metric, value, delta, lower_better in deltas:
if not all_deltas and ((delta < 0 and lower_better) or
(delta > 0 and not lower_better)):
continue
percentage = 0
if value > delta:
percentage = (float(delta) / float(value - delta))
if not all_deltas and (percentage < (footprint_threshold / 100.0)):
continue
logger.info("{:<25} {:<60} {}{}{}: {} {:<+4}, is now {:6} {:+.2%}".format(
i.platform.name, i.testcase.name, Fore.YELLOW,
"INFO" if all_deltas else "WARNING", Fore.RESET,
metric, delta, value, percentage))
warnings += 1
if warnings:
logger.warning("Deltas based on metrics from last %s" %
("release" if not last_metrics else "run"))
def summary(self, results, unrecognized_sections):
failed = 0
run = 0
for instance in self.instances.values():
if instance.status == "failed":
failed += 1
elif instance.metrics.get("unrecognized") and not unrecognized_sections:
logger.error("%sFAILED%s: %s has unrecognized binary sections: %s" %
(Fore.RED, Fore.RESET, instance.name,
str(instance.metrics.get("unrecognized", []))))
failed += 1
if instance.metrics.get('handler_time', None):
run += 1
if results.total and results.total != results.skipped_configs:
pass_rate = (float(results.passed) / float(results.total - results.skipped_configs))
else:
pass_rate = 0
logger.info(
"{}{} of {}{} test configurations passed ({:.2%}), {}{}{} failed, {} skipped with {}{}{} warnings in {:.2f} seconds".format(
Fore.RED if failed else Fore.GREEN,
results.passed,
results.total,
Fore.RESET,
pass_rate,
Fore.RED if results.failed else Fore.RESET,
results.failed,
Fore.RESET,
results.skipped_configs,
Fore.YELLOW if self.warnings else Fore.RESET,
self.warnings,
Fore.RESET,
self.duration))
self.total_platforms = len(self.platforms)
# if we are only building, do not report about tests being executed.
if self.platforms and not self.build_only:
logger.info("In total {} test cases were executed, {} skipped on {} out of total {} platforms ({:02.2f}%)".format(
results.cases - results.skipped_cases,
results.skipped_cases,
len(self.filtered_platforms),
self.total_platforms,
(100 * len(self.filtered_platforms) / len(self.platforms))
))
logger.info(f"{Fore.GREEN}{run}{Fore.RESET} test configurations executed on platforms, \
{Fore.RED}{results.total - run - results.skipped_configs}{Fore.RESET} test configurations were only built.")
def save_reports(self, name, suffix, report_dir, no_update, release, only_failed, platform_reports, json_report, report_skipped):
if not self.instances:
return
logger.info("Saving reports...")
if name:
report_name = name
else:
report_name = "twister"
if report_dir:
os.makedirs(report_dir, exist_ok=True)
filename = os.path.join(report_dir, report_name)
outdir = report_dir
else:
filename = os.path.join(self.outdir, report_name)
outdir = self.outdir
if suffix:
filename = "{}_{}".format(filename, suffix)
if not no_update:
self.xunit_report(filename + ".xml", full_report=False,
append=only_failed, version=self.version, report_skipped=report_skipped)
self.xunit_report(filename + "_report.xml", full_report=True,
append=only_failed, version=self.version, report_skipped=report_skipped)
self.csv_report(filename + ".csv")
if json_report:
self.json_report(filename + ".json", append=only_failed, version=self.version)
if platform_reports:
self.target_report(outdir, suffix, append=only_failed)
if self.discards:
self.discard_report(filename + "_discard.csv")
if release:
self.csv_report(self.RELEASE_DATA)
def add_configurations(self):
for board_root in self.board_roots:
board_root = os.path.abspath(board_root)
logger.debug("Reading platform configuration files under %s..." %
board_root)
for file in glob.glob(os.path.join(board_root, "*", "*", "*.yaml")):
try:
platform = Platform()
platform.load(file)
if platform.name in [p.name for p in self.platforms]:
logger.error(f"Duplicate platform {platform.name} in {file}")
raise Exception(f"Duplicate platform identifier {platform.name} found")
if platform.twister:
self.platforms.append(platform)
if platform.default:
self.default_platforms.append(platform.name)
except RuntimeError as e:
logger.error("E: %s: can't load: %s" % (file, e))
self.load_errors += 1
self.platform_names = [p.name for p in self.platforms]
def get_all_tests(self):
tests = []
for _, tc in self.testcases.items():
for case in tc.cases:
tests.append(case)
return tests
@staticmethod
def get_toolchain():
toolchain_script = Path(ZEPHYR_BASE) / Path('cmake/modules/verify-toolchain.cmake')
result = CMake.run_cmake_script([toolchain_script, "FORMAT=json"])
try:
if result['returncode']:
raise TwisterRuntimeError(f"E: {result['returnmsg']}")
except Exception as e:
print(str(e))
sys.exit(2)
toolchain = json.loads(result['stdout'])['ZEPHYR_TOOLCHAIN_VARIANT']
logger.info(f"Using '{toolchain}' toolchain.")
return toolchain
def add_testcases(self, testcase_filter=[]):
for root in self.roots:
root = os.path.abspath(root)
logger.debug("Reading test case configuration files under %s..." % root)
for dirpath, _, filenames in os.walk(root, topdown=True):
if self.SAMPLE_FILENAME in filenames:
filename = self.SAMPLE_FILENAME
elif self.TESTCASE_FILENAME in filenames:
filename = self.TESTCASE_FILENAME
else:
continue
logger.debug("Found possible test case in " + dirpath)
tc_path = os.path.join(dirpath, filename)
try:
parsed_data = TwisterConfigParser(tc_path, self.tc_schema)
parsed_data.load()
tc_path = os.path.dirname(tc_path)
workdir = os.path.relpath(tc_path, root)
for name in parsed_data.tests.keys():
tc = TestCase(root, workdir, name)
tc_dict = parsed_data.get_test(name, self.testcase_valid_keys)
tc.source_dir = tc_path
tc.yamlfile = tc_path
tc.type = tc_dict["type"]
tc.tags = tc_dict["tags"]
tc.extra_args = tc_dict["extra_args"]
tc.extra_configs = tc_dict["extra_configs"]
tc.arch_allow = tc_dict["arch_allow"]
tc.arch_exclude = tc_dict["arch_exclude"]
tc.skip = tc_dict["skip"]
tc.platform_exclude = tc_dict["platform_exclude"]
tc.platform_allow = tc_dict["platform_allow"]
tc.toolchain_exclude = tc_dict["toolchain_exclude"]
tc.toolchain_allow = tc_dict["toolchain_allow"]
tc.tc_filter = tc_dict["filter"]
tc.timeout = tc_dict["timeout"]
tc.harness = tc_dict["harness"]
tc.harness_config = tc_dict["harness_config"]
if tc.harness == 'console' and not tc.harness_config:
raise Exception('Harness config error: console harness defined without a configuration.')
tc.build_only = tc_dict["build_only"]
tc.build_on_all = tc_dict["build_on_all"]
tc.slow = tc_dict["slow"]
tc.min_ram = tc_dict["min_ram"]
tc.depends_on = tc_dict["depends_on"]
tc.min_flash = tc_dict["min_flash"]
tc.extra_sections = tc_dict["extra_sections"]
tc.integration_platforms = tc_dict["integration_platforms"]
tc.parse_subcases(tc_path)
if testcase_filter:
if tc.name and tc.name in testcase_filter:
self.testcases[tc.name] = tc
else:
self.testcases[tc.name] = tc
except Exception as e:
logger.error("%s: can't load (skipping): %s" % (tc_path, e))
self.load_errors += 1
return len(self.testcases)
def get_platform(self, name):
selected_platform = None
for platform in self.platforms:
if platform.name == name:
selected_platform = platform
break
return selected_platform
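# A minimal, hypothetical quarantine yaml entry of the shape consumed by
# load_quarantine() below:
#
#   - scenarios:
#       - sample.basic
#     platforms:
#       - all
#     comment: "known flaky"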
def load_quarantine(self, file):
"""
Loads the quarantine list from the given yaml file and creates a dictionary
of all test configurations (platform + scenario: comment) that shall be
skipped due to quarantine.
"""
# Load yaml into quarantine_yaml
quarantine_yaml = scl.yaml_load_verify(file, self.quarantine_schema)
# Create quarantine_list with a product of the listed
# platforms and scenarios for each entry in quarantine yaml
quarantine_list = []
for quar_dict in quarantine_yaml:
if quar_dict['platforms'][0] == "all":
plat = self.platform_names
else:
plat = quar_dict['platforms']
comment = quar_dict.get('comment', "NA")
quarantine_list.append([{".".join([p, s]): comment}
for p in plat for s in quar_dict['scenarios']])
# Flatten the quarantine_list
quarantine_list = [it for sublist in quarantine_list for it in sublist]
# Change quarantine_list into a dictionary
for d in quarantine_list:
self.quarantine.update(d)
def load_from_file(self, file, filter_status=[], filter_platform=[]):
try:
with open(file, "r") as fp:
cr = csv.DictReader(fp)
instance_list = []
for row in cr:
if row["status"] in filter_status:
continue
test = row["test"]
platform = self.get_platform(row["platform"])
if filter_platform and platform.name not in filter_platform:
continue
instance = TestInstance(self.testcases[test], platform, self.outdir)
if self.device_testing:
tfilter = 'runnable'
else:
tfilter = 'buildable'
instance.run = instance.check_runnable(
self.enable_slow,
tfilter,
self.fixtures
)
instance.create_overlay(platform, self.enable_asan, self.enable_ubsan, self.enable_coverage, self.coverage_platform)
instance_list.append(instance)
self.add_instances(instance_list)
except KeyError as e:
logger.error("Key error while parsing tests file.({})".format(str(e)))
sys.exit(2)
except FileNotFoundError as e:
logger.error("Couldn't find input file with list of tests. ({})".format(e))
sys.exit(2)
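# apply_filters() builds the initial list of TestInstances from the selected
# platforms and test cases and records, per instance, the first reason it was
# discarded (platform/arch/tag/toolchain filters, RAM/flash limits, etc.).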
def apply_filters(self, **kwargs):
toolchain = self.get_toolchain()
discards = {}
platform_filter = kwargs.get('platform')
exclude_platform = kwargs.get('exclude_platform', [])
testcase_filter = kwargs.get('run_individual_tests', [])
arch_filter = kwargs.get('arch')
tag_filter = kwargs.get('tag')
exclude_tag = kwargs.get('exclude_tag')
all_filter = kwargs.get('all')
runnable = kwargs.get('runnable')
force_toolchain = kwargs.get('force_toolchain')
force_platform = kwargs.get('force_platform')
emu_filter = kwargs.get('emulation_only')
logger.debug("platform filter: " + str(platform_filter))
logger.debug(" arch_filter: " + str(arch_filter))
logger.debug(" tag_filter: " + str(tag_filter))
logger.debug(" exclude_tag: " + str(exclude_tag))
default_platforms = False
emulation_platforms = False
if all_filter:
logger.info("Selecting all possible platforms per test case")
# When --all used, any --platform arguments ignored
platform_filter = []
elif not platform_filter and not emu_filter:
logger.info("Selecting default platforms per test case")
default_platforms = True
elif emu_filter:
logger.info("Selecting emulation platforms per test case")
emulation_platforms = True
if platform_filter:
self.verify_platforms_existence(platform_filter, "platform_filter")
platforms = list(filter(lambda p: p.name in platform_filter, self.platforms))
elif emu_filter:
platforms = list(filter(lambda p: p.simulation != 'na', self.platforms))
elif arch_filter:
platforms = list(filter(lambda p: p.arch in arch_filter, self.platforms))
elif default_platforms:
platforms = list(filter(lambda p: p.default, self.platforms))
else:
platforms = self.platforms
logger.info("Building initial testcase list...")
for tc_name, tc in self.testcases.items():
if tc.build_on_all and not platform_filter:
platform_scope = self.platforms
elif tc.integration_platforms and self.integration:
self.verify_platforms_existence(
tc.integration_platforms, f"{tc_name} - integration_platforms")
platform_scope = list(filter(lambda item: item.name in tc.integration_platforms, \
self.platforms))
else:
platform_scope = platforms
integration = self.integration and tc.integration_platforms
# If there isn't any overlap between the platform_allow list and the platform_scope
# we set the scope to the platform_allow list
if tc.platform_allow and not platform_filter and not integration:
self.verify_platforms_existence(
tc.platform_allow, f"{tc_name} - platform_allow")
a = set(platform_scope)
b = set(filter(lambda item: item.name in tc.platform_allow, self.platforms))
c = a.intersection(b)
if not c:
platform_scope = list(filter(lambda item: item.name in tc.platform_allow, \
self.platforms))
# list of instances per testcase, aka configurations.
instance_list = []
for plat in platform_scope:
instance = TestInstance(tc, plat, self.outdir)
if runnable:
tfilter = 'runnable'
else:
tfilter = 'buildable'
instance.run = instance.check_runnable(
self.enable_slow,
tfilter,
self.fixtures
)
for t in tc.cases:
instance.results[t] = None
if runnable and self.duts:
for h in self.duts:
if h.platform == plat.name:
if tc.harness_config.get('fixture') in h.fixtures:
instance.run = True
if not force_platform and plat.name in exclude_platform:
discards[instance] = discards.get(instance, "Platform is excluded on command line.")
if (plat.arch == "unit") != (tc.type == "unit"):
# Discard silently
continue
if runnable and not instance.run:
discards[instance] = discards.get(instance, "Not runnable on device")
if self.integration and tc.integration_platforms and plat.name not in tc.integration_platforms:
discards[instance] = discards.get(instance, "Not part of integration platforms")
if tc.skip:
discards[instance] = discards.get(instance, "Skip filter")
if tag_filter and not tc.tags.intersection(tag_filter):
discards[instance] = discards.get(instance, "Command line testcase tag filter")
if exclude_tag and tc.tags.intersection(exclude_tag):
discards[instance] = discards.get(instance, "Command line testcase exclude filter")
if testcase_filter and tc_name not in testcase_filter:
discards[instance] = discards.get(instance, "Testcase name filter")
if arch_filter and plat.arch not in arch_filter:
discards[instance] = discards.get(instance, "Command line testcase arch filter")
if not force_platform:
if tc.arch_allow and plat.arch not in tc.arch_allow:
discards[instance] = discards.get(instance, "Not in test case arch allow list")
if tc.arch_exclude and plat.arch in tc.arch_exclude:
discards[instance] = discards.get(instance, "In test case arch exclude")
if tc.platform_exclude and plat.name in tc.platform_exclude:
discards[instance] = discards.get(instance, "In test case platform exclude")
if tc.toolchain_exclude and toolchain in tc.toolchain_exclude:
discards[instance] = discards.get(instance, "In test case toolchain exclude")
if platform_filter and plat.name not in platform_filter:
discards[instance] = discards.get(instance, "Command line platform filter")
if tc.platform_allow and plat.name not in tc.platform_allow:
discards[instance] = discards.get(instance, "Not in testcase platform allow list")
if tc.toolchain_allow and toolchain not in tc.toolchain_allow:
discards[instance] = discards.get(instance, "Not in testcase toolchain allow list")
if not plat.env_satisfied:
discards[instance] = discards.get(instance, "Environment ({}) not satisfied".format(", ".join(plat.env)))
if not force_toolchain \
and toolchain and (toolchain not in plat.supported_toolchains) \
and "host" not in plat.supported_toolchains \
and tc.type != 'unit':
discards[instance] = discards.get(instance, "Not supported by the toolchain")
if plat.ram < tc.min_ram:
discards[instance] = discards.get(instance, "Not enough RAM")
if tc.depends_on:
dep_intersection = tc.depends_on.intersection(set(plat.supported))
if dep_intersection != set(tc.depends_on):
discards[instance] = discards.get(instance, "No hardware support")
if plat.flash < tc.min_flash:
discards[instance] = discards.get(instance, "Not enough FLASH")
if set(plat.ignore_tags) & tc.tags:
discards[instance] = discards.get(instance, "Excluded tags per platform (exclude_tags)")
if plat.only_tags and not set(plat.only_tags) & tc.tags:
discards[instance] = discards.get(instance, "Excluded tags per platform (only_tags)")
test_configuration = ".".join([instance.platform.name,
instance.testcase.id])
# skip quarantined tests
if test_configuration in self.quarantine and not self.quarantine_verify:
discards[instance] = discards.get(instance,
f"Quarantine: {self.quarantine[test_configuration]}")
# run only quarantined test to verify their statuses (skip everything else)
if self.quarantine_verify and test_configuration not in self.quarantine:
discards[instance] = discards.get(instance, "Not under quarantine")
# if nothing stopped us until now, it means this configuration
# needs to be added.
instance_list.append(instance)
# no configurations, so jump to next testcase
if not instance_list:
continue
# if twister was launched with no platform options at all, we
# take all default platforms
if default_platforms and not tc.build_on_all and not integration:
if tc.platform_allow:
a = set(self.default_platforms)
b = set(tc.platform_allow)
c = a.intersection(b)
if c:
aa = list(filter(lambda tc: tc.platform.name in c, instance_list))
self.add_instances(aa)
else:
self.add_instances(instance_list)
else:
instances = list(filter(lambda tc: tc.platform.default, instance_list))
self.add_instances(instances)
elif integration:
instances = list(filter(lambda item: item.platform.name in tc.integration_platforms, instance_list))
self.add_instances(instances)
elif emulation_platforms:
self.add_instances(instance_list)
                for instance in list(filter(lambda inst: inst.platform.simulation == 'na', instance_list)):
discards[instance] = discards.get(instance, "Not an emulated platform")
else:
self.add_instances(instance_list)
for _, case in self.instances.items():
case.create_overlay(case.platform, self.enable_asan, self.enable_ubsan, self.enable_coverage, self.coverage_platform)
self.discards = discards
self.selected_platforms = set(p.platform.name for p in self.instances.values())
remove_from_discards = [] # configurations to be removed from discards.
for instance in self.discards:
instance.reason = self.discards[instance]
# If integration mode is on all skips on integration_platforms are treated as errors.
if self.integration and instance.platform.name in instance.testcase.integration_platforms \
and "Quarantine" not in instance.reason:
instance.status = "error"
instance.reason += " but is one of the integration platforms"
instance.fill_results_by_status()
self.instances[instance.name] = instance
# Such configuration has to be removed from discards to make sure it won't get skipped
remove_from_discards.append(instance)
else:
instance.status = "skipped"
instance.fill_results_by_status()
self.filtered_platforms = set(p.platform.name for p in self.instances.values()
if p.status != "skipped" )
# Remove from discards configurations that must not be discarded (e.g. integration_platforms when --integration was used)
for instance in remove_from_discards:
del self.discards[instance]
return discards
def add_instances(self, instance_list):
for instance in instance_list:
self.instances[instance.name] = instance
def add_tasks_to_queue(self, pipeline, build_only=False, test_only=False, retry_build_errors=False):
for instance in self.instances.values():
if build_only:
instance.run = False
no_retry_statuses = ['passed', 'skipped']
if not retry_build_errors:
no_retry_statuses.append("error")
if instance.status not in no_retry_statuses:
logger.debug(f"adding {instance.name}")
instance.status = None
if test_only and instance.run:
pipeline.put({"op": "run", "test": instance})
else:
pipeline.put({"op": "cmake", "test": instance})
def pipeline_mgr(self, pipeline, done_queue, lock, results):
while True:
try:
task = pipeline.get_nowait()
except queue.Empty:
break
else:
test = task['test']
pb = ProjectBuilder(self,
test,
lsan=self.enable_lsan,
asan=self.enable_asan,
ubsan=self.enable_ubsan,
coverage=self.enable_coverage,
extra_args=self.extra_args,
device_testing=self.device_testing,
cmake_only=self.cmake_only,
cleanup=self.cleanup,
valgrind=self.enable_valgrind,
inline_logs=self.inline_logs,
generator=self.generator,
generator_cmd=self.generator_cmd,
verbose=self.verbose,
warnings_as_errors=self.warnings_as_errors,
overflow_as_errors=self.overflow_as_errors
)
pb.process(pipeline, done_queue, task, lock, results)
return True
def execute(self, pipeline, done, results):
lock = Lock()
logger.info("Adding tasks to the queue...")
self.add_tasks_to_queue(pipeline, self.build_only, self.test_only,
retry_build_errors=self.retry_build_errors)
logger.info("Added initial list of jobs to queue")
processes = []
for job in range(self.jobs):
logger.debug(f"Launch process {job}")
p = Process(target=self.pipeline_mgr, args=(pipeline, done, lock, results, ))
processes.append(p)
p.start()
try:
for p in processes:
p.join()
except KeyboardInterrupt:
logger.info("Execution interrupted")
for p in processes:
p.terminate()
return results
def discard_report(self, filename):
try:
if not self.discards:
raise TwisterRuntimeError("apply_filters() hasn't been run!")
except Exception as e:
logger.error(str(e))
sys.exit(2)
with open(filename, "wt") as csvfile:
fieldnames = ["test", "arch", "platform", "reason"]
cw = csv.DictWriter(csvfile, fieldnames, lineterminator=os.linesep)
cw.writeheader()
for instance, reason in sorted(self.discards.items()):
rowdict = {"test": instance.testcase.name,
"arch": instance.platform.arch,
"platform": instance.platform.name,
"reason": reason}
cw.writerow(rowdict)
def target_report(self, outdir, suffix, append=False, report_skipped=True):
platforms = {inst.platform.name for _, inst in self.instances.items()}
for platform in platforms:
if suffix:
filename = os.path.join(outdir,"{}_{}.xml".format(platform, suffix))
else:
filename = os.path.join(outdir,"{}.xml".format(platform))
self.xunit_report(filename, platform, full_report=True,
append=append, version=self.version, report_skipped=report_skipped)
@staticmethod
def process_log(log_file):
filtered_string = ""
if os.path.exists(log_file):
with open(log_file, "rb") as f:
log = f.read().decode("utf-8")
filtered_string = ''.join(filter(lambda x: x in string.printable, log))
return filtered_string
def xunit_report(self, filename, platform=None, full_report=False, append=False, version="NA", report_skipped=True):
total = 0
fails = passes = errors = skips = 0
if platform:
selected = [platform]
logger.info(f"Writing target report for {platform}...")
else:
logger.info(f"Writing xunit report {filename}...")
selected = self.selected_platforms
if os.path.exists(filename) and append:
tree = ET.parse(filename)
eleTestsuites = tree.getroot()
else:
eleTestsuites = ET.Element('testsuites')
for p in selected:
inst = self.get_platform_instances(p)
fails = 0
passes = 0
errors = 0
skips = 0
duration = 0
eleTestsuite = None
if os.path.exists(filename) and append:
ts = eleTestsuites.findall(f'testsuite/[@name="{p}"]')
if ts:
eleTestsuite = ts[0]
else:
logger.info(f"Did not find any existing results for {p}")
for _, instance in inst.items():
handler_time = instance.metrics.get('handler_time', 0)
duration += handler_time
if full_report and instance.run:
for k in instance.results.keys():
if instance.results[k] == 'PASS':
passes += 1
elif instance.results[k] == 'BLOCK':
errors += 1
elif instance.results[k] == 'SKIP' or instance.status in ['skipped']:
if not eleTestsuite or not eleTestsuite.findall(f'testcase/[@name="{k}"]'):
skips += 1
else:
fails += 1
else:
if instance.status in ["error", "failed", "timeout", "flash_error"]:
if instance.reason in ['build_error', 'handler_crash']:
errors += 1
else:
fails += 1
elif instance.status == 'skipped':
skips += 1
elif instance.status == 'passed':
passes += 1
else:
if instance.status:
logger.error(f"{instance.name}: Unknown status {instance.status}")
else:
logger.error(f"{instance.name}: No status")
total = (errors + passes + fails + skips)
# do not produce a report if no tests were actually run (only built)
if total == 0:
continue
run = p
if not report_skipped and total == skips:
continue
# When we re-run the tests, we re-use the results and update only with
# the newly run tests.
if eleTestsuite:
eleTestsuite.attrib['failures'] = "%d" % fails
eleTestsuite.attrib['errors'] = "%d" % errors
eleTestsuite.attrib['skipped'] = "%d" % (skips + int(eleTestsuite.attrib['skipped']))
else:
eleTestsuite = ET.SubElement(eleTestsuites, 'testsuite',
name=run, time="%f" % duration,
tests="%d" % (total),
failures="%d" % fails,
errors="%d" % (errors), skipped="%s" % (skips))
eleTSPropetries = ET.SubElement(eleTestsuite, 'properties')
                # Multiple 'property' elements can be added to 'properties',
                # differing by name and value
ET.SubElement(eleTSPropetries, 'property', name="version", value=version)
for _, instance in inst.items():
if instance.status == 'skipped' and not report_skipped:
continue
if full_report:
tname = os.path.basename(instance.testcase.name)
else:
tname = instance.testcase.id
handler_time = instance.metrics.get('handler_time', 0)
if full_report:
for k in instance.results.keys():
                        # remove testcases that are being re-run from existing reports
for tc in eleTestsuite.findall(f'testcase/[@name="{k}"]'):
eleTestsuite.remove(tc)
classname = ".".join(tname.split(".")[:2])
eleTestcase = ET.SubElement(
eleTestsuite, 'testcase',
classname=classname,
name="%s" % (k), time="%f" % handler_time)
if instance.results[k] in ['FAIL', 'BLOCK'] or \
(not instance.run and instance.status in ["error", "failed", "timeout"]):
if instance.results[k] == 'FAIL':
el = ET.SubElement(
eleTestcase,
'failure',
type="failure",
message="failed")
else:
el = ET.SubElement(
eleTestcase,
'error',
type="failure",
message=instance.reason)
log_root = os.path.join(self.outdir, instance.platform.name, instance.testcase.name)
log_file = os.path.join(log_root, "handler.log")
el.text = self.process_log(log_file)
elif instance.results[k] == 'PASS' \
or (not instance.run and instance.status in ["passed"]):
pass
elif instance.results[k] == 'SKIP' or (instance.status in ["skipped"]):
el = ET.SubElement(eleTestcase, 'skipped', type="skipped", message=instance.reason)
else:
el = ET.SubElement(
eleTestcase,
'error',
type="error",
message=f"{instance.reason}")
else:
if platform:
classname = ".".join(instance.testcase.name.split(".")[:2])
else:
classname = p + ":" + ".".join(instance.testcase.name.split(".")[:2])
                    # remove testcases that are being re-run from existing reports
for tc in eleTestsuite.findall(f'testcase/[@classname="{classname}"][@name="{instance.testcase.name}"]'):
eleTestsuite.remove(tc)
eleTestcase = ET.SubElement(eleTestsuite, 'testcase',
classname=classname,
name="%s" % (instance.testcase.name),
time="%f" % handler_time)
if instance.status in ["error", "failed", "timeout", "flash_error"]:
failure = ET.SubElement(
eleTestcase,
'failure',
type="failure",
message=instance.reason)
log_root = ("%s/%s/%s" % (self.outdir, instance.platform.name, instance.testcase.name))
bl = os.path.join(log_root, "build.log")
hl = os.path.join(log_root, "handler.log")
log_file = bl
if instance.reason != 'Build error':
if os.path.exists(hl):
log_file = hl
else:
log_file = bl
failure.text = self.process_log(log_file)
elif instance.status == "skipped":
ET.SubElement(eleTestcase, 'skipped', type="skipped", message="Skipped")
result = ET.tostring(eleTestsuites)
with open(filename, 'wb') as report:
report.write(result)
return fails, passes, errors, skips
def csv_report(self, filename):
with open(filename, "wt") as csvfile:
fieldnames = ["test", "arch", "platform", "status",
"extra_args", "handler", "handler_time", "ram_size",
"rom_size"]
cw = csv.DictWriter(csvfile, fieldnames, lineterminator=os.linesep)
cw.writeheader()
for instance in self.instances.values():
rowdict = {"test": instance.testcase.name,
"arch": instance.platform.arch,
"platform": instance.platform.name,
"extra_args": " ".join(instance.testcase.extra_args),
"handler": instance.platform.simulation}
rowdict["status"] = instance.status
if instance.status not in ["error", "failed", "timeout"]:
if instance.handler:
rowdict["handler_time"] = instance.metrics.get("handler_time", 0)
ram_size = instance.metrics.get("ram_size", 0)
rom_size = instance.metrics.get("rom_size", 0)
rowdict["ram_size"] = ram_size
rowdict["rom_size"] = rom_size
cw.writerow(rowdict)
def json_report(self, filename, append=False, version="NA"):
logger.info(f"Writing JSON report {filename}")
report = {}
selected = self.selected_platforms
report["environment"] = {"os": os.name,
"zephyr_version": version,
"toolchain": self.get_toolchain()
}
json_data = {}
if os.path.exists(filename) and append:
with open(filename, 'r') as json_file:
json_data = json.load(json_file)
suites = json_data.get("testsuites", [])
if suites:
suite = suites[0]
testcases = suite.get("testcases", [])
else:
suite = {}
testcases = []
for p in selected:
inst = self.get_platform_instances(p)
for _, instance in inst.items():
testcase = {}
handler_log = os.path.join(instance.build_dir, "handler.log")
build_log = os.path.join(instance.build_dir, "build.log")
device_log = os.path.join(instance.build_dir, "device.log")
handler_time = instance.metrics.get('handler_time', 0)
ram_size = instance.metrics.get ("ram_size", 0)
rom_size = instance.metrics.get("rom_size",0)
for k in instance.results.keys():
testcases = list(filter(lambda d: not (d.get('testcase') == k and d.get('platform') == p), testcases ))
testcase = {"testcase": k,
"arch": instance.platform.arch,
"platform": p,
}
if ram_size:
testcase["ram_size"] = ram_size
if rom_size:
testcase["rom_size"] = rom_size
if instance.results[k] in ["SKIP"] or instance.status == 'skipped':
testcase["status"] = "skipped"
testcase["reason"] = instance.reason
elif instance.results[k] in ["PASS"] or instance.status == 'passed':
testcase["status"] = "passed"
if instance.handler:
testcase["execution_time"] = handler_time
elif instance.results[k] in ['FAIL', 'BLOCK'] or instance.status in ["error", "failed", "timeout", "flash_error"]:
testcase["status"] = "failed"
testcase["reason"] = instance.reason
testcase["execution_time"] = handler_time
if os.path.exists(handler_log):
testcase["test_output"] = self.process_log(handler_log)
elif os.path.exists(device_log):
testcase["device_log"] = self.process_log(device_log)
else:
testcase["build_log"] = self.process_log(build_log)
testcases.append(testcase)
suites = [ {"testcases": testcases} ]
report["testsuites"] = suites
with open(filename, "wt") as json_file:
json.dump(report, json_file, indent=4, separators=(',',':'))
def get_testcase(self, identifier):
results = []
for _, tc in self.testcases.items():
for case in tc.cases:
if case == identifier:
results.append(tc)
return results
def verify_platforms_existence(self, platform_names_to_verify, log_info=""):
"""
        Verify that each platform name (passed via the --platform option, or in a
        yaml file as platform_allow or integration_platforms) is recognized. If
        not, log an error and exit.
"""
        for platform in platform_names_to_verify:
            if platform not in self.platform_names:
                logger.error(f"{log_info} - unrecognized platform - {platform}")
                sys.exit(2)
def create_build_dir_links(self):
"""
        Iterate through all non-skipped instances in the suite and create a link
        for each instance's build directory. Those links will be passed to the
        CMake command in the next steps.
"""
links_dir_name = "twister_links" # folder for all links
links_dir_path = os.path.join(self.outdir, links_dir_name)
if not os.path.exists(links_dir_path):
os.mkdir(links_dir_path)
for instance in self.instances.values():
if instance.status != "skipped":
self._create_build_dir_link(links_dir_path, instance)
def _create_build_dir_link(self, links_dir_path, instance):
"""
        Create the build directory at its original "long" path, then create a
        link with a shorter path that points to it, and finally replace
        build_dir with the created link. This link will be passed to the CMake
        command. This helps to limit path lengths, which can become significant
        when building with CMake on Windows.
"""
os.makedirs(instance.build_dir, exist_ok=True)
link_name = f"test_{self.link_dir_counter}"
link_path = os.path.join(links_dir_path, link_name)
if os.name == "nt": # if OS is Windows
command = ["mklink", "/J", f"{link_path}", f"{instance.build_dir}"]
subprocess.call(command, shell=True)
else: # for Linux and MAC OS
os.symlink(instance.build_dir, link_path)
# Here original build directory is replaced with symbolic link. It will
# be passed to CMake command
instance.build_dir = link_path
self.link_dir_counter += 1
class CoverageTool:
""" Base class for every supported coverage tool
"""
def __init__(self):
self.gcov_tool = None
self.base_dir = None
@staticmethod
def factory(tool):
if tool == 'lcov':
t = Lcov()
elif tool == 'gcovr':
t = Gcovr()
else:
logger.error("Unsupported coverage tool specified: {}".format(tool))
return None
logger.debug(f"Select {tool} as the coverage tool...")
return t
@staticmethod
def retrieve_gcov_data(input_file):
logger.debug("Working on %s" % input_file)
extracted_coverage_info = {}
capture_data = False
capture_complete = False
with open(input_file, 'r') as fp:
for line in fp.readlines():
if re.search("GCOV_COVERAGE_DUMP_START", line):
capture_data = True
continue
if re.search("GCOV_COVERAGE_DUMP_END", line):
capture_complete = True
break
# Loop until the coverage data is found.
if not capture_data:
continue
if line.startswith("*"):
sp = line.split("<")
if len(sp) > 1:
# Remove the leading delimiter "*"
file_name = sp[0][1:]
# Remove the trailing new line char
hex_dump = sp[1][:-1]
else:
continue
else:
continue
extracted_coverage_info.update({file_name: hex_dump})
if not capture_data:
capture_complete = True
return {'complete': capture_complete, 'data': extracted_coverage_info}
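    # Sketch of the dump format expected in a handler.log (file path and hex
    # payload are illustrative):
    #   GCOV_COVERAGE_DUMP_START
    #   */path/to/build/zephyr/main.gcda<01000000a1b2c3d4...
    #   GCOV_COVERAGE_DUMP_END
    # Each "*" line yields one {file_name: hex_dump} entry in the returned data.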
@staticmethod
def create_gcda_files(extracted_coverage_info):
logger.debug("Generating gcda files")
for filename, hexdump_val in extracted_coverage_info.items():
            # gcovr fails if kobject_hash is included in the coverage data,
            # so skip it (the problem only occurs in gcovr v4.1)
if "kobject_hash" in filename:
filename = (filename[:-4]) + "gcno"
try:
os.remove(filename)
except Exception:
pass
continue
with open(filename, 'wb') as fp:
fp.write(bytes.fromhex(hexdump_val))
def generate(self, outdir):
for filename in glob.glob("%s/**/handler.log" % outdir, recursive=True):
gcov_data = self.__class__.retrieve_gcov_data(filename)
capture_complete = gcov_data['complete']
extracted_coverage_info = gcov_data['data']
if capture_complete:
self.__class__.create_gcda_files(extracted_coverage_info)
logger.debug("Gcov data captured: {}".format(filename))
else:
logger.error("Gcov data capture incomplete: {}".format(filename))
with open(os.path.join(outdir, "coverage.log"), "a") as coveragelog:
ret = self._generate(outdir, coveragelog)
if ret == 0:
logger.info("HTML report generated: {}".format(
os.path.join(outdir, "coverage", "index.html")))
class Lcov(CoverageTool):
def __init__(self):
super().__init__()
self.ignores = []
def add_ignore_file(self, pattern):
self.ignores.append('*' + pattern + '*')
def add_ignore_directory(self, pattern):
self.ignores.append('*/' + pattern + '/*')
def _generate(self, outdir, coveragelog):
coveragefile = os.path.join(outdir, "coverage.info")
ztestfile = os.path.join(outdir, "ztest.info")
cmd = ["lcov", "--gcov-tool", self.gcov_tool,
"--capture", "--directory", outdir,
"--rc", "lcov_branch_coverage=1",
"--output-file", coveragefile]
cmd_str = " ".join(cmd)
logger.debug(f"Running {cmd_str}...")
subprocess.call(cmd, stdout=coveragelog)
# We want to remove tests/* and tests/ztest/test/* but save tests/ztest
subprocess.call(["lcov", "--gcov-tool", self.gcov_tool, "--extract",
coveragefile,
os.path.join(self.base_dir, "tests", "ztest", "*"),
"--output-file", ztestfile,
"--rc", "lcov_branch_coverage=1"], stdout=coveragelog)
if os.path.exists(ztestfile) and os.path.getsize(ztestfile) > 0:
subprocess.call(["lcov", "--gcov-tool", self.gcov_tool, "--remove",
ztestfile,
os.path.join(self.base_dir, "tests/ztest/test/*"),
"--output-file", ztestfile,
"--rc", "lcov_branch_coverage=1"],
stdout=coveragelog)
files = [coveragefile, ztestfile]
else:
files = [coveragefile]
for i in self.ignores:
subprocess.call(
["lcov", "--gcov-tool", self.gcov_tool, "--remove",
coveragefile, i, "--output-file",
coveragefile, "--rc", "lcov_branch_coverage=1"],
stdout=coveragelog)
# The --ignore-errors source option is added to avoid it exiting due to
# samples/application_development/external_lib/
return subprocess.call(["genhtml", "--legend", "--branch-coverage",
"--ignore-errors", "source",
"-output-directory",
os.path.join(outdir, "coverage")] + files,
stdout=coveragelog)
class Gcovr(CoverageTool):
def __init__(self):
super().__init__()
self.ignores = []
def add_ignore_file(self, pattern):
self.ignores.append('.*' + pattern + '.*')
def add_ignore_directory(self, pattern):
self.ignores.append(".*/" + pattern + '/.*')
@staticmethod
def _interleave_list(prefix, list):
tuple_list = [(prefix, item) for item in list]
return [item for sublist in tuple_list for item in sublist]
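    # For example, _interleave_list("-e", [".*foo.*", ".*bar.*"]) returns
    # ["-e", ".*foo.*", "-e", ".*bar.*"], i.e. the prefix is repeated before
    # every item so the result can be passed straight to the gcovr command line.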
def _generate(self, outdir, coveragelog):
coveragefile = os.path.join(outdir, "coverage.json")
ztestfile = os.path.join(outdir, "ztest.json")
excludes = Gcovr._interleave_list("-e", self.ignores)
# We want to remove tests/* and tests/ztest/test/* but save tests/ztest
cmd = ["gcovr", "-r", self.base_dir, "--gcov-executable",
self.gcov_tool, "-e", "tests/*"] + excludes + ["--json", "-o",
coveragefile, outdir]
cmd_str = " ".join(cmd)
logger.debug(f"Running {cmd_str}...")
subprocess.call(cmd, stdout=coveragelog)
subprocess.call(["gcovr", "-r", self.base_dir, "--gcov-executable",
self.gcov_tool, "-f", "tests/ztest", "-e",
"tests/ztest/test/*", "--json", "-o", ztestfile,
outdir], stdout=coveragelog)
if os.path.exists(ztestfile) and os.path.getsize(ztestfile) > 0:
files = [coveragefile, ztestfile]
else:
files = [coveragefile]
subdir = os.path.join(outdir, "coverage")
os.makedirs(subdir, exist_ok=True)
tracefiles = self._interleave_list("--add-tracefile", files)
return subprocess.call(["gcovr", "-r", self.base_dir, "--html",
"--html-details"] + tracefiles +
["-o", os.path.join(subdir, "index.html")],
stdout=coveragelog)
class DUT(object):
def __init__(self,
id=None,
serial=None,
serial_baud=None,
platform=None,
product=None,
serial_pty=None,
connected=False,
pre_script=None,
post_script=None,
post_flash_script=None,
runner=None):
self.serial = serial
self.baud = serial_baud or 115200
self.platform = platform
self.serial_pty = serial_pty
self._counter = Value("i", 0)
self._available = Value("i", 1)
self.connected = connected
self.pre_script = pre_script
self.id = id
self.product = product
self.runner = runner
self.fixtures = []
self.post_flash_script = post_flash_script
self.post_script = post_script
self.pre_script = pre_script
self.probe_id = None
self.notes = None
self.lock = Lock()
self.match = False
@property
def available(self):
with self._available.get_lock():
return self._available.value
@available.setter
def available(self, value):
with self._available.get_lock():
self._available.value = value
@property
def counter(self):
with self._counter.get_lock():
return self._counter.value
@counter.setter
def counter(self, value):
with self._counter.get_lock():
self._counter.value = value
def to_dict(self):
d = {}
exclude = ['_available', '_counter', 'match']
v = vars(self)
for k in v.keys():
if k not in exclude and v[k]:
d[k] = v[k]
return d
def __repr__(self):
return f"<{self.platform} ({self.product}) on {self.serial}>"
class HardwareMap:
schema_path = os.path.join(ZEPHYR_BASE, "scripts", "schemas", "twister", "hwmap-schema.yaml")
manufacturer = [
'ARM',
'SEGGER',
'MBED',
'STMicroelectronics',
'Atmel Corp.',
'Texas Instruments',
'Silicon Labs',
'NXP Semiconductors',
'Microchip Technology Inc.',
'FTDI',
'Digilent'
]
runner_mapping = {
'pyocd': [
'DAPLink CMSIS-DAP',
'MBED CMSIS-DAP'
],
'jlink': [
'J-Link',
'J-Link OB'
],
'openocd': [
'STM32 STLink', '^XDS110.*', 'STLINK-V3'
],
'dediprog': [
'TTL232R-3V3',
'MCP2200 USB Serial Port Emulator'
]
}
def __init__(self):
self.detected = []
self.duts = []
def add_device(self, serial, platform, pre_script, is_pty, baud=None):
device = DUT(platform=platform, connected=True, pre_script=pre_script, serial_baud=baud)
if is_pty:
device.serial_pty = serial
else:
device.serial = serial
self.duts.append(device)
def load(self, map_file):
hwm_schema = scl.yaml_load(self.schema_path)
duts = scl.yaml_load_verify(map_file, hwm_schema)
for dut in duts:
pre_script = dut.get('pre_script')
post_script = dut.get('post_script')
post_flash_script = dut.get('post_flash_script')
platform = dut.get('platform')
id = dut.get('id')
runner = dut.get('runner')
serial = dut.get('serial')
baud = dut.get('baud', None)
product = dut.get('product')
fixtures = dut.get('fixtures', [])
new_dut = DUT(platform=platform,
product=product,
runner=runner,
id=id,
serial=serial,
serial_baud=baud,
connected=serial is not None,
pre_script=pre_script,
post_script=post_script,
post_flash_script=post_flash_script)
new_dut.fixtures = fixtures
new_dut.counter = 0
self.duts.append(new_dut)
def scan(self, persistent=False):
from serial.tools import list_ports
if persistent and platform.system() == 'Linux':
# On Linux, /dev/serial/by-id provides symlinks to
# '/dev/ttyACMx' nodes using names which are unique as
# long as manufacturers fill out USB metadata nicely.
#
# This creates a map from '/dev/ttyACMx' device nodes
# to '/dev/serial/by-id/usb-...' symlinks. The symlinks
# go into the hardware map because they stay the same
# even when the user unplugs / replugs the device.
#
# Some inexpensive USB/serial adapters don't result
# in unique names here, though, so use of this feature
# requires explicitly setting persistent=True.
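            # For example (hypothetical device), the resulting persistent_map could
            # contain an entry such as:
            #   {'/dev/ttyACM0': '/dev/serial/by-id/usb-SEGGER_J-Link_000683123456-if00'}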
by_id = Path('/dev/serial/by-id')
def readlink(link):
return str((by_id / link).resolve())
persistent_map = {readlink(link): str(link)
for link in by_id.iterdir()}
else:
persistent_map = {}
serial_devices = list_ports.comports()
logger.info("Scanning connected hardware...")
for d in serial_devices:
if d.manufacturer in self.manufacturer:
# TI XDS110 can have multiple serial devices for a single board
# assume endpoint 0 is the serial, skip all others
if d.manufacturer == 'Texas Instruments' and not d.location.endswith('0'):
continue
s_dev = DUT(platform="unknown",
id=d.serial_number,
serial=persistent_map.get(d.device, d.device),
product=d.product,
runner='unknown',
connected=True)
for runner, _ in self.runner_mapping.items():
products = self.runner_mapping.get(runner)
if d.product in products:
s_dev.runner = runner
continue
# Try regex matching
for p in products:
if re.match(p, d.product):
s_dev.runner = runner
s_dev.connected = True
s_dev.lock = None
self.detected.append(s_dev)
else:
logger.warning("Unsupported device (%s): %s" % (d.manufacturer, d))
def save(self, hwm_file):
# use existing map
self.detected.sort(key=lambda x: x.serial or '')
if os.path.exists(hwm_file):
with open(hwm_file, 'r') as yaml_file:
hwm = yaml.load(yaml_file, Loader=SafeLoader)
if hwm:
hwm.sort(key=lambda x: x['serial'] or '')
# disconnect everything
for h in hwm:
h['connected'] = False
h['serial'] = None
for _detected in self.detected:
for h in hwm:
if _detected.id == h['id'] and _detected.product == h['product'] and not _detected.match:
h['connected'] = True
h['serial'] = _detected.serial
_detected.match = True
new_duts = list(filter(lambda d: not d.match, self.detected))
new = []
for d in new_duts:
new.append(d.to_dict())
if hwm:
hwm = hwm + new
else:
hwm = new
with open(hwm_file, 'w') as yaml_file:
yaml.dump(hwm, yaml_file, Dumper=Dumper, default_flow_style=False)
self.load(hwm_file)
logger.info("Registered devices:")
self.dump()
else:
# create new file
dl = []
for _connected in self.detected:
platform = _connected.platform
id = _connected.id
runner = _connected.runner
serial = _connected.serial
product = _connected.product
d = {
'platform': platform,
'id': id,
'runner': runner,
'serial': serial,
'product': product,
'connected': _connected.connected
}
dl.append(d)
with open(hwm_file, 'w') as yaml_file:
yaml.dump(dl, yaml_file, Dumper=Dumper, default_flow_style=False)
logger.info("Detected devices:")
self.dump(detected=True)
def dump(self, filtered=[], header=[], connected_only=False, detected=False):
print("")
table = []
if detected:
to_show = self.detected
else:
to_show = self.duts
if not header:
header = ["Platform", "ID", "Serial device"]
for p in to_show:
platform = p.platform
connected = p.connected
if filtered and platform not in filtered:
continue
if not connected_only or connected:
table.append([platform, p.id, p.serial])
print(tabulate(table, headers=header, tablefmt="github"))
|
callback_test.py
|
import threading
import time
from queue import Empty
import multiprocessing
import cv2
class test:
def __init__(self):
self.callbacks = []
def start(self):
self.queue = multiprocessing.Queue()
        self.t = threading.Thread(target=self.daemon)
        self.t.daemon = True
self.t.start()
p = multiprocessing.Process(target=self.read_video, args=(self.queue,))
p.start()
def add_callback(self,cb):
self.callbacks.append(cb)
    def daemon(self):
while True:
img = None
try:
img = self.queue.get(False)
except Empty:
img = None
if img is not None:
for cb in self.callbacks:
cb(img)
time.sleep(0.01)
def read_video(self, queue):
vid = cv2.VideoCapture(0)
while True:
ret,img = vid.read()
if ret:
queue.put(img)
def foo(x):
#cv2.imshow("test",x)
print("got {}".format(x.shape))
t = test()
t.add_callback(foo)
t.start()
while True:
print("waiting...")
time.sleep(1)
|
tensorflow serv.py
|
#
# Server in Python
# Binds REP socket to tcp://*:3001
# Expects input from client to reply with something
#
import time
import zmq
import numpy as np
import pandas as pd
import math
import os
import sys
import json
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
# Keras stuff
from keras.models import Sequential
from keras.layers import Dense
from keras import optimizers
from keras import callbacks
import keras.backend as K
# Sklearn stuff
from sklearn.metrics import r2_score
from sklearn.metrics import mean_squared_error
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import MinMaxScaler, StandardScaler
def processParams(params):
allParams = params.split("&")
loss = ''
optimizer = ''
learningRate = 0
epochs = 0
batchSize = 0
testSize = 0
layers = []
for param in allParams:
if "loss" in param:
loss = param.split("=")[1]
elif "optimizer" in param:
optimizer = param.split("=")[1]
elif "learningRate" in param:
learningRate = param.split("=")[1]
elif "epochs" in param:
epochs = param.split("=")[1]
elif "batchSize" in param:
batchSize = param.split("=")[1]
elif "testSize" in param:
testSize = param.split("=")[1]
elif "layers" in param:
tmp = param[param.index('=')+1:]
tmp = tmp.replace('[', '')
tmp = tmp.replace(']', '')
tmpLayers = tmp.split(';')
for tmpLayer in tmpLayers:
tmpValues = tmpLayer.split(',')
toAdd = []
for tmpValue in tmpValues:
toAdd.append(tmpValue.split("=")[1])
layers.append(toAdd)
return {'loss':loss, 'optimizer':optimizer, 'learningRate':learningRate, 'epochs':epochs, 'batchSize':batchSize, 'testSize':testSize, 'layers':layers}
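# Illustrative request string (the key names inside each layer entry are
# hypothetical; only the values after '=' are kept by the parser above):
#   "loss=mse&optimizer=adam&learningRate=0.001&epochs=50&batchSize=32&testSize=0.2"
#   "&layers=[isInput=1,neurons=64,activation=relu;isInput=0,neurons=1,activation=linear]"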
def buildModel(params, inputShape, outputShape):
# Adjusting format of inputShape
inputShape = (inputShape,)
model = Sequential()
for layer in params['layers']:
neurons = int(layer[1])
activationFunc = layer[2]
        if layer[0] == '1':  # parsed values are strings, so compare against '1'
model.add(Dense(neurons, input_shape=inputShape, activation=activationFunc, kernel_initializer='lecun_uniform'))
else:
model.add(Dense(neurons, activation=activationFunc, kernel_initializer='lecun_uniform'))
model.add(Dense(outputShape, kernel_initializer='lecun_uniform'))
model.compile(optimizer=getOptimizer(params), loss=params['loss'])
return model
def getOptimizer(params):
opt = None
optName = params['optimizer'].lower()
learningRate = float(params['learningRate'])
if optName == 'sgd':
opt = optimizers.SGD(lr=learningRate)
elif optName == 'rmsprop':
opt = optimizers.RMSprop(lr=learningRate)
elif optName == 'adagrad':
opt = optimizers.Adagrad(lr=learningRate)
elif optName == 'adadelta':
opt = optimizers.Adadelta(lr=learningRate)
elif optName == 'adam':
opt = optimizers.Adam(lr=learningRate)
elif optName == 'adamax':
opt = optimizers.Adamax(lr=learningRate)
elif optName == 'nadam':
opt = optimizers.Nadam(lr=learningRate)
return opt
def organizeData(testSize, sc, sc2):
# Get the dataset and prepare it for analysis and model
df = pd.read_csv('suncor_full.csv')
dataset=df.values[:,:]
features=dataset.shape[1]
# Create training and testing data
X = df.iloc[:, 1:features-1].values
y = df.iloc[:, features-1].values
split=int(X.shape[0]*(1-testSize))
print(split)
X_train=X[:split,:]
X_test=X[split:,:]
y_train=y[:split]
y_test=y[split:]
#X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=testSize, random_state=0)
# Normalize the dataset with sc and sc2 (MinMaxScalers)
X_train = sc.fit_transform(X_train)
X_test = sc.transform(X_test)
y_train = np.reshape(y_train, (y_train.shape[0], 1))
y_test = np.reshape(y_test, (y_test.shape[0], 1))
y_train = sc2.fit_transform(y_train)
y_test = sc2.transform(y_test)
#print('Train size: (%d x %d)'%(X_train.shape[0], X_train.shape[1]))
#print('Test size: (%d x %d)'%(X_test.shape[0], X_test.shape[1]))
return X_train, X_test, y_train, y_test
def trainModel(params):
# Organize data
testSize = float(params['testSize'])
# Normalize the dataset
sc = MinMaxScaler()
sc2 = MinMaxScaler()
#sc2 = StandardScaler()
X_train, X_test, y_train, y_test = organizeData(testSize, sc, sc2)
"""
    # Using clear_session() may result in unexpected behavior.
# For instance, after building and training the 6th model the program would just crash without throwing an exception.
K.clear_session()
"""
# Build model
model = buildModel(params, X_train.shape[1], y_train.shape[1])
# Train model
# Fitting the ANN to the training set
batchSize = int(params['batchSize'])
epochsNum = int(params['epochs'])
model.fit(X_train, y_train, batch_size=batchSize, epochs=epochsNum, verbose=0)
"""
#keras.callbacks.LambdaCallback(on_epoch_begin=None, on_epoch_end=None, on_batch_begin=None, on_batch_end=None, on_train_begin=None, on_train_end=None)
#https://keras.io/callbacks/
class Callback(callbacks.Callback):
def on_epoch_end(self, batch, logs={}):
print('There you go!')
#self.stopped_epoch = epoch
#self.model.stop_training = True
# Defining a callback
callbackTest = Callback()
model.fit(X_train, y_train, batch_size=batchSize, epochs=epochsNum, verbose=1, callbacks=[callbackTest])
"""
# Test model
y_pred = model.predict(X_test)
y_pred = sc2.inverse_transform(y_pred)
y_test = sc2.inverse_transform(y_test)
# Scalar test loss
#score = model.evaluate(X_test, y_test, verbose=1)
#print(score)
#https://en.wikipedia.org/wiki/Coefficient_of_determination
rSquared = r2_score(y_test, y_pred)
rmse = math.sqrt(mean_squared_error(y_test, y_pred))
print('R-Squared: %f' % rSquared)
print('RMSE: %f' % rmse)
# Converting from numpy arrays to list to allow json creation
return {'values':y_test.tolist(), 'predicted':y_pred.tolist(), 'r-squared':rSquared, 'rmse':rmse}
context = zmq.Context()
socket = context.socket(zmq.REP)
socket.bind("tcp://*:3001")
print("Server awaiting for requests on port 3001.")
while True:
try:
# Wait for next request from client
message = socket.recv()
print('> Received request. <')
procParams = processParams(message.decode('utf-8'))
results = trainModel(procParams)
# Converting dictionary to json
jsonResults = json.dumps(results)
# Send reply back to client
socket.send_string(jsonResults)
#socket.send(b"Done")
print('> Response sent! <')
except Exception as e:
#e = sys.exc_info()[0]
print(e)
socket.send_string('Error!')
"""
multiprocessing
from multiprocessing import Process, Queue
def run_in_separate_process(method, args):
def queue_wrapper(q, params):
r = method(*params)
q.put(r)
q = Queue()
p = Process(target=queue_wrapper, args=(q, args))
p.start()
return_val = q.get()
p.join()
return return_val
"""
|
csharpserver.py
|
from __future__ import print_function
import os
from empire.server.common.plugins import Plugin
import empire.server.common.helpers as helpers
import subprocess
import time
import socket
import base64
class Plugin(Plugin):
description = "Empire C# server plugin."
def onLoad(self):
print(helpers.color("[*] Loading Empire C# server plugin"))
self.main_menu = None
self.csharpserver_proc = None
self.info = {
'Name': 'csharpserver',
'Author': ['@Cx01N'],
'Description': ('Empire C# server for agents.'),
'Software': '',
'Techniques': [''],
'Comments': []
},
self.options = {
'status': {
'Description': 'Start/stop the Empire C# server.',
'Required': True,
'Value': 'start',
'SuggestedValues': ['start', 'stop'],
'Strict': True
}
}
self.tcp_ip = '127.0.0.1'
self.tcp_port = 2012
self.status = 'OFF'
def execute(self, command):
        # This is for parsing commands sent through the API
try:
            # essentially a switch that selects the proper command to execute
self.options['status']['Value'] = command['status']
results = self.do_csharpserver('')
return results
except Exception as e:
print(e)
self.main_menu.plugin_socketio_message(self.info[0]['Name'], f'[!] {e}')
return False
def get_commands(self):
return self.commands
def register(self, mainMenu):
"""
any modifications to the mainMenu go here - e.g.
registering functions to be run by user commands
"""
mainMenu.__class__.do_csharpserver = self.do_csharpserver
self.installPath = mainMenu.installPath
self.main_menu = mainMenu
def do_csharpserver(self, *args):
"""
        Start or stop the Empire C# server, or report whether it is currently running.
"""
if len(args[0]) > 0:
self.start = args[0]
else:
self.start = self.options['status']['Value']
        if not self.csharpserver_proc or self.csharpserver_proc.poll() is not None:
self.status = "OFF"
else:
self.status = "ON"
if not args:
self.main_menu.plugin_socketio_message(self.info[0]['Name'],
"[*] Empire C# server is currently: %s" % self.status)
self.main_menu.plugin_socketio_message(self.info[0]['Name'],
"[!] Empire C# <start|stop> <port>")
elif self.start == "stop":
if self.status == "ON":
self.csharpserver_proc.kill()
self.main_menu.plugin_socketio_message(self.info[0]['Name'],
"[*] Stopping Empire C# server")
self.status = "OFF"
else:
self.main_menu.plugin_socketio_message(self.info[0]['Name'],
"[!] Empire C# server is already stopped")
elif self.start == "start":
if self.status == "OFF":
# Will need to update this as we finalize the folder structure
server_dll = self.installPath + "/csharp/Covenant/bin/Debug/netcoreapp3.1/EmpireCompiler.dll"
# If dll hasn't been built yet
if not os.path.exists(server_dll):
csharp_cmd = ["dotnet", "build", self.installPath + "/csharp/"]
self.csharpserverbuild_proc = subprocess.Popen(csharp_cmd)
time.sleep(10)
self.csharpserverbuild_proc.kill()
self.main_menu.plugin_socketio_message(self.info[0]['Name'],
"[*] Starting Empire C# server")
csharp_cmd = ["dotnet",
self.installPath + "/csharp/Covenant/bin/Debug/netcoreapp3.1/EmpireCompiler.dll"]
self.csharpserver_proc = subprocess.Popen(csharp_cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
self.status = "ON"
else:
self.main_menu.plugin_socketio_message(self.info[0]['Name'],
"[!] Empire C# server is already started")
            self.thread = helpers.KThread(target=self.thread_csharp_responses, args=())
            self.thread.daemon = True
            self.thread.start()
def thread_csharp_responses(self):
while True:
output = self.csharpserver_proc.stdout.readline().rstrip()
if output:
print(helpers.color('[*] ' + output.decode('UTF-8')))
def do_send_message(self, compiler_yaml, task_name):
bytes_yaml = compiler_yaml.encode("UTF-8")
b64_yaml = base64.b64encode(bytes_yaml)
bytes_task_name = task_name.encode("UTF-8")
b64_task_name = base64.b64encode(bytes_task_name)
deliminator = ",".encode("UTF-8")
message = b64_task_name + deliminator + b64_yaml
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect((self.tcp_ip, self.tcp_port))
s.send(message)
recv_message = s.recv(1024)
recv_message = recv_message.decode("ascii")
if recv_message.startswith("FileName:"):
file_name = recv_message.split(":")[1]
else:
self.main_menu.plugin_socketio_message(self.info[0]['Name'],
("[*] " + recv_message))
file_name = "failed"
s.close()
return file_name
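    # Wire format sketch (task name and yaml content are illustrative): the plugin
    # sends base64(task_name) + b"," + base64(compiler_yaml) to the C# server at
    # tcp_ip:tcp_port and expects either a reply of the form
    # "FileName:<compiled file>" or an error string.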
def do_send_stager(self, stager, task_name):
bytes_yaml = stager.encode("UTF-8")
b64_yaml = base64.b64encode(bytes_yaml)
bytes_task_name = task_name.encode("UTF-8")
b64_task_name = base64.b64encode(bytes_task_name)
deliminator = ",".encode("UTF-8")
message = b64_task_name + deliminator + b64_yaml
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect((self.tcp_ip, self.tcp_port))
s.send(message)
recv_message = s.recv(1024)
recv_message = recv_message.decode("ascii")
if recv_message.startswith("FileName:"):
file_name = recv_message.split(":")[1]
else:
self.main_menu.plugin_socketio_message(self.info[0]['Name'],
("[*] " + recv_message))
file_name = "failed"
s.close()
return file_name
def shutdown(self):
try:
b64_yaml = base64.b64encode(("dummy data").encode("UTF-8"))
b64_task_name = base64.b64encode(("close").encode("UTF-8"))
deliminator = ",".encode("UTF-8")
message = b64_task_name + deliminator + b64_yaml
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect((self.tcp_ip, self.tcp_port))
s.send(message)
s.close()
self.csharpserverbuild_proc.kill()
self.csharpserver_proc.kill()
self.thread.kill()
except:
pass
return
|
build.py
|
# Copyright 2014 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Build file for production version of Oppia. Minifies JS and CSS."""
from __future__ import annotations
import argparse
import collections
import fnmatch
import hashlib
import json
import os
import re
import shutil
import subprocess
import threading
from core import utils
from scripts import common
from scripts import servers
ASSETS_DEV_DIR = os.path.join('assets', '')
ASSETS_OUT_DIR = os.path.join('build', 'assets', '')
THIRD_PARTY_STATIC_DIR = os.path.join('third_party', 'static')
THIRD_PARTY_GENERATED_DEV_DIR = os.path.join('third_party', 'generated', '')
THIRD_PARTY_GENERATED_OUT_DIR = os.path.join(
'build', 'third_party', 'generated', '')
THIRD_PARTY_JS_RELATIVE_FILEPATH = os.path.join('js', 'third_party.js')
MINIFIED_THIRD_PARTY_JS_RELATIVE_FILEPATH = os.path.join(
'js', 'third_party.min.js')
THIRD_PARTY_CSS_RELATIVE_FILEPATH = os.path.join('css', 'third_party.css')
MINIFIED_THIRD_PARTY_CSS_RELATIVE_FILEPATH = os.path.join(
'css', 'third_party.min.css')
WEBFONTS_RELATIVE_DIRECTORY_PATH = os.path.join('webfonts', '')
EXTENSIONS_DIRNAMES_TO_DIRPATHS = {
'dev_dir': os.path.join('extensions', ''),
'staging_dir': os.path.join('backend_prod_files', 'extensions', ''),
'out_dir': os.path.join('build', 'extensions', '')
}
TEMPLATES_DEV_DIR = os.path.join('templates', '')
TEMPLATES_CORE_DIRNAMES_TO_DIRPATHS = {
'dev_dir': os.path.join('core', 'templates', ''),
'staging_dir': os.path.join('backend_prod_files', 'templates', ''),
'out_dir': os.path.join('build', 'templates', '')
}
WEBPACK_DIRNAMES_TO_DIRPATHS = {
'staging_dir': os.path.join('backend_prod_files', 'webpack_bundles', ''),
'out_dir': os.path.join('build', 'webpack_bundles', '')
}
# This JSON file contains a JSON object whose keys are file paths and whose
# values are the corresponding hash values. The paths need to be in posix
# style, as they are interpreted by the `url-interpolation` service, which
# treats the paths in this file as URLs.
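# For example (hypothetical entries), hashes.json could look like:
#   {"/images/logo/logo.png": "0f5a3b9c...", "/i18n/en.json": "9c2d41e7..."}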
HASHES_JSON_FILENAME = 'hashes.json'
HASHES_JSON_FILEPATH = os.path.join('assets', HASHES_JSON_FILENAME)
DEPENDENCIES_FILE_PATH = os.path.join('dependencies.json')
REMOVE_WS = re.compile(r'\s{2,}').sub
YUICOMPRESSOR_DIR = os.path.join(
os.pardir, 'oppia_tools', 'yuicompressor-2.4.8', 'yuicompressor-2.4.8.jar')
PARENT_DIR = os.path.abspath(os.path.join(os.getcwd(), os.pardir))
UGLIFY_FILE = os.path.join('node_modules', 'uglify-js', 'bin', 'uglifyjs')
WEBPACK_FILE = os.path.join('node_modules', 'webpack', 'bin', 'webpack.js')
WEBPACK_DEV_CONFIG = 'webpack.dev.config.ts'
WEBPACK_DEV_SOURCE_MAPS_CONFIG = 'webpack.dev.sourcemap.config.ts'
WEBPACK_PROD_CONFIG = 'webpack.prod.config.ts'
WEBPACK_PROD_SOURCE_MAPS_CONFIG = 'webpack.prod.sourcemap.config.ts'
# Files with these extensions shouldn't be moved to build directory.
FILE_EXTENSIONS_TO_IGNORE = ('.py', '.pyc', '.stylelintrc', '.ts', '.gitkeep')
# Files with these name patterns shouldn't be moved to build directory, and will
# not be served in production. (This includes protractor.js files in
# /extensions.)
JS_FILENAME_SUFFIXES_TO_IGNORE = ('Spec.js', 'protractor.js')
JS_FILENAME_SUFFIXES_NOT_TO_MINIFY = ('.bundle.js',)
GENERAL_FILENAMES_TO_IGNORE = ('.pyc', '.stylelintrc', '.DS_Store')
JS_FILEPATHS_NOT_TO_BUILD = (
os.path.join(
'core', 'templates', 'expressions', 'parser.js'),
os.path.join('extensions', 'ckeditor_plugins', 'pre', 'plugin.js')
)
# These filepaths shouldn't be renamed (i.e. the filepath shouldn't contain a
# hash).
# This is because these files don't need cache invalidation, are referenced
# from third party files or should not be moved to the build directory.
# Statically served pages from app.yaml should be here too, since they don't
# need cache invalidation.
FILEPATHS_NOT_TO_RENAME = (
'*.py',
'third_party/generated/js/third_party.min.js.map',
'third_party/generated/webfonts/*',
'*.bundle.js',
'*.bundle.js.map',
'webpack_bundles/*',
)
# These are the env vars that need to be removed from app.yaml when we are
# deploying to production.
ENV_VARS_TO_REMOVE_FROM_DEPLOYED_APP_YAML = (
'FIREBASE_AUTH_EMULATOR_HOST',
'DATASTORE_DATASET',
'DATASTORE_EMULATOR_HOST',
'DATASTORE_EMULATOR_HOST_PATH',
'DATASTORE_HOST',
'DATASTORE_PROJECT_ID',
'DATASTORE_USE_PROJECT_ID_AS_APP_ID'
)
# Hashes for files with these paths should be provided to the frontend in
# JS hashes object.
FILEPATHS_PROVIDED_TO_FRONTEND = (
'images/*', 'videos/*', 'i18n/*', '*.component.html',
'*_directive.html', '*.directive.html',
'*.template.html', '*.png', '*.json', '*.webp')
HASH_BLOCK_SIZE = 2**20
APP_DEV_YAML_FILEPATH = 'app_dev.yaml'
APP_YAML_FILEPATH = 'app.yaml'
_PARSER = argparse.ArgumentParser(
description="""
Creates a third-party directory where all the JS and CSS dependencies are
built and stored. Depending on the options passed to the script, might also
minify third-party libraries and/or generate a build directory.
""")
_PARSER.add_argument(
'--prod_env', action='store_true', default=False, dest='prod_env')
_PARSER.add_argument(
'--deploy_mode', action='store_true', default=False, dest='deploy_mode')
_PARSER.add_argument(
'--minify_third_party_libs_only', action='store_true', default=False,
dest='minify_third_party_libs_only')
_PARSER.add_argument(
'--maintenance_mode',
action='store_true',
default=False,
dest='maintenance_mode',
help=(
'Enable maintenance mode, '
'meaning that only super admins can access the site.'
)
)
_PARSER.add_argument(
'--source_maps',
action='store_true',
default=False,
dest='source_maps',
help='Build webpack with source maps.')
def generate_app_yaml(deploy_mode=False):
"""Generate app.yaml from app_dev.yaml.
Args:
deploy_mode: bool. Whether the script is being called from deploy
script.
Raises:
Exception. Environment variable to be removed does not exist.
"""
content = '# THIS FILE IS AUTOGENERATED, DO NOT MODIFY\n'
with utils.open_file(APP_DEV_YAML_FILEPATH, 'r') as yaml_file:
content += yaml_file.read()
if deploy_mode:
# The version: default line is required to run jobs on a local server (
# both in prod & non-prod env). This line is not required when app.yaml
# is generated during deployment. So, we remove this if the build
# process is being run from the deploy script.
content = content.replace('version: default', '')
# The FIREBASE_AUTH_EMULATOR_HOST environment variable is only needed to
# test locally, and MUST NOT be included in the deployed file.
for env_variable in ENV_VARS_TO_REMOVE_FROM_DEPLOYED_APP_YAML:
if env_variable not in content:
raise Exception(
'Environment variable \'%s\' to be '
'removed does not exist.' % env_variable
)
content = re.sub(' %s: ".*"\n' % env_variable, '', content)
if os.path.isfile(APP_YAML_FILEPATH):
os.remove(APP_YAML_FILEPATH)
with utils.open_file(APP_YAML_FILEPATH, 'w+') as prod_yaml_file:
prod_yaml_file.write(content)
def modify_constants(
prod_env=False, emulator_mode=True, maintenance_mode=False):
"""Modify constants.ts and feconf.py.
Args:
prod_env: bool. Whether the server is started in prod mode.
emulator_mode: bool. Whether the server is started in emulator mode.
maintenance_mode: bool. Whether the site should be put into
the maintenance mode.
"""
dev_mode_variable = (
'"DEV_MODE": false' if prod_env else '"DEV_MODE": true')
common.inplace_replace_file(
common.CONSTANTS_FILE_PATH,
r'"DEV_MODE": (true|false)',
dev_mode_variable,
expected_number_of_replacements=1
)
emulator_mode_variable = (
'"EMULATOR_MODE": true' if emulator_mode else '"EMULATOR_MODE": false')
common.inplace_replace_file(
common.CONSTANTS_FILE_PATH,
r'"EMULATOR_MODE": (true|false)',
emulator_mode_variable,
expected_number_of_replacements=1
)
enable_maintenance_mode_variable = (
'ENABLE_MAINTENANCE_MODE = %s' % str(maintenance_mode))
common.inplace_replace_file(
common.FECONF_PATH,
r'ENABLE_MAINTENANCE_MODE = (True|False)',
enable_maintenance_mode_variable,
expected_number_of_replacements=1
)
def set_constants_to_default():
"""Set variables in constants.ts and feconf.py to default values."""
modify_constants(prod_env=False, emulator_mode=True, maintenance_mode=False)
def _minify(source_path, target_path):
"""Runs the given file through a minifier and outputs it to target_path.
Args:
source_path: str. Absolute path to file to be minified.
target_path: str. Absolute path to location where to copy
the minified file.
"""
# The -Xmxn argument is an attempt to limit the max memory used when the
# minification process is running on CircleCI. Note that, from local
# experiments, 18m seems to work, but 12m is too small and results in an
# out-of-memory error.
# https://circleci.com/blog/how-to-handle-java-oom-errors/
# Use relative path to avoid java command line parameter parse error on
# Windows. Convert to posix style path because the java program requires
# the filepath arguments to be in posix path style.
target_path = common.convert_to_posixpath(
os.path.relpath(target_path))
source_path = common.convert_to_posixpath(
os.path.relpath(source_path))
yuicompressor_dir = common.convert_to_posixpath(YUICOMPRESSOR_DIR)
cmd = 'java -Xmx24m -jar %s -o %s %s' % (
yuicompressor_dir, target_path, source_path)
subprocess.check_call(cmd, shell=True)
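    # Sketch of the resulting command (source and target paths are illustrative):
    #   java -Xmx24m -jar ../oppia_tools/yuicompressor-2.4.8/yuicompressor-2.4.8.jar \
    #       -o build/templates/css/oppia.css core/templates/css/oppia.css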
def write_to_file_stream(file_stream, content):
"""Write to a file object using provided content.
Args:
file_stream: file. A stream handling object to do write operation on.
content: str. String content to write to file object.
"""
file_stream.write(str(content))
def _join_files(source_paths, target_file_stream):
"""Writes multiple files into one file.
Args:
source_paths: list(str). Paths to files to join together.
target_file_stream: file. A stream object of target file.
"""
for source_path in source_paths:
with utils.open_file(source_path, 'r') as source_file:
write_to_file_stream(target_file_stream, source_file.read())
def _minify_and_create_sourcemap(source_path, target_file_path):
"""Minifies and generates source map for a JS file. This function is only
meant to be used with third_party.min.js.
Args:
source_path: str. Path to JS file to minify.
target_file_path: str. Path to location of the minified file.
"""
print('Minifying and creating sourcemap for %s' % source_path)
source_map_properties = 'includeSources,url=\'third_party.min.js.map\''
cmd = '%s %s %s -c -m --source-map %s -o %s ' % (
common.NODE_BIN_PATH, UGLIFY_FILE, source_path,
source_map_properties, target_file_path)
subprocess.check_call(cmd, shell=True)
def _generate_copy_tasks_for_fonts(source_paths, target_path):
"""Queue up a copy task for each font file.
Args:
source_paths: list(str). Paths to fonts.
target_path: str. Path where the fonts should be copied.
Returns:
deque(Thread). A deque that contains all copy tasks queued to be
processed.
"""
copy_tasks = collections.deque()
for font_path in source_paths:
copy_task = threading.Thread(
target=shutil.copy,
args=(font_path, target_path,))
copy_tasks.append(copy_task)
return copy_tasks
def _insert_hash(filepath, file_hash):
"""Inserts hash into filepath before the file extension.
Args:
filepath: str. Path where the hash should be inserted.
file_hash: str. Hash to be inserted into the path.
Returns:
str. Filepath with hash inserted.
"""
filepath, file_extension = os.path.splitext(filepath)
return '%s.%s%s' % (filepath, file_hash, file_extension)
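    # For example, _insert_hash('css/oppia.css', '3f7a1c') returns 'css/oppia.3f7a1c.css'.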
def ensure_directory_exists(filepath):
"""Ensures if directory tree exists, if not creates the directories.
Args:
filepath: str. Path to file located in directory that we want to ensure
exists.
"""
directory = os.path.dirname(filepath)
if not os.path.exists(directory):
os.makedirs(directory)
def safe_delete_directory_tree(directory_path):
"""Recursively delete a directory tree. If directory tree does not exist,
create the directories first then delete the directory tree.
Args:
directory_path: str. Directory path to be deleted.
"""
ensure_directory_exists(directory_path)
shutil.rmtree(directory_path)
def _ensure_files_exist(filepaths):
"""Ensures that files exist at the given filepaths.
Args:
filepaths: list(str). Paths to files that we want to ensure exist.
Raises:
OSError. One or more of the files does not exist.
"""
for filepath in filepaths:
if not os.path.isfile(filepath):
raise OSError('File %s does not exist.' % filepath)
def safe_copy_file(source_filepath, target_filepath):
"""Copy a file (no metadata) after ensuring the file exists at the given
source filepath.
NOTE: shutil.copyfile does not accept directory path as arguments.
Args:
source_filepath: str. Path to source file that we want to copy from.
target_filepath: str. Path to target file that we want to copy to.
"""
_ensure_files_exist([source_filepath])
shutil.copyfile(source_filepath, target_filepath)
def safe_delete_file(filepath):
"""Delete a file after ensuring the provided file actually exists.
Args:
filepath: str. Filepath to be deleted.
"""
_ensure_files_exist([filepath])
os.remove(filepath)
def get_file_count(directory_path):
"""Count total number of file in the given directory, ignoring any files
with extensions in FILE_EXTENSIONS_TO_IGNORE or files that should not be
built.
Args:
directory_path: str. Directory to be walked.
Returns:
int. Total number of files minus ignored files.
"""
total_file_count = 0
for root, _, filenames in os.walk(directory_path):
for filename in filenames:
# Ignore files with certain extensions.
filepath = os.path.join(root, filename)
if should_file_be_built(filepath) and not any(
filename.endswith(p) for p in FILE_EXTENSIONS_TO_IGNORE):
total_file_count += 1
return total_file_count
def _compare_file_count(
first_dir_list, second_dir_list):
"""Ensure that the total count of files in all directories in the first
list matches the count of files in all the directories in the second list.
Args:
first_dir_list: list(str). List of directories to compare.
second_dir_list: list(str). List of directories to compare.
Raises:
ValueError. The source directory list does not have the same file
count as the target directory list.
"""
file_counts = [0, 0]
for first_dir_path in first_dir_list:
file_counts[0] += get_file_count(first_dir_path)
for second_dir_path in second_dir_list:
file_counts[1] += get_file_count(second_dir_path)
if file_counts[0] != file_counts[1]:
print('Comparing %s vs %s' % (first_dir_list, second_dir_list))
raise ValueError(
'%s files in first dir list != %s files in second dir list' % (
file_counts[0], file_counts[1]))
def process_html(source_file_stream, target_file_stream):
"""Remove whitespaces and add hashes to filepaths in the HTML file stream
object.
Args:
source_file_stream: file. The stream object of the HTML file to be
read from.
target_file_stream: file. The stream object to write the minified HTML
file to.
"""
write_to_file_stream(
target_file_stream, REMOVE_WS(' ', source_file_stream.read()))
def get_dependency_directory(dependency):
"""Get dependency directory from dependency dictionary.
Args:
dependency: dict(str, str). Dictionary representing single dependency
from dependencies.json.
Returns:
str. Dependency directory.
"""
if 'targetDir' in dependency:
dependency_dir = dependency['targetDir']
else:
dependency_dir = dependency['targetDirPrefix'] + dependency['version']
return os.path.join(THIRD_PARTY_STATIC_DIR, dependency_dir)
def get_css_filepaths(dependency_bundle, dependency_dir):
"""Gets dependency css filepaths.
Args:
dependency_bundle: dict(str, list(str) | str). The dict has three keys:
- 'js': List of paths to js files that need to be copied.
- 'css': List of paths to css files that need to be copied.
- 'fontsPath': Path to folder containing fonts that need to be
copied.
dependency_dir: str. Path to directory where the files that need to
be copied are located.
Returns:
list(str). List of paths to css files that need to be copied.
"""
css_files = dependency_bundle.get('css', [])
return [os.path.join(dependency_dir, css_file) for css_file in css_files]
def get_js_filepaths(dependency_bundle, dependency_dir):
"""Gets dependency js filepaths.
Args:
dependency_bundle: dict(str, list(str) | str). The dict has three keys:
- 'js': List of paths to js files that need to be copied.
- 'css': List of paths to css files that need to be copied.
- 'fontsPath': Path to folder containing fonts that need to be
copied.
dependency_dir: str. Path to directory where the files that need to
be copied are located.
Returns:
list(str). List of paths to js files that need to be copied.
"""
js_files = dependency_bundle.get('js', [])
return [os.path.join(dependency_dir, js_file) for js_file in js_files]
def get_font_filepaths(dependency_bundle, dependency_dir):
"""Gets dependency font filepaths.
Args:
dependency_bundle: dict(str, list(str) | str). The dict has three keys:
- 'js': List of paths to js files that need to be copied.
- 'css': List of paths to css files that need to be copied.
- 'fontsPath': Path to folder containing fonts that need to be
copied.
dependency_dir: str. Path to directory where the files that need to
be copied are located.
Returns:
list(str). List of paths to font files that need to be copied.
"""
if 'fontsPath' not in dependency_bundle:
# Skip dependency bundles in dependencies.json that do not have
# fontsPath property.
return []
fonts_path = dependency_bundle['fontsPath']
# Obtain directory path to /font inside dependency folder.
# E.g. third_party/static/bootstrap-3.3.4/fonts/.
font_dir = os.path.join(dependency_dir, fonts_path)
font_filepaths = []
# Walk the directory and add all font files to list.
for root, _, filenames in os.walk(font_dir):
for filename in filenames:
font_filepaths.append(os.path.join(root, filename))
return font_filepaths
def get_dependencies_filepaths():
"""Extracts dependencies filepaths from dependencies.json file into
a dictionary.
Returns:
dict(str, list(str)). A dict mapping file types to lists of filepaths.
The dict has three keys: 'js', 'css' and 'fonts'. Each of the
corresponding values is a full list of dependency file paths of the
given type.
"""
filepaths = {
'js': [],
'css': [],
'fonts': []
}
with utils.open_file(DEPENDENCIES_FILE_PATH, 'r') as json_file:
dependencies_json = json.loads(
json_file.read(), object_pairs_hook=collections.OrderedDict)
frontend_dependencies = dependencies_json['dependencies']['frontend']
for dependency in frontend_dependencies.values():
if 'bundle' in dependency:
dependency_dir = get_dependency_directory(dependency)
filepaths['css'].extend(
get_css_filepaths(dependency['bundle'], dependency_dir))
filepaths['js'].extend(
get_js_filepaths(dependency['bundle'], dependency_dir))
filepaths['fonts'].extend(
get_font_filepaths(dependency['bundle'], dependency_dir))
_ensure_files_exist(filepaths['js'])
_ensure_files_exist(filepaths['css'])
_ensure_files_exist(filepaths['fonts'])
return filepaths
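# Sketch of the returned structure (the paths below are hypothetical and only
# illustrate the shape of the dict built from dependencies.json):
#
#     {
#         'js': ['third_party/static/somelib-1.0/somelib.min.js', ...],
#         'css': ['third_party/static/somelib-1.0/somelib.css', ...],
#         'fonts': ['third_party/static/somelib-1.0/fonts/somelib.woff', ...]
#     }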
def minify_third_party_libs(third_party_directory_path):
"""Minify third_party.js and third_party.css and remove un-minified
files.
"""
third_party_js_filepath = os.path.join(
third_party_directory_path, THIRD_PARTY_JS_RELATIVE_FILEPATH)
third_party_css_filepath = os.path.join(
third_party_directory_path, THIRD_PARTY_CSS_RELATIVE_FILEPATH)
minified_third_party_js_filepath = os.path.join(
third_party_directory_path, MINIFIED_THIRD_PARTY_JS_RELATIVE_FILEPATH)
minified_third_party_css_filepath = os.path.join(
third_party_directory_path, MINIFIED_THIRD_PARTY_CSS_RELATIVE_FILEPATH)
_minify_and_create_sourcemap(
third_party_js_filepath, minified_third_party_js_filepath)
_minify(third_party_css_filepath, minified_third_party_css_filepath)
# Clean up un-minified third_party.js and third_party.css.
safe_delete_file(third_party_js_filepath)
safe_delete_file(third_party_css_filepath)
def build_third_party_libs(third_party_directory_path):
"""Joins all third party css files into single css file and js files into
single js file. Copies both files and all fonts into third party folder.
"""
print('Building third party libs at %s' % third_party_directory_path)
third_party_js_filepath = os.path.join(
third_party_directory_path, THIRD_PARTY_JS_RELATIVE_FILEPATH)
third_party_css_filepath = os.path.join(
third_party_directory_path, THIRD_PARTY_CSS_RELATIVE_FILEPATH)
webfonts_dir = os.path.join(
third_party_directory_path, WEBFONTS_RELATIVE_DIRECTORY_PATH)
dependency_filepaths = get_dependencies_filepaths()
ensure_directory_exists(third_party_js_filepath)
with utils.open_file(
third_party_js_filepath, 'w+') as third_party_js_file:
_join_files(dependency_filepaths['js'], third_party_js_file)
ensure_directory_exists(third_party_css_filepath)
with utils.open_file(
third_party_css_filepath, 'w+') as third_party_css_file:
_join_files(dependency_filepaths['css'], third_party_css_file)
ensure_directory_exists(webfonts_dir)
_execute_tasks(
_generate_copy_tasks_for_fonts(
dependency_filepaths['fonts'], webfonts_dir))
def build_using_webpack(config_path):
"""Execute webpack build process. This takes all TypeScript files we have in
/templates and generates JS bundles according to the require() imports
and also compiles HTML pages into the /backend_prod_files/webpack_bundles
folder. The files are later copied into /build/webpack_bundles.
Args:
config_path: str. Webpack config to be used for building.
"""
print('Building webpack')
managed_webpack_compiler = servers.managed_webpack_compiler(
config_path=config_path, max_old_space_size=4096)
with managed_webpack_compiler as p:
p.wait()
assert get_file_count('backend_prod_files/webpack_bundles/') > 0, (
'webpack_bundles should be non-empty.')
def hash_should_be_inserted(filepath):
"""Returns if the file should be renamed to include hash in
the path.
Args:
filepath: str. Path relative to directory we are currently building.
Returns:
bool. True if filepath should contain hash else False.
"""
return not any(
fnmatch.fnmatch(filepath, pattern) for pattern
in FILEPATHS_NOT_TO_RENAME)
def should_file_be_built(filepath):
"""Determines if the file should be built.
- JS files: Returns False if filepath matches with pattern in
JS_FILENAME_SUFFIXES_TO_IGNORE or is in JS_FILEPATHS_NOT_TO_BUILD,
else returns True.
- Python files: Returns False if filepath ends with _test.py, else
returns True
- TS files: Returns False.
- Other files: Returns False if filepath matches with pattern in
GENERAL_FILENAMES_TO_IGNORE, else returns True.
Args:
filepath: str. Path relative to file we are currently building.
Returns:
bool. True if filepath should be built, else False.
"""
if filepath.endswith('.js'):
return all(
not filepath.endswith(p) for p in JS_FILENAME_SUFFIXES_TO_IGNORE)
elif filepath.endswith('_test.py'):
return False
elif filepath.endswith('.ts'):
return False
else:
return not any(
filepath.endswith(p) for p in GENERAL_FILENAMES_TO_IGNORE)
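# A quick sketch of how the rules above classify files (the paths are
# illustrative; the suffix constants are the module-level lists referenced in
# the docstring):
#
#     should_file_be_built('core/templates/App.ts')          # False: .ts file
#     should_file_be_built('core/controllers/base_test.py')  # False: test file
#     should_file_be_built('core/templates/App.js')          # True, unless it
#         # ends with a suffix listed in JS_FILENAME_SUFFIXES_TO_IGNORE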
def generate_copy_tasks_to_copy_from_source_to_target(
source, target, file_hashes):
"""Generate copy task for each file in source directory, excluding files
with extensions in FILE_EXTENSIONS_TO_IGNORE. Insert hash from hash dict
into the destination filename.
Args:
source: str. Path relative to /oppia directory of directory
containing files and directories to be copied.
target: str. Path relative to /oppia directory of directory where
to copy the files and directories.
file_hashes: dict(str, str). Dictionary with filepaths as keys and
hashes of file content as values.
Returns:
deque(Thread). A deque that contains all copy tasks queued
to be processed.
"""
print('Processing %s' % os.path.join(os.getcwd(), source))
print('Copying into %s' % os.path.join(os.getcwd(), target))
copy_tasks = collections.deque()
for root, dirnames, filenames in os.walk(os.path.join(os.getcwd(), source)):
for directory in dirnames:
print('Copying %s' % os.path.join(root, directory))
for filename in filenames:
source_path = os.path.join(root, filename)
# Python files should not be copied to final build directory.
if not any(
source_path.endswith(p) for p in FILE_EXTENSIONS_TO_IGNORE):
target_path = source_path
# The path in hashes.json file is in posix style,
# see the comment above HASHES_JSON_FILENAME for details.
relative_path = common.convert_to_posixpath(
os.path.relpath(source_path, start=source))
if (hash_should_be_inserted(source + relative_path) and
relative_path in file_hashes):
relative_path = (
_insert_hash(relative_path, file_hashes[relative_path]))
target_path = os.path.join(os.getcwd(), target, relative_path)
ensure_directory_exists(target_path)
copy_task = threading.Thread(
target=safe_copy_file,
args=(source_path, target_path,))
copy_tasks.append(copy_task)
return copy_tasks
def is_file_hash_provided_to_frontend(filepath):
"""Returns if the hash for the filepath should be provided to the frontend.
Args:
filepath: str. Relative path to the file.
Returns:
bool. True if file hash should be provided to the frontend else False.
"""
return any(
fnmatch.fnmatch(filepath, pattern) for pattern
in FILEPATHS_PROVIDED_TO_FRONTEND)
def generate_md5_hash(filepath):
"""Returns md5 hash of file.
Args:
filepath: str. Absolute path to the file.
Returns:
str. Hexadecimal hash of specified file.
"""
m = hashlib.md5()
with utils.open_file(filepath, 'rb', encoding=None) as f:
while True:
buf = f.read(HASH_BLOCK_SIZE)
if not buf:
break
m.update(buf)
return m.hexdigest()
def get_filepaths_by_extensions(source_dir, file_extensions):
"""Return list of filepaths in a directory with certain extensions,
excluding filepaths that should not be built.
Args:
source_dir: str. Root directory to be walked.
file_extensions: tuple(str). Tuple of file extensions.
Returns:
list(str). List of filepaths with specified extensions.
"""
filepaths = []
for root, _, filenames in os.walk(source_dir):
for filename in filenames:
filepath = os.path.join(root, filename)
relative_filepath = os.path.relpath(filepath, start=source_dir)
if should_file_be_built(filepath) and any(
filename.endswith(p) for p in file_extensions):
filepaths.append(relative_filepath)
return filepaths
def get_file_hashes(directory_path):
"""Returns hashes of all files in directory tree, excluding files with
extensions in FILE_EXTENSIONS_TO_IGNORE or files that should not be built.
Args:
directory_path: str. Root directory of the tree.
Returns:
dict(str, str). Dictionary with keys specifying file paths and values
specifying file hashes.
"""
file_hashes = {}
print(
'Computing hashes for files in %s'
% os.path.join(os.getcwd(), directory_path))
for root, _, filenames in os.walk(
os.path.join(os.getcwd(), directory_path)):
for filename in filenames:
filepath = os.path.join(root, filename)
if should_file_be_built(filepath) and not any(
filename.endswith(p) for p in FILE_EXTENSIONS_TO_IGNORE):
# The path in hashes.json file is in posix style,
# see the comment above HASHES_JSON_FILENAME for details.
complete_filepath = common.convert_to_posixpath(
os.path.join(root, filename))
relative_filepath = common.convert_to_posixpath(os.path.relpath(
complete_filepath, start=directory_path))
file_hashes[relative_filepath] = generate_md5_hash(
complete_filepath)
return file_hashes
def filter_hashes(file_hashes):
"""Filters hashes that should be provided to the frontend
and prefixes "/" in front of the keys.
Args:
file_hashes: dict(str, str). Dictionary with filepaths as keys and
hashes of file content as values.
Returns:
dict(str, str). Filtered dictionary of only filepaths that should be
provided to the frontend.
"""
filtered_hashes = {}
for filepath, file_hash in file_hashes.items():
if is_file_hash_provided_to_frontend(filepath):
filtered_hashes['/' + filepath] = file_hash
return filtered_hashes
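# Sketch of the transformation performed by filter_hashes, assuming that
# 'images/logo.png' matches a pattern in FILEPATHS_PROVIDED_TO_FRONTEND and
# 'feconf.py' does not (both paths and hashes are illustrative):
#
#     filter_hashes({'images/logo.png': 'abc123', 'feconf.py': 'def456'})
#     # -> {'/images/logo.png': 'abc123'}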
def save_hashes_to_file(file_hashes):
"""Return JS code that loads hashes needed for frontend into variable.
Args:
file_hashes: dict(str, str). Dictionary with filepaths as keys and
hashes of file content as values.
Returns:
str. JS code loading hashes as JSON into variable.
"""
# Only some of the hashes are needed in the frontend.
filtered_hashes = filter_hashes(file_hashes)
ensure_directory_exists(HASHES_JSON_FILEPATH)
with utils.open_file(HASHES_JSON_FILEPATH, 'w+') as hashes_json_file:
hashes_json_file.write(
str(json.dumps(filtered_hashes, ensure_ascii=False)))
hashes_json_file.write(u'\n')
def minify_func(source_path, target_path, filename):
"""Call the appropriate functions to handle different types of file
formats:
- HTML files: Remove whitespaces, interpolates paths in HTML to include
hashes in source directory and save edited file at target directory.
- CSS or JS files: Minify and save at target directory.
- Other files: Copy the file from source directory to target directory.
"""
skip_minify = any(
filename.endswith(p) for p in JS_FILENAME_SUFFIXES_NOT_TO_MINIFY)
if filename.endswith('.html'):
print('Building %s' % source_path)
with utils.open_file(source_path, 'r+') as source_html_file:
with utils.open_file(
target_path, 'w+') as minified_html_file:
process_html(source_html_file, minified_html_file)
elif ((filename.endswith('.css') or filename.endswith('.js')) and
not skip_minify):
print('Minifying %s' % source_path)
_minify(source_path, target_path)
else:
print('Copying %s' % source_path)
safe_copy_file(source_path, target_path)
def _execute_tasks(tasks, batch_size=24):
"""Starts all tasks and checks the results.
Runs no more than 'batch_size' tasks at a time.
"""
remaining_tasks = collections.deque(tasks)
currently_running_tasks = []
while remaining_tasks or currently_running_tasks:
if currently_running_tasks:
for task in collections.deque(currently_running_tasks):
if not task.is_alive():
currently_running_tasks.remove(task)
while remaining_tasks and len(currently_running_tasks) < batch_size:
task = remaining_tasks.popleft()
currently_running_tasks.append(task)
try:
task.start()
except RuntimeError as e:
raise OSError(
'threads can only be started once') from e
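# Typical usage (a sketch): build a deque of not-yet-started threads with one
# of the generate_*_tasks helpers and hand the whole deque to _execute_tasks,
# which runs at most `batch_size` of them at a time. The variables below are
# placeholders:
#
#     copy_tasks = _generate_copy_tasks_for_fonts(font_paths, webfonts_dir)
#     _execute_tasks(copy_tasks)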
def generate_build_tasks_to_build_all_files_in_directory(source, target):
"""This function queues up tasks to build all files in a directory,
excluding files that should not be built.
Args:
source: str. Path relative to /oppia of directory containing source
files and directories to be built.
target: str. Path relative to /oppia of directory where the built files
and directories will be saved to.
Returns:
deque(Thread). A deque that contains all build tasks queued
to be processed.
"""
print('Processing %s' % os.path.join(os.getcwd(), source))
print('Generating into %s' % os.path.join(os.getcwd(), target))
build_tasks = collections.deque()
for root, dirnames, filenames in os.walk(os.path.join(os.getcwd(), source)):
for directory in dirnames:
print('Building directory %s' % os.path.join(root, directory))
for filename in filenames:
source_path = os.path.join(root, filename)
target_path = source_path.replace(source, target)
ensure_directory_exists(target_path)
if should_file_be_built(source_path):
task = threading.Thread(
target=minify_func,
args=(source_path, target_path, filename,))
build_tasks.append(task)
return build_tasks
def generate_build_tasks_to_build_files_from_filepaths(
source_path, target_path, filepaths):
"""This function queues up build tasks to build files from a list of
filepaths, excluding files that should not be built.
Args:
source_path: str. Path relative to /oppia directory of directory
containing files and directories to be copied.
target_path: str. Path relative to /oppia directory of directory where
to copy the files and directories.
filepaths: list(str). List of filepaths to be built.
Returns:
deque(Thread). A deque that contains all build tasks queued
to be processed.
"""
build_tasks = collections.deque()
for filepath in filepaths:
source_file_path = os.path.join(source_path, filepath)
target_file_path = os.path.join(target_path, filepath)
ensure_directory_exists(target_file_path)
if should_file_be_built(source_file_path):
task = threading.Thread(
target=minify_func,
args=(
source_file_path, target_file_path, filepath,))
build_tasks.append(task)
return build_tasks
def generate_delete_tasks_to_remove_deleted_files(
source_dir_hashes, staging_directory):
"""This function walks the staging directory and queues up deletion tasks to
remove files that are not in the hash dict i.e. remaining files in staging
directory that have since been deleted from source directory. Files with
extensions in FILE_EXTENSIONS_TO_IGNORE will be excluded.
Args:
source_dir_hashes: dict(str, str). Dictionary with filepaths as keys and
hashes of file content as values.
staging_directory: str. Path relative to /oppia directory of directory
containing files and directories to be walked.
Returns:
deque(Thread). A deque that contains all delete tasks
queued to be processed.
"""
print('Scanning directory %s to remove deleted files' % staging_directory)
delete_tasks = collections.deque()
for root, _, filenames in os.walk(
os.path.join(os.getcwd(), staging_directory)):
for filename in filenames:
target_path = os.path.join(root, filename)
# Ignore files with certain extensions.
if not any(
target_path.endswith(p) for p in FILE_EXTENSIONS_TO_IGNORE):
# On Windows the path is on Windows-Style, while the path in
# hashes is in posix style, we need to convert it so the check
# can run correctly.
relative_path = common.convert_to_posixpath(
os.path.relpath(target_path, start=staging_directory))
# Remove file found in staging directory but not in source
# directory, i.e. file not listed in hash dict.
if relative_path not in source_dir_hashes:
print(
'Unable to find %s in file hashes, deleting file'
% target_path)
task = threading.Thread(
target=safe_delete_file, args=(target_path,))
delete_tasks.append(task)
return delete_tasks
def get_recently_changed_filenames(source_dir_hashes, out_dir):
"""Compare hashes of source files and built files. Return a list of
filenames that were recently changed. Skips files that are not supposed to
be built or that are already built.
Args:
source_dir_hashes: dict(str, str). Dictionary of hashes of files
to be built.
out_dir: str. Path relative to /oppia where built files are located.
Returns:
list(str). List of filenames expected to be re-hashed.
"""
# Hashes are created based on files' contents and are inserted between
# the filenames and their extensions,
# e.g base.240933e7564bd72a4dde42ee23260c5f.html
# If a file gets edited, a different MD5 hash is generated.
recently_changed_filenames = []
# Currently, Python files and HTML files are always re-built.
file_extensions_not_to_track = ('.html', '.py',)
for filename, md5_hash in source_dir_hashes.items():
# Skip files that are already built or should not be built.
if should_file_be_built(filename) and not any(
filename.endswith(p) for p in file_extensions_not_to_track):
final_filepath = _insert_hash(
os.path.join(out_dir, filename), md5_hash)
if not os.path.isfile(final_filepath):
# Filename with provided hash cannot be found, this file has
# been recently changed or created since last build.
recently_changed_filenames.append(filename)
if recently_changed_filenames:
print(
'The following files will be rebuilt due to recent changes: %s'
% recently_changed_filenames)
return recently_changed_filenames
def generate_build_tasks_to_build_directory(dirnames_dict):
"""This function queues up build tasks to build all files in source
directory if there is no existing staging directory. Otherwise, selectively
queue up build tasks to build recently changed files.
Args:
dirnames_dict: dict(str, str). This dict should contain three keys,
with corresponding values as follows:
- 'dev_dir': the directory that contains source files to be built.
- 'staging_dir': the directory that contains minified files waiting
for final copy process.
- 'out_dir': the final directory that contains built files with hash
inserted into filenames.
Returns:
deque(Thread). A deque that contains all build tasks queued
to be processed.
"""
source_dir = dirnames_dict['dev_dir']
staging_dir = dirnames_dict['staging_dir']
out_dir = dirnames_dict['out_dir']
build_tasks = collections.deque()
if not os.path.isdir(staging_dir):
# If there is no staging dir, perform build process on all files.
print('Creating new %s folder' % staging_dir)
ensure_directory_exists(staging_dir)
build_tasks += generate_build_tasks_to_build_all_files_in_directory(
source_dir, staging_dir)
else:
# If staging dir exists, rebuild all HTML and Python files.
file_extensions_to_always_rebuild = ('.html', '.py',)
print(
'Staging dir exists, re-building all %s files'
% ', '.join(file_extensions_to_always_rebuild))
filenames_to_always_rebuild = get_filepaths_by_extensions(
source_dir, file_extensions_to_always_rebuild)
build_tasks += generate_build_tasks_to_build_files_from_filepaths(
source_dir, staging_dir, filenames_to_always_rebuild)
dev_dir_hashes = get_file_hashes(source_dir)
source_hashes = {}
source_hashes.update(dev_dir_hashes)
# Clean up files in staging directory that cannot be found in file
# hashes dictionary.
_execute_tasks(generate_delete_tasks_to_remove_deleted_files(
source_hashes, staging_dir))
print(
'Getting files that have changed between %s and %s'
% (source_dir, out_dir))
recently_changed_filenames = get_recently_changed_filenames(
dev_dir_hashes, out_dir)
if recently_changed_filenames:
print(
'Re-building recently changed files at %s' % source_dir)
build_tasks += generate_build_tasks_to_build_files_from_filepaths(
source_dir, staging_dir, recently_changed_filenames)
else:
print('No changes detected. Using previously built files.')
return build_tasks
def _verify_filepath_hash(relative_filepath, file_hashes):
"""Ensure that hashes in filepaths match with the hash entries in hash
dict.
Args:
relative_filepath: str. Filepath that is relative from /build.
file_hashes: dict(str, str). Dictionary with filepaths as keys and
hashes of file content as values.
Raises:
ValueError. The hash dict is empty.
ValueError. Filepath has less than 2 partitions after splitting by '.'
delimiter.
ValueError. The filename does not contain hash.
KeyError. The filename's hash cannot be found in the hash dict.
"""
# Final filepath example:
# pages/base.240933e7564bd72a4dde42ee23260c5f.html.
if not file_hashes:
raise ValueError('Hash dict is empty')
filename_partitions = relative_filepath.split('.')
if len(filename_partitions) < 2:
raise ValueError('Filepath has less than 2 partitions after splitting')
hash_string_from_filename = filename_partitions[-2]
# Ensure hash string obtained from filename follows MD5 hash format.
if not re.search(r'([a-fA-F\d]{32})', relative_filepath):
if relative_filepath not in file_hashes:
return
raise ValueError(
'%s is expected to contain MD5 hash' % relative_filepath)
if hash_string_from_filename not in file_hashes.values():
raise KeyError(
'Hash from file named %s does not match hash dict values' %
relative_filepath)
def _verify_hashes(output_dirnames, file_hashes):
"""Verify a few metrics after build process finishes:
1) The hashes in filenames belong to the hash dict.
2) hashes.json, third_party.min.css and third_party.min.js are built and
hashes are inserted.
Args:
output_dirnames: list(str). List of directory paths that contain
built files.
file_hashes: dict(str, str). Dictionary with filepaths as keys and
hashes of file content as values.
"""
# Make sure that hashed file name matches with current hash dict.
for built_dir in output_dirnames:
for root, _, filenames in os.walk(built_dir):
for filename in filenames:
parent_dir = os.path.basename(root)
converted_filepath = os.path.join(
THIRD_PARTY_GENERATED_DEV_DIR, parent_dir, filename)
if hash_should_be_inserted(converted_filepath):
# Obtain the same filepath format as the hash dict's key.
relative_filepath = os.path.relpath(
os.path.join(root, filename), start=built_dir)
_verify_filepath_hash(relative_filepath, file_hashes)
hash_final_filename = _insert_hash(
HASHES_JSON_FILENAME, file_hashes[HASHES_JSON_FILENAME])
# The path in hashes.json (generated via file_hashes) file is in posix
# style, see the comment above HASHES_JSON_FILENAME for details.
third_party_js_final_filename = _insert_hash(
MINIFIED_THIRD_PARTY_JS_RELATIVE_FILEPATH,
file_hashes[common.convert_to_posixpath(
MINIFIED_THIRD_PARTY_JS_RELATIVE_FILEPATH)])
# The path in hashes.json (generated via file_hashes) file is in posix
# style, see the comment above HASHES_JSON_FILENAME for details.
third_party_css_final_filename = _insert_hash(
MINIFIED_THIRD_PARTY_CSS_RELATIVE_FILEPATH,
file_hashes[common.convert_to_posixpath(
MINIFIED_THIRD_PARTY_CSS_RELATIVE_FILEPATH)])
_ensure_files_exist([
os.path.join(ASSETS_OUT_DIR, hash_final_filename),
os.path.join(
THIRD_PARTY_GENERATED_OUT_DIR, third_party_js_final_filename),
os.path.join(
THIRD_PARTY_GENERATED_OUT_DIR, third_party_css_final_filename)])
def generate_hashes():
"""Generates hashes for files."""
# The keys for hashes are filepaths relative to the subfolders of the future
# /build folder. This is so that the replacing inside the HTML files works
# correctly.
hashes = {}
# Create hashes for all directories and files.
hash_dirs = [
ASSETS_DEV_DIR, EXTENSIONS_DIRNAMES_TO_DIRPATHS['dev_dir'],
TEMPLATES_CORE_DIRNAMES_TO_DIRPATHS['dev_dir'],
THIRD_PARTY_GENERATED_DEV_DIR]
for hash_dir in hash_dirs:
hashes.update(get_file_hashes(hash_dir))
# Save hashes as JSON and write the JSON into JS file
# to make the hashes available to the frontend.
save_hashes_to_file(hashes)
# Update hash dict with newly created hashes.json.
hashes.update(
{HASHES_JSON_FILENAME: generate_md5_hash(HASHES_JSON_FILEPATH)})
# Make sure /assets/hashes.json is available to the frontend.
_ensure_files_exist([HASHES_JSON_FILEPATH])
return hashes
def generate_build_directory(hashes):
"""Generates hashes for files. Minifies files and interpolates paths
in HTMLs to include hashes. Renames the files to include hashes and copies
them into build directory.
"""
print('Building Oppia in production mode...')
build_tasks = collections.deque()
copy_tasks = collections.deque()
# Build files in /extensions and copy them into staging directory.
build_tasks += generate_build_tasks_to_build_directory(
EXTENSIONS_DIRNAMES_TO_DIRPATHS)
# Minify all template files and copy them into staging directory.
build_tasks += generate_build_tasks_to_build_directory(
TEMPLATES_CORE_DIRNAMES_TO_DIRPATHS)
_execute_tasks(build_tasks)
# Copy all files from staging directory to production directory.
copy_input_dirs = [
ASSETS_DEV_DIR, EXTENSIONS_DIRNAMES_TO_DIRPATHS['staging_dir'],
TEMPLATES_CORE_DIRNAMES_TO_DIRPATHS['staging_dir'],
THIRD_PARTY_GENERATED_DEV_DIR,
WEBPACK_DIRNAMES_TO_DIRPATHS['staging_dir']]
copy_output_dirs = [
ASSETS_OUT_DIR, EXTENSIONS_DIRNAMES_TO_DIRPATHS['out_dir'],
TEMPLATES_CORE_DIRNAMES_TO_DIRPATHS['out_dir'],
THIRD_PARTY_GENERATED_OUT_DIR, WEBPACK_DIRNAMES_TO_DIRPATHS['out_dir']]
assert len(copy_input_dirs) == len(copy_output_dirs)
for i, copy_input_dir in enumerate(copy_input_dirs):
safe_delete_directory_tree(copy_output_dirs[i])
copy_tasks += generate_copy_tasks_to_copy_from_source_to_target(
copy_input_dir, copy_output_dirs[i], hashes)
_execute_tasks(copy_tasks)
_verify_hashes(copy_output_dirs, hashes)
source_dirs_for_assets = [ASSETS_DEV_DIR, THIRD_PARTY_GENERATED_DEV_DIR]
output_dirs_for_assets = [ASSETS_OUT_DIR, THIRD_PARTY_GENERATED_OUT_DIR]
_compare_file_count(source_dirs_for_assets, output_dirs_for_assets)
source_dirs_for_third_party = [THIRD_PARTY_GENERATED_DEV_DIR]
output_dirs_for_third_party = [THIRD_PARTY_GENERATED_OUT_DIR]
_compare_file_count(
source_dirs_for_third_party, output_dirs_for_third_party)
source_dirs_for_webpack = [WEBPACK_DIRNAMES_TO_DIRPATHS['staging_dir']]
output_dirs_for_webpack = [WEBPACK_DIRNAMES_TO_DIRPATHS['out_dir']]
_compare_file_count(
source_dirs_for_webpack, output_dirs_for_webpack)
source_dirs_for_extensions = [
EXTENSIONS_DIRNAMES_TO_DIRPATHS['dev_dir']]
output_dirs_for_extensions = [EXTENSIONS_DIRNAMES_TO_DIRPATHS['out_dir']]
_compare_file_count(source_dirs_for_extensions, output_dirs_for_extensions)
source_dirs_for_templates = [
TEMPLATES_CORE_DIRNAMES_TO_DIRPATHS['dev_dir']]
output_dirs_for_templates = [
TEMPLATES_CORE_DIRNAMES_TO_DIRPATHS['out_dir']]
_compare_file_count(source_dirs_for_templates, output_dirs_for_templates)
print('Build completed.')
def generate_python_package():
"""Generates Python package using setup.py."""
print('Building Oppia package...')
subprocess.check_call('python setup.py -q sdist -d build', shell=True)
print('Oppia package build completed.')
def clean():
"""Cleans up existing build directories."""
safe_delete_directory_tree('build/')
safe_delete_directory_tree('backend_prod_files/')
safe_delete_directory_tree('webpack_bundles/')
def main(args=None):
"""The main method of this script."""
options = _PARSER.parse_args(args=args)
if options.maintenance_mode and not options.prod_env:
raise Exception(
'maintenance_mode should only be enabled in prod build.')
# Clean up the existing generated folders.
clean()
# Regenerate /third_party/generated from scratch.
safe_delete_directory_tree(THIRD_PARTY_GENERATED_DEV_DIR)
build_third_party_libs(THIRD_PARTY_GENERATED_DEV_DIR)
# If minify_third_party_libs_only is set to True, skips the rest of the
# build process once third party libs are minified.
if options.minify_third_party_libs_only:
if options.prod_env:
minify_third_party_libs(THIRD_PARTY_GENERATED_DEV_DIR)
return
else:
raise Exception(
'minify_third_party_libs_only should not be '
'set in non-prod env.')
modify_constants(
prod_env=options.prod_env,
emulator_mode=not options.deploy_mode,
maintenance_mode=options.maintenance_mode)
if options.prod_env:
minify_third_party_libs(THIRD_PARTY_GENERATED_DEV_DIR)
hashes = generate_hashes()
generate_python_package()
if options.source_maps:
build_using_webpack(WEBPACK_PROD_SOURCE_MAPS_CONFIG)
else:
build_using_webpack(WEBPACK_PROD_CONFIG)
generate_app_yaml(
deploy_mode=options.deploy_mode)
generate_build_directory(hashes)
save_hashes_to_file({})
# The 'no coverage' pragma is used as this line is un-testable. This is because
# it will only be called when build.py is used as a script.
if __name__ == '__main__': # pragma: no cover
main()
|
utils.py
|
# Copyright 2012-present MongoDB, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for testing pymongo
"""
import contextlib
import functools
import os
import struct
import sys
import threading
import time
import warnings
from collections import defaultdict
from functools import partial
from pymongo import MongoClient, monitoring
from pymongo.errors import AutoReconnect, OperationFailure
from pymongo.monitoring import _SENSITIVE_COMMANDS
from pymongo.server_selectors import (any_server_selector,
writable_server_selector)
from pymongo.write_concern import WriteConcern
from test import (client_context, db_user, db_pwd)
IMPOSSIBLE_WRITE_CONCERN = WriteConcern(w=1000)
class WhiteListEventListener(monitoring.CommandListener):
def __init__(self, *commands):
self.commands = set(commands)
self.results = defaultdict(list)
def started(self, event):
if event.command_name in self.commands:
self.results['started'].append(event)
def succeeded(self, event):
if event.command_name in self.commands:
self.results['succeeded'].append(event)
def failed(self, event):
if event.command_name in self.commands:
self.results['failed'].append(event)
class EventListener(monitoring.CommandListener):
def __init__(self):
self.results = defaultdict(list)
def started(self, event):
self.results['started'].append(event)
def succeeded(self, event):
self.results['succeeded'].append(event)
def failed(self, event):
self.results['failed'].append(event)
class OvertCommandListener(EventListener):
"""A CommandListener that ignores sensitive commands."""
def started(self, event):
if event.command_name.lower() not in _SENSITIVE_COMMANDS:
super(OvertCommandListener, self).started(event)
def succeeded(self, event):
if event.command_name.lower() not in _SENSITIVE_COMMANDS:
super(OvertCommandListener, self).succeeded(event)
def failed(self, event):
if event.command_name.lower() not in _SENSITIVE_COMMANDS:
super(OvertCommandListener, self).failed(event)
class ServerAndTopologyEventListener(monitoring.ServerListener,
monitoring.TopologyListener):
"""Listens to all events."""
def __init__(self):
self.results = []
def opened(self, event):
self.results.append(event)
def description_changed(self, event):
self.results.append(event)
def closed(self, event):
self.results.append(event)
class HeartbeatEventListener(monitoring.ServerHeartbeatListener):
"""Listens to only server heartbeat events."""
def __init__(self):
self.results = []
def started(self, event):
self.results.append(event)
def succeeded(self, event):
self.results.append(event)
def failed(self, event):
self.results.append(event)
def _connection_string(h, p, authenticate):
if h.startswith("mongodb://"):
return h
elif client_context.auth_enabled and authenticate:
return "mongodb://%s:%s@%s:%d" % (db_user, db_pwd, str(h), p)
else:
return "mongodb://%s:%d" % (str(h), p)
def _mongo_client(host, port, authenticate=True, direct=False, **kwargs):
"""Create a new client over SSL/TLS if necessary."""
host = host or client_context.host
port = port or client_context.port
client_options = client_context.default_client_options.copy()
if client_context.replica_set_name and not direct:
client_options['replicaSet'] = client_context.replica_set_name
client_options.update(kwargs)
client = MongoClient(_connection_string(host, port, authenticate), port,
**client_options)
return client
def single_client_noauth(h=None, p=None, **kwargs):
"""Make a direct connection. Don't authenticate."""
return _mongo_client(h, p, authenticate=False, direct=True, **kwargs)
def single_client(h=None, p=None, **kwargs):
"""Make a direct connection, and authenticate if necessary."""
return _mongo_client(h, p, direct=True, **kwargs)
def rs_client_noauth(h=None, p=None, **kwargs):
"""Connect to the replica set. Don't authenticate."""
return _mongo_client(h, p, authenticate=False, **kwargs)
def rs_client(h=None, p=None, **kwargs):
"""Connect to the replica set and authenticate if necessary."""
return _mongo_client(h, p, **kwargs)
def rs_or_single_client_noauth(h=None, p=None, **kwargs):
"""Connect to the replica set if there is one, otherwise the standalone.
Like rs_or_single_client, but does not authenticate.
"""
return _mongo_client(h, p, authenticate=False, **kwargs)
def rs_or_single_client(h=None, p=None, **kwargs):
"""Connect to the replica set if there is one, otherwise the standalone.
Authenticates if necessary.
"""
return _mongo_client(h, p, **kwargs)
def one(s):
"""Get one element of a set"""
return next(iter(s))
def oid_generated_on_client(oid):
"""Is this process's PID in this ObjectId?"""
pid_from_doc = struct.unpack(">H", oid.binary[7:9])[0]
return (os.getpid() % 0xFFFF) == pid_from_doc
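# Sketch: with the legacy ObjectId layout assumed here (4-byte timestamp,
# 3-byte machine id, 2-byte pid, 3-byte counter), the two bytes at offsets
# 7-8 hold the pid, so an id created in the current process should satisfy:
#
#     from bson import ObjectId
#     oid_generated_on_client(ObjectId())  # -> True for locally generated ids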
def delay(sec):
return '''function() { sleep(%f * 1000); return true; }''' % sec
def get_command_line(client):
command_line = client.admin.command('getCmdLineOpts')
assert command_line['ok'] == 1, "getCmdLineOpts() failed"
return command_line
def server_started_with_option(client, cmdline_opt, config_opt):
"""Check if the server was started with a particular option.
:Parameters:
- `cmdline_opt`: The command line option (e.g. --nojournal)
- `config_opt`: The config file option (e.g. nojournal)
"""
command_line = get_command_line(client)
if 'parsed' in command_line:
parsed = command_line['parsed']
if config_opt in parsed:
return parsed[config_opt]
argv = command_line['argv']
return cmdline_opt in argv
def server_started_with_auth(client):
try:
command_line = get_command_line(client)
except OperationFailure as e:
msg = e.details.get('errmsg', '')
if e.code == 13 or 'unauthorized' in msg or 'login' in msg:
# Unauthorized.
return True
raise
# MongoDB >= 2.0
if 'parsed' in command_line:
parsed = command_line['parsed']
# MongoDB >= 2.6
if 'security' in parsed:
security = parsed['security']
# >= rc3
if 'authorization' in security:
return security['authorization'] == 'enabled'
# < rc3
return security.get('auth', False) or bool(security.get('keyFile'))
return parsed.get('auth', False) or bool(parsed.get('keyFile'))
# Legacy
argv = command_line['argv']
return '--auth' in argv or '--keyFile' in argv
def server_started_with_nojournal(client):
command_line = get_command_line(client)
# MongoDB 2.6.
if 'parsed' in command_line:
parsed = command_line['parsed']
if 'storage' in parsed:
storage = parsed['storage']
if 'journal' in storage:
return not storage['journal']['enabled']
return server_started_with_option(client, '--nojournal', 'nojournal')
def server_is_master_with_slave(client):
command_line = get_command_line(client)
if 'parsed' in command_line:
return command_line['parsed'].get('master', False)
return '--master' in command_line['argv']
def drop_collections(db):
for coll in db.list_collection_names():
if not coll.startswith('system'):
db.drop_collection(coll)
def remove_all_users(db):
db.command("dropAllUsersFromDatabase", 1,
writeConcern={"w": client_context.w})
def joinall(threads):
"""Join threads with a 5-minute timeout, assert joins succeeded"""
for t in threads:
t.join(300)
assert not t.is_alive(), "Thread %s hung" % t
def connected(client):
"""Convenience to wait for a newly-constructed client to connect."""
with warnings.catch_warnings():
# Ignore warning that "ismaster" is always routed to primary even
# if client's read preference isn't PRIMARY.
warnings.simplefilter("ignore", UserWarning)
client.admin.command('ismaster') # Force connection.
return client
def wait_until(predicate, success_description, timeout=10):
"""Wait up to 10 seconds (by default) for predicate to be true.
E.g.:
wait_until(lambda: client.primary == ('a', 1),
'connect to the primary')
If the lambda-expression isn't true after 10 seconds, we raise
AssertionError("Didn't ever connect to the primary").
Returns the predicate's first true value.
"""
start = time.time()
while True:
retval = predicate()
if retval:
return retval
if time.time() - start > timeout:
raise AssertionError("Didn't ever %s" % success_description)
time.sleep(0.1)
def is_mongos(client):
res = client.admin.command('ismaster')
return res.get('msg', '') == 'isdbgrid'
def assertRaisesExactly(cls, fn, *args, **kwargs):
"""
Unlike the standard assertRaises, this checks that a function raises a
specific class of exception, and not a subclass. E.g., check that
MongoClient() raises ConnectionFailure but not its subclass, AutoReconnect.
"""
try:
fn(*args, **kwargs)
except Exception as e:
assert e.__class__ == cls, "got %s, expected %s" % (
e.__class__.__name__, cls.__name__)
else:
raise AssertionError("%s not raised" % cls)
@contextlib.contextmanager
def _ignore_deprecations():
with warnings.catch_warnings():
warnings.simplefilter("ignore", DeprecationWarning)
yield
def ignore_deprecations(wrapped=None):
"""A context manager or a decorator."""
if wrapped:
@functools.wraps(wrapped)
def wrapper(*args, **kwargs):
with _ignore_deprecations():
return wrapped(*args, **kwargs)
return wrapper
else:
return _ignore_deprecations()
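# Usage sketch: ignore_deprecations can wrap a test method or guard a block
# (the decorated function and the call below are placeholders):
#
#     @ignore_deprecations
#     def test_old_api(self):
#         ...
#
#     with ignore_deprecations():
#         client.some_deprecated_call()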
class DeprecationFilter(object):
def __init__(self, action="ignore"):
"""Start filtering deprecations."""
self.warn_context = warnings.catch_warnings()
self.warn_context.__enter__()
warnings.simplefilter(action, DeprecationWarning)
def stop(self):
"""Stop filtering deprecations."""
self.warn_context.__exit__()
self.warn_context = None
def read_from_which_host(
client,
pref,
tag_sets=None,
):
"""Read from a client with the given Read Preference.
Return the 'host:port' which was read from.
:Parameters:
- `client`: A MongoClient
- `mode`: A ReadPreference
- `tag_sets`: List of dicts of tags for data-center-aware reads
"""
db = client.pymongo_test
if isinstance(tag_sets, dict):
tag_sets = [tag_sets]
if tag_sets:
tags = tag_sets or pref.tag_sets
pref = pref.__class__(tags)
db.read_preference = pref
cursor = db.test.find()
try:
try:
next(cursor)
except StopIteration:
# No documents in collection, that's fine
pass
return cursor.address
except AutoReconnect:
return None
def assertReadFrom(testcase, client, member, *args, **kwargs):
"""Check that a query with the given mode and tag_sets reads from
the expected replica-set member.
:Parameters:
- `testcase`: A unittest.TestCase
- `client`: A MongoClient
- `member`: A host:port expected to be used
- `mode`: A ReadPreference
- `tag_sets` (optional): List of dicts of tags for data-center-aware reads
"""
for _ in range(10):
testcase.assertEqual(member,
read_from_which_host(client, *args, **kwargs))
def assertReadFromAll(testcase, client, members, *args, **kwargs):
"""Check that a query with the given mode and tag_sets reads from all
members in a set, and only members in that set.
:Parameters:
- `testcase`: A unittest.TestCase
- `client`: A MongoClient
- `members`: Sequence of host:port expected to be used
- `mode`: A ReadPreference
- `tag_sets` (optional): List of dicts of tags for data-center-aware reads
"""
members = set(members)
used = set()
for _ in range(100):
used.add(read_from_which_host(client, *args, **kwargs))
testcase.assertEqual(members, used)
def get_pool(client):
"""Get the standalone, primary, or mongos pool."""
topology = client._get_topology()
server = topology.select_server(writable_server_selector)
return server.pool
def get_pools(client):
"""Get all pools."""
return [
server.pool for server in
client._get_topology().select_servers(any_server_selector)]
# Constants for run_threads and lazy_client_trial.
NTRIALS = 5
NTHREADS = 10
def run_threads(collection, target):
"""Run a target function in many threads.
target is a function taking a Collection and an integer.
"""
threads = []
for i in range(NTHREADS):
bound_target = partial(target, collection, i)
threads.append(threading.Thread(target=bound_target))
for t in threads:
t.start()
for t in threads:
t.join(60)
assert not t.is_alive()
@contextlib.contextmanager
def frequent_thread_switches():
"""Make concurrency bugs more likely to manifest."""
interval = None
if not sys.platform.startswith('java'):
if hasattr(sys, 'getswitchinterval'):
interval = sys.getswitchinterval()
sys.setswitchinterval(1e-6)
else:
interval = sys.getcheckinterval()
sys.setcheckinterval(1)
try:
yield
finally:
if not sys.platform.startswith('java'):
if hasattr(sys, 'setswitchinterval'):
sys.setswitchinterval(interval)
else:
sys.setcheckinterval(interval)
def lazy_client_trial(reset, target, test, get_client):
"""Test concurrent operations on a lazily-connecting client.
`reset` takes a collection and resets it for the next trial.
`target` takes a lazily-connecting collection and an index from
0 to NTHREADS, and performs some operation, e.g. an insert.
`test` takes the lazily-connecting collection and asserts a
post-condition to prove `target` succeeded.
"""
collection = client_context.client.pymongo_test.test
with frequent_thread_switches():
for i in range(NTRIALS):
reset(collection)
lazy_client = get_client()
lazy_collection = lazy_client.pymongo_test.test
run_threads(lazy_collection, target)
test(lazy_collection)
def gevent_monkey_patched():
"""Check if gevent's monkey patching is active."""
# In Python 3.6 importing gevent.socket raises an ImportWarning.
with warnings.catch_warnings():
warnings.simplefilter("ignore", ImportWarning)
try:
import socket
import gevent.socket
return socket.socket is gevent.socket.socket
except ImportError:
return False
def eventlet_monkey_patched():
"""Check if eventlet's monkey patching is active."""
try:
import threading
import eventlet
return (threading.current_thread.__module__ ==
'eventlet.green.threading')
except ImportError:
return False
def is_greenthread_patched():
return gevent_monkey_patched() or eventlet_monkey_patched()
def disable_replication(client):
"""Disable replication on all secondaries, requires MongoDB 3.2."""
for host, port in client.secondaries:
secondary = single_client(host, port)
secondary.admin.command('configureFailPoint', 'stopReplProducer',
mode='alwaysOn')
def enable_replication(client):
"""Enable replication on all secondaries, requires MongoDB 3.2."""
for host, port in client.secondaries:
secondary = single_client(host, port)
secondary.admin.command('configureFailPoint', 'stopReplProducer',
mode='off')
|
grid.py
|
"""
Codes to submit multiple jobs to JCVI grid engine
"""
from __future__ import print_function
import os.path as op
import sys
import re
import logging
from multiprocessing import Pool, Process, Queue, cpu_count
from jcvi.formats.base import write_file, must_open
from jcvi.apps.base import (
OptionParser,
ActionDispatcher,
popen,
backup,
mkdir,
sh,
listify,
)
class Parallel(object):
"""
Run a number of commands in parallel.
"""
def __init__(self, cmds, cpus=cpu_count()):
self.cmds = cmds
self.cpus = min(len(cmds), cpus)
def run(self):
p = Pool(processes=self.cpus)
p.map(sh, self.cmds)
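# Usage sketch: run a handful of shell commands locally, capped at the number
# of available CPUs (the commands are placeholders):
#
#     Parallel(["gzip a.fastq", "gzip b.fastq"], cpus=2).run()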
class Dependency(object):
"""
Used by MakeManager.
"""
def __init__(self, source, target, cmds, id, remove=False):
self.id = id
self.source = listify(source)
self.target = listify(target)
self.cmds = listify(cmds)
if remove:
rm_cmd = "rm -f {0}".format(" ".join(self.target))
self.cmds = [rm_cmd] + self.cmds
def __str__(self):
source = " ".join(self.source)
target = " ".join(self.target)
# When there are multiple targets, use .INTERMEDIATE
# <http://stackoverflow.com/questions/2973445/gnu-makefile-rule-generating-a-few-targets-from-a-single-source-file>
if len(self.target) > 1:
intermediate = "{0}.intermediate".format(self.id)
s = "{0} : {1}\n".format(target, intermediate)
s += ".INTERMEDIATE: {0}\n".format(intermediate)
s += "{0} : {1}\n".format(intermediate, source)
else:
s = "{0} : {1}\n".format(target, source)
for c in self.cmds:
c = c.replace("$", "$$") # Command escaping
s += "\t" + c + "\n"
return s
class MakeManager(list):
"""
Write and execute makefile.
"""
def __init__(self, filename="makefile"):
self.makefile = filename
self.targets = set()
self.ndeps = 0
def add(self, source, target, cmds, remove=False):
self.ndeps += 1
d = Dependency(source, target, cmds, self.ndeps, remove=remove)
self.append(d)
self.targets |= set(listify(target))
def write(self):
assert self.targets, "No targets specified"
filename = self.makefile
if op.exists(filename):
backup(filename)
fw = open(filename, "w")
print("all : {0}\n".format(" ".join(sorted(self.targets))), file=fw)
for d in self:
print(d, file=fw)
print("clean :\n\trm -rf {0}\n".format(" ".join(self.targets)), file=fw)
fw.close()
logging.debug("Makefile written to `{0}`.".format(self.makefile))
def run(self, cpus=1):
if not op.exists(self.makefile):
self.write()
cmd = "make -j {0} -f {1}".format(cpus, self.makefile)
sh(cmd)
def clean(self):
cmd = "make clean -f {}".format(self.makefile)
sh(cmd)
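# Usage sketch: declare source/target/command dependencies, write the
# makefile, then run it with several parallel jobs (the filenames and the
# command are placeholders):
#
#     mm = MakeManager()
#     mm.add("reads.fastq", "reads.bam", "aligner reads.fastq > reads.bam")
#     mm.write()
#     mm.run(cpus=4)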
class Jobs(list):
"""
Runs multiple function calls on the SAME computer, using multiprocessing.
"""
def __init__(self, target, args):
for x in args:
x = listify(x)
self.append(Process(target=target, args=x))
def start(self):
for pi in self:
pi.start()
def join(self):
for pi in self:
pi.join()
def run(self):
self.start()
self.join()
class Poison:
pass
class WriteJobs(object):
"""
Runs multiple function calls, but writes all results to the same file.
Producer-consumer model.
"""
def __init__(self, target, args, filename, cpus=cpu_count()):
workerq = Queue()
writerq = Queue()
for a in args:
workerq.put(a)
cpus = min(cpus, len(args))
for i in range(cpus):
workerq.put(Poison())
self.worker = Jobs(work, args=[(workerq, writerq, target)] * cpus)
self.writer = Process(target=write, args=(workerq, writerq, filename, cpus))
def run(self):
self.worker.start()
self.writer.start()
self.worker.join()
self.writer.join()
def work(queue_in, queue_out, target):
while True:
a = queue_in.get()
if isinstance(a, Poison):
break
res = target(a)
queue_out.put(res)
queue_out.put(Poison())
def write(queue_in, queue_out, filename, cpus):
from rich.progress import Progress
fw = must_open(filename, "w")
isize = queue_in.qsize()
logging.debug("A total of {0} items to compute.".format(isize))
isize = isize or 1
poisons = 0
with Progress() as progress:
task = progress.add_task("[green]Processing ...", total=isize)
while True:
res = queue_out.get()
qsize = queue_in.qsize()
progress.update(task, completed=isize - qsize)
if isinstance(res, Poison):
poisons += 1
if poisons == cpus: # wait all workers finish
break
elif res:
print(res, file=fw)
fw.flush()
fw.close()
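# Usage sketch for the producer-consumer pair above: each worker process
# applies `target` to one item from `args`, and a single writer process
# appends the results to `filename` (the names below are placeholders):
#
#     WriteJobs(target=compute_stats, args=list_of_inputs,
#               filename="results.txt").run()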
class GridOpts(dict):
def __init__(self, opts):
export = (
"pcode",
"queue",
"threaded",
"concurrency",
"outdir",
"name",
"hold_jid",
)
for e in export:
if e in opts.__dict__:
self[e] = getattr(opts, e)
class GridProcess(object):
pat1 = re.compile(r"Your job (?P<id>[0-9]*) ")
pat2 = re.compile(r"Your job-array (?P<id>\S*) ")
def __init__(
self,
cmd,
jobid="",
pcode="99999",
queue="default",
threaded=None,
infile=None,
outfile=None,
errfile=None,
arr=None,
concurrency=None,
outdir=".",
name=None,
hold_jid=None,
extra_opts=None,
grid_opts=None,
):
self.cmd = cmd
self.jobid = jobid
self.queue = queue
self.threaded = threaded
self.infile = infile
self.outfile = outfile or ""
self.errfile = errfile or ""
self.arr = arr
self.concurrency = concurrency
self.outdir = outdir
self.name = name
self.pcode = pcode
self.hold_jid = hold_jid
self.pat = self.pat2 if arr else self.pat1
self.extra = extra_opts if extra_opts else None
if grid_opts:
self.__dict__.update(GridOpts(grid_opts))
def __str__(self):
return "\t".join((x for x in (self.jobid, self.cmd, self.outfile) if x))
def build(self):
# Shell commands
if "|" in self.cmd or "&&" in self.cmd or "||" in self.cmd:
quote = '"' if "'" in self.cmd else "'"
self.cmd = "sh -c {1}{0}{1}".format(self.cmd, quote)
# qsub command (the project code is specific to jcvi)
qsub = "qsub -P {0} -cwd".format(self.pcode)
if self.queue != "default":
qsub += " -l {0}".format(self.queue)
if self.threaded:
qsub += " -pe threaded {0}".format(self.threaded)
if self.arr:
assert 1 <= self.arr < 100000
qsub += " -t 1-{0}".format(self.arr)
if self.concurrency:
qsub += " -tc {0}".format(self.concurrency)
if self.name:
qsub += ' -N "{0}"'.format(self.name)
if self.hold_jid:
param = "-hold_jid_ad" if self.arr else "-hold_jid"
qsub += " {0} {1}".format(param, self.hold_jid)
if self.extra:
qsub += " {0}".format(self.extra)
# I/O
infile = self.infile
outfile = self.outfile
errfile = self.errfile
outdir = self.outdir
mkdir(outdir)
redirect_same = outfile and (outfile == errfile)
if infile:
qsub += " -i {0}".format(infile)
if outfile:
self.outfile = op.join(outdir, outfile)
qsub += " -o {0}".format(self.outfile)
if errfile:
if redirect_same:
qsub += " -j y"
else:
self.errfile = op.join(outdir, errfile)
qsub += " -e {0}".format(self.errfile)
cmd = " ".join((qsub, self.cmd))
return cmd
def start(self):
cmd = self.build()
# run the command and get the job-ID (important)
output = popen(cmd, debug=False).read()
if output.strip() != "":
self.jobid = re.search(self.pat, output).group("id")
else:
self.jobid = "-1"
msg = "[{0}] {1}".format(self.jobid, self.cmd)
if self.infile:
msg += " < {0} ".format(self.infile)
if self.outfile:
backup(self.outfile)
msg += " > {0} ".format(self.outfile)
if self.errfile:
backup(self.errfile)
msg += " 2> {0} ".format(self.errfile)
logging.debug(msg)
class Grid(list):
def __init__(self, cmds, outfiles=[]):
assert cmds, "Commands empty!"
if not outfiles:
outfiles = [None] * len(cmds)
for cmd, outfile in zip(cmds, outfiles):
self.append(GridProcess(cmd, outfile=outfile))
def run(self):
for pi in self:
pi.start()
PBS_STANZA = """
#PBS -q standard
#PBS -J 1-{0}
#PBS -l select=1:ncpus={1}:mem=23gb
#PBS -l pvmem=23gb
#PBS -l walltime=100:00:00
#PBS -W group_list=genomeanalytics
"""
arraysh = """
CMD=`awk "NR==$SGE_TASK_ID" {0}`
$CMD"""
arraysh_ua = (
PBS_STANZA
+ """
cd $PBS_O_WORKDIR
CMD=`awk "NR==$PBS_ARRAY_INDEX" {2}`
$CMD"""
)
def get_grid_engine():
cmd = "qsub --version"
ret = popen(cmd, debug=False).read()
return "PBS" if "PBS" in ret else "SGE"
def main():
actions = (
("run", "run a normal command on grid"),
("array", "run an array job"),
("kill", "wrapper around the `qdel` command"),
)
p = ActionDispatcher(actions)
p.dispatch(globals())
def array(args):
"""
%prog array commands.list
Parallelize a set of commands on grid using array jobs.
"""
p = OptionParser(array.__doc__)
p.set_grid_opts(array=True)
p.set_params(prog="grid")
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
(cmds,) = args
fp = open(cmds)
N = sum(1 for x in fp)
fp.close()
pf = cmds.rsplit(".", 1)[0]
runfile = pf + ".sh"
assert runfile != cmds, "Commands list file should not have a `.sh` extension"
engine = get_grid_engine()
threaded = opts.threaded or 1
contents = (
arraysh.format(cmds)
if engine == "SGE"
else arraysh_ua.format(N, threaded, cmds)
)
write_file(runfile, contents)
if engine == "PBS":
return
outfile = "{0}.{1}.out".format(pf, "\$TASK_ID")
errfile = "{0}.{1}.err".format(pf, "\$TASK_ID")
p = GridProcess(
"sh {0}".format(runfile),
outfile=outfile,
errfile=errfile,
arr=N,
extra_opts=opts.extra,
grid_opts=opts,
)
p.start()
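# Worked example (added for illustration): the SGE wrapper script that array()
# writes for a commands file. The file name "commands.list" is hypothetical; each
# array task picks out its own line via $SGE_TASK_ID and runs it.
def _example_array_runfile():
    contents = arraysh.format("commands.list")
    # contents is:
    # CMD=`awk "NR==$SGE_TASK_ID" commands.list`
    # $CMD
    return contents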
def run(args):
"""
%prog run command ::: file1 file2
Parallelize a set of commands on grid. The syntax is modeled after GNU
parallel <http://www.gnu.org/s/parallel/man.html#options>
{} - input line
{.} - input line without extension
{_} - input line first part
{/} - basename of input line
{/.} - basename of input line without extension
{/_} - basename of input line first part
{#} - sequence number of job to run
::: - Use arguments from the command line as input source instead of stdin
(standard input).
If file name is `t/example.tar.gz`, then,
{} is "t/example.tar.gz", {.} is "t/example.tar", {_} is "t/example"
{/} is "example.tar.gz", {/.} is "example.tar", {/_} is "example"
A few examples:
ls -1 *.fastq | %prog run process {} {.}.pdf # use stdin
%prog run process {} {.}.pdf ::: *fastq # use :::
%prog run "zcat {} > {.}" ::: *.gz # quote redirection
%prog run < commands.list # run a list of commands
"""
p = OptionParser(run.__doc__)
p.set_grid_opts()
p.set_params(prog="grid")
opts, args = p.parse_args(args)
if len(args) == 0:
sys.exit(not p.print_help())
sep = ":::"
if sep in args:
sepidx = args.index(sep)
filenames = args[sepidx + 1 :]
args = args[:sepidx]
if not filenames:
filenames = [""]
else:
filenames = sys.stdin if not sys.stdin.isatty() else [""]
cmd = " ".join(args)
cmds = [] if filenames else [(cmd, None)]
for i, filename in enumerate(filenames):
filename = filename.strip()
noextname = filename.rsplit(".", 1)[0]
prefix, basename = op.split(filename)
basenoextname = basename.rsplit(".", 1)[0]
basefirstname = basename.split(".")[0]
firstname = op.join(prefix, basefirstname)
ncmd = cmd
if "{" in ncmd:
ncmd = ncmd.replace("{}", filename)
else:
ncmd += " " + filename
ncmd = ncmd.replace("{.}", noextname)
ncmd = ncmd.replace("{_}", firstname)
ncmd = ncmd.replace("{/}", basename)
ncmd = ncmd.replace("{/.}", basenoextname)
ncmd = ncmd.replace("{/_}", basefirstname)
ncmd = ncmd.replace("{#}", str(i))
outfile = None
if ">" in ncmd:
ncmd, outfile = ncmd.split(">", 1)
ncmd, outfile = ncmd.strip(), outfile.strip()
ncmd = ncmd.strip()
cmds.append((ncmd, outfile))
for ncmd, outfile in cmds:
p = GridProcess(ncmd, outfile=outfile, extra_opts=opts.extra, grid_opts=opts)
p.start()
def guess_method(tag):
from jcvi.formats.base import is_number
jobids = tag.split(",")
for jobid in jobids:
if not is_number(jobid):
return "pattern"
return "jobid"
def kill(args):
"""
%prog kill [options] JOBNAMEPAT/JOBIDs
Kill jobs based on JOBNAME pattern matching (case-sensitive)
or list of JOBIDs (comma separated)
Examples:
%prog kill "pyth*" # Use regex
%prog kill 160253,160245,160252 # Use list of job ids
%prog kill all # Everything
"""
import shlex
from jcvi.apps.base import sh, getusername
from subprocess import check_output, CalledProcessError
import xml.etree.ElementTree as ET
valid_methods = ("pattern", "jobid")
p = OptionParser(kill.__doc__)
p.add_option(
"--method",
choices=valid_methods,
help="Identify jobs based on [default: guess]",
)
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
username = getusername()
(tag,) = args
tag = tag.strip()
if tag == "all":
sh("qdel -u {0}".format(username))
return
valid_jobids = set()
method = opts.method or guess_method(tag)
if method == "jobid":
jobids = tag.split(",")
valid_jobids |= set(jobids)
elif method == "pattern":
qsxmlcmd = 'qstat -u "{}" -j "{}" -nenv -njd -xml'.format(username, tag)
try:
qsxml = check_output(shlex.split(qsxmlcmd)).strip()
except CalledProcessError as e:
qsxml = None
logging.debug('No jobs matching the pattern "{0}"'.format(tag))
if qsxml is not None:
for job in ET.fromstring(qsxml).findall("djob_info"):
for elem in job.findall("element"):
jobid = elem.find("JB_job_number").text
valid_jobids.add(jobid)
if valid_jobids:
sh("qdel {0}".format(",".join(valid_jobids)))
if __name__ == "__main__":
main()
|
run_parallel.py
|
from multiprocessing import Process, Event, Queue, JoinableQueue
import multiprocessing
import tqdm
import dill
import numpy as np
import csaf.system as csys
import csaf.config as cconf
import csaf.trace as ctc
from csaf import csaf_logger
def save_states_to_file(filename, states):
np.savetxt(filename, [val['plant'] for val in states], delimiter=",")
def load_states_from_file(filename, component_name):
x0s = np.loadtxt(filename, delimiter=",")
return [{component_name : initial_state} for initial_state in x0s]
def gen_fixed_states(bounds, component_name):
def sanity_check(bounds):
# sanity check
for b in bounds:
assert(len(b) == 1 or len(b) == 3)
if len(b) == 3:
# lower bound is always first
lower = b[0]
upper = b[1]
step = b[2]
assert(lower <= upper)
# the step is smaller than the bounds interval
assert(upper - lower > step)
def interpolate_bounds(lower, upper, step) -> np.ndarray:
iters = int((upper - lower)/step)
return np.linspace(lower, upper, iters)
sanity_check(bounds)
# create initial vector
x0 = np.array([b[0] for b in bounds])
x0s = [x0]
# iterate over bounds
for idx, b in enumerate(bounds):
# ignore static values
if len(b) == 1:
continue
vals = interpolate_bounds(b[0],b[1],b[2])
new_x0s = []
for x in x0s:
for val in vals:
new_x0 = x.copy()
# ignore the value that already exists
if new_x0[idx] == val:
continue
new_x0[idx] = val
new_x0s.append(new_x0)
x0s += new_x0s
return [{component_name : initial_state} for initial_state in x0s]
def gen_random_states(bounds, component_name, iterations):
def generate_single_random_state(bounds):
sample = np.random.rand(len(bounds))
ranges = np.array([b[1] - b[0] if len(b) == 2 else b[0] for b in bounds])
offset = np.array([- b[0] for b in bounds])
return sample * ranges - offset
return [{component_name : generate_single_random_state(bounds)} for _ in range(iterations)]
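# Usage sketch (added for illustration, not part of the original module). Each
# bound is a [low, high] pair sampled uniformly, matching gen_random_states above;
# the component name "plant" and the bounds are hypothetical.
def _example_random_states():
    bounds = [[0.0, 1.0], [-5.0, 5.0], [100.0, 200.0]]
    # Ten initial-state dictionaries of the form {"plant": array([...])}.
    return gen_random_states(bounds, "plant", iterations=10)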
class Worker(Process):
def __init__(self, evt, config, task_queue, result_queue, progress_queue=None):
super().__init__()
self._evt = evt
self._config = config
self.system = None
self._task_queue = task_queue
self._result_queue = result_queue
self._progress_queue = progress_queue
def run(self, on_finish=None):
self.system = csys.System.from_config(self._config)
self._evt.set()
while True:
next_task = self._task_queue.get()
if next_task is None:
self._task_queue.task_done()
break
answer = next_task(self.system)
self._task_queue.task_done()
self._result_queue.put(answer)
self.system.reset()
if self._progress_queue is not None:
self._progress_queue.put(True)
return
class Task(object):
def __init__(self, idx, system_attr, initial_states, *args, **kwargs):
self.idx = idx
self.system_attr = system_attr
self.args = args
self.kwargs = kwargs
self.states = initial_states
def __call__(self, system: csys.System):
for cname, cstate in self.states.items():
system.set_state(cname, cstate)
assert hasattr(system, self.system_attr)
try:
ret = getattr(system, self.system_attr)(*self.args, **self.kwargs)
answer = [self.idx, dill.dumps(ret), self.states]
except Exception as exc:
csaf_logger.warning(f"running {self.system_attr} failed for states {self.states}")
answer = [self.idx, exc, self.states]
return tuple(answer)
def __str__(self):
return f"id {self.idx} -- {self.system_attr}(args={self.args}, kwargs={self.kwargs})"
def run_workgroup(n_tasks, config, initial_states, *args, fname="simulate_tspan", show_status=True, **kwargs):
def progress_listener(q):
pbar = tqdm.tqdm(total = n_tasks)
for _ in iter(q.get, None):
pbar.update()
csaf_logger.info(f"starting a parallel run of {n_tasks} tasks over the method {fname}")
# Establish communication queues
tasks = JoinableQueue()
results = Queue()
progress = Queue()
# Start the progress bar
if show_status:
proc = Process(target=progress_listener, args=(progress,))
proc.start()
# Start workers
n_workers = min(n_tasks, multiprocessing.cpu_count() * 2)
workers = []
for idx in range(n_workers):
evt = Event()
w = Worker(evt, config, tasks, results, progress)
w.start()
evt.wait()
workers.append(w)
# Enqueue jobs
for idx in range(n_tasks):
t = Task(idx, fname, initial_states[idx], *args, **kwargs, show_status=False, return_passed=True)
tasks.put(t)
# Stop all workers
for _ in range(n_workers):
tasks.put(None)
# Wait for all of the tasks to finish
tasks.join()
if show_status:
progress.put(None)
proc.join()
# Start printing results
ret = [None] * n_tasks
while n_tasks:
result = results.get()
if not isinstance(result[1], Exception):
res = dill.loads(result[1])
ret[result[0]] = tuple([res[1], res[0], result[2]])
n_tasks -= 1
csaf_logger.info("parallel run finished")
ret = [val for val in ret if val is not None]
return ret
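# Usage sketch (added for illustration, not part of the original module). `my_conf`
# stands in for a csaf system configuration loaded elsewhere via csaf.config, and
# the (0.0, 10.0) time span forwarded to the default "simulate_tspan" method is an
# assumed example argument.
def _example_run_workgroup(my_conf):
    states = gen_random_states([[0.0, 1.0], [-1.0, 1.0]], "plant", iterations=8)
    # Positional arguments after `states` are forwarded to the simulated system's
    # simulate_tspan method by each worker's Task.
    return run_workgroup(len(states), my_conf, states, (0.0, 10.0))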
|
camera.py
|
"""camera.py
This code implements the Camera class, which encapsulates code to
handle IP CAM, USB webcam or the Jetson onboard camera. In
addition, this Camera class is further extended to take a video
file or an image file as input.
"""
import logging
import threading
import subprocess
import numpy as np
import cv2
# The following flag is used to control whether to use a GStreamer
# pipeline to open the USB webcam source. If set to False, we just open
# the webcam using the cv2.VideoCapture(index) machinery, i.e. relying
# on cv2's built-in function to capture images from the webcam.
USB_GSTREAMER = True
def add_camera_args(parser):
"""Add parser augument for camera options."""
parser.add_argument('--image', type=str, default=None,
help='image file name, e.g. dog.jpg')
parser.add_argument('--video', type=str, default=None,
help='video file name, e.g. traffic.mp4')
parser.add_argument('--video_looping', action='store_true',
help='loop around the video file [False]')
parser.add_argument('--rtsp', type=str, default=None,
help=('RTSP H.264 stream, e.g. '
'rtsp://admin:123456@192.168.1.64:554'))
parser.add_argument('--rtsp_latency', type=int, default=200,
help='RTSP latency in ms [200]')
parser.add_argument('--usb', type=int, default=None,
help='USB webcam device id (/dev/video?) [None]')
parser.add_argument('--gstr', type=str, default=None,
help='GStreamer string [None]')
parser.add_argument('--onboard', type=int, default=None,
help='Jetson onboard camera [None]')
parser.add_argument('--copy_frame', action='store_true',
help=('copy video frame internally [False]'))
parser.add_argument('--do_resize', action='store_true',
help=('resize image/video [False]'))
parser.add_argument('--width', type=int, default=640,
help='image width [640]')
parser.add_argument('--height', type=int, default=480,
help='image height [480]')
return parser
def open_cam_rtsp(uri, width, height, latency):
"""Open an RTSP URI (IP CAM)."""
gst_elements = str(subprocess.check_output('gst-inspect-1.0'))
if 'omxh264dec' in gst_elements:
# Use hardware H.264 decoder on Jetson platforms
gst_str = ('rtspsrc location={} latency={} ! '
'rtph264depay ! h264parse ! omxh264dec ! '
'nvvidconv ! '
'video/x-raw, width=(int){}, height=(int){}, '
'format=(string)BGRx ! videoconvert ! '
'appsink').format(uri, latency, width, height)
elif 'avdec_h264' in gst_elements:
# Otherwise try to use the software decoder 'avdec_h264'
# NOTE: in case resizing images is necessary, try adding
# a 'videoscale' into the pipeline
gst_str = ('rtspsrc location={} latency={} ! '
'rtph264depay ! h264parse ! avdec_h264 ! '
'videoconvert ! appsink').format(uri, latency)
else:
raise RuntimeError('H.264 decoder not found!')
return cv2.VideoCapture(gst_str, cv2.CAP_GSTREAMER)
def open_cam_usb(dev, width, height):
"""Open a USB webcam."""
if USB_GSTREAMER:
gst_str = ('v4l2src device=/dev/video{} ! '
'video/x-raw, width=(int){}, height=(int){} ! '
'videoconvert ! appsink').format(dev, width, height)
return cv2.VideoCapture(gst_str, cv2.CAP_GSTREAMER)
else:
return cv2.VideoCapture(dev)
def open_cam_gstr(gstr, width, height):
"""Open camera using a GStreamer string.
Example:
gstr = 'v4l2src device=/dev/video0 ! video/x-raw, width=(int){width}, height=(int){height} ! videoconvert ! appsink'
"""
gst_str = gstr.format(width=width, height=height)
return cv2.VideoCapture(gst_str, cv2.CAP_GSTREAMER)
def open_cam_onboard(width, height):
"""Open the Jetson onboard camera."""
gst_elements = str(subprocess.check_output('gst-inspect-1.0'))
if 'nvcamerasrc' in gst_elements:
# On versions of L4T prior to 28.1, you might need to add
# 'flip-method=2' into gst_str below.
gst_str = ('nvcamerasrc ! '
'video/x-raw(memory:NVMM), '
'width=(int)2592, height=(int)1458, '
'format=(string)I420, framerate=(fraction)30/1 ! '
'nvvidconv ! '
'video/x-raw, width=(int){}, height=(int){}, '
'format=(string)BGRx ! '
'videoconvert ! appsink').format(width, height)
elif 'nvarguscamerasrc' in gst_elements:
gst_str = ('nvarguscamerasrc ! '
'video/x-raw(memory:NVMM), '
'width=(int)1920, height=(int)1080, '
'format=(string)NV12, framerate=(fraction)30/1 ! '
'nvvidconv flip-method=2 ! '
'video/x-raw, width=(int){}, height=(int){}, '
'format=(string)BGRx ! '
'videoconvert ! appsink').format(width, height)
else:
raise RuntimeError('onboard camera source not found!')
return cv2.VideoCapture(gst_str, cv2.CAP_GSTREAMER)
def grab_img(cam):
"""This 'grab_img' function is designed to be run in the sub-thread.
Once started, this thread continues to grab a new image and put it
into the global 'img_handle', until 'thread_running' is set to False.
"""
while cam.thread_running:
_, cam.img_handle = cam.cap.read()
if cam.img_handle is None:
#logging.warning('Camera: cap.read() returns None...')
break
cam.thread_running = False
class Camera():
"""Camera class which supports reading images from theses video sources:
1. Image (jpg, png, etc.) file, repeating indefinitely
2. Video file
3. RTSP (IP CAM)
4. USB webcam
5. Jetson onboard camera
"""
def __init__(self, args):
self.args = args
self.is_opened = False
self.video_file = ''
self.video_looping = args.video_looping
self.thread_running = False
self.img_handle = None
self.copy_frame = args.copy_frame
self.do_resize = args.do_resize
self.img_width = args.width
self.img_height = args.height
self.cap = None
self.thread = None
self._open() # try to open the camera
def _open(self):
"""Open camera based on command line arguments."""
if self.cap is not None:
raise RuntimeError('camera is already opened!')
a = self.args
if a.image:
logging.info('Camera: using an image file %s' % a.image)
self.cap = 'image'
self.img_handle = cv2.imread(a.image)
if self.img_handle is not None:
if self.do_resize:
self.img_handle = cv2.resize(
self.img_handle, (a.width, a.height))
self.is_opened = True
self.img_height, self.img_width, _ = self.img_handle.shape
elif a.video:
logging.info('Camera: using a video file %s' % a.video)
self.video_file = a.video
self.cap = cv2.VideoCapture(a.video)
self._start()
elif a.rtsp:
logging.info('Camera: using RTSP stream %s' % a.rtsp)
self.cap = open_cam_rtsp(a.rtsp, a.width, a.height, a.rtsp_latency)
self._start()
elif a.usb is not None:
logging.info('Camera: using USB webcam /dev/video%d' % a.usb)
self.cap = open_cam_usb(a.usb, a.width, a.height)
self._start()
elif a.gstr is not None:
logging.info('Camera: using GStreamer string "%s"' % a.gstr)
self.cap = open_cam_gstr(a.gstr, a.width, a.height)
self._start()
elif a.onboard is not None:
logging.info('Camera: using Jetson onboard camera')
self.cap = open_cam_onboard(a.width, a.height)
self._start()
else:
raise RuntimeError('no camera type specified!')
def isOpened(self):
return self.is_opened
def _start(self):
if not self.cap.isOpened():
logging.warning('Camera: starting while cap is not opened!')
return
# Try to grab the 1st image and determine width and height
_, self.img_handle = self.cap.read()
if self.img_handle is None:
logging.warning('Camera: cap.read() returns no image!')
self.is_opened = False
return
self.is_opened = True
if self.video_file:
if not self.do_resize:
self.img_height, self.img_width, _ = self.img_handle.shape
else:
self.img_height, self.img_width, _ = self.img_handle.shape
# start the child thread if not using a video file source
# i.e. rtsp, usb or onboard
assert not self.thread_running
self.thread_running = True
self.thread = threading.Thread(target=grab_img, args=(self,))
self.thread.start()
def _stop(self):
if self.thread_running:
self.thread_running = False
#self.thread.join()
def read(self):
"""Read a frame from the camera object.
Returns None if the camera runs out of images or encounters an error.
"""
if not self.is_opened:
return None
if self.video_file:
_, img = self.cap.read()
if img is None:
logging.info('Camera: reaching end of video file')
if self.video_looping:
self.cap.release()
self.cap = cv2.VideoCapture(self.video_file)
_, img = self.cap.read()
if img is not None and self.do_resize:
img = cv2.resize(img, (self.img_width, self.img_height))
return img
elif self.cap == 'image':
return np.copy(self.img_handle)
else:
if self.copy_frame:
return self.img_handle.copy()
else:
return self.img_handle
def release(self):
self._stop()
try:
self.cap.release()
except:
pass
self.is_opened = False
def __del__(self):
self.release()
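# Usage sketch (added for illustration, not part of the original module): open a
# USB webcam through the argument parser defined above and read frames. The
# device id 0 is a hypothetical example.
def _example_camera_loop():
    import argparse
    parser = add_camera_args(argparse.ArgumentParser())
    args = parser.parse_args(['--usb', '0'])
    cam = Camera(args)
    try:
        while cam.isOpened():
            img = cam.read()
            if img is None:
                break
            # ... process `img` (a BGR numpy array) here ...
    finally:
        cam.release()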
|
object_storage_service_benchmark.py
|
# Copyright 2016 PerfKitBenchmarker Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Object (blob) Storage benchmark tests.
There are two categories of tests here: 1) tests based on CLI tools, and 2)
tests that use APIs to access storage provider.
For 1), we aim to simulate a typical use case of a common user of a storage
provider: uploading and downloading a set of files of different sizes from/to
a local directory.
For 2), we aim to measure more directly the performance of a storage provider
by accessing them via APIs. Here are the main scenarios covered in this
category:
a: Single byte object upload and download, measures latency.
b: List-after-write and list-after-update consistency measurement.
c: Single stream large object upload and download, measures throughput.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import datetime
import glob
import json
import logging
import os
import posixpath
import re
import threading
import time
import uuid
import numpy as np
from perfkitbenchmarker import configs
from perfkitbenchmarker import data
from perfkitbenchmarker import errors
from perfkitbenchmarker import flag_util
from perfkitbenchmarker import flags
from perfkitbenchmarker import object_storage_service
from perfkitbenchmarker import providers
from perfkitbenchmarker import sample
from perfkitbenchmarker import units
from perfkitbenchmarker import vm_util
from perfkitbenchmarker.providers.gcp import gcs
from perfkitbenchmarker.sample import PercentileCalculator # noqa
import six
from six.moves import range
from six.moves import zip
flags.DEFINE_enum('storage', providers.GCP,
[providers.GCP, providers.AWS,
providers.AZURE, providers.OPENSTACK],
'storage provider (GCP/AZURE/AWS/OPENSTACK) to use.')
flags.DEFINE_string('object_storage_region', None,
'Storage region for object storage benchmark.')
flags.DEFINE_string('object_storage_gcs_multiregion', None,
'Storage multiregion for GCS in object storage benchmark.')
flags.DEFINE_string('object_storage_storage_class', None,
'Storage class to use in object storage benchmark.')
flags.DEFINE_enum('object_storage_scenario', 'all',
['all', 'cli', 'api_data', 'api_namespace',
'api_multistream', 'api_multistream_writes',
'api_multistream_reads'],
'select all, or one particular scenario to run: \n'
'ALL: runs all scenarios. This is the default. \n'
'cli: runs the command line only scenario. \n'
'api_data: runs API based benchmarking for data paths. \n'
'api_namespace: runs API based benchmarking for namespace '
'operations. \n'
'api_multistream: runs API-based benchmarking with multiple '
'upload/download streams.\n'
'api_multistream_writes: runs API-based benchmarking with '
'multiple upload streams.\n'
'api_multistream_reads: runs API-based benchmarking with '
'multiple download streams.')
flags.DEFINE_string('object_storage_bucket_name', None,
'If set, the bucket will be created with this name')
flags.DEFINE_boolean('object_storage_apply_region_suffix_to_bucket_name', False,
'If set, the region will be appended to the bucket name.')
flags.DEFINE_enum('cli_test_size', 'normal',
['normal', 'large'],
'size of the cli tests. Normal means a mixture of various \n'
'object sizes up to 32MiB (see '
'data/cloud-storage-workload.sh). \n'
'Large means all objects are of at least 1GiB.')
flags.DEFINE_integer('object_storage_multistream_objects_per_stream', 1000,
'Number of objects to send and/or receive per stream. '
'Only applies to the api_multistream scenario.',
lower_bound=1)
flag_util.DEFINE_yaml('object_storage_object_sizes', '1KB',
'Size of objects to send and/or receive. Only applies to '
'the api_multistream scenario. Examples: 1KB, '
'{1KB: 50%, 10KB: 50%}')
flags.DEFINE_integer('object_storage_streams_per_vm', 10,
'Number of independent streams per VM. Only applies to '
'the api_multistream scenario.',
lower_bound=1)
flags.DEFINE_integer('object_storage_list_consistency_iterations', 200,
'Number of iterations to perform for the api_namespace '
'list consistency benchmark. This flag is mainly for '
'regression testing in the benchmarks. Reduce the number '
'to shorten the execution time of the api_namespace '
'scenario. However, to get useful metrics from the '
'api_namespace scenario, a high number of iterations '
'should be used (>=200).')
flags.DEFINE_enum('object_storage_object_naming_scheme', 'sequential_by_stream',
['sequential_by_stream',
'approximately_sequential'],
'How objects will be named. Only applies to the '
'api_multistream benchmark. '
'sequential_by_stream: object names from each stream '
'will be sequential, but different streams will have '
'different name prefixes. '
'approximately_sequential: object names from all '
'streams will roughly increase together.')
flags.DEFINE_string('object_storage_objects_written_file_prefix', None,
'If specified, the bucket and all of the objects will not '
'be deleted, and the list of object names will be written '
'to a file with the specified prefix in the following '
'format: <bucket>/<object>. This prefix can be passed to '
'this benchmark in a later run via the '
'object_storage_read_objects_prefix flag. Only valid for '
'the api_multistream and api_multistream_writes scenarios. '
'The filename is appended with the date and time so that '
'later runs can be given a prefix and a minimum age of '
'objects. The later run will then use the oldest objects '
'available or fail if there is no file with an old enough '
'date. The prefix is also appended with the region so that '
'later runs will read objects from the same region.')
flags.DEFINE_string('object_storage_read_objects_prefix', None,
'If specified, no new bucket or objects will be created. '
'Instead, the benchmark will read the objects listed in '
'a file with the specified prefix that was written some '
'number of hours before (as specified by '
'object_storage_read_objects_min_hours). Only valid for '
'the api_multistream_reads scenario.')
flags.DEFINE_integer('object_storage_read_objects_min_hours', 72, 'The minimum '
'number of hours from which to read objects that were '
'written on a previous run. Used in combination with '
'object_storage_read_objects_prefix.')
flags.DEFINE_boolean('object_storage_dont_delete_bucket', False,
'If True, the storage bucket won\'t be deleted. Useful '
'for running the api_multistream_reads scenario multiple '
'times against the same objects.')
flags.DEFINE_string('object_storage_worker_output', None,
'If set, the worker threads\' output will be written to '
'the path provided.')
flags.DEFINE_float('object_storage_latency_histogram_interval', None,
'If set, a latency histogram sample will be created with '
'buckets of the specified interval in seconds. Individual '
'histogram samples are created for each different object '
'size in the distribution, because it is easy to aggregate '
'the histograms during post-processing, but impossible to '
'go in the opposite direction.')
flags.DEFINE_boolean(
'record_individual_latency_samples', False,
'If set, record the latency of each download and upload '
'in its own sample.')
FLAGS = flags.FLAGS
BENCHMARK_INFO = {'name': 'object_storage_service',
'description':
'Object/blob storage service benchmarks. Specify '
'--object_storage_scenario '
'to select a set of sub-benchmarks to run. default is all.',
'scratch_disk': False,
'num_machines': 1}
BENCHMARK_NAME = 'object_storage_service'
BENCHMARK_CONFIG = """
object_storage_service:
description: >
Object/blob storage service benchmarks. Specify
--object_storage_scenario
to select a set of sub-benchmarks to run. default is all.
vm_groups:
default:
vm_spec: *default_single_core
vm_count: null
flags:
gcloud_scopes: https://www.googleapis.com/auth/devstorage.read_write
"""
DATA_FILE = 'cloud-storage-workload.sh'
# size of all data used in the CLI tests.
DATA_SIZE_IN_BYTES = 256.1 * 1024 * 1024
DATA_SIZE_IN_MBITS = 8 * DATA_SIZE_IN_BYTES / 1000 / 1000
LARGE_DATA_SIZE_IN_BYTES = 3 * 1024 * 1024 * 1024
LARGE_DATA_SIZE_IN_MBITS = 8 * LARGE_DATA_SIZE_IN_BYTES / 1000 / 1000
API_TEST_SCRIPT = 'object_storage_api_tests.py'
API_TEST_SCRIPTS_DIR = 'object_storage_api_test_scripts'
# Files that will be sent to the remote VM as a package for API test script.
API_TEST_SCRIPT_PACKAGE_FILES = ['__init__.py',
'object_storage_interface.py',
'azure_flags.py',
's3_flags.py']
SCRIPT_DIR = '/tmp/run'
DOWNLOAD_DIRECTORY = posixpath.join(SCRIPT_DIR, 'temp')
# Various constants to name the result metrics.
THROUGHPUT_UNIT = 'Mbps'
LATENCY_UNIT = 'seconds'
NA_UNIT = 'na'
PERCENTILES_LIST = ['p0.1', 'p1', 'p5', 'p10', 'p50', 'p90', 'p95', 'p99',
'p99.9', 'average', 'stddev']
UPLOAD_THROUGHPUT_VIA_CLI = 'upload throughput via cli Mbps'
DOWNLOAD_THROUGHPUT_VIA_CLI = 'download throughput via cli Mbps'
CLI_TEST_ITERATION_COUNT = 100
LARGE_CLI_TEST_ITERATION_COUNT = 20
CLI_TEST_FAILURE_TOLERANCE = 0.05
# Azure does not parallelize operations in its CLI tools. We have to
# do the uploads or downloads of 100 test files sequentially; each
# iteration takes a very long time, so we run only 3 iterations.
CLI_TEST_ITERATION_COUNT_AZURE = 3
SINGLE_STREAM_THROUGHPUT = 'single stream %s throughput Mbps'
ONE_BYTE_LATENCY = 'one byte %s latency'
LIST_CONSISTENCY_SCENARIOS = ['list-after-write', 'list-after-update']
LIST_CONSISTENCY_PERCENTAGE = 'consistency percentage'
LIST_INCONSISTENCY_WINDOW = 'inconsistency window'
LIST_LATENCY = 'latency'
CONTENT_REMOVAL_RETRY_LIMIT = 5
# Sometimes, even when a bucket is completely empty, the service provider will
# refuse to remove the bucket with a "BucketNotEmpty" error for up to 1 hour.
# We keep retrying until we reach the one-hour limit; this wait is necessary
# for some providers.
BUCKET_REMOVAL_RETRY_LIMIT = 120
RETRY_WAIT_INTERVAL_SECONDS = 30
# GCS has special region handling until we can remove it :(
DEFAULT_GCS_MULTIREGION = 'us'
# Keys for flag names and metadata values
OBJECT_STORAGE_REGION = 'object_storage_region'
REGIONAL_BUCKET_LOCATION = 'regional_bucket_location'
OBJECT_STORAGE_GCS_MULTIREGION = 'object_storage_gcs_multiregion'
GCS_MULTIREGION_LOCATION = 'gcs_multiregion_location'
DEFAULT = 'default'
# This accounts for the overhead of running RemoteCommand() on a VM.
MULTISTREAM_DELAY_PER_VM = 5.0 * units.second
# We wait this long for each stream. Note that this is multiplied by
# the number of streams per VM, not the total number of streams.
MULTISTREAM_DELAY_PER_STREAM = 0.1 * units.second
# And add a constant factor for PKB-side processing
MULTISTREAM_DELAY_CONSTANT = 10.0 * units.second
# The multistream write benchmark writes a file in the VM's /tmp with
# the objects it has written, which is used by the multistream read
# benchmark. This is the filename.
OBJECTS_WRITTEN_FILE = 'pkb-objects-written'
# If the gap between different stream starts and ends is above a
# certain proportion of the total time, we log a warning because we
# are throwing out a lot of information. We also put the warning in
# the sample metadata.
MULTISTREAM_STREAM_GAP_THRESHOLD = 0.2
# The API test script uses different names for providers than this
# script :(
STORAGE_TO_API_SCRIPT_DICT = {
providers.GCP: 'GCS',
providers.AWS: 'S3',
providers.AZURE: 'AZURE'}
_SECONDS_PER_HOUR = 60 * 60
def GetConfig(user_config):
return configs.LoadConfig(BENCHMARK_CONFIG, user_config, BENCHMARK_NAME)
# Raised when we fail to remove a bucket or its content after many retries.
# TODO: add a new class of error "ObjectStorageError" to errors.py and remove
# this one.
class BucketRemovalError(Exception):
pass
class NotEnoughResultsError(Exception):
pass
class ColdDataError(Exception):
"""Exception indicating that the cold object data does not exist."""
def _JsonStringToPercentileResults(results, json_input, metric_name,
metric_unit, metadata):
"""This function parses a percentile result string in Json format.
Args:
results: The final result set to put result in.
json_input: The input in Json format about percentiles.
metric_name: Name of the metric.
metric_unit: Unit of the metric.
metadata: The metadata to be included.
"""
result = json.loads(json_input)
for percentile in PERCENTILES_LIST:
results.append(sample.Sample(
('%s %s') % (metric_name, percentile),
float(result[percentile]),
metric_unit,
metadata))
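# Usage sketch (added for illustration, not part of the original module): the test
# script reports percentiles as a JSON dict keyed by the entries of
# PERCENTILES_LIST; the metric name and the 0.01 values here are hypothetical.
def _example_percentile_parse():
    results = []
    json_input = json.dumps({p: 0.01 for p in PERCENTILES_LIST})
    _JsonStringToPercentileResults(results, json_input, 'one byte upload latency',
                                   LATENCY_UNIT, {'example': True})
    return results  # one sample.Sample per entry in PERCENTILES_LIST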
def _GetClientLibVersion(vm, library_name):
"""This function returns the version of client lib installed on a vm.
Args:
vm: the VM to get the client lib version from.
library_name: the name of the client lib.
Returns:
The version string of the client.
"""
version, _ = vm.RemoteCommand('pip show %s |grep Version' % library_name)
logging.info('%s client lib version is: %s', library_name, version)
return version
def MultiThreadStartDelay(num_vms, threads_per_vm):
"""Find how long in the future we can simultaneously start threads on VMs.
Args:
num_vms: number of VMs to start threads on.
threads_per_vm: number of threads to start on each VM.
Returns:
A units.Quantity of time such that if we want to start
threads_per_vm threads on num_vms VMs, we can start the threads
sequentially, tell each of them to sleep for this number of
seconds, and we expect that we will be able to start the last
thread before the delay has finished.
"""
return (
MULTISTREAM_DELAY_CONSTANT +
MULTISTREAM_DELAY_PER_VM * num_vms +
MULTISTREAM_DELAY_PER_STREAM * threads_per_vm)
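# Worked example (added for illustration): with the delay constants above, 2 VMs
# running 10 streams each get 10s + 5s * 2 + 0.1s * 10 = 21 seconds of start delay.
def _example_start_delay():
    return MultiThreadStartDelay(2, 10).m_as('second')  # -> 21.0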
def _ProcessMultiStreamResults(start_times, latencies, sizes, operation,
all_sizes, results, metadata=None):
"""Read and process results from the api_multistream worker process.
Results will be reported per-object size and combined for all
objects.
Args:
start_times: a list of numpy arrays. Operation start times, as
POSIX timestamps.
latencies: a list of numpy arrays. Operation durations, in seconds.
sizes: a list of numpy arrays. Object sizes used in each
operation, in bytes.
operation: 'upload' or 'download'. The operation the results are from.
all_sizes: a sequence of integers. all object sizes in the
distribution used, in bytes.
results: a list to append Sample objects to.
metadata: dict. Base sample metadata
"""
num_streams = FLAGS.object_storage_streams_per_vm * FLAGS.num_vms
assert len(start_times) == num_streams
assert len(latencies) == num_streams
assert len(sizes) == num_streams
if metadata is None:
metadata = {}
metadata['num_streams'] = num_streams
metadata['objects_per_stream'] = (
FLAGS.object_storage_multistream_objects_per_stream)
metadata['object_naming'] = FLAGS.object_storage_object_naming_scheme
num_records = sum((len(start_time) for start_time in start_times))
logging.info('Processing %s total operation records', num_records)
stop_times = [start_time + latency
for start_time, latency in zip(start_times, latencies)]
last_start_time = max((start_time[0] for start_time in start_times))
first_stop_time = min((stop_time[-1] for stop_time in stop_times))
# Compute how well our synchronization worked
first_start_time = min((start_time[0] for start_time in start_times))
last_stop_time = max((stop_time[-1] for stop_time in stop_times))
start_gap = last_start_time - first_start_time
stop_gap = last_stop_time - first_stop_time
if ((start_gap + stop_gap) / (last_stop_time - first_start_time) <
MULTISTREAM_STREAM_GAP_THRESHOLD):
logging.info(
'First stream started %s seconds before last stream started', start_gap)
logging.info(
'Last stream ended %s seconds after first stream ended', stop_gap)
else:
logging.warning(
'Difference between first and last stream start/end times was %s and '
'%s, which is more than %s of the benchmark time %s.',
start_gap, stop_gap, MULTISTREAM_STREAM_GAP_THRESHOLD,
(last_stop_time - first_start_time))
metadata['stream_gap_above_threshold'] = True
# Find the indexes in each stream where all streams are active,
# following Python's [inclusive, exclusive) index convention.
active_start_indexes = []
for start_time in start_times:
for i in range(len(start_time)):
if start_time[i] >= last_start_time:
active_start_indexes.append(i)
break
active_stop_indexes = []
for stop_time in stop_times:
for i in range(len(stop_time) - 1, -1, -1):
if stop_time[i] <= first_stop_time:
active_stop_indexes.append(i + 1)
break
active_latencies = [
latencies[i][active_start_indexes[i]:active_stop_indexes[i]]
for i in range(num_streams)]
active_sizes = [
sizes[i][active_start_indexes[i]:active_stop_indexes[i]]
for i in range(num_streams)]
all_active_latencies = np.concatenate(active_latencies)
all_active_sizes = np.concatenate(active_sizes)
# Don't publish the full distribution in the metadata because doing
# so might break regexp-based parsers that assume that all metadata
# values are simple Python objects. However, do add an
# 'object_size_B' metadata field even for the full results because
# searching metadata is easier when all records with the same metric
# name have the same set of metadata fields.
distribution_metadata = metadata.copy()
if len(all_sizes) == 1:
distribution_metadata['object_size_B'] = all_sizes[0]
else:
distribution_metadata['object_size_B'] = 'distribution'
latency_prefix = 'Multi-stream %s latency' % operation
logging.info('Processing %s multi-stream %s results for the full '
'distribution.', len(all_active_latencies), operation)
_AppendPercentilesToResults(
results,
all_active_latencies,
latency_prefix,
LATENCY_UNIT,
distribution_metadata)
# Publish by-size and full-distribution stats even if there's only
# one size in the distribution, because it simplifies postprocessing
# of results.
for size in all_sizes:
this_size_metadata = metadata.copy()
this_size_metadata['object_size_B'] = size
logging.info('Processing multi-stream %s results for object size %s',
operation, size)
_AppendPercentilesToResults(
results,
all_active_latencies[all_active_sizes == size],
latency_prefix,
LATENCY_UNIT,
this_size_metadata)
# Record samples for individual downloads and uploads if requested.
if FLAGS.record_individual_latency_samples:
for latency in all_active_latencies[all_active_sizes == size]:
results.append(
sample.Sample('%s individual' % latency_prefix, latency,
LATENCY_UNIT, this_size_metadata))
# Build the object latency histogram if user requested it
if FLAGS.object_storage_latency_histogram_interval:
histogram_interval = FLAGS.object_storage_latency_histogram_interval
hist_latencies = [[l for l, s in zip(*w_l_s) if s == size]
for w_l_s in zip(latencies, sizes)]
max_latency = max([max(l) for l in hist_latencies])
# Note that int() floors for us
num_histogram_buckets = int(max_latency / histogram_interval) + 1
histogram_buckets = [0 for _ in range(num_histogram_buckets)]
for worker_latencies in hist_latencies:
for latency in worker_latencies:
# Note that int() floors for us
histogram_buckets[int(latency / histogram_interval)] += 1
histogram_str = ','.join([str(c) for c in histogram_buckets])
histogram_metadata = this_size_metadata.copy()
histogram_metadata['interval'] = histogram_interval
histogram_metadata['histogram'] = histogram_str
results.append(sample.Sample(
'Multi-stream %s latency histogram' % operation,
0.0, 'histogram', metadata=histogram_metadata))
# Throughput metrics
total_active_times = [np.sum(latency) for latency in active_latencies]
active_durations = [stop_times[i][active_stop_indexes[i] - 1] -
start_times[i][active_start_indexes[i]]
for i in range(num_streams)]
total_active_sizes = [np.sum(size) for size in active_sizes]
# 'net throughput (with gap)' is computed by taking the throughput
# for each stream (total # of bytes transmitted / (stop_time -
# start_time)) and then adding the per-stream throughputs. 'net
# throughput' is the same, but replacing (stop_time - start_time)
# with the sum of all of the operation latencies for that thread, so
# we only divide by the time that stream was actually transmitting.
results.append(sample.Sample(
'Multi-stream ' + operation + ' net throughput',
np.sum((size / active_time * 8
for size, active_time
in zip(total_active_sizes, total_active_times))),
'bit / second', metadata=distribution_metadata))
results.append(sample.Sample(
'Multi-stream ' + operation + ' net throughput (with gap)',
np.sum((size / duration * 8
for size, duration in zip(total_active_sizes, active_durations))),
'bit / second', metadata=distribution_metadata))
results.append(sample.Sample(
'Multi-stream ' + operation + ' net throughput (simplified)',
sum([np.sum(size) for size in sizes]) /
(last_stop_time - first_start_time) * 8,
'bit / second', metadata=distribution_metadata))
# QPS metrics
results.append(sample.Sample(
'Multi-stream ' + operation + ' QPS (any stream active)',
num_records / (last_stop_time - first_start_time), 'operation / second',
metadata=distribution_metadata))
results.append(sample.Sample(
'Multi-stream ' + operation + ' QPS (all streams active)',
len(all_active_latencies) / (first_stop_time - last_start_time),
'operation / second', metadata=distribution_metadata))
# Statistics about benchmarking overhead
gap_time = sum((active_duration - active_time
for active_duration, active_time
in zip(active_durations, total_active_times)))
results.append(sample.Sample(
'Multi-stream ' + operation + ' total gap time',
gap_time, 'second', metadata=distribution_metadata))
results.append(sample.Sample(
'Multi-stream ' + operation + ' gap time proportion',
gap_time / (first_stop_time - last_start_time) * 100.0,
'percent', metadata=distribution_metadata))
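# Worked numeric sketch (added for illustration, not part of the benchmark): how
# the two throughput variants above differ for two hypothetical streams that each
# move 100 MB, transmitting for 8 s and 9 s inside a common 10 s active window.
def _example_net_throughput():
    sizes = [100e6, 100e6]      # bytes moved per stream
    active_times = [8.0, 9.0]   # seconds spent actually transmitting
    durations = [10.0, 10.0]    # seconds between stream start and stop
    net = sum(s / t * 8 for s, t in zip(sizes, active_times))
    net_with_gap = sum(s / d * 8 for s, d in zip(sizes, durations))
    return net, net_with_gap    # net > net_with_gap, since gaps are excluded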
def _DistributionToBackendFormat(dist):
"""Convert an object size distribution to the format needed by the backend.
Args:
dist: a distribution, given as a dictionary mapping size to
frequency. Size will be a string with a quantity and a
unit. Frequency will be a percentage, including a '%'
character. dist may also be a string, in which case it represents
a single object size which applies to 100% of objects.
Returns:
A dictionary giving an object size distribution. Sizes will be
integers representing bytes. Frequencies will be floating-point
numbers in [0,100], representing percentages.
Raises:
ValueError if dist is not a valid distribution.
"""
if isinstance(dist, dict):
val = {flag_util.StringToBytes(size):
flag_util.StringToRawPercent(frequency)
for size, frequency in six.iteritems(dist)}
else:
# We allow compact notation for point distributions. For instance,
# '1KB' is an abbreviation for '{1KB: 100%}'.
val = {flag_util.StringToBytes(dist): 100.0}
# I'm requiring exact addition to 100, which can always be satisfied
# with integer percentages. If we want to allow general decimal
# percentages, all we have to do is replace this equality check with
# approximate equality.
if sum(six.itervalues(val)) != 100.0:
raise ValueError("Frequencies in %s don't add to 100%%!" % dist)
return val
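# Usage sketch (added for illustration): both accepted input forms. Sizes are
# converted to integer byte counts by flag_util.StringToBytes and frequencies to
# floats that must sum to exactly 100.
def _example_size_distribution():
    point = _DistributionToBackendFormat('1KB')                      # one size, 100%
    mixed = _DistributionToBackendFormat({'1KB': '50%', '10KB': '50%'})
    return point, mixed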
class APIScriptCommandBuilder(object):
"""Builds command lines for the API test script.
Attributes:
test_script_path: the path to the API test script on the remote machine.
storage: the storage provider to use, in the format expected by
the test script.
service: the ObjectStorageService object corresponding to the
storage provider.
"""
def __init__(self, test_script_path, storage, service):
self.test_script_path = test_script_path
self.storage = storage
self.service = service
def BuildCommand(self, args):
"""Build a command string for the API test script.
Args:
args: a list of strings. These will become space-separated
arguments to the test script.
Returns:
A string that can be passed to vm.RemoteCommand.
"""
cmd_parts = [
self.test_script_path,
'--storage_provider=%s' % self.storage
] + args + self.service.APIScriptArgs()
if FLAGS.object_storage_storage_class is not None:
cmd_parts += ['--object_storage_class',
FLAGS.object_storage_storage_class]
return ' '.join(cmd_parts)
class UnsupportedProviderCommandBuilder(APIScriptCommandBuilder):
"""A dummy command builder for unsupported providers.
When a provider isn't supported by the API test script yet, we
create this command builder for them. It will let us run the CLI
benchmark on that provider, but if the user tries to run an API
benchmark, it will throw an error.
Attributes:
provider: the name of the unsupported provider.
"""
def __init__(self, provider):
self.provider = provider
def BuildCommand(self, args):
raise NotImplementedError('API tests are not supported on provider %s.' %
self.provider)
def OneByteRWBenchmark(results, metadata, vm, command_builder,
service, bucket_name):
"""A benchmark for small object latency.
Args:
results: the results array to append to.
metadata: a dictionary of metadata to add to samples.
vm: the VM to run the benchmark on.
command_builder: an APIScriptCommandBuilder.
service: an ObjectStorageService.
bucket_name: the primary bucket to benchmark.
Raises:
ValueError if an unexpected test outcome is found from the API
test script.
"""
one_byte_rw_cmd = command_builder.BuildCommand([
'--bucket=%s' % bucket_name,
'--scenario=OneByteRW'])
_, raw_result = vm.RemoteCommand(one_byte_rw_cmd)
logging.info('OneByteRW raw result is %s', raw_result)
for up_and_down in ['upload', 'download']:
search_string = 'One byte %s - (.*)' % up_and_down
result_string = re.findall(search_string, raw_result)
sample_name = ONE_BYTE_LATENCY % up_and_down
if len(result_string) > 0:
_JsonStringToPercentileResults(results,
result_string[0],
sample_name,
LATENCY_UNIT,
metadata)
else:
raise ValueError('Unexpected test outcome from OneByteRW api test: '
'%s.' % raw_result)
def SingleStreamThroughputBenchmark(results, metadata, vm, command_builder,
service, bucket_name):
"""A benchmark for large object throughput.
Args:
results: the results array to append to.
metadata: a dictionary of metadata to add to samples.
vm: the VM to run the benchmark on.
command_builder: an APIScriptCommandBuilder.
service: an ObjectStorageService.
bucket_name: the primary bucket to benchmark.
Raises:
ValueError if an unexpected test outcome is found from the API
test script.
"""
single_stream_throughput_cmd = command_builder.BuildCommand([
'--bucket=%s' % bucket_name,
'--scenario=SingleStreamThroughput'])
_, raw_result = vm.RemoteCommand(single_stream_throughput_cmd)
logging.info('SingleStreamThroughput raw result is %s', raw_result)
for up_and_down in ['upload', 'download']:
search_string = 'Single stream %s throughput in Bps: (.*)' % up_and_down
result_string = re.findall(search_string, raw_result)
sample_name = SINGLE_STREAM_THROUGHPUT % up_and_down
if not result_string:
raise ValueError('Unexpected test outcome from '
'SingleStreamThroughput api test: %s.' % raw_result)
# Convert Bytes per second to Mega bits per second
# We use MB (10^6) to be consistent with network
# bandwidth convention.
result = json.loads(result_string[0])
for percentile in PERCENTILES_LIST:
results.append(sample.Sample(
('%s %s') % (sample_name, percentile),
8 * float(result[percentile]) / 1000 / 1000,
THROUGHPUT_UNIT,
metadata))
def ListConsistencyBenchmark(results, metadata, vm, command_builder,
service, bucket_name):
"""A benchmark for bucket list consistency.
Args:
results: the results array to append to.
metadata: a dictionary of metadata to add to samples.
vm: the VM to run the benchmark on.
command_builder: an APIScriptCommandBuilder.
service: an ObjectStorageService.
bucket_name: the primary bucket to benchmark.
Raises:
ValueError if an unexpected test outcome is found from the API
test script.
"""
list_consistency_cmd = command_builder.BuildCommand([
'--bucket=%s' % bucket_name,
'--iterations=%d' % FLAGS.object_storage_list_consistency_iterations,
'--scenario=ListConsistency'])
_, raw_result = vm.RemoteCommand(list_consistency_cmd)
logging.info('ListConsistency raw result is %s', raw_result)
for scenario in LIST_CONSISTENCY_SCENARIOS:
metric_name = '%s %s' % (scenario, LIST_CONSISTENCY_PERCENTAGE)
search_string = '%s: (.*)' % metric_name
result_string = re.findall(search_string, raw_result)
if not result_string:
raise ValueError(
'Cannot get percentage from ListConsistency test.')
results.append(sample.Sample(
metric_name,
float(result_string[0]),
NA_UNIT,
metadata))
# Parse the list inconsistency window if there is any.
metric_name = '%s %s' % (scenario, LIST_INCONSISTENCY_WINDOW)
search_string = '%s: (.*)' % metric_name
result_string = re.findall(search_string, raw_result)
_JsonStringToPercentileResults(results,
result_string[0],
metric_name,
LATENCY_UNIT,
metadata)
# Also report the list latency. These latencies are from the lists
# that were consistent.
metric_name = '%s %s' % (scenario, LIST_LATENCY)
search_string = '%s: (.*)' % metric_name
result_string = re.findall(search_string, raw_result)
_JsonStringToPercentileResults(results,
result_string[0],
metric_name,
LATENCY_UNIT,
metadata)
def LoadWorkerOutput(output):
"""Load output from worker processes to our internal format.
Args:
output: list of strings. The stdouts of all worker processes.
Returns:
A tuple of start_time, latency, size. Each of these is a list of
numpy arrays, one array per worker process. start_time[i],
latency[i], and size[i] together form a table giving the start
time, latency, and size (bytes transmitted or received) of all
send/receive operations for worker i.
start_time holds POSIX timestamps, stored as np.float64. latency
holds times in seconds, stored as np.float64. size holds sizes in
bytes, stored as np.int64.
Example:
start_time[i] latency[i] size[i]
------------- ---------- -------
0.0 0.5 100
1.0 0.7 200
2.3 0.3 100
Raises:
AssertionError, if an individual worker's input includes
overlapping operations, or operations that don't move forward in
time, or if the input list isn't in stream number order.
"""
start_times = []
latencies = []
sizes = []
for worker_out in output:
json_out = json.loads(worker_out)
for stream in json_out:
assert len(stream['start_times']) == len(stream['latencies'])
assert len(stream['latencies']) == len(stream['sizes'])
start_times.append(np.asarray(stream['start_times'], dtype=np.float64))
latencies.append(np.asarray(stream['latencies'], dtype=np.float64))
sizes.append(np.asarray(stream['sizes'], dtype=np.int64))
return start_times, latencies, sizes
def _RunMultiStreamProcesses(vms, command_builder, cmd_args, streams_per_vm):
"""Runs all of the multistream read or write processes and doesn't return
until they complete.
Args:
vms: the VMs to run the benchmark on.
command_builder: an APIScriptCommandBuilder.
cmd_args: arguments for the command_builder.
streams_per_vm: number of threads per VM.
Returns:
A list of stdout strings, one per VM.
"""
output = [None] * len(vms)
def RunOneProcess(vm_idx):
logging.info('Running on VM %s.', vm_idx)
cmd = command_builder.BuildCommand(
cmd_args + ['--stream_num_start=%s' % (vm_idx * streams_per_vm)])
out, _ = vms[vm_idx].RobustRemoteCommand(cmd, should_log=False)
output[vm_idx] = out
# Each vm/process has a thread managing it.
threads = [
threading.Thread(target=RunOneProcess, args=(vm_idx,))
for vm_idx in range(len(vms))]
for thread in threads:
thread.start()
logging.info('Started %s processes.', len(vms))
# Wait for the threads to finish
for thread in threads:
thread.join()
logging.info('All processes complete.')
return output
def _DatetimeNow():
"""Returns datetime.datetime.now()."""
return datetime.datetime.now()
def _ColdObjectsWrittenFilename():
"""Generates a name for the objects_written_file.
Returns:
The name of the objects_written_file if it should be created, or None.
"""
if FLAGS.object_storage_objects_written_file_prefix:
# Note this format is required by _ColdObjectsWrittenFileAgeHours.
datetime_suffix = _DatetimeNow().strftime('%Y%m%d-%H%M')
return '%s-%s-%s-%s' % (
FLAGS.object_storage_objects_written_file_prefix,
FLAGS.object_storage_region,
uuid.uuid4(), # Add a UUID to support parallel runs that upload data.
datetime_suffix)
return None
def _ColdObjectsWrittenFileAgeHours(filename):
"""Determines the age in hours of an objects_written_file.
Args:
filename: The name of the file.
Returns:
The age of the file in hours (based on the name), or None.
"""
# Parse the year, month, day, hour, and minute from the filename based on the
# way it is written in _ColdObjectsWrittenFilename.
match = re.search(r'(\d\d\d\d)(\d\d)(\d\d)-(\d\d)(\d\d)$', filename)
if not match:
return None
year, month, day, hour, minute = (int(item) for item in match.groups())
write_datetime = datetime.datetime(year, month, day, hour, minute)
write_timedelta = _DatetimeNow() - write_datetime
return write_timedelta.total_seconds() / _SECONDS_PER_HOUR
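# Example (added for illustration): a file written with prefix "cold", region
# "us-west1" and timestamp 2021-03-01 12:30 is named like
# "cold-us-west1-<uuid>-20210301-1230"; the age check below parses the trailing
# timestamp. The concrete name here is hypothetical.
def _example_cold_file_age():
    return _ColdObjectsWrittenFileAgeHours('cold-us-west1-1234abcd-20210301-1230')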
def _MultiStreamOneWay(results, metadata, vms, command_builder,
service, bucket_name, operation):
"""Measures multi-stream latency and throughput in one direction.
Args:
results: the results array to append to.
metadata: a dictionary of metadata to add to samples.
vms: the VMs to run the benchmark on.
command_builder: an APIScriptCommandBuilder.
service: The provider's ObjectStorageService
bucket_name: the primary bucket to benchmark.
operation: 'upload' or 'download'
Raises:
ValueError if an unexpected test outcome is found from the API
test script.
"""
objects_written_file = posixpath.join(vm_util.VM_TMP_DIR,
OBJECTS_WRITTEN_FILE)
size_distribution = _DistributionToBackendFormat(
FLAGS.object_storage_object_sizes)
logging.info('Distribution %s, backend format %s.',
FLAGS.object_storage_object_sizes, size_distribution)
streams_per_vm = FLAGS.object_storage_streams_per_vm
start_time = (
time.time() +
MultiThreadStartDelay(FLAGS.num_vms, streams_per_vm).m_as('second'))
logging.info('Start time is %s', start_time)
cmd_args = [
'--bucket=%s' % bucket_name,
'--objects_per_stream=%s' % (
FLAGS.object_storage_multistream_objects_per_stream),
'--num_streams=%s' % streams_per_vm,
'--start_time=%s' % start_time,
'--objects_written_file=%s' % objects_written_file]
if operation == 'upload':
cmd_args += [
'--object_sizes="%s"' % size_distribution,
'--object_naming_scheme=%s' % FLAGS.object_storage_object_naming_scheme,
'--scenario=MultiStreamWrite']
elif operation == 'download':
cmd_args += ['--scenario=MultiStreamRead']
else:
raise Exception('Value of operation must be \'upload\' or \'download\'. '
'Value is: \'' + operation + '\'')
output = _RunMultiStreamProcesses(vms, command_builder, cmd_args,
streams_per_vm)
start_times, latencies, sizes = LoadWorkerOutput(output)
if FLAGS.object_storage_worker_output:
with open(FLAGS.object_storage_worker_output, 'w') as out_file:
out_file.write(json.dumps(output))
_ProcessMultiStreamResults(start_times, latencies, sizes, operation,
list(six.iterkeys(size_distribution)), results,
metadata=metadata)
# Write the objects written file if the flag is set and this is an upload
objects_written_path_local = _ColdObjectsWrittenFilename()
if operation == 'upload' and objects_written_path_local is not None:
# Get the objects written from all the VMs
# Note these are JSON lists with the following format:
# [[object1_name, object1_size],[object2_name, object2_size],...]
outs = vm_util.RunThreaded(
lambda vm: vm.RemoteCommand('cat ' + objects_written_file), vms)
maybe_storage_account = ''
maybe_resource_group = ''
if FLAGS.storage == 'Azure':
maybe_storage_account = '"azure_storage_account": "%s", ' % \
service.storage_account.name
maybe_resource_group = '"azure_resource_group": "%s", ' % \
service.resource_group.name
# Merge the objects written from all the VMs into a single string
objects_written_json = \
'{%s%s"bucket_name": "%s", "objects_written": %s}' % \
(maybe_storage_account, maybe_resource_group, bucket_name,
'[' + ','.join([out for out, _ in outs]) + ']')
# Write the file
with open(objects_written_path_local, 'w') as objects_written_file_local:
objects_written_file_local.write(objects_written_json)
def MultiStreamRWBenchmark(results, metadata, vms, command_builder,
service, bucket_name):
"""A benchmark for multi-stream read/write latency and throughput.
Args:
results: the results array to append to.
metadata: a dictionary of metadata to add to samples.
vms: the VMs to run the benchmark on.
command_builder: an APIScriptCommandBuilder.
service: The provider's ObjectStorageService
bucket_name: the primary bucket to benchmark.
Raises:
ValueError if an unexpected test outcome is found from the API
test script.
"""
logging.info('Starting multi-stream write test on %s VMs.', len(vms))
_MultiStreamOneWay(results, metadata, vms, command_builder, service,
bucket_name, 'upload')
logging.info('Finished multi-stream write test. Starting '
'multi-stream read test.')
_MultiStreamOneWay(results, metadata, vms, command_builder, service,
bucket_name, 'download')
logging.info('Finished multi-stream read test.')
def MultiStreamWriteBenchmark(results, metadata, vms, command_builder,
service, bucket_name):
"""A benchmark for multi-stream write latency and throughput.
Args:
results: the results array to append to.
metadata: a dictionary of metadata to add to samples.
vms: the VMs to run the benchmark on.
command_builder: an APIScriptCommandBuilder.
service: The provider's ObjectStorageService
bucket_name: the primary bucket to benchmark.
Raises:
ValueError if an unexpected test outcome is found from the API
test script.
"""
logging.info('Starting multi-stream write test on %s VMs.', len(vms))
_MultiStreamOneWay(results, metadata, vms, command_builder, service,
bucket_name, 'upload')
logging.info('Finished multi-stream write test.')
def MultiStreamReadBenchmark(results, metadata, vms, command_builder,
service, bucket_name, read_objects):
"""A benchmark for multi-stream read latency and throughput.
Args:
results: the results array to append to.
metadata: a dictionary of metadata to add to samples.
vms: the VMs to run the benchmark on.
command_builder: an APIScriptCommandBuilder.
service: The provider's ObjectStorageService
bucket_name: the primary bucket to benchmark.
read_objects: List of lists of [object_name, object_size]. In the outermost
list, each element corresponds to a VM's worker process.
Raises:
ValueError if an unexpected test outcome is found from the API
test script.
"""
logging.info('Starting multi-stream read test on %s VMs.', len(vms))
assert read_objects is not None, (
'api_multistream_reads scenario requires the '
'object_storage_read_objects_prefix flag to be set.')
# Send over the objects written file
try:
# Write the per-VM objects-written-files
assert len(read_objects) == len(vms), (
'object_storage_read_objects_prefix file specified requires exactly '
'%d VMs, but %d were provisioned.' % (len(read_objects), len(vms)))
for vm, vm_objects_written in zip(vms, read_objects):
# Note that each file is written with a unique name so that parallel runs
# don't overwrite the same local file. They are pushed to the VM to a file
# named OBJECTS_WRITTEN_FILE.
tmp_objects_written_path = os.path.join(vm_util.GetTempDir(),
'%s-%s' % (OBJECTS_WRITTEN_FILE,
vm.name))
with open(tmp_objects_written_path, 'w') as objects_written_file:
objects_written_file.write(json.dumps(vm_objects_written))
vm.PushFile(tmp_objects_written_path,
posixpath.join(vm_util.VM_TMP_DIR, OBJECTS_WRITTEN_FILE))
except Exception as e:
raise Exception('Failed to upload the objects written files to the VMs: '
'%s' % e)
_MultiStreamOneWay(results, metadata, vms, command_builder, service,
bucket_name, 'download')
logging.info('Finished multi-stream read test.')
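# Illustrative only -- the object names and sizes below are invented. The
# `read_objects` argument consumed above has one entry per VM (see the zip with
# `vms`), each a list of [object_name, object_size] pairs, roughly:
#
#   read_objects = [
#       [['pkb_obj_0_0', 1048576], ['pkb_obj_0_1', 1048576]],  # objects for VM 0
#       [['pkb_obj_1_0', 1048576]],                            # objects for VM 1
#   ]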
def CheckPrerequisites(benchmark_config):
"""Verifies that the required resources are present.
Args:
benchmark_config: Benchmark config to verify.
Raises:
perfkitbenchmarker.data.ResourceNotFound: On missing resource.
perfkitbenchmarker.errors.Setup.InvalidFlagConfigurationError: On invalid
flags.
"""
del benchmark_config
data.ResourcePath(DATA_FILE)
if FLAGS.object_storage_apply_region_suffix_to_bucket_name:
if not FLAGS.object_storage_region:
raise errors.Setup.InvalidFlagConfigurationError(
'Please specify --object_storage_region if using '
'--object_storage_apply_region_suffix_to_bucket_name.')
def _AppendPercentilesToResults(output_results, input_results, metric_name,
metric_unit, metadata):
# PercentileCalculator will (correctly) raise an exception on empty
# input, but an empty input list makes semantic sense here.
if len(input_results) == 0:
return
percentiles = PercentileCalculator(input_results)
for percentile in PERCENTILES_LIST:
output_results.append(sample.Sample(('%s %s') % (metric_name, percentile),
percentiles[percentile],
metric_unit,
metadata))
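# For reference (numbers invented): with metric_name='upload throughput' and
# metric_unit='Mbps', the helper above appends one sample per entry of
# PERCENTILES_LIST, e.g. sample.Sample('upload throughput p50', 92.4, 'Mbps',
# metadata), sample.Sample('upload throughput p99', 71.1, 'Mbps', metadata), ...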
def CLIThroughputBenchmark(output_results, metadata, vm, command_builder,
service, bucket):
"""A benchmark for CLI tool throughput.
We upload a set of files from a local directory to the bucket and download
them back via the provider's CLI tools, and observe the throughput.
Args:
output_results: the results array to append to.
metadata: a dictionary of metadata to add to samples.
vm: the VM to run the benchmark on.
command_builder: an APIScriptCommandBuilder.
service: an ObjectStorageService.
bucket: the primary bucket to benchmark.
Raises:
NotEnoughResultsError: if we failed too many times to upload or download.
"""
data_directory = '/tmp/run/data'
# The real solution to the iteration count issue is dynamically
# choosing the number of iterations based on how long they
# take. This will work for now, though.
if FLAGS.storage == providers.AZURE:
iteration_count = CLI_TEST_ITERATION_COUNT_AZURE
elif FLAGS.cli_test_size == 'normal':
iteration_count = CLI_TEST_ITERATION_COUNT
else:
iteration_count = LARGE_CLI_TEST_ITERATION_COUNT
# The CLI-based tests require some provisioning on the VM first.
vm.RemoteCommand(
'cd /tmp/run/; bash cloud-storage-workload.sh %s' % FLAGS.cli_test_size)
# CLI tool based tests.
cli_upload_results = []
cli_download_results = []
if FLAGS.cli_test_size == 'normal':
data_size_in_mbits = DATA_SIZE_IN_MBITS
file_names = ['file-%s.dat' % i for i in range(100)]
else:
data_size_in_mbits = LARGE_DATA_SIZE_IN_MBITS
file_names = ['file_large_3gib.dat']
for _ in range(iteration_count):
try:
service.EmptyBucket(bucket)
except Exception:
pass
try:
_, res = service.CLIUploadDirectory(vm, data_directory,
file_names, bucket)
except errors.VirtualMachine.RemoteCommandError:
logging.info('failed to upload, skipping this iteration.')
continue
throughput = data_size_in_mbits / vm_util.ParseTimeCommandResult(res)
logging.info('cli upload throughput %f', throughput)
cli_upload_results.append(throughput)
try:
vm.RemoveFile(posixpath.join(DOWNLOAD_DIRECTORY, '*'))
except Exception:
pass
try:
_, res = service.CLIDownloadBucket(vm, bucket,
file_names, DOWNLOAD_DIRECTORY)
except errors.VirtualMachine.RemoteCommandError:
logging.info('failed to download, skipping this iteration.')
continue
throughput = data_size_in_mbits / vm_util.ParseTimeCommandResult(res)
logging.info('cli download throughput %f', throughput)
cli_download_results.append(throughput)
expected_successes = iteration_count * (1 - CLI_TEST_FAILURE_TOLERANCE)
if (len(cli_download_results) < expected_successes or
len(cli_upload_results) < expected_successes):
raise NotEnoughResultsError('Failed to complete the required number of '
'iterations.')
# Report various percentiles.
metrics_prefix = ''
if FLAGS.cli_test_size != 'normal':
metrics_prefix = '%s ' % FLAGS.cli_test_size
_AppendPercentilesToResults(output_results,
cli_upload_results,
'%s%s' % (metrics_prefix,
UPLOAD_THROUGHPUT_VIA_CLI),
THROUGHPUT_UNIT,
metadata)
_AppendPercentilesToResults(output_results,
cli_download_results,
'%s%s' % (metrics_prefix,
DOWNLOAD_THROUGHPUT_VIA_CLI),
THROUGHPUT_UNIT,
metadata)
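# Worked example of the per-iteration throughput computed above (numbers
# invented): if the test data set is 1000 Mbits and ParseTimeCommandResult()
# extracts 12.5 seconds from the timed CLI command, the recorded value is
# 1000 / 12.5 = 80 Mbits/s for that iteration.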
def PrepareVM(vm, service):
vm.Install('pip')
vm.RemoteCommand('sudo pip install absl-py')
vm.RemoteCommand('sudo pip install pyyaml')
vm.Install('openssl')
# Prepare data on the VM: create a run directory under the temporary
# directory and open up its permissions.
vm.RemoteCommand('sudo mkdir -p ' + SCRIPT_DIR)
vm.RemoteCommand('sudo chmod 777 ' + SCRIPT_DIR)
vm.RemoteCommand('sudo mkdir -p ' + DOWNLOAD_DIRECTORY)
vm.RemoteCommand('sudo chmod 777 ' + DOWNLOAD_DIRECTORY)
remote_package_dir = posixpath.join(SCRIPT_DIR, 'providers')
vm.RemoteCommand('sudo mkdir -p ' + remote_package_dir)
vm.RemoteCommand('sudo chmod 777 ' + remote_package_dir)
file_path = data.ResourcePath(DATA_FILE)
vm.PushFile(file_path, SCRIPT_DIR)
# push the test script
script_path = data.ResourcePath(
os.path.join(API_TEST_SCRIPTS_DIR, API_TEST_SCRIPT))
vm.PushFile(script_path, '/tmp/run/')
# push the package dependencies of the test script
for file_name in API_TEST_SCRIPT_PACKAGE_FILES + service.APIScriptFiles():
path = data.ResourcePath(
os.path.join(API_TEST_SCRIPTS_DIR, file_name))
logging.info('Uploading %s to %s', path, vm)
vm.PushFile(path, remote_package_dir)
service.PrepareVM(vm)
def CleanupVM(vm, service):
service.CleanupVM(vm)
vm.RemoteCommand('/usr/bin/yes | sudo pip uninstall absl-py')
vm.RemoteCommand('sudo rm -rf /tmp/run/')
objects_written_file = posixpath.join(vm_util.VM_TMP_DIR,
OBJECTS_WRITTEN_FILE)
vm.RemoteCommand('rm -f %s' % objects_written_file)
def Prepare(benchmark_spec):
"""Prepare vm with cloud provider tool and prepare vm with data file.
Args:
benchmark_spec: The benchmark specification. Contains all data that is
required to run the benchmark.
Raises:
ColdDataError: If this benchmark is reading cold data, but the data isn't
cold enough (as configured by object_storage_read_objects_min_hours).
"""
# We always want to clean up server-side state when an exception happens.
benchmark_spec.always_call_cleanup = True
# Load the objects to read file if specified
benchmark_spec.read_objects = None
if FLAGS.object_storage_read_objects_prefix is not None:
# By globbing, we choose an arbitrary file that is old enough, in case there
# happens to be more than one.
search_prefix = '%s-%s*' % (
FLAGS.object_storage_read_objects_prefix,
FLAGS.object_storage_region)
read_objects_filenames = glob.glob(search_prefix)
logging.info('Considering object files %s*: %s', search_prefix,
read_objects_filenames)
for filename in read_objects_filenames:
age_hours = _ColdObjectsWrittenFileAgeHours(filename)
if age_hours and age_hours > FLAGS.object_storage_read_objects_min_hours:
read_objects_filename = filename
break
else:
raise ColdDataError(
'Object data older than %d hours does not exist. Current cold data '
'files include the following: %s' % (
FLAGS.object_storage_read_objects_min_hours,
read_objects_filenames))
with open(read_objects_filename) as read_objects_file:
# Format of json structure is:
# {"bucket_name": <bucket_name>,
# ... any other provider-specific context needed
# "objects_written": <objects_written_array>}
benchmark_spec.read_objects = json.loads(read_objects_file.read())
benchmark_spec.read_objects_filename = read_objects_filename
benchmark_spec.read_objects_age_hours = age_hours
# When this benchmark reads these files, the data will be deleted. Delete
# the file that specifies the data too.
if not FLAGS.object_storage_dont_delete_bucket:
os.remove(read_objects_filename)
assert benchmark_spec.read_objects is not None, (
'Failed to read the file specified by '
'--object_storage_read_objects_prefix')
# Load the provider and its object storage service
providers.LoadProvider(FLAGS.storage)
# Determine the bucket name.
if benchmark_spec.read_objects is not None:
# Using an existing bucket
bucket_name = benchmark_spec.read_objects['bucket_name']
if FLAGS.object_storage_bucket_name is not None:
logging.warning('--object_storage_bucket_name ignored because '
'--object_storage_read_objects was specified')
else:
# Use a new bucket (or the name of a specified bucket).
bucket_name = FLAGS.object_storage_bucket_name or 'pkb%s' % FLAGS.run_uri
if FLAGS.object_storage_apply_region_suffix_to_bucket_name:
# Avoid non-alphanumeric characters in the region as bucket names on some
# clouds cannot contain non-alphanumeric characters.
bucket_name = '%s%s' % (bucket_name,
re.sub(r'[\W_]', '', FLAGS.object_storage_region))
service = object_storage_service.GetObjectStorageClass(FLAGS.storage)()
if (FLAGS.storage == 'Azure' and
FLAGS.object_storage_read_objects_prefix is not None):
# Storage provider is azure and we are reading existing objects.
# Need to prepare the ObjectStorageService with the existing storage
# account and resource group associated with the bucket containing our
# objects
service.PrepareService(
FLAGS.object_storage_region,
# On Azure, use an existing storage account if we
# are reading existing objects
(benchmark_spec.read_objects['azure_storage_account'],
benchmark_spec.read_objects['azure_resource_group']))
elif FLAGS.storage == 'Azure' and FLAGS.object_storage_bucket_name:
# We are using a bucket that may exist from a previous run. We should use
# a storage account and resource group for this bucket based on the same
# name (for consistency).
service.PrepareService(
FLAGS.object_storage_region,
# The storage account must not exceed 24 characters.
(bucket_name[:24], bucket_name + '-resource-group'),
try_to_create_storage_account_and_resource_group=True)
else:
service.PrepareService(FLAGS.object_storage_region)
vms = benchmark_spec.vms
vm_util.RunThreaded(lambda vm: PrepareVM(vm, service), vms)
# Make the bucket.
if benchmark_spec.read_objects is None:
# Fail if we cannot create the bucket, as long as the bucket name was not
# set via a flag. If it was set by a flag, we still try to create the
# bucket but do not fail if it already exists. This supports running the
# benchmark against the same bucket multiple times.
raise_on_bucket_creation_failure = not FLAGS.object_storage_bucket_name
if FLAGS.storage == 'GCP' and FLAGS.object_storage_gcs_multiregion:
# Use a GCS multiregional bucket
multiregional_service = gcs.GoogleCloudStorageService()
multiregional_service.PrepareService(FLAGS.object_storage_gcs_multiregion
or DEFAULT_GCS_MULTIREGION)
multiregional_service.MakeBucket(
bucket_name, raise_on_failure=raise_on_bucket_creation_failure)
else:
# Use a regular bucket
service.MakeBucket(
bucket_name, raise_on_failure=raise_on_bucket_creation_failure)
# Save the service and the bucket name for later
benchmark_spec.service = service
benchmark_spec.bucket_name = bucket_name
def Run(benchmark_spec):
"""Run storage benchmark and publish results.
Args:
benchmark_spec: The benchmark specification. Contains all data that is
required to run the benchmark.
Returns:
Total throughput in the form of tuple. The tuple contains
the sample metric (string), value (float), unit (string).
"""
logging.info('Start benchmarking object storage service, '
'scenario is %s, storage provider is %s.',
FLAGS.object_storage_scenario, FLAGS.storage)
service = benchmark_spec.service
bucket_name = benchmark_spec.bucket_name
metadata = {'storage_provider': FLAGS.storage}
vms = benchmark_spec.vms
if FLAGS[OBJECT_STORAGE_REGION].present:
metadata[REGIONAL_BUCKET_LOCATION] = FLAGS.object_storage_region
else:
metadata[REGIONAL_BUCKET_LOCATION] = DEFAULT
if FLAGS[OBJECT_STORAGE_GCS_MULTIREGION].present:
metadata[GCS_MULTIREGION_LOCATION] = FLAGS.object_storage_gcs_multiregion
else:
metadata[GCS_MULTIREGION_LOCATION] = DEFAULT
metadata.update(service.Metadata(vms[0]))
results = []
test_script_path = '/tmp/run/%s' % API_TEST_SCRIPT
try:
command_builder = APIScriptCommandBuilder(
test_script_path, STORAGE_TO_API_SCRIPT_DICT[FLAGS.storage], service)
except KeyError:
command_builder = UnsupportedProviderCommandBuilder(FLAGS.storage)
for name, benchmark in [('cli', CLIThroughputBenchmark),
('api_data', OneByteRWBenchmark),
('api_data', SingleStreamThroughputBenchmark),
('api_namespace', ListConsistencyBenchmark)]:
if FLAGS.object_storage_scenario in {name, 'all'}:
benchmark(results, metadata, vms[0], command_builder,
service, bucket_name)
# MultiStreamRW and MultiStreamWrite support multiple VMs, so they have a
# slightly different calling convention than the others.
for name, benchmark in [('api_multistream', MultiStreamRWBenchmark),
('api_multistream_writes',
MultiStreamWriteBenchmark)]:
if FLAGS.object_storage_scenario in {name, 'all'}:
benchmark(results, metadata, vms, command_builder, benchmark_spec.service,
bucket_name)
# MultiStreamRead has the additional 'read_objects' parameter
if FLAGS.object_storage_scenario in {'api_multistream_reads', 'all'}:
metadata['cold_objects_filename'] = benchmark_spec.read_objects_filename
metadata['cold_objects_age_hours'] = benchmark_spec.read_objects_age_hours
MultiStreamReadBenchmark(results, metadata, vms, command_builder,
benchmark_spec.service, bucket_name,
benchmark_spec.read_objects['objects_written'])
# Clear the bucket if we're not saving the objects for later
# This is needed for long running tests, or else the objects would just pile
# up after each run.
keep_bucket = (FLAGS.object_storage_objects_written_file_prefix is not None or
FLAGS.object_storage_dont_delete_bucket)
if not keep_bucket:
service.EmptyBucket(bucket_name)
return results
def Cleanup(benchmark_spec):
"""Clean up storage bucket/container and clean up vm.
Args:
benchmark_spec: The benchmark specification. Contains all data that is
required to run the benchmark.
"""
service = benchmark_spec.service
bucket_name = benchmark_spec.bucket_name
vms = benchmark_spec.vms
vm_util.RunThreaded(lambda vm: CleanupVM(vm, service), vms)
# Only clean up bucket if we're not saving the objects for a later run
keep_bucket = (FLAGS.object_storage_objects_written_file_prefix is not None or
FLAGS.object_storage_dont_delete_bucket)
if not keep_bucket:
service.DeleteBucket(bucket_name)
service.CleanupService()
|
sendImg.py
|
import time, pickle, requests, threading
from picamera import PiCamera
from picamera.array import PiRGBArray
class sendImg:
def __init__(self):
self.count = 0
# camera initialisation
self.camera = PiCamera()
self.camera.exposure_mode = "sports"
self.camera.resolution = (640, 480)
self.output = PiRGBArray(self.camera)
# start camera preview and pause briefly so the sensor can warm up
self.camera.start_preview()
time.sleep(2)
# this function takes a picture when commanded
def takePic(self):
self.camera.capture(self.output, 'bgr')
frame = self.output.array
self.output.truncate(0)
self.count += 1
data = pickle.dumps(frame)
# send to Laptop via HTTP POST
r = requests.post("http://192.168.16.133:8123", data=data) #static IP
print("Image", self.count, "sent")
|
monitors.py
|
"""
Common threading utils for anchore engine services.
"""
import time
import threading
from anchore_engine.subsys import logger
# generic monitor_func implementation
click = 0
running = False
last_run = 0
monitor_thread = None
def default_monitor_func(**kwargs):
"""
Generic monitor thread function for invoking tasks defined in a monitor dict
:param kwargs:
:return:
"""
global click, running, last_run
my_monitors = kwargs['monitors']
monitor_threads = kwargs['monitor_threads']
servicename = kwargs['servicename']
timer = int(time.time())
if click < 5:
click = click + 1
logger.debug("service ("+str(servicename)+") starting in: " + str(5 - click))
return True
if round(time.time() - last_run) < kwargs['kick_timer']:
logger.spew(
"timer hasn't kicked yet: " + str(round(time.time() - last_run)) + " : " + str(kwargs['kick_timer']))
return True
try:
running = True
last_run = time.time()
# handle setting the cycle timers based on configuration
for monitor_name in list(my_monitors.keys()):
if not my_monitors[monitor_name]['initialized']:
# first time
if 'cycle_timers' in kwargs and monitor_name in kwargs['cycle_timers']:
try:
the_cycle_timer = my_monitors[monitor_name]['cycle_timer']
min_cycle_timer = my_monitors[monitor_name]['min_cycle_timer']
max_cycle_timer = my_monitors[monitor_name]['max_cycle_timer']
config_cycle_timer = int(kwargs['cycle_timers'][monitor_name])
if config_cycle_timer < 0:
the_cycle_timer = abs(int(config_cycle_timer))
elif config_cycle_timer < min_cycle_timer:
logger.warn("configured cycle timer for handler ("+str(monitor_name)+") is less than the allowed min ("+str(min_cycle_timer)+") - using allowed min")
the_cycle_timer = min_cycle_timer
elif config_cycle_timer > max_cycle_timer:
logger.warn("configured cycle timer for handler ("+str(monitor_name)+") is greater than the allowed max ("+str(max_cycle_timer)+") - using allowed max")
the_cycle_timer = max_cycle_timer
else:
the_cycle_timer = config_cycle_timer
my_monitors[monitor_name]['cycle_timer'] = the_cycle_timer
except Exception as err:
logger.warn("exception setting custom cycle timer for handler ("+str(monitor_name)+") - using default")
my_monitors[monitor_name]['initialized'] = True
# handle the thread (re)starters here
for monitor_name in list(my_monitors.keys()):
start_thread = False
if monitor_name not in monitor_threads:
start_thread = True
else:
if not monitor_threads[monitor_name].is_alive():
logger.debug("thread stopped - restarting: " + str(monitor_name))
monitor_threads[monitor_name].join()
start_thread = True
if start_thread:
monitor_threads[monitor_name] = threading.Thread(target=my_monitors[monitor_name]['handler'], args=my_monitors[monitor_name]['args'], kwargs={'mythread': my_monitors[monitor_name]})
logger.debug("starting up monitor_thread: " + str(monitor_name))
monitor_threads[monitor_name].start()
except Exception as err:
logger.error(str(err))
finally:
running = False
return True
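# Minimal standalone sketch (not called by the code above) of the cycle-timer
# rule implemented in default_monitor_func(): a negative configured value forces
# its absolute value, anything else is clamped to [min_cycle_timer, max_cycle_timer].
def _example_clamp_cycle_timer(configured, min_cycle_timer, max_cycle_timer):
    configured = int(configured)
    if configured < 0:
        return abs(configured)
    return max(min_cycle_timer, min(configured, max_cycle_timer))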
def monitor(*args, **kwargs):
global monitor_thread
try:
donew = False
if monitor_thread:
if monitor_thread.is_alive():
logger.spew("MON: thread still running")
else:
logger.spew("MON: thread stopped running")
donew = True
monitor_thread.join()
logger.spew("MON: thread joined: isAlive=" + str(monitor_thread.isAlive()))
else:
logger.spew("MON: no thread")
donew = True
if donew:
logger.spew("MON: starting")
monitor_thread = threading.Thread(target=default_monitor_func, kwargs=kwargs)
monitor_thread.start()
else:
logger.spew("MON: skipping")
except Exception as err:
logger.warn("MON thread start exception: " + str(err))
|
test_autograd.py
|
# Owner(s): ["module: autograd"]
import contextlib
import gc
import io
import math
import os
import random
import sys
import tempfile
import threading
import time
import unittest
import uuid
import warnings
import operator
import subprocess
from copy import deepcopy
from collections import OrderedDict
from itertools import product
from operator import mul
from functools import reduce, partial
import torch
from torch import nn
from torch._six import inf, nan
from torch.autograd.function import once_differentiable
from torch.autograd.profiler import (profile, record_function, emit_nvtx)
from torch.autograd.profiler_util import (_format_time, EventList, FunctionEvent, FunctionEventAvg)
from torch.utils.checkpoint import checkpoint
from torch.testing import make_tensor
from torch.testing._internal.common_cuda import TEST_CUDA
from torch.testing._internal.common_utils import (
TestCase, run_tests, skipIfNoLapack, slowTest, IS_WINDOWS, IS_MACOS,
disable_gc, gradcheck, gradgradcheck, parametrize,
instantiate_parametrized_tests, skipIfMps)
from torch.autograd import Variable, Function, detect_anomaly, kineto_available
from torch.autograd.function import InplaceFunction
import torch.autograd.forward_ad as fwAD
from torch.testing._internal.common_methods_invocations import mask_not_all_zeros
from torch.testing._internal.common_device_type import (instantiate_device_type_tests, skipCUDAIfRocm,
onlyCPU, onlyCUDA, dtypes, dtypesIfCUDA,
deviceCountAtLeast, skipMeta, dtypesIfMPS)
from torch.testing._internal.common_dtype import floating_types_and
from torch.testing._internal.logging_tensor import no_dispatch
import pickle
def graph_desc(fn):
if fn is None:
return 'None'
result = type(fn).__name__ + '('
next_functions = fn.next_functions
for next_fn, _ in next_functions:
result += graph_desc(next_fn)
result += ', '
if next_functions:
result = result[:-2]
return result + ')'
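# For reference: graph_desc() renders a grad_fn graph as nested constructor-style
# text. For example, with x and y both requiring grad, graph_desc((x + y).grad_fn)
# produces something like 'AddBackward0(AccumulateGrad(), AccumulateGrad())'
# (the exact node names can vary across torch versions).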
class TestAutograd(TestCase):
def test_tensor_grad_warnings(self):
dummy = torch.empty(1)
with warnings.catch_warnings(record=True) as w:
# Accessing .grad on leaf
dummy.requires_grad_()
foo = dummy.grad
self.assertEqual(len(w), 0)
# Accessing .grad on non-leaf
dummy = dummy.clone()
foo = dummy.grad
self.assertEqual(len(w), 1)
# Accessing .grad on non-leaf that retains gradients
dummy.retain_grad()
foo = dummy.grad
self.assertEqual(len(w), 1)
def _function_test(self, cls):
x = torch.randn(5, 5, requires_grad=True)
y = torch.randn(5, 5, requires_grad=True)
result = cls.apply(x, 2, y)
go = torch.ones((), requires_grad=True)
result.sum().backward(go, create_graph=True)
self.assertEqual(x.grad, y + torch.ones(5, 5))
self.assertEqual(y.grad, x + torch.ones(5, 5) * 2)
self.assertIsNotNone(x.grad.grad_fn)
self.assertIsNotNone(y.grad.grad_fn)
return x, y
def test_function(self):
class MyFunction(Function):
@staticmethod
def forward(ctx, tensor1, pyscalar, tensor2):
ctx.pyscalar = pyscalar
ctx.save_for_backward(tensor1, tensor2)
return tensor1 + pyscalar * tensor2 + tensor1 * tensor2
@staticmethod
def backward(ctx, grad_output):
var1, var2 = ctx.saved_tensors
# NOTE: self is the test case here
self.assertIsInstance(var1, torch.Tensor)
self.assertIsInstance(var2, torch.Tensor)
self.assertIsInstance(grad_output, torch.Tensor)
return (grad_output + grad_output * var2, None,
grad_output * ctx.pyscalar + grad_output * var1)
x, y = self._function_test(MyFunction)
x_grad_desc = graph_desc(x.grad.grad_fn)
y_grad_desc = graph_desc(y.grad.grad_fn)
self.assertExpected(x_grad_desc, "x_grad_desc")
self.assertExpected(y_grad_desc, "y_grad_desc")
def test_once_differentiable(self):
class MyFunction(Function):
@staticmethod
def forward(ctx, tensor1, pyscalar, tensor2):
ctx.pyscalar = pyscalar
ctx.save_for_backward(tensor1, tensor2)
return tensor1 + pyscalar * tensor2 + tensor1 * tensor2
@staticmethod
@once_differentiable
def backward(ctx, grad_output):
self.assertFalse(torch.is_grad_enabled())
t1, t2 = ctx.saved_tensors
return (grad_output + grad_output * t2, None,
grad_output * ctx.pyscalar + grad_output * t1)
x, y = self._function_test(MyFunction)
self.assertEqual(graph_desc(x.grad.grad_fn),
'CopyBackwards(None, Error(AccumulateGrad(), None, AccumulateGrad()))')
self.assertEqual(graph_desc(y.grad.grad_fn),
'CopyBackwards(None, Error(AccumulateGrad(), None, AccumulateGrad()))')
def test_function_returns_input(self):
class MyFunction(Function):
@staticmethod
def forward(ctx, x):
return x
@staticmethod
def backward(ctx, grad):
return grad * 2
for shape in [(1,), ()]:
v = torch.ones(shape, requires_grad=True)
MyFunction.apply(v).backward()
self.assertEqual(v.grad, torch.full(shape, 2.))
with torch.no_grad():
v.grad.zero_()
MyFunction.apply(v.clone()).backward()
self.assertEqual(v.grad, torch.full(shape, 2.))
def test_function_returns_undefined_tensor(self):
class MyFunction(Function):
@staticmethod
def forward(ctx, x):
return x * 2
@staticmethod
def backward(ctx, grad):
return None
# Test that undefined tensors returned from a custom backward function
# are propagated as undefined and not as tensors full of zeros
x = torch.ones(1, requires_grad=True)
MyFunction.apply(x).backward()
self.assertIsNone(x.grad)
MyFunction.apply(x ** 2).backward()
self.assertIsNone(x.grad)
MyFunction.apply(x).sum().backward()
self.assertIsNone(x.grad)
self.assertIsNone(torch.autograd.grad(MyFunction.apply(x), x, allow_unused=True)[0])
def test_materialize_grads(self):
class MyFunction(Function):
@staticmethod
def forward(ctx, x):
return x
@staticmethod
def backward(ctx, grad):
self.assertEqual(grad, torch.zeros(1))
return grad
x = torch.ones(1, requires_grad=True)
torch._C._functions.UndefinedGrad()(MyFunction.apply(x)).backward()
def test_dont_materialize_grads(self):
class MyFunction(Function):
@staticmethod
def forward(ctx, x):
ctx.set_materialize_grads(False)
return x
@staticmethod
def backward(ctx, grad):
self.assertIsNone(grad)
return grad
x = torch.ones(1, requires_grad=True)
torch._C._functions.UndefinedGrad()(MyFunction.apply(x)).backward()
def test_legacy_function_deprecation_exception(self):
# Trigger exception
class MyFunction(Function):
def forward(self, x):
return x
def backward(self, grad_output):
return grad_output
# Check exception occurs
with self.assertRaisesRegex(
RuntimeError,
'Legacy autograd function with non-static forward method is deprecated'):
MyFunction()(torch.randn(3, 4))
class SimulateBackwardError(Function):
@staticmethod
def forward(ctx, input):
return input.clone()
@staticmethod
@once_differentiable
def backward(ctx, input):
raise Exception("Simulate error on backward pass")
def test_custom_function_exception(self):
t1 = torch.rand((3, 3), requires_grad=True)
t2 = torch.rand((3, 3), requires_grad=True)
tmp = (t1 + t2) * (t1 + t2)
t3 = TestAutograd.SimulateBackwardError.apply(tmp)
with self.assertRaisesRegex(Exception, "Simulate error on backward pass"):
t3.sum().backward()
def test_custom_function_non_tensor_inputs_outputs(self):
class MyFunction(Function):
@staticmethod
def forward(ctx, t1, t2, scale, t3):
t4 = t1 + t2 * t3
t5 = t1 * t2 + t3
t4 *= scale
t5 *= scale
# Save scale
ctx.scale = scale
ctx.save_for_backward(t1, t2, t3)
return scale, t4, None, True, t5, "bar", t1
@staticmethod
@once_differentiable
def backward(ctx, *grads):
# Verify grads
self.assertEqual(7, len(grads))
self.assertIsNone(grads[0])
self.assertIsNone(grads[2])
self.assertIsNone(grads[3])
self.assertIsNone(grads[5])
scale = ctx.scale
var1, var2, var3 = ctx.saved_tensors
return (
grads[1] * scale + grads[4] * var2 * scale + grads[6],
grads[1] * var3 * scale + grads[4] * var1 * scale,
None,
grads[1] * var2 * scale + grads[4] * scale,
)
t1 = torch.rand(10, dtype=torch.double, requires_grad=True)
t2 = torch.rand(10, dtype=torch.double, requires_grad=True)
t3 = torch.rand(10, dtype=torch.double)
scale = random.randint(0, 10)
res = MyFunction.apply(t1, t2, scale, t3)
self.assertEqual(scale, res[0])
self.assertEqual((t1 + t2 * t3) * scale, res[1])
self.assertEqual(None, res[2])
self.assertEqual(True, res[3])
self.assertEqual((t1 * t2 + t3) * scale, res[4])
self.assertEqual("bar", res[5])
self.assertEqual(t1, res[6])
# Validate running backward.
torch.autograd.backward([res[1].sum(), res[4].sum(), res[6].sum()])
self.assertIsNotNone(t1.grad)
self.assertIsNotNone(t2.grad)
self.assertIsNone(t3.grad)
# Test gradcheck
def foo(t1, t2, t3):
res = MyFunction.apply(t1, t2, scale, t3)
return res[1], res[4], res[6]
gradcheck(foo, (t1, t2, t3))
def test_custom_function_no_tensors(self):
class MyFunction(Function):
@staticmethod
def forward(ctx, t1, t2, scale, t3):
t4 = t1 + t2 * t3
t5 = t1 * t2 + t3
t4 *= scale
t5 *= scale
return scale, t4, None, True, t5, "bar", t1
@staticmethod
@once_differentiable
def backward(ctx, *args):
return (args[0], args[1], None, args[2])
t1 = random.random()
t2 = random.random()
t3 = random.random()
scale = random.randint(0, 10)
res = MyFunction.apply(t1, t2, scale, t3)
self.assertEqual(scale, res[0])
self.assertEqual((t1 + t2 * t3) * scale, res[1])
self.assertEqual(None, res[2])
self.assertEqual(True, res[3])
self.assertEqual((t1 * t2 + t3) * scale, res[4])
self.assertEqual("bar", res[5])
self.assertEqual(t1, res[6])
def test_invalid_gradients(self):
class MyFunction(Function):
@staticmethod
def forward(ctx, x):
return x * 2
@staticmethod
def backward(ctx, grad_output):
return torch.randn(10, dtype=torch.float)
with self.assertRaisesRegex(RuntimeError, 'expected shape'):
input = torch.randn(5, 5, dtype=torch.float, requires_grad=True)
MyFunction.apply(input).sum().backward()
def test_unrelated_inputs(self):
# test to ensure grad(grad)check runs successfully even if there is an
# unrelated (but differentiable) input
def my_function(x, y):
return x * x
x = torch.rand(10, dtype=torch.double, requires_grad=True)
y = torch.rand(10, dtype=torch.double, requires_grad=True)
gradcheck(my_function, (x, y))
gradgradcheck(my_function, (x, y))
def test_not_implemented_grad(self):
a = torch.rand(2, requires_grad=True)
# if grad for nextafter ends up being implemented, this should be changed
y = torch.nextafter(a, a).sum()
with self.assertRaisesRegex(
NotImplementedError,
'the derivative for .* is not implemented'):
y.backward()
def test_not_implemented_fwad(self):
x = torch.randn(3)
v = torch.rand(3)
with fwAD.dual_level():
dual_x = fwAD.make_dual(x, v)
err_msg = r"Trying to use forward AD with .* that does not support it"
hint_msg = "Running forward AD for an OP that does not implement it should raise a NotImplementedError"
with self.assertRaisesRegex(NotImplementedError, err_msg, msg=hint_msg):
# if forward AD ends up being implemented for torch.igamma, choose a different op
torch.igamma(dual_x, dual_x)
def test_accumulate_grad(self):
grad_output = torch.ones(5, 5)
def compute_grad(create_graph):
x = torch.randn(5, 5, requires_grad=True)
y = x + 2
y.backward(grad_output, retain_graph=True)
x_grad = x.grad
x_grad_clone = x.grad.clone()
y.backward(grad_output, create_graph=create_graph)
return x_grad, x_grad_clone
# Accumulate in-place when create_graph is False
x_grad, x_grad_clone = compute_grad(create_graph=False)
self.assertEqual(x_grad, x_grad_clone * 2)
# Accumulate out-of-place when create_graph is True
x_grad, x_grad_clone = compute_grad(create_graph=True)
self.assertEqual(x_grad, x_grad_clone)
def test_accumulate_grad_tensor_reference(self):
def _test_grad_tensor(params_grad_tensor, backward_grad_tensor, should_preserve_reference, create_graph):
params = torch.tensor([1.5, 1.5]).requires_grad_()
params.grad = params_grad_tensor
grad_saved = params.grad
params.backward(backward_grad_tensor, create_graph=create_graph)
self.assertEqual(id(grad_saved) == id(params.grad), should_preserve_reference)
for create_graph in (False, True):
# Accumulating a dense gradient into a sparse gradient changes the `params.grad` reference
_test_grad_tensor(
torch.sparse_coo_tensor(torch.tensor([[1, 1]]).long(), torch.tensor([1., 1.])),
torch.tensor([1.5, 1.5]),
False, # never accumulates in-place
create_graph)
# Accumulating a dense gradient into a dense gradient preserves the `params.grad`
# reference, but only if create_graph=False.
_test_grad_tensor(
torch.tensor([1.5, 1.5]),
torch.tensor([1.5, 1.5]),
not create_graph,
create_graph)
# Accumulating a sparse gradient into a sparse gradient preserves the `params.grad`
# reference, but only if create_graph=False.
_test_grad_tensor(
torch.sparse_coo_tensor(torch.tensor([[1, 1]]).long(), torch.tensor([1., 1.])),
torch.sparse_coo_tensor(torch.tensor([[1, 1]]).long(), torch.tensor([1., 1.])),
not create_graph,
create_graph)
def test_accumulate_grad_with_zero_numel_grad(self):
a = torch.rand(4, 0, requires_grad=True)
b = torch.rand(4, 1, requires_grad=True)
c = a + b
assert c.shape == (4, 0)
c.sum().backward()
self.assertEqual(b.grad, torch.zeros(4, 1))
self.assertEqual(a.grad, torch.zeros(4, 0))
def test_hessian_vector(self):
x = torch.randn(2, 2, requires_grad=True)
y = torch.randn(2, 2, requires_grad=True)
z = x ** 2 + y * x + y ** 2
z.backward(torch.ones(2, 2), create_graph=True)
with torch.no_grad():
x_grad = 2 * x + y
y_grad = x + 2 * y
self.assertEqual(x.grad, x_grad)
self.assertEqual(y.grad, y_grad)
grad_sum = 2 * x.grad + y.grad
grad_sum.backward(torch.ones(2, 2))
x_hv = torch.ones(2, 2) * 5
y_hv = torch.ones(2, 2) * 4
self.assertEqual(x.grad, x_grad + x_hv)
self.assertEqual(y.grad, y_grad + y_hv)
def test_grad(self):
x = torch.randn(2, 2, requires_grad=True)
y = torch.randn(2, 2, requires_grad=True)
z = x ** 2 + y * x + y ** 2
z.backward(torch.ones(2, 2), create_graph=True)
x_grad = 2 * x + y
y_grad = x + 2 * y
self.assertEqual(x.grad, x_grad)
self.assertEqual(y.grad, y_grad)
grad_sum = 2 * x.grad + y.grad
x_hv = torch.autograd.grad(
outputs=[grad_sum], grad_outputs=[torch.ones(2, 2)],
inputs=[x], create_graph=True)
expected_x_hv = torch.ones(2, 2) * 5
expected_y_hv = torch.ones(2, 2) * 4
self.assertEqual(x_hv[0], expected_x_hv)
self.assertEqual(x.grad, x_grad)
self.assertEqual(y.grad, y_grad)
# Test that a shape mismatch between grad_outputs and outputs raises an error
grad_out = torch.ones(2)
try:
torch.autograd.grad(
outputs=[grad_sum], grad_outputs=[grad_out],
inputs=[x], create_graph=True)
self.fail()
except RuntimeError as error:
self.assertEqual(str(error), "Mismatch in shape: grad_output[0] has a shape of "
+ str(grad_out.shape) + " and output[0] has a shape of "
+ str(grad_sum.shape) + ".")
def test_grad_nonleaf(self):
x_init = torch.randn(2, 2, requires_grad=True)
x = x_init
y = torch.randn(2, 2, requires_grad=True)
grad_output = torch.ones(2, 2)
def fn(x):
return x ** 2 + y * x + y ** 2
for _ in range(5):
grad_x, = torch.autograd.grad(
fn(x), x, grad_outputs=grad_output, create_graph=True)
grad_x_expected = 2 * x + y
self.assertIsNone(y.grad)
self.assertIsNone(x.grad)
self.assertEqual(grad_x, grad_x_expected)
x = x + 0.05 * grad_x
val_init = fn(x_init).sum()
val_final = fn(x).sum()
self.assertGreater(val_final, val_init)
x.backward(grad_output)
self.assertIsNotNone(y.grad)
self.assertIsNotNone(x_init.grad)
def test_grad_nonleaf_many_outputs(self):
# This checks an edge case for function callbacks
# We want to capture two grads of a function, but can only
# register a single callback.
x = torch.randn(4, 2, requires_grad=True)
a, b = x.chunk(2)
def hook(*grads):
hook_called[0] = True
hook_called = [False]
x.register_hook(hook)
go = torch.randn(2, 2)
grad_a, grad_b = torch.autograd.grad(
(a + 2 * b), [a, b], grad_outputs=go, create_graph=True)
self.assertEqual(grad_a, go)
self.assertEqual(grad_b, go * 2)
self.assertFalse(hook_called[0])
self.assertIsNone(x.grad)
def test_grad_nonleaf_register_hook(self):
# This checks an edge case for register_hook.
# We want to capture grad of a nonleaf tensor,
# but avoid segfault during backward of other nonleaf tensors
x = torch.randn(5, requires_grad=True)
x_list = x.unbind()
x0 = x_list[0]
hook_results = [None]
def hook(grad):
hook_results[0] = grad
x0.register_hook(hook)
x_list[0].backward()
self.assertEqual(hook_results[0], torch.tensor(1.))
expected_grad = torch.tensor([1., 0, 0, 0, 0])
self.assertEqual(x.grad, expected_grad)
self.assertIsNone(x_list[0].grad)
for i in range(1, 5, 1):
x_list[i].backward()
self.assertEqual(hook_results[0], None)
expected_grad[i] = 1.0
self.assertEqual(x.grad, expected_grad)
self.assertIsNone(x_list[i].grad)
def test_hook_with_no_name(self):
# Create a hook that does not have a __name__ attribute
class MyHookClass:
def __call__(self, grad):
return grad.clone()
x = torch.randn(5, requires_grad=True).clone()
x.register_hook(MyHookClass())
x.sum().backward()
# Should run fine
def test_sharded_grad(self):
leaves = [torch.zeros(5, 5, requires_grad=True) for _ in range(10)]
intermediates = [l * i + l * l for i, l in enumerate(leaves)]
loss = sum(v * i for i, v in enumerate(intermediates)).sum()
# define a helper for dividing intermediates into groups
def group(l, group_size):
return (l[i:i + group_size] for i in range(0, len(l), group_size))
# Compute the d loss / d intermediates in chunks of shard_size
shard_size = 2
d_intermediates = [d_i for intermediates_batch in group(intermediates, shard_size)
for d_i in torch.autograd.grad(loss, intermediates_batch)]
# Compute rest of backward pass
torch.autograd.backward(intermediates, d_intermediates)
for i, l in enumerate(leaves):
self.assertEqual(l.grad, i * i * (1 + l))
def test_backward_badcalls(self):
x = torch.ones(1)
with self.assertRaisesRegex(RuntimeError, 'does not require grad'):
x.backward()
def test_grad_badcalls(self):
x = torch.ones(1)
y = x ** 2
with self.assertRaisesRegex(RuntimeError, 'does not require grad'):
torch.autograd.grad(x, y)
with self.assertRaisesRegex(RuntimeError, 'does not require grad'):
torch.autograd.grad(y, x)
x = torch.ones(1, requires_grad=True)
y = x ** 2
torch.autograd.grad(y, x) # this should succeed now
def test_grad_empty_inputs(self):
x = torch.tensor([1.0], requires_grad=True)
with self.assertRaisesRegex(ValueError, "grad requires non-empty inputs."):
torch.autograd.grad(2 * x, [], grad_outputs=torch.tensor([1.0]))
def test_grad_fn_badcalls(self):
error_regex = 'expected .* arguments, got .* instead'
x = torch.ones(1, requires_grad=True)
y = x ** 2
with self.assertRaisesRegex(TypeError, error_regex):
y.grad_fn(x.detach(), x.detach()) # too many
with self.assertRaisesRegex(TypeError, error_regex):
y.grad_fn() # too few
y.grad_fn(x.detach()) # this should succeed
def test_grad_unreachable(self):
x = torch.ones(1, requires_grad=True)
y = torch.ones(1, requires_grad=True)
# Make sure x and y have grad accumulators allocated
z = x * 2
w = y * 2
grad_x, grad_y = torch.autograd.grad(x * 2, [x, y], allow_unused=True)
self.assertEqual(grad_x, x * 2)
self.assertIsNone(grad_y)
# This is slightly different than the case above, because z doesn't even
# have a grad accumulator allocated.
z = torch.ones(1, requires_grad=True)
grad_x, grad_z = torch.autograd.grad(x * 2, [x, z], allow_unused=True)
self.assertEqual(grad_x, x * 2)
self.assertIsNone(grad_z)
# allow_unused=False, but grads contains None inside, should throw
with self.assertRaisesRegex(RuntimeError,
"Set allow_unused=True"):
grad_x, grad_y = torch.autograd.grad(x * 2, [x, y], allow_unused=False)
def test_grad_unreachable_discovery(self):
# Test that certain nodes are not erroneously executed when an input
# is unreachable. See #39784
class MyFunc(torch.autograd.Function):
@staticmethod
def forward(ctx, x):
return x
@staticmethod
def backward(ctx, x):
self.fail("This node should not be executed!")
x = MyFunc.apply(torch.randn(1, requires_grad=True) * 2)
y = torch.randn(1, requires_grad=True)
(gY,) = torch.autograd.grad(x, (y, ), allow_unused=True)
self.assertIsNone(gY)
x = MyFunc.apply(torch.randn(1, requires_grad=True) * 2)
y = torch.randn(1, requires_grad=True)
z = torch.randn(1, requires_grad=True)
(gY, gZ) = torch.autograd.grad(x + z, (y, z), allow_unused=True)
self.assertIsNone(gY)
self.assertIsNotNone(gZ)
x = MyFunc.apply(torch.randn(1, requires_grad=True) * 2)
y = torch.randn(1, requires_grad=True)
torch.autograd.backward(x, inputs=(y, )) # allow_unused is implicitly True!
self.assertIsNone(y.grad)
def test_grad_batched_grad(self):
x = torch.randn(2, 2, requires_grad=True)
out = x.clone() # Size([2, 2])
batched_grad = torch.arange(3).expand(2, 2, 3).transpose(0, 2) # Size([3, 2, 2])
grad, = torch.autograd.grad(out, (x,), (batched_grad,), is_grads_batched=True)
self.assertEqual(grad, torch.arange(3).expand(2, 2, 3).transpose(0, 2).to(dtype=grad.dtype))
# Detect shape mismatch
grad_out = torch.ones(2, 2)
with self.assertRaisesRegex(RuntimeError, "If `is_grads_batched=True`, we interpret the first"):
torch.autograd.grad(outputs=out, grad_outputs=(grad_out,), inputs=(x,), is_grads_batched=True)
# Scalar outputs
out = x.sum() # Size([])
batched_grad = torch.arange(3) # Size([3])
grad, = torch.autograd.grad(out, (x,), (batched_grad,), is_grads_batched=True)
self.assertEqual(grad, torch.arange(3).expand(2, 2, 3).transpose(0, 2).to(dtype=grad.dtype))
# We consider scalar and sized-1 to be a mismatch. This is consistent with current non-batched behavior.
grad_out = torch.ones(2).unsqueeze(1)
with self.assertRaisesRegex(RuntimeError, "If `is_grads_batched=True`, we interpret the first"):
torch.autograd.grad(outputs=out, grad_outputs=(grad_out,), inputs=(x,), is_grads_batched=True)
def test_hooks(self):
x = torch.ones(5, 5, requires_grad=True)
y = torch.ones(5, 5) * 4
y.requires_grad_(True)
counter = [0]
def bw_hook(inc, grad):
self.assertIsInstance(grad, torch.Tensor)
counter[0] += inc
z = x ** 2 + x * 2 + x * y + y
x.register_hook(lambda *args: bw_hook(0, *args))
test = z.register_hook(lambda *args: bw_hook(1, *args))
z.backward(torch.ones(5, 5), retain_graph=True)
self.assertEqual(counter[0], 1)
test2 = z.register_hook(lambda *args: bw_hook(2, *args))
z.backward(torch.ones(5, 5), retain_graph=True)
self.assertEqual(counter[0], 4)
test2.remove()
z.backward(torch.ones(5, 5), retain_graph=True)
self.assertEqual(counter[0], 5)
def bw_hook_modify(grad):
return grad.mul(2)
test.remove()
z.register_hook(bw_hook_modify)
with torch.no_grad():
y.grad.zero_()
z.backward(torch.ones(5, 5), retain_graph=True)
self.assertEqual(y.grad, (x + 1) * 2)
y.register_hook(bw_hook_modify)
with torch.no_grad():
y.grad.zero_()
z.backward(torch.ones(5, 5))
self.assertEqual(y.grad, (x + 1) * 4)
def test_hooks_cpp(self):
# Tests hooks for autograd function implemented in C++
bn = torch.nn.BatchNorm1d(5, affine=False)
bn.double()
bn.eval()
counter = [0]
def bw_hook(grad):
counter[0] += 1
return grad * 2
x = torch.ones(5, 5, dtype=torch.double, requires_grad=True)
z = bn(x)
z.register_hook(bw_hook)
z.sum().backward()
self.assertEqual(counter[0], 1, msg='bw_hook not called')
self.assertEqual(x.grad, torch.ones(5, 5, dtype=torch.double) * 2, atol=1e-5, rtol=0)
def test_hook_none(self):
# WARNING: this is a test for autograd internals.
# You should never have to use such things in your code.
class NoneGradientFunction(Function):
@staticmethod
def forward(ctx, x, y):
assert ctx.needs_input_grad[0]
assert not ctx.needs_input_grad[1]
return x, y
@staticmethod
def backward(ctx, grad_x, grad_y):
return grad_x, None
was_called = [False]
def hook(grad):
self.assertIsNotNone(grad)
was_called[0] = True
x = torch.randn(5, 5, requires_grad=True)
y = torch.randn(5, 5)
rx, ry = NoneGradientFunction.apply(x, y)
rx.register_hook(hook)
ry.register_hook(hook)
sum(rx, ry).sum().backward()
self.assertTrue(was_called[0])
def test_retain_grad(self):
input = torch.rand(1, 3, requires_grad=True)
h1 = input * 3
out = (h1 * h1).sum()
# It should be possible to call retain_grad() multiple times
h1.retain_grad()
h1.retain_grad()
# Gradient should be accumulated
out.backward(retain_graph=True)
self.assertEqual(h1 * 2, h1.grad)
out.backward(retain_graph=True)
self.assertEqual(h1 * 4, h1.grad)
with torch.no_grad():
input.grad.zero_()
# It should be a no-op for leaves
input.retain_grad()
input.retain_grad()
out.backward()
self.assertEqual(input * 18, input.grad)
def test_retain_grad_cycle(self):
x = torch.ones(5, 5, requires_grad=True)
def run_test():
y = x * 2
y.retain_grad()
return y / 2, torch._C._WeakTensorRef(y)
z, ref = run_test()
self.assertTrue(ref.expired())
z.sum().backward()
def test_backward(self):
v = torch.randn(5, 5, requires_grad=True)
x = torch.randn(5, 5, requires_grad=True)
y = (torch.rand(5, 5) + 0.1).requires_grad_(True)
z = torch.randn(5, 5, requires_grad=True)
grad_output = torch.randn(5, 5)
v.backward(grad_output)
self.assertEqual(v.grad, grad_output)
a = x + (y * z) + 4 * z ** 2 * x / y
a.backward(grad_output)
x_grad = 4 * z.pow(2) / y + 1
y_grad = z - 4 * x * z.pow(2) / y.pow(2)
z_grad = 8 * x * z / y + y
self.assertEqual(x.grad, x_grad * grad_output)
self.assertEqual(y.grad, y_grad * grad_output)
self.assertEqual(z.grad, z_grad * grad_output)
def test_sparse_mm_backward(self):
size = (3, 3)
sparse = torch.sparse_coo_tensor(size, requires_grad=True)
dense = torch.randn(size, requires_grad=True)
with self.assertRaisesRegex(
RuntimeError,
"The backward pass for this operation requires the 'mat1' tensor to be strided,"):
z = dense.addmm(sparse, dense)
mm_test_cases = [
# a requires grad, a is sparse, b requires grad, b is sparse, error message
(False, True, True, False, None),
(False, False, True, True, "The backward pass for this operation requires the 'mat2'"),
(False, True, True, True, "The backward pass for this operation requires the 'mat2'"),
(True, False, True, True, "The backward pass for this operation requires the 'mat2'"),
(True, True, False, False, "The backward pass for this operation requires the 'self'"),
(True, True, True, False, "The backward pass for this operation requires the 'self'"),
(True, True, True, True, "The backward pass for this operation requires the 'mat2'"),
]
for a_req_grad, a_is_sparse, b_req_grad, b_is_sparse, err_msg in mm_test_cases:
# We should only be testing cases with sparse inputs, and at least one
# input needs to require grad so we can call a backward pass
assert a_is_sparse or b_is_sparse
assert a_req_grad or b_req_grad
a = torch.randn(size, requires_grad=a_req_grad)
if a_is_sparse:
a = a.to_sparse()
b = torch.randn(size, requires_grad=b_req_grad)
if b_is_sparse:
b = b.to_sparse()
# If no error expected, check that sparse and dense cases match
if err_msg is None:
r = a.mm(b)
r.sum().backward()
a_grad = None if a.grad is None else a.grad.clone().detach()
b_grad = None if b.grad is None else b.grad.clone().detach()
# Redo with only dense tensors
a = (a.to_dense() if a.is_sparse else a).clone().detach()
a.requires_grad = a_req_grad
b = (b.to_dense() if b.is_sparse else b).clone().detach()
b.requires_grad = b_req_grad
r = a.mm(b)
r.sum().backward()
self.assertEqual(a_grad, a.grad)
self.assertEqual(b_grad, b.grad)
else:
with self.assertRaisesRegex(RuntimeError, err_msg):
a.mm(b)
def test_multi_backward(self):
x = torch.randn(5, 5, requires_grad=True)
y = torch.randn(5, 5, requires_grad=True)
q = torch.randn(5, 5, requires_grad=True)
a = torch.randn(5, 5, requires_grad=True)
b = torch.randn(5, 5, requires_grad=True)
q2 = q * 2
z = x + y + q2
c = a * b + q2
grad_z = torch.randn(5, 5)
grad_c = torch.randn(5, 5)
torch.autograd.backward([z, c], [grad_z, grad_c])
self.assertEqual(x.grad, grad_z)
self.assertEqual(y.grad, grad_z)
self.assertEqual(a.grad, grad_c * b)
self.assertEqual(b.grad, grad_c * a)
self.assertEqual(q.grad, (grad_c + grad_z) * 2)
def test_multi_backward_no_grad(self):
x = torch.randn(5, 5, requires_grad=True)
y = torch.randn(5, 5, requires_grad=False)
z = x + y
q = y * 2
# NB: we currently raise an exception if any arguments to backwards
# have requires_grad=False and don't have a grad_fn. We may want to
# relax that check to a warning.
def call_backwards():
torch.autograd.backward([z, q], [torch.ones(5, 5), torch.ones(5, 5)])
self.assertRaises(RuntimeError, call_backwards)
def test_backward_with_inputs(self):
x = torch.randn(2, 2, dtype=torch.double, requires_grad=True)
y = torch.randn(2, 2, dtype=torch.double, requires_grad=True)
def fn():
return x ** 2 + y * x + y ** 2
gradient = torch.ones(2, 2)
x_grad_expected = 2 * x + y
y_grad_expected = x + 2 * y
@torch.no_grad()
def reset_grad():
x.grad.zero_()
y.grad.zero_()
torch.autograd.backward(fn(), gradient, inputs=[x, y])
self.assertEqual(x.grad, x_grad_expected)
self.assertEqual(y.grad, y_grad_expected)
reset_grad()
torch.autograd.backward(fn(), gradient, inputs=[x])
self.assertEqual(x.grad, x_grad_expected)
self.assertEqual(y.grad, torch.zeros(2, 2), exact_dtype=False)
reset_grad()
torch.autograd.backward(fn(), gradient, inputs=[y])
self.assertEqual(y.grad, y_grad_expected)
self.assertEqual(x.grad, torch.zeros(2, 2), exact_dtype=False)
reset_grad()
torch.autograd.backward(fn(), gradient, inputs=y)
self.assertEqual(y.grad, y_grad_expected)
self.assertEqual(x.grad, torch.zeros(2, 2), exact_dtype=False)
reset_grad()
self.assertRaisesRegex(RuntimeError, 'cannot be empty',
lambda: torch.autograd.backward(fn(), gradient, inputs=[]))
def test_backward_with_nonleaf_inputs(self):
x = torch.randn(2, 2, dtype=torch.double, requires_grad=True)
x_nonleaf = x * 1
y = torch.randn(2, 2, dtype=torch.double, requires_grad=True)
z = torch.randn(2, 2, dtype=torch.double, requires_grad=True)
out = x_nonleaf ** 2 + y * x_nonleaf + y ** 2
out.backward(torch.ones(2, 2, dtype=torch.double), create_graph=True, inputs=[x, y, x_nonleaf])
x_grad_expected = 2 * x + y
y_grad_expected = x + 2 * y
x_non_leaf_expected = 2 * x_nonleaf + y
self.assertEqual(y.grad, y_grad_expected)
self.assertEqual(x.grad, x_grad_expected)
self.assertEqual(x_nonleaf.grad, x_non_leaf_expected)
# backward doesn't have an allow_unused flag, so when a variable is not part
# of the graph it behaves as if allow_unused were True: z.grad will simply
# be None.
out.backward(torch.ones(2, 2, dtype=torch.double), create_graph=True, inputs=[z])
self.assertIsNone(z.grad)
def test_dependent_backward(self):
x = torch.randn(10, requires_grad=True)
y = x ** 2
z = y ** 3
go_y = torch.randn(10)
go_z = torch.randn(10)
torch.autograd.backward([y, z], [go_y, go_z])
xd = x
self.assertEqual(x.grad, 2 * xd * go_y + 6 * xd.pow(5) * go_z)
def test_save_output_nr(self):
x = torch.randn(10, requires_grad=True)
class MultiOutputFn(Function):
@staticmethod
def forward(ctx, x):
return x[:5], x[5:]
@staticmethod
def backward(ctx, *grad):
return torch.cat(grad)
a, b = MultiOutputFn.apply(x)
self.assertEqual(b.output_nr, 1)
class TestFn(Function):
@staticmethod
def forward(ctx, b):
ctx.save_for_backward(b)
return b * 2
@staticmethod
def backward(ctx, grad_b):
b, = ctx.saved_tensors
self.assertEqual(b.output_nr, 1)
TestFn.apply(b).sum().backward()
def test_free_deep_graph(self):
def scope():
depth = 150000
x = torch.randn(1, requires_grad=True)
y = x.clone()
# build a "chain" computation graph
for _ in range(depth):
y = y + y * 0.000001
# graph deletion occurs when the above locals go out of scope.
# In this case `del y` will trigger it but it's easier to leave
# it to Python to delete the locals.
# Should not stack overflow
scope()
def test_free_deep_graph_complicated(self):
def scope():
depth = 100000
randchoice = torch.randint(2, [depth, 2])
x = torch.randn(1, requires_grad=True)
y = x.clone()
# Hold the two previous values
prev_values = [None, None]
# Build a "chain with skip connections" graph
for _ in range(depth):
prev_tensors = [tensor for tensor in prev_values[:-1]
if tensor is not None]
prev_values.append(y)
prev_values.pop(0)
# Definitely pick one tensor to add
y += y * 0.000001
# Possibly add other tensors
nprev = len(prev_tensors)
if nprev == 2:
y += randchoice[depth].mul(torch.cat(prev_tensors)).sum()
# graph deletion occurs when the above locals go out of scope.
# Should not stack overflow
scope()
def test_free_deep_graph_pyfunction(self):
class MyOp(Function):
@staticmethod
def forward(ctx, tensor1, tensor2):
return tensor1 + tensor2
@staticmethod
def backward(ctx, grad_output):
return grad_output, grad_output
def scope():
depth = 150000
x = torch.randn(1, requires_grad=True)
y = x.clone()
# build deeply nested computation graph
for _ in range(depth):
y = MyOp.apply(y, y)
# graph deletion occurs when the above locals go out of scope.
# Should not stack overflow
scope()
def test_no_unnecessary_save(self):
# If we kept x in the derivative Function of x * 2 we would
# get an error in the backward that would complain that we've
# modified x, which was needed for gradient computation.
# Since we should elide unnecessary saves, this test should pass.
mu = torch.ones(1, requires_grad=True)
x = torch.empty(1)
loss = 0
for i in range(3):
x.detach_()
x.copy_(mu + i)
ft = torch.tensor([float(i)])
multiplied = x * ft
s = multiplied.sum()
loss += s
loss.backward()
def test_no_grad(self):
x = torch.ones(5, 5, requires_grad=True)
y = torch.ones(5, 5) * 4
with torch.no_grad():
w = x + y
@torch.no_grad()
def adder(x, y):
return x + y
z = adder(x, y)
self.assertFalse(w.requires_grad)
self.assertRaises(RuntimeError, lambda: w.backward(torch.ones(5, 5)))
self.assertIsNone(w.grad_fn)
self.assertFalse(z.requires_grad)
self.assertRaises(RuntimeError, lambda: z.backward(torch.ones(5, 5)))
self.assertIsNone(z.grad_fn)
# test nested decorator and with-statement on no_grad
with torch.no_grad():
self.assertFalse(torch.is_grad_enabled())
w = adder(x, y)
self.assertFalse(torch.is_grad_enabled())
def test_set_grad_generator_functions(self):
@torch.no_grad()
def gen_no_grad():
for i in range(10):
self.assertEqual(torch.is_grad_enabled(), False)
yield i
with torch.enable_grad():
for _ in gen_no_grad():
self.assertEqual(torch.is_grad_enabled(), True)
@torch.enable_grad()
def gen_enable_grad():
for i in range(10):
self.assertEqual(torch.is_grad_enabled(), True)
yield i
with torch.no_grad():
for _ in gen_enable_grad():
self.assertEqual(torch.is_grad_enabled(), False)
def test_set_grad_generator_functions_recursive(self):
# enable_grad_decorator_recursive and no_grad_decorator_recursive call each other
# recursively, to ensure that the decorators preserve the caller's setting
@torch.enable_grad()
def enable_grad_decorator_recursive(depth):
self.assertTrue(torch.is_grad_enabled())
if depth > 0:
no_grad_decorator_recursive(depth - 1)
self.assertTrue(torch.is_grad_enabled())
@torch.no_grad()
def no_grad_decorator_recursive(depth):
self.assertFalse(torch.is_grad_enabled())
if depth > 0:
enable_grad_decorator_recursive(depth - 1)
self.assertFalse(torch.is_grad_enabled())
# enable_grad_context_manager_recursive and no_grad_context_manager_recursive call
# each other recursively, to ensure that the decorators preserve the caller's setting
def enable_grad_context_manager_recursive(depth):
with torch.enable_grad():
self.assertTrue(torch.is_grad_enabled())
if depth > 0:
no_grad_context_manager_recursive(depth - 1)
self.assertTrue(torch.is_grad_enabled())
def no_grad_context_manager_recursive(depth):
with torch.no_grad():
self.assertFalse(torch.is_grad_enabled())
if depth > 0:
enable_grad_context_manager_recursive(depth - 1)
self.assertFalse(torch.is_grad_enabled())
with torch.enable_grad():
self.assertTrue(torch.is_grad_enabled())
enable_grad_decorator_recursive(10)
self.assertTrue(torch.is_grad_enabled())
enable_grad_context_manager_recursive(10)
self.assertTrue(torch.is_grad_enabled())
with torch.no_grad():
self.assertFalse(torch.is_grad_enabled())
enable_grad_decorator_recursive(10)
self.assertFalse(torch.is_grad_enabled())
enable_grad_context_manager_recursive(10)
self.assertFalse(torch.is_grad_enabled())
def test_set_grad_coroutines(self):
@torch.no_grad()
def coro_no_grad(n=10):
self.assertFalse(torch.is_grad_enabled())
for i in range(n):
self.assertFalse(torch.is_grad_enabled())
r = yield i
self.assertFalse(torch.is_grad_enabled())
self.assertEqual(i, r)
self.assertFalse(torch.is_grad_enabled())
@torch.enable_grad()
def coro_enable_grad(n=10):
self.assertTrue(torch.is_grad_enabled())
for i in range(n):
self.assertTrue(torch.is_grad_enabled())
r = yield i
self.assertTrue(torch.is_grad_enabled())
self.assertEqual(i, r)
self.assertTrue(torch.is_grad_enabled())
with torch.enable_grad():
self.assertTrue(torch.is_grad_enabled())
coro, r = coro_no_grad(), None
try:
while True:
self.assertTrue(torch.is_grad_enabled())
r = coro.send(r)
self.assertTrue(torch.is_grad_enabled())
except StopIteration:
pass
with torch.no_grad():
self.assertFalse(torch.is_grad_enabled())
coro, r = coro_enable_grad(), None
try:
while True:
self.assertFalse(torch.is_grad_enabled())
r = coro.send(r)
self.assertFalse(torch.is_grad_enabled())
except StopIteration:
pass
def test_set_grad_coroutines_benign_exceptions(self):
class RecoverableException(Exception):
pass
@torch.no_grad()
def coro_no_grad(n=10):
has_raised = False
for i in range(n):
try:
self.assertFalse(torch.is_grad_enabled())
yield (-i if has_raised else i)
except RecoverableException:
self.assertFalse(torch.is_grad_enabled())
has_raised = True
@torch.enable_grad()
def coro_enable_grad(n=10):
has_raised = False
for i in range(n):
try:
self.assertTrue(torch.is_grad_enabled())
yield (-i if has_raised else i)
except RecoverableException:
self.assertTrue(torch.is_grad_enabled())
has_raised = True
with torch.enable_grad():
coro = coro_no_grad()
assert 0 == next(coro)
try:
while True:
r = coro.throw(RecoverableException)
self.assertLess(r, 0)
except StopIteration:
pass
with torch.no_grad():
coro = coro_enable_grad()
assert 0 == next(coro)
try:
while True:
r = coro.throw(RecoverableException)
self.assertLess(r, 0)
except StopIteration:
pass
def test_set_grad_coroutines_critical_exceptions(self):
class UnrecoverableException(Exception):
pass
class SecondaryException(Exception):
pass
@torch.no_grad()
def coro_no_grad(n=10):
has_raised = False
for i in range(n):
try:
self.assertFalse(torch.is_grad_enabled())
yield (-i if has_raised else i)
except UnrecoverableException:
self.assertFalse(torch.is_grad_enabled())
raise SecondaryException
@torch.enable_grad()
def coro_enable_grad(n=10):
has_raised = False
for i in range(n):
try:
self.assertTrue(torch.is_grad_enabled())
yield (-i if has_raised else i)
except UnrecoverableException:
self.assertTrue(torch.is_grad_enabled())
raise SecondaryException
with torch.enable_grad():
coro = coro_no_grad()
assert 0 == next(coro)
with self.assertRaises(SecondaryException):
coro.throw(UnrecoverableException)
with torch.no_grad():
coro = coro_enable_grad()
assert 0 == next(coro)
with self.assertRaises(SecondaryException):
coro.throw(UnrecoverableException)
def test_set_grad_coroutines_exit(self):
@torch.no_grad()
def coro_no_grad(state):
for i in range(10):
try:
self.assertFalse(torch.is_grad_enabled())
yield i
except GeneratorExit:
self.assertFalse(torch.is_grad_enabled())
state.add('GeneratorExit')
raise
@torch.enable_grad()
def coro_enable_grad(state):
for i in range(10):
try:
self.assertTrue(torch.is_grad_enabled())
yield i
except GeneratorExit:
self.assertTrue(torch.is_grad_enabled())
state.add('GeneratorExit')
raise
state = set()
with torch.enable_grad():
coro = coro_no_grad(state)
for i in range(5):
next(coro)
coro.close()
self.assertTrue('GeneratorExit' in state)
state = set()
with torch.no_grad():
coro = coro_enable_grad(state)
for i in range(5):
next(coro)
coro.close()
self.assertTrue('GeneratorExit' in state)
def test_no_grad_python_function(self):
"""Python Functions should respect grad mode."""
x = torch.ones(5, 5, requires_grad=True)
class MyOp(Function):
@staticmethod
def forward(self, x):
return x + 1
@staticmethod
def backward(self, dy):
return dy
with torch.no_grad():
y = MyOp.apply(x)
self.assertFalse(y.requires_grad)
def test_indexing(self):
x = torch.arange(1., 17).view(4, 4)
y = Variable(x, requires_grad=True)
def compare(x, y, idx, indexed_tensor, indexed_var):
indexed_var_t = indexed_var.data
if not isinstance(indexed_tensor, torch.Tensor):
indexed_var_t = indexed_var_t[0]
self.assertEqual(indexed_tensor, indexed_var_t)
indexed_var.sum().backward()
expected_grad = torch.empty(x.size()).fill_(0)
expected_grad[idx] = 1
self.assertEqual(y.grad, expected_grad)
def check_index(x, y, idx):
if y.grad is not None:
with torch.no_grad():
y.grad.zero_()
indexed_tensor = x[idx]
indexed_var = y[idx]
compare(x, y, idx, indexed_tensor, indexed_var)
check_index(x, y, 1)
check_index(x, y, (1, 1))
check_index(x, y, slice(1, None))
check_index(x, y, slice(None, 2))
check_index(x, y, (slice(None, 2), 2))
check_index(x, y, (slice(1, 2), 2))
check_index(x, y, (1, slice(2, None)))
check_index(x, y, (slice(None, None), slice(2, None)))
check_index(x, y, torch.LongTensor([0, 2]))
check_index(x, y, torch.rand(4, 4).bernoulli().bool())
check_index(x, y, (Ellipsis, slice(2, None)))
check_index(x, y, ([0], [0]))
check_index(x, y, ([1, 2, 3], [0]))
check_index(x, y, ([1, 2], [2, 1]))
check_index(x, y, ([[1, 2], [3, 0]], [[0, 1], [2, 3]]))
check_index(x, y, ([slice(None), [2, 3]]))
check_index(x, y, ([[2, 3], slice(None)]))
# advanced indexing, with less dim, or ellipsis
check_index(x, y, ([0]))
check_index(x, y, ([0], ))
x = torch.arange(1., 49).view(4, 3, 4)
y = Variable(x, requires_grad=True)
check_index(x, y, (slice(None), [0], [0]))
check_index(x, y, ([0], [0], slice(None)))
check_index(x, y, (slice(None), [0, 1, 2], [0]))
check_index(x, y, ([0, 1, 2], [0], slice(None)))
check_index(x, y, (slice(None), [1, 2], [2, 1]))
check_index(x, y, ([1, 2], [2, 1], slice(None)))
check_index(x, y, (slice(None), [[1, 2], [2, 0]], [[0, 1], [2, 3]]))
check_index(x, y, ([[1, 2], [3, 0]], [[0, 1], [2, 2]], slice(None)))
check_index(x, y, (slice(None), slice(None), [2, 1]))
check_index(x, y, (slice(None), [2, 1], slice(None)))
check_index(x, y, ([2, 1], slice(None), slice(None)))
# advanced indexing, with less dim, or ellipsis
check_index(x, y, ([0], ))
check_index(x, y, ([0], slice(None)))
check_index(x, y, ([0], Ellipsis))
check_index(x, y, ([1, 2], [0, 1]))
check_index(x, y, ([1, 2], [0, 1], Ellipsis))
check_index(x, y, (Ellipsis, [1, 2], [0, 1]))
# advanced indexing, with a tensor wrapped in a variable
z = torch.LongTensor([0, 1])
zv = Variable(z, requires_grad=False)
seq = [z, Ellipsis]
seqv = [zv, Ellipsis]
if y.grad is not None:
with torch.no_grad():
y.grad.zero_()
indexed_tensor = x[seq]
indexed_var = y[seqv]
compare(x, y, seq, indexed_tensor, indexed_var)
def test_indexing_duplicates(self):
x = torch.arange(1., 17).view(4, 4)
y = Variable(x, requires_grad=True)
idx = torch.LongTensor([1, 1, 3, 2, 1, 2])
y[idx].sum().backward()
expected_grad = torch.zeros(4, 4)
for i in idx:
expected_grad[i] += 1
self.assertEqual(y.grad, expected_grad)
# with advanced indexing
x = torch.arange(1., 17).view(4, 4)
y = Variable(x, requires_grad=True)
idx = [[1, 1, 3, 2, 1, 2], [0]]
y[idx].sum().backward()
expected_grad = torch.zeros(4, 4)
for i in idx[0]:
for j in idx[1]:
expected_grad[i][j] += 1
self.assertEqual(y.grad, expected_grad)
x = torch.arange(1., 17).view(4, 4)
y = Variable(x, requires_grad=True)
idx = [[[1, 2], [0, 0]], [[0, 1], [1, 1]]]
y[idx].sum().backward()
expected_grad = torch.tensor([[0., 2., 0., 0.],
[1., 0., 0., 0.],
[0., 1., 0., 0.],
[0., 0., 0., 0.]])
self.assertEqual(y.grad, expected_grad)
x = torch.arange(1., 65).view(4, 4, 4)
y = Variable(x, requires_grad=True)
idx = [[1, 1, 1], slice(None), slice(None)]
y[idx].sum().backward()
expected_grad = torch.empty(4, 4, 4).zero_()
expected_grad[1].fill_(3)
self.assertEqual(y.grad, expected_grad)
def test_index_backward_does_not_save_tensor(self):
# Example from https://github.com/pytorch/pytorch/issues/24853.
# if `index(tensor, indices)` saves `tensor` for backwards, then it will
# trigger a version check on `tensor` during the backward pass, which
# will cause the following code to error because `tensor` gets modified
# by the indexing line.
a = torch.tensor([1., 0, 0])
b = torch.zeros(3, requires_grad=True)
tensor = b + 0
tensor[a != 0] = tensor[a != 0]
tensor.backward(torch.zeros_like(tensor))
def test_volatile_deprecated(self):
v = torch.autograd.torch.randn(3, 3)
with warnings.catch_warnings(record=True) as w:
self.assertFalse(v.volatile)
self.assertIn('volatile', str(w[0].message))
def test_saved_variables_deprecated(self):
class MyFunction(Function):
@staticmethod
def forward(ctx, tensor1, tensor2):
ctx.save_for_backward(tensor1, tensor2)
return tensor1 + tensor2
@staticmethod
def backward(ctx, grad_output):
var1, var2 = ctx.saved_variables
return (grad_output, grad_output)
with warnings.catch_warnings(record=True) as warns:
warnings.simplefilter("always")
x = torch.randn((3, 3), requires_grad=True)
y = torch.randn((3, 3), requires_grad=True)
MyFunction.apply(x, y).sum().backward()
            has_deprecated = any('deprecated' in str(warn) and
                                 'saved_variables' in str(warn)
                                 for warn in warns)
            self.assertTrue(has_deprecated)
def test_requires_grad(self):
x = torch.randn(5, 5)
y = torch.randn(5, 5)
z = torch.randn(5, 5, requires_grad=True)
a = x + y
self.assertFalse(a.requires_grad)
b = a + z
self.assertTrue(b.requires_grad)
def error():
raise RuntimeError
# Make sure backward isn't called on these
a._backward_hooks = OrderedDict()
x._backward_hooks = OrderedDict()
y._backward_hooks = OrderedDict()
a._backward_hooks['test'] = error
x._backward_hooks['test'] = error
y._backward_hooks['test'] = error
b.backward(torch.ones(5, 5))
def test_requires_grad_(self):
x = torch.randn(5, 5)
y = torch.randn(5, 5, requires_grad=True)
self.assertIs(x, x.requires_grad_())
self.assertTrue(x.requires_grad)
self.assertIs(y, y.requires_grad_())
self.assertTrue(y.requires_grad)
self.assertIs(x, x.requires_grad_(True))
self.assertTrue(x.requires_grad)
self.assertIs(y, y.requires_grad_(True))
self.assertTrue(y.requires_grad)
z = x * y
self.assertRaises(RuntimeError, lambda: z.requires_grad_(False))
self.assertIs(z, z.requires_grad_())
self.assertTrue(z.requires_grad)
self.assertIs(z, z.requires_grad_(True))
self.assertTrue(z.requires_grad)
self.assertIs(x, x.requires_grad_(False))
self.assertFalse(x.requires_grad)
self.assertIs(y, y.requires_grad_(False))
self.assertFalse(y.requires_grad)
def test_requires_grad_inplace(self):
a = torch.randn(5, 5)
b = torch.randn(5, 5, requires_grad=True)
a += b
self.assertTrue(a.requires_grad)
# non-leaf
a = torch.randn(5, 5) + 0
b = torch.randn(5, 5, requires_grad=True)
a += b
self.assertTrue(a.requires_grad)
def test_no_requires_grad_inplace(self):
# basic case, should be able to modify inplace while requires_grad is False
a = torch.randn(2, 3)
a.add_(5)
a.requires_grad = True
a.sum().backward()
self.assertEqual(a.grad, torch.ones(2, 3))
# same but with a view
a = torch.randn(2, 3)
b = a[:]
b.add_(5)
a.requires_grad = True
a.sum().backward()
self.assertEqual(a.grad, torch.ones(2, 3))
# should fail if requires_grad = True when we modify inplace
a = torch.randn(2, 3)
b = a[:]
a.requires_grad = True
with self.assertRaises(RuntimeError):
a.add_(5)
with self.assertRaises(RuntimeError):
b.add_(5)
def test_attribute_deletion(self):
x = torch.randn((5, 5), requires_grad=True)
del x.grad
self.assertIsNone(x.grad)
with self.assertRaises(RuntimeError):
del x.data
with self.assertRaises(TypeError):
x.data = None
with self.assertRaises(RuntimeError):
del x.requires_grad
with self.assertRaises(RuntimeError):
del x._grad_fn
with self.assertRaises(RuntimeError):
del x._backward_hooks
def test_duplicate_backward_root(self):
a = torch.randn(5, 5, requires_grad=True)
b = torch.randn(5, 5, requires_grad=True)
x = a * b
grad_output = torch.randn_like(x)
torch.autograd.backward([x, x], [grad_output, grad_output])
self.assertEqual(a.grad, b * grad_output * 2)
self.assertEqual(b.grad, a * grad_output * 2)
def test_backward_no_grad(self):
a = torch.randn(5, 5, requires_grad=True)
b = a + 2
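        # a None gradient can only be implicitly created for scalar outputs; b is 5x5, so this must raise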
with self.assertRaises(RuntimeError):
torch.autograd.backward([b], [None])
def test_backward_twice_with_saved_values(self):
b = torch.randn(3, requires_grad=True, dtype=torch.double)
c = torch.zeros(3, dtype=torch.double)
c[[1, 2]] = b[[1, 1]]
c.backward(torch.tensor([1, 1, 1], dtype=torch.double))
self.assertRaisesRegex(RuntimeError, 'Specify retain_graph=True',
lambda: c.backward(torch.tensor([1, 1, 1], dtype=torch.double)))
def test_backward_twice_retained_graph_with_saved_values(self):
b = torch.randn(3, requires_grad=True, dtype=torch.double)
c = torch.zeros(3, dtype=torch.double)
c[[1, 2]] = b[[1, 1]]
c.backward(torch.tensor([1, 1, 1], dtype=torch.double), retain_graph=True)
c.backward(torch.tensor([1, 1, 1], dtype=torch.double))
def test_backward_twice_without_saved_values(self):
b = torch.randn(3, requires_grad=True, dtype=torch.double)
c = b + 1
c.backward(torch.tensor([1, 1, 1], dtype=torch.double))
c.backward(torch.tensor([1, 1, 1], dtype=torch.double))
def test_backward_twice_retained_graph_without_saved_values(self):
b = torch.randn(3, requires_grad=True, dtype=torch.double)
c = torch.zeros(3, dtype=torch.double)
c[[1, 2]] = b[[1, 1]]
c.backward(torch.tensor([1, 1, 1], dtype=torch.double), retain_graph=True)
c.backward(torch.tensor([1, 1, 1], dtype=torch.double))
def test_backward_create_graph_warns(self):
try:
prev = torch.is_warn_always_enabled()
torch.set_warn_always(True)
b = torch.randn(3, requires_grad=True, dtype=torch.double)
c = b * b
with warnings.catch_warnings(record=True) as ws:
c.backward(torch.ones_like(c), create_graph=True)
b.grad = None
self.assertTrue(any('Using backward() with create_graph=True' in str(w.message) for w in ws))
# Should not warn for grad
with warnings.catch_warnings(record=True) as ws:
torch.autograd.grad(c, b, torch.ones_like(c), create_graph=True)
self.assertFalse(any('Using backward() with create_graph=True' in str(w.message) for w in ws))
finally:
torch.set_warn_always(prev)
def test_next_functions(self):
x = torch.randn(5, 5, requires_grad=True)
y = torch.randn(5, 5, requires_grad=True)
a = x + y
self.assertIsNotNone(a.grad_fn)
next_functions = a.grad_fn.next_functions
self.assertEqual(len(next_functions), 2)
self.assertIsInstance(next_functions[0][0], torch._C._functions.AccumulateGrad)
self.assertEqual(next_functions[0][1], 0)
self.assertIsInstance(next_functions[1][0], torch._C._functions.AccumulateGrad)
self.assertEqual(next_functions[1][1], 0)
b = a + 5
next_functions = b.grad_fn.next_functions
self.assertEqual(len(next_functions), 2)
self.assertIs(next_functions[0][0], a.grad_fn)
self.assertIs(next_functions[1][0], None)
def test_inplace(self):
x = torch.ones(5, 5, requires_grad=True)
y = Variable(torch.ones(5, 5) * 4, requires_grad=True)
z = x * y
q = z + y
w = z * y
z.add_(2)
        # Add doesn't need its inputs to do backward, so it shouldn't raise
q.backward(torch.ones(5, 5), retain_graph=True)
# Mul saves both inputs in forward, so it should raise
self.assertRaises(RuntimeError, lambda: w.backward(torch.ones(5, 5)))
z = x * y
q = z * y
r = z + y
w = z.add_(y)
        # w is the last expression, so this should succeed
w.backward(torch.ones(5, 5), retain_graph=True)
# r doesn't use the modified value in backward, so it should succeed
r.backward(torch.ones(5, 5), retain_graph=True)
# q uses dirty z, so it should raise
self.assertRaises(RuntimeError, lambda: q.backward(torch.ones(5, 5)))
with torch.no_grad():
x.grad.zero_()
m = x / 2
z = m + y / 8
q = z * y
r = z + y
prev_version = z._version
w = z.exp_()
self.assertNotEqual(z._version, prev_version)
r.backward(torch.ones(5, 5), retain_graph=True)
self.assertEqual(x.grad, torch.ones(5, 5) / 2)
w.backward(torch.ones(5, 5), retain_graph=True)
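        # z == 1 everywhere (x / 2 + y / 8 with x == 1, y == 4), so w = exp(z) contributes
        # exp(1) * dz/dx = e / 2 on top of the 1/2 already accumulated from r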
self.assertEqual(x.grad, torch.empty(5, 5).fill_((1 + math.e) / 2))
self.assertRaises(RuntimeError, lambda: q.backward(torch.ones(5, 5)))
leaf = torch.ones(5, 5, requires_grad=True)
x = leaf.clone()
x.add_(10)
self.assertEqual(x, torch.ones(5, 5) * 11)
# x should be still usable
y = x + 2
y.backward(torch.ones(5, 5))
self.assertEqual(leaf.grad, torch.ones(5, 5))
z = x * y
x.add_(2)
self.assertRaises(RuntimeError, lambda: z.backward(torch.ones(5, 5)))
def test_mark_non_differentiable(self):
class MyFunction(Function):
@staticmethod
def forward(ctx, input):
output = input > 0
ctx.mark_non_differentiable(output)
return output
@staticmethod
def backward(ctx, grad_output):
return (grad_output * 0).to(torch.double)
x = torch.randn(5, 5, requires_grad=True)
mask = MyFunction.apply(x)
self.assertFalse(mask.requires_grad)
y = x.masked_fill(mask, 0)
y.sum().backward()
def test_mark_non_differentiable_mixed(self):
class MyFunction(Function):
@staticmethod
def forward(ctx, input):
a = input + 1
b = input + 2
ctx.mark_non_differentiable(a)
return a, b
@staticmethod
def backward(ctx, grad_a, grad_b):
self.assertTrue((grad_a == 0).all())
self.assertTrue((grad_b == 1).all())
return grad_b
x = torch.randn(5, 5, requires_grad=True)
a, b = MyFunction.apply(x)
self.assertFalse(a.requires_grad)
self.assertTrue(b.requires_grad)
b.sum().backward()
self.assertEqual(x.grad, torch.ones(5, 5))
def test_mark_non_differentiable_none(self):
# This used to segfault because MyFunction would send back null
        # gradients to MulBackward, which is implemented in C++. C++-implemented
        # functions expect incoming grad_outputs to be non-null.
class MyFunction(Function):
@staticmethod
def forward(ctx, input):
output = input.clone()
ctx.mark_non_differentiable(output)
return output
@staticmethod
def backward(ctx, grad_output):
return None
x = torch.randn(5, 5, requires_grad=True)
r = MyFunction.apply(x * x)
(r * x).sum().backward()
def test_return_duplicate(self):
class DoubleDuplicate(Function):
@staticmethod
def forward(ctx, x):
output = x * 2
return output, output
@staticmethod
def backward(ctx, grad1, grad2):
return grad1 * 2 + grad2 * 2
def fn(x):
a, b = DoubleDuplicate.apply(x)
self.assertIs(a, b)
return a + b
x = torch.randn(5, 5, dtype=torch.double, requires_grad=True)
gradcheck(fn, [x])
gradgradcheck(fn, [x])
def test_return_duplicate_inplace(self):
class DoubleInplace(Function):
@staticmethod
def forward(ctx, x):
x.mul_(2)
ctx.mark_dirty(x)
return x, x
@staticmethod
def backward(ctx, grad1, grad2):
return grad1 * 2 + grad2 * 2
def inplace_fn(x):
a, b = DoubleInplace.apply(x.clone())
self.assertIs(a, b)
return a + b
x = torch.randn(5, 5, dtype=torch.double, requires_grad=True)
gradcheck(inplace_fn, [x])
gradgradcheck(inplace_fn, [x])
# Can't modify leaf variables in-place
        self.assertRaises(RuntimeError, lambda: DoubleInplace.apply(x))
        # Functions which modify views in-place must return only one output
        self.assertRaises(RuntimeError, lambda: DoubleInplace.apply(x.clone()[0]))
def _test_setitem(self, size, index):
x = torch.ones(*size, requires_grad=True)
y = x + 2
y_version = y._version
y[index] = 2
self.assertNotEqual(y._version, y_version)
y.backward(torch.ones(*size))
expected_grad = torch.ones(*size)
expected_grad[index] = 0
self.assertEqual(x.grad, expected_grad)
def _test_setitem_tensor(self, size, index):
x = torch.ones(*size, requires_grad=True)
y = x + 2
y_version = y._version
value = x.new(x[index].size()).fill_(7)
value.requires_grad = True
y[index] = value
self.assertNotEqual(y._version, y_version)
y.backward(torch.ones(*size))
expected_grad_input = torch.ones(*size)
expected_grad_input[index] = 0
self.assertEqual(x.grad, expected_grad_input)
self.assertEqual(value.grad, torch.ones_like(value))
        # case when x broadcasts to match y[1]
x = torch.randn(4, requires_grad=True)
y = torch.zeros(2, 3, 4)
y[1] = x
y.backward(torch.randn(2, 3, 4))
self.assertEqual(x.size(), x.grad.size())
def test_setitem(self):
self._test_setitem((5, 5), 1)
self._test_setitem((5,), 1)
self._test_setitem((1,), 0)
self._test_setitem((10,), [[0, 4, 2]])
self._test_setitem((5, 5), [[0, 4], [2, 2]])
self._test_setitem((5, 5, 5), [slice(None), slice(None), [1, 3]])
self._test_setitem((5, 5, 5), [slice(None), [1, 3], slice(None)])
self._test_setitem((5, 5, 5), [[1, 3], slice(None), slice(None)])
self._test_setitem((5, 5, 5), [slice(None), [2, 4], [1, 3]])
self._test_setitem((5, 5, 5), [[1, 3], [2, 4], slice(None)])
self._test_setitem_tensor((5, 5), 3)
self._test_setitem_tensor((5, 5), [[0, 1], [1, 0]])
self._test_setitem_tensor((5,), 3)
self._test_setitem_tensor((5,), Variable(torch.LongTensor([3]), requires_grad=False).sum())
self._test_setitem_tensor((5,), [[0, 1, 2, 3]])
self._test_setitem_tensor((5, 5, 5), [slice(None), slice(None), [1, 3]])
self._test_setitem_tensor((5, 5, 5), [slice(None), [1, 3], slice(None)])
self._test_setitem_tensor((5, 5, 5), [[1, 3], slice(None), slice(None)])
self._test_setitem_tensor((5, 5, 5), [slice(None), [2, 4], [1, 3]])
self._test_setitem_tensor((5, 5, 5), [[1, 3], [2, 4], slice(None)])
self._test_setitem_tensor((5, 5, 5), [Variable(torch.LongTensor([1,
3]), requires_grad=False), [2, 4], slice(None)])
def test_setitem_mask(self):
mask = torch.BoolTensor(5, 5).bernoulli_()
self._test_setitem((5, 5), Variable(mask))
self._test_setitem((5,), Variable(mask[0]))
self._test_setitem((1,), Variable(mask[0, 0:1]))
self._test_setitem_tensor((5, 5), Variable(mask))
self._test_setitem_tensor((5,), Variable(mask[0]))
def test_select_sum(self):
# both select and sum return Scalars in ATen; ensure they work together.
x = torch.randn(10, dtype=torch.double, requires_grad=True)
def func(x):
return x.select(0, 1).sum()
gradcheck(func, [x])
gradgradcheck(func, [x])
def test_diagonal_expanded_v(self):
value = torch.rand([])
v_expanded = torch.tensor(value).expand(10)
a = torch.rand(10, 10, dtype=torch.double, requires_grad=True)
result, = torch.autograd.grad(a.diagonal(), a, v_expanded)
self.assertEqual(result, torch.eye(10, dtype=torch.double) * value)
def test_select_expanded_v(self):
v_expanded = torch.rand(10).expand(10, 10)
a = torch.rand(10, 10, 10, requires_grad=True)
result, = torch.autograd.grad(a[0], a, v_expanded)
expected = torch.zeros(10, 10, 10)
expected[0] = v_expanded
self.assertEqual(result, expected)
def test_slice_expanded_v(self):
v_expanded = torch.rand(10, 1).expand(2, 10, 10)
a = torch.rand(10, 10, 10, requires_grad=True)
result, = torch.autograd.grad(a[3:5], a, v_expanded)
expected = torch.zeros(10, 10, 10)
expected[3:5] = v_expanded
self.assertEqual(result, expected)
def test_unused_output(self):
x = torch.randn(10, 10, requires_grad=True)
outputs = x.chunk(5)
o = outputs[2]
o = o * 4 + 2
o.sum().backward()
expected_grad = torch.zeros(10, 10)
expected_grad[4:6] = 4
self.assertEqual(x.grad, expected_grad)
with torch.no_grad():
x.grad.zero_()
grad_output = torch.randn(2, 10)
outputs = x.chunk(5)
outputs[0].backward(grad_output)
expected_grad = torch.zeros(10, 10)
expected_grad[:2] = grad_output
self.assertEqual(x.grad, expected_grad)
# TODO: opinfo this or move to the sparse test suite
def _test_sparse_gather(self, size_x, size_ind, dim):
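        # gradients computed with sparse_grad=False and sparse_grad=True should agree once the sparse gradient is densified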
x = torch.randn(size_x, requires_grad=True)
if len(size_ind) > 0 and len(size_x) > 0:
ind = torch.randint(x.size(dim), size_ind)
else:
ind = torch.zeros(size_ind, dtype=torch.int64)
out = torch.gather(x, dim, ind, sparse_grad=False)
grad = torch.rand_like(out)
out.backward(grad)
grad_dense = x.grad.clone()
x.grad = None
out = torch.gather(x, dim, ind, sparse_grad=True)
out.backward(grad)
self.assertEqual(grad_dense, x.grad.to_dense())
def test_sparse_gather_dim0(self):
self._test_sparse_gather((10, 10), (5, 10), 0)
def test_sparse_gather_dim1(self):
self._test_sparse_gather((10, 10, 5), (10, 5, 5), 1)
def test_sparse_gather_dim_neg(self):
self._test_sparse_gather((10, 10, 5), (10, 10, 2), -1)
def test_sparse_gather_ind_scalar(self):
self._test_sparse_gather((10,), (), 0)
def test_sparse_gather_x_scalar(self):
self._test_sparse_gather((), (2,), 0)
def test_sparse_gather_both_scalar(self):
self._test_sparse_gather((), (), 0)
def test_gc_in_destructor(self):
"""
Previously, if a Function destructor triggered a garbage collection,
the Variable's tp_dealloc handler would get called twice leading to a
segfault.
"""
class CollectOnDelete(Function):
def forward(self, x):
return x
def backward(self, grad_output):
return grad_output
def __del__(self):
gc.collect()
for _ in range(10):
CollectOnDelete().forward(torch.randn(1, requires_grad=True)).backward()
def test_naughty_autograd_function_attribute_access(self):
class Id(Function):
@staticmethod
def forward(ctx, x):
return x
@staticmethod
def backward(ctx, grad_x):
return grad_x
with self.assertWarnsRegex(DeprecationWarning, "should not be instantiated"):
f = Id()
        # After raising the warning, it should still return an instance
self.assertIsInstance(f, Id)
x = torch.zeros(1, requires_grad=True)
with self.assertRaisesRegex(RuntimeError, "non-static forward method is deprecated"):
f(x)
t = Id.apply(x)
self.assertEqual(t.grad_fn.name(), "IdBackward")
# THPFunction is the base class of both grad_fn and autograd functions,
# which means that a lot of accessors on them may segfault. Test that we
# properly error in this case.
t = torch.ones(1, requires_grad=True)
t._backward_hooks = dict()
with self.assertRaisesRegex(RuntimeError, "Attribute '_register_hook_dict' is invalid"):
f._register_hook_dict(t)
with self.assertRaisesRegex(RuntimeError, "Attribute 'register_hook' is invalid"):
f.register_hook(lambda x, y: None)
with self.assertRaisesRegex(RuntimeError, "Attribute 'next_functions' is invalid"):
f.next_functions
with self.assertRaisesRegex(RuntimeError, "Attribute 'name' is invalid"):
f.name()
with self.assertRaisesRegex(RuntimeError, "underlying PyNode has already been deallocated"):
f.metadata
@unittest.expectedFailure
def test_naughty_anomaly_access(self):
class MyFunction(Function):
@staticmethod
def forward(ctx, x):
return x
@staticmethod
def backward(ctx, g):
return g
x = torch.zeros(1, requires_grad=True)
y = MyFunction.apply(x)
y.backward()
y.grad_fn.metadata
g = y.grad_fn
del y
g.metadata # this currently fails, but shouldn't
def test_naughty_autograd_function_stashing_ctx(self):
saved_ctx = []
class Id(Function):
@staticmethod
def forward(ctx, x):
ctx.save_for_backward(x)
return x
@staticmethod
def backward(ctx, grad_x):
saved_ctx.append(ctx)
return ctx.saved_tensors
p = torch.zeros(1, requires_grad=True)
loss = Id.apply(p)
loss.backward(retain_graph=True)
del loss
# At this point in time, it complains that the graph has been freed
        # (which is indeed true, although a somewhat indirect way of stating the
# problem).
self.assertRaises(RuntimeError, lambda: saved_ctx[0].saved_tensors)
def test_custom_autograd_repeated_grad_grad(self):
# This test failed the equality check in PR #22983; it's an interesting
        # and different test case worth enshrining. mult1 is not testing
        # anything especially interesting, but mult2 is the interesting case.
def mult1(x):
return x.prod(dim=-1).prod(dim=-1)
class Mult(torch.autograd.Function):
@staticmethod
def forward(ctx, x):
y = mult1(x)
ctx.save_for_backward(x, y)
return y
@staticmethod
def backward(ctx, grad_output):
x, y = ctx.saved_tensors
return (grad_output * y)[:, None, None] / x
mult2 = Mult.apply
def check_gradgrad_repeated(x, y):
gy, = torch.autograd.grad(y[0], x, create_graph=True)
ggy_1, = torch.autograd.grad(gy[0, 0, 0], x, retain_graph=True)
gy, = torch.autograd.grad(y[0], x, create_graph=True)
ggy_2, = torch.autograd.grad(gy[0, 0, 0], x, retain_graph=True)
self.assertEqual(ggy_1[0, 0, 1], ggy_2[0, 0, 1])
x = torch.ones(2, 4, 4).requires_grad_()
check_gradgrad_repeated(x, mult1(x))
check_gradgrad_repeated(x, mult2(x))
def test_custom_autograd_no_early_free(self):
        # Prior to #22983, this test failed complaining that buffers had
        # already been freed. It is also a pretty interesting test case.
class Double(torch.autograd.Function):
@staticmethod
def forward(ctx, x):
y = x ** 2
ctx.save_for_backward(x, y)
return y
@staticmethod
def backward(ctx, grad_output):
x, _ = ctx.saved_tensors
return grad_output * 2 * x
# this is equivalent, but uses the output of .forward() in .backward()
class Double2(Double):
@staticmethod
def backward(ctx, grad_output):
x, y = ctx.saved_tensors
return grad_output * 2 * y / x
double = Double.apply
double2 = Double2.apply
x = torch.tensor(2).double().requires_grad_()
self.assertTrue(gradcheck(double, x))
self.assertTrue(gradgradcheck(double, x))
self.assertTrue(gradcheck(double2, x))
self.assertTrue(gradgradcheck(double2, x))
y = double(x)
torch.autograd.grad(y, x, create_graph=True)
torch.autograd.grad(y, x)
y = double2(x)
torch.autograd.grad(y, x, create_graph=True)
torch.autograd.grad(y, x) # should not error!
def test_detach(self):
x = torch.randn(10, 10, requires_grad=True)
y = x + 2
y = y.detach()
z = y * 4 + 2
self.assertFalse(y.requires_grad)
self.assertFalse(z.requires_grad)
x = torch.randn(10, 10, requires_grad=True)
y = x * 2
y = y.detach()
self.assertFalse(y.requires_grad)
self.assertIsNone(y.grad_fn)
z = x + y
z.sum().backward()
# This is an incorrect gradient, but we assume that's what the user
# wanted. detach() is an advanced option.
self.assertEqual(x.grad, torch.ones(10, 10))
# in-place detach
x = torch.randn(10, 10, requires_grad=True)
y = torch.randn(10, 10, requires_grad=True)
a = x * 2
(y + a).sum().backward(retain_graph=True)
a.detach_()
self.assertFalse(a.requires_grad)
(y + a).sum().backward() # this won't backprop to x
self.assertEqual(x.grad, torch.ones(10, 10) * 2)
self.assertEqual(y.grad, torch.ones(10, 10) * 2)
        # in-place detach on a view raises an exception
view = x.narrow(0, 1, 4)
self.assertRaisesRegex(RuntimeError, 'view', lambda: view.detach_())
def test_detach_base(self):
"detaching base does not detach view"
x = torch.randn(10, 10, requires_grad=True)
view = x.narrow(0, 1, 4)
x.detach_()
self.assertFalse(x.requires_grad)
self.assertTrue(view.requires_grad)
self.assertIsNotNone(view.grad_fn)
self.assertIs(view._base, x)
def test_detach_then_inplace_raises_in_autograd(self):
x = torch.randn([], requires_grad=True)
orig_x = x.detach().clone()
y = x ** 2 # saves x
z = x.detach()
z.zero_()
with self.assertRaisesRegex(RuntimeError, "has been modified by an inplace"):
y.backward()
def test_detach_disallows_metadata_change(self):
x = torch.randn([], requires_grad=True)
detached = x.detach()
with self.assertRaisesRegex(
RuntimeError, "not allowed on a Tensor created from .data or .detach()"):
detached.resize_(3, 3)
    def _test_type_conversion_backward(self, t):
fvar = Variable(t(torch.randn(5, 5).float()), requires_grad=True)
fvar.double().sum().backward()
self.assertEqual(fvar.grad, torch.ones_like(fvar))
self.assertEqual(type(fvar.grad), type(fvar))
dvar = Variable(t(torch.randn(5, 5).double()), requires_grad=True)
dvar.float().sum().backward()
self.assertEqual(dvar.grad, torch.ones_like(dvar))
self.assertEqual(type(dvar.grad), type(dvar))
def test_type_conversions(self):
x = torch.randn(5, 5)
self.assertIsInstance(x.float(), torch.FloatTensor)
self.assertIsInstance(x.int(), torch.IntTensor)
if torch.cuda.is_available():
self.assertIsInstance(x.float().cuda(), torch.cuda.FloatTensor)
self.assertIsInstance(x.int().cuda(), torch.cuda.IntTensor)
self.assertIsInstance(x.int().cuda().cpu(), torch.IntTensor)
if torch.cuda.device_count() >= 2:
x2 = x.float().cuda(1)
self.assertIsInstance(x2, torch.cuda.FloatTensor)
self.assertIs(x2.get_device(), 1)
x2 = x.float().cuda()
self.assertIsInstance(x2, torch.cuda.FloatTensor)
self.assertIs(x2.get_device(), 0)
x2 = x2.cuda(1)
self.assertIsInstance(x2, torch.cuda.FloatTensor)
self.assertIs(x2.get_device(), 1)
y = Variable(torch.randn(5).cuda(1), requires_grad=True)
y.cpu().sum().backward()
self.assertIs(y.grad.get_device(), 1)
self.assertIs(y.long().get_device(), 1)
for t in [torch.DoubleTensor, torch.FloatTensor, torch.IntTensor, torch.ByteTensor]:
for y_var in (True, False):
y = torch.randint(5, (5, 5), dtype=t.dtype)
y = Variable(y) if y_var else y
self.assertIsInstance(x.type(t), t)
self.assertIsInstance(x.type_as(y), t)
# TODO: t.dtype should work
t_dtype = t().dtype
self.assertIsInstance(x.type(t_dtype), t)
self.assertIs(t_dtype, x.type(t_dtype).dtype)
self.assertEqual(y.data_ptr(), y.type(t).data_ptr())
if torch.cuda.is_available():
for x_cuda in (True, False):
for y_cuda in (True, False):
x_c = x.cuda() if x_cuda else x
y_c = y.cuda() if y_cuda else y
_, y_type = y_c.type().rsplit('.', 1)
y_typestr = ('torch.cuda.' if y_cuda else 'torch.') + y_type
self.assertEqual(y_c.type(), x_c.type(y_typestr).type())
self.assertIs(y_c.dtype, x_c.type(y_c.dtype).dtype)
self.assertEqual(y_c.data_ptr(), y_c.cuda().data_ptr() if y_cuda else y_c.data_ptr())
self._test_type_conversion_backward(lambda x: x)
if torch.cuda.is_available():
self._test_type_conversion_backward(lambda x: x.cuda())
if torch.cuda.device_count() >= 2:
# one of these has to be the non-default device
self._test_type_conversion_backward(lambda x: x.cuda(0))
self._test_type_conversion_backward(lambda x: x.cuda(1))
def test_isolated_node(self):
x = torch.randn(5, 5, requires_grad=True)
y = torch.randn(5, 5, requires_grad=True)
a = x + y
b = torch.max(a, 1, True)[1].repeat(1, 5).double()
o = (b + a).sum()
o.backward()
def test_shape(self):
x = torch.randn(3, 4)
self.assertEqual(2, len(x.shape))
self.assertEqual(x.shape[0], 3)
self.assertEqual(x.shape[1], 4)
def test_numpy_requires_grad(self):
x = torch.randn(2, 2, requires_grad=True)
err_msg_outputs = r"Can't call numpy\(\) on Tensor that requires grad. Use tensor.detach\(\).numpy\(\) instead."
with self.assertRaisesRegex(RuntimeError, err_msg_outputs):
x.numpy()
with torch.no_grad():
x.numpy()
x = torch.randn(2, 2)
x.numpy()
with torch.no_grad():
x.numpy()
def test_return_leaf(self):
class Identity(Function):
@staticmethod
def forward(ctx, a, b):
return a, a + b
@staticmethod
def backward(ctx, grad_a, grad_b):
return grad_a + grad_b, grad_b
hook_called = [False]
x = torch.randn(5, 5, requires_grad=True)
y = torch.randn(5, 5, requires_grad=True)
q, p = Identity.apply(x, y)
# Make sure hooks only receive grad from usage of q, not x.
def hook(grad):
hook_called[0] = True
self.assertEqual(grad, torch.ones(5, 5))
q.register_hook(hook)
(q + p + x).sum().backward()
self.assertEqual(x.grad, torch.ones(5, 5) * 3)
self.assertEqual(y.grad, torch.ones(5, 5))
self.assertTrue(hook_called[0])
def test_return_leaf_inplace(self):
class Inplace(InplaceFunction):
@staticmethod
def forward(ctx, a, b):
ctx.mark_dirty(a)
return a.add_(b), b + 2
@staticmethod
def backward(ctx, grad_a, grad_b):
return grad_a, grad_a + grad_b
x = torch.randn(5, 5)
y = torch.randn(5, 5, requires_grad=True)
q, p = Inplace.apply(x, y)
self.assertIs(q, x)
self.assertIs(q.grad_fn.__class__, Inplace._backward_cls)
self.assertTrue(q.requires_grad)
q.sum().backward()
self.assertEqual(y.grad, torch.ones(5, 5))
def test_leaf_assignment(self):
x = torch.randn(5, 5)
y = torch.randn(5, requires_grad=True)
z = torch.randn(5, requires_grad=True)
x[0] = y
x[1] = 2 * z
self.assertTrue(x.requires_grad)
self.assertIsNot(x.grad_fn, None)
x.sum().backward()
self.assertEqual(y.grad, torch.ones(5))
self.assertEqual(z.grad, torch.ones(5) * 2)
def test_no_grad_assignment(self):
x = torch.randn(5, 5, requires_grad=True)
y = torch.randn(5)
with torch.no_grad():
x[0] = y
self.assertTrue(x.requires_grad)
self.assertIsNone(x.grad_fn)
def test_no_grad_modifies_version(self):
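        # an in-place op under no_grad() still bumps the tensor's version counter, so the
        # value saved for the backward of x * y is detected as modified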
x = torch.randn(5, requires_grad=True)
y = torch.randn(5, requires_grad=True)
z = (x * y).sum()
with torch.no_grad():
x *= 2
self.assertRaisesRegex(RuntimeError, 'modified by an inplace operation',
lambda: z.backward())
def test_no_grad_input(self):
class MyFunction(Function):
@staticmethod
def forward(self, x):
return x
@staticmethod
def backward(self, grad_output):
return grad_output
x = torch.randn(5, requires_grad=True)
with torch.no_grad():
y = MyFunction.apply(x)
self.assertTrue(x.requires_grad)
self.assertIsNone(y.grad_fn)
def test_backward_copy(self):
        # This test checks the backward engine for a very subtle bug that appeared
# in one of the initial versions of autograd. Gradients tensors were
# simply stored in lists while the function waited for all its gradients
# to be computed. However, sometimes an output was used multiple times,
        # so the gradients needed to be summed. The engine used to keep a need_copy
        # set of tensors that would need a clone upon the next addition, and removed
# them from the set as soon as the clone was performed. However, this
# could lead to incorrect results if the same gradient tensor was
# buffered in three places in the graph:
# 1. When accumulating gradients in one of these places it was cloned
# and removed from need_copy set.
# 2. When accumulating in second place, it wasn't in the need_copy set,
# so the gradients were simply accumulated in-place (which already
# modified the grad in 3rd place)
# 3. When accumulating in the third place, it wasn't in the need_copy set
# as well, so the incoming gradient was summed in-place, yielding
# incorrect results in all functions, except the first one.
x = torch.ones(5, 5, requires_grad=True)
y = torch.ones(5, 5, requires_grad=True)
# Simulate that we're in the middle of the graph
a = x + 2
b = y + 2
c = x + 2
# This op will just return grad_output two times in backward
add1 = a + b
add2 = add1 + c
# Simulate a long branch, so grad_output will get buffered.
for _ in range(4):
a = a * 2
b = b * 2
c = c * 2
branch = a + b + c
out = add2 + branch
# expected gradients are:
# for x: 34 (16 from final a, 16 from final c, 2 from add2)
# for y: 17 (16 from final b, 1 from add2)
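        # (a, b and c are each doubled four times, so every branch path contributes
        # 2**4 = 16; add2 = a + b + c adds 1 per path, and x feeds both a and c)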
grad_output = torch.ones(5, 5)
out.backward(grad_output)
self.assertEqual(x.grad, torch.ones(5, 5) * 34)
self.assertEqual(y.grad, torch.ones(5, 5) * 17)
def test_save_none_for_backward(self):
test_case = self
class MyFn(Function):
@staticmethod
def forward(ctx, input):
ctx.save_for_backward(None, input, None)
return input * input
@staticmethod
def backward(ctx, grad_output):
n1, input, n2 = ctx.saved_tensors
test_case.assertIsNone(n1)
test_case.assertIsNone(n2)
return 2 * input * grad_output
x = torch.randn(5, 5, requires_grad=True)
y = MyFn.apply(x)
y.sum().backward()
self.assertEqual(x.grad, 2 * x)
def test_too_many_grads(self):
class MyFn(Function):
@staticmethod
def forward(ctx, input):
return input
@staticmethod
def backward(ctx, grad_output):
return grad_output, None, None
x = torch.randn(5, 5, requires_grad=True)
y = MyFn.apply(x)
y.sum().backward()
self.assertEqual(x.grad, torch.ones_like(x))
def test_pickle(self):
x = torch.randn(10, 10, requires_grad=True)
y = torch.randn(10, 10, requires_grad=False)
def assert_strict_equal(var1, var2):
self.assertEqual(var1, var2)
self.assertEqual(var1.requires_grad, var2.requires_grad)
serialized = [pickle.dumps([x, y], protocol=p) for p in range(3)]
for dump in serialized:
xc, yc = pickle.loads(dump)
assert_strict_equal(xc, x)
assert_strict_equal(yc, y)
def test_dep_nograd(self):
class F1(Function):
@staticmethod
def forward(ctx, input):
out = torch.randn(input.size())
ctx.mark_non_differentiable(out)
return input, out
@staticmethod
def backward(ctx, grad_output, ignored):
return grad_output
class F2(Function):
@staticmethod
def forward(ctx, input, ignored):
return input
@staticmethod
def backward(ctx, grad_output):
return grad_output, None
x = torch.randn(5, requires_grad=True)
a, b = F1.apply(x)
b = b + 1 # separate F1 from F2 by another op
self.assertTrue(a.requires_grad)
self.assertFalse(b.requires_grad)
c = F2.apply(a, b)
c.backward(torch.ones(c.size()))
self.assertEqual(x.grad, torch.ones(x.size()))
def test_set_grad_enabled(self):
x = torch.tensor([1.], requires_grad=True)
with torch.set_grad_enabled(False):
y = x * 2
self.assertFalse(y.requires_grad)
with torch.set_grad_enabled(True):
y = x * 2
self.assertTrue(y.requires_grad)
with torch.set_grad_enabled(False):
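            # calling set_grad_enabled() as a plain function flips the mode for the rest of the enclosing block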
torch.set_grad_enabled(True)
y = x * 2
self.assertTrue(y.requires_grad)
def test_simple_reentrant(self):
y_data = torch.randn(2, 2)
class Reenter(Function):
@staticmethod
def forward(ctx, x):
with torch.enable_grad():
ctx.x = Variable(x, requires_grad=True)
ctx.y = Variable(y_data, requires_grad=True)
ctx.output_var = ctx.x * ctx.y
return ctx.output_var.detach()
@staticmethod
def backward(ctx, grad_output):
with torch.enable_grad():
ctx.output_var.sum().backward()
return ctx.x.grad * grad_output
        # Reentrant starts on CPU thread, finishes on GPU thread
x = torch.randn(2, 2, requires_grad=True)
out = Reenter.apply(x)
out.sum().backward()
self.assertEqual(x.grad, y_data)
def test_reentrant_child_error(self):
# Parent graph.
a = torch.rand(3, 3, requires_grad=True)
c = a * a
# Reentrant child graph.
b = torch.rand(3, 3, requires_grad=True)
e = b * b
f = TestAutograd.SimulateBackwardError.apply(e)
reentrant_root = f.sum()
class ReentrantFunc(Function):
@staticmethod
def forward(ctx, inp):
return inp.clone()
@staticmethod
def backward(ctx, grad):
# Reentrant backward in child will throw an error.
reentrant_root.backward()
return grad
d = ReentrantFunc.apply(c)
with self.assertRaisesRegex(Exception, 'Simulate error'):
d.sum().backward()
def test_var_mean_differentiable(self):
dim = [2, 4]
keepdim = False
input1 = torch.randn(3, 4, 5, 6, 2, 3, requires_grad=True)
input2 = deepcopy(input1)
var1, mean1 = torch.var_mean(input1, dim=dim, keepdim=keepdim)
var2 = input2.var(dim=dim, keepdim=keepdim)
mean2 = input2.mean(dim=dim, keepdim=keepdim)
grad = torch.randn(3, 4, 6, 3, requires_grad=True)
r1 = var1 * var1 * mean1 * mean1
r2 = var2 * var2 * mean2 * mean2
self.assertEqual(r1, r2, rtol=0.01, atol=0.0)
torch.autograd.backward(r1, grad)
torch.autograd.backward(r2, grad)
self.assertEqual(input1.grad, input2.grad, rtol=0.01, atol=0.0)
@skipIfNoLapack
def test_lobpcg(self):
def func(k, A, largest=True, B=None):
X_shape = list(A.shape)
X_shape[-1] = k
X = torch.eye(A.size(-2), k, dtype=A.dtype, device=A.device)
if A.dim() > 2:
X = X.expand(X_shape)
D, U = torch.lobpcg(A=A, k=k, B=B, X=X, largest=largest)
# LOBPCG uses a random initial eigenspace approximation
# if parameter `X` is not provided.
            # This may cause non-deterministic behavior
# when it comes to the sign of an eigenvector
# (note if v is an eigenvector, so is -v),
# hence we eliminate this non-determinism
# by making sure that each column of U
# gets multiplied by the sign of its max (in absolute value) element.
            # Also, gradcheck changes the content of the input by +/- eps (defaults to 1e-06)
# to compute the numerical gradient which can also cause the signs to flip.
_, idx = U.abs().max(-2, keepdim=True)
sign = U.gather(-2, idx).sign()
U = U * sign
return D, U
# TODO: review if this can be ported to OpInfos or moved to test_linalg.py
def run_symeig_test(k, sizes, largest=True):
A = torch.rand(*sizes).double()
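            # A @ A.mT is symmetric positive semi-definite, as lobpcg expects; the division presumably just keeps the entries small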
A = (A @ A.mT) / 10
A.requires_grad_(True)
gradcheck(lambda A: func(k, A, largest), A, check_batched_grad=False)
# Custom gradient vectors for better stability due to some
            # non-determinism in lobpcg's forward.
            # Note this is not required if symeig is used in the forward instead (tested).
D_grad = torch.rand(*A.shape[:-2], k) / 100
U_grad = torch.rand(*A.shape[:-1], k) / 100
gradgradcheck(lambda A: func(k, A, largest), A, [D_grad, U_grad], atol=1e-4, check_batched_grad=False)
# check whether A.grad is symmetric
A = A.detach().requires_grad_(True)
D, U = func(k, A, largest)
(D.sum() + U.sum()).backward()
self.assertEqual(A.grad, A.grad.mT)
for largest in [True, False]:
run_symeig_test(1, (6, 6), largest=largest)
run_symeig_test(1, (2, 6, 6), largest=largest)
run_symeig_test(1, (2, 2, 6, 6), largest=largest)
run_symeig_test(2, (6, 6), largest=largest)
run_symeig_test(2, (2, 6, 6), largest=largest)
run_symeig_test(2, (2, 2, 6, 6), largest=largest)
run_symeig_test(3, (9, 9), largest=largest)
run_symeig_test(3, (2, 9, 9), largest=largest)
run_symeig_test(3, (2, 2, 9, 9), largest=largest)
def test_variable_traverse(self):
def get_out_and_unrefed_cycle():
inp = torch.randn(10, requires_grad=True)
tmp = inp.view(10, 1)
out = tmp.view(10)
# Create a reference cycle that contains an
# intermediary Variable in the graph
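            # (the list holds tmp and also itself, so only the cyclic GC can collect it)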
my_list = []
my_list.append(tmp)
my_list.append(my_list)
return out
out = get_out_and_unrefed_cycle()
gc.collect()
# This will segfault if things have been erroneously released
out.backward(torch.randn(out.size()))
# TODO: review porting these to OpInfo tests
def test_pow_zero_tensor_gradient(self):
def run_test(input_size, exponent):
input = torch.zeros(*input_size, requires_grad=True)
input.pow(exponent).sum().backward()
self.assertEqual(input.grad.abs().sum(), 0)
run_test((10,), torch.zeros(10))
run_test((10, 10), torch.zeros(10, 10))
run_test((10,), 0)
def test_profiler(self):
x = torch.randn(10, 10)
with profile(use_kineto=kineto_available()) as p:
self.assertTrue(torch.autograd._profiler_enabled())
y = x * 2 + 4
self.assertFalse(torch.autograd._profiler_enabled())
names = ['aten::mul', 'aten::add']
found_indices = set()
for evt in p.function_events:
if evt.name in names:
found_indices.add(names.index(evt.name))
self.assertEqual(len(found_indices), len(names))
def test_profiler_seq_nr(self):
with profile(use_kineto=kineto_available()) as p:
x = torch.randn(10, 10, requires_grad=True)
y = torch.randn(10, 10, requires_grad=True)
z = x + y
s = z.sum()
s.backward()
print(p.key_averages().table(
sort_by="self_cpu_time_total", row_limit=-1))
# expecting aten::add, aten::sum to have the sequence numbers,
# expecting the corresponding backward nodes to have the same numbers
# as the forward ops
add_seq_nr = -1
sum_seq_nr = -1
found_add = found_sum = False
found_bwd_add = found_bwd_sum = False
found_empty = False
for e in p.function_events:
# Ignore record_function user scope.
if "autograd::engine::evaluate_function" in e.name:
continue
if e.name == "aten::add":
add_seq_nr = e.sequence_nr
self.assertFalse(found_add)
found_add = True
elif e.name == "aten::sum":
sum_seq_nr = e.sequence_nr
self.assertFalse(found_sum)
found_sum = True
elif "Add" in e.name and "Backward" in e.name:
self.assertEqual(e.sequence_nr, add_seq_nr)
self.assertFalse(found_bwd_add)
found_bwd_add = True
elif "Sum" in e.name and "Backward" in e.name:
self.assertEqual(e.sequence_nr, sum_seq_nr)
self.assertFalse(found_bwd_sum)
found_bwd_sum = True
# check that nested ops (e.g. empty) don't have
            # a sequence number
if e.name == "aten::empty":
self.assertEqual(e.sequence_nr, -1)
found_empty = True
self.assertGreaterEqual(add_seq_nr, 0)
self.assertGreaterEqual(sum_seq_nr, 0)
self.assertNotEqual(add_seq_nr, sum_seq_nr)
self.assertTrue(found_add)
self.assertTrue(found_sum)
self.assertTrue(found_bwd_add)
self.assertTrue(found_bwd_sum)
self.assertTrue(found_empty)
def test_profiler_unboxed_only(self):
x = torch.rand(3, 4)
with torch.autograd.profiler.profile(use_kineto=kineto_available()) as prof:
x.resize_([3, 2])
def test_profiler_propagation(self):
def foo(x):
with record_function("in_foo") as rf:
return x * 2
x = torch.rand(3, 4)
traced_foo = torch.jit.trace(foo, x)
def bar(x):
with record_function("in_bar") as rf:
                # we expect that the profiler will be able
                # to propagate across fork
fut = torch.jit._fork(traced_foo, x)
y = torch.jit._wait(fut)
# note: continuation (and rf's end) can
# be executed in a different thread
with record_function("in_bar_after_wait") as rf2:
y = y * 2
return y
traced_bar = torch.jit.trace(bar, x)
with profile(use_kineto=kineto_available()) as p:
traced_bar(x)
found_foo = False
found_bar = False
found_bar_after_wait = False
for info in p.function_events:
if info.name == "in_foo":
self.assertFalse(found_foo)
found_foo = True
elif info.name == "in_bar":
self.assertFalse(found_bar)
found_bar = True
elif info.name == "in_bar_after_wait":
self.assertFalse(found_bar_after_wait)
found_bar_after_wait = True
self.assertTrue(found_foo)
self.assertTrue(found_bar)
self.assertTrue(found_bar_after_wait)
def test_record_function_callbacks(self):
x = torch.randn(10, 10)
with profile(use_kineto=kineto_available()) as p:
with record_function("foo"):
y = x * 2 + 4
function_events = p.function_events
foo_event = [event for event in function_events if "foo" in event.name][0]
self.assertEqual(foo_event.count, 1)
def test_record_function_new_signatures(self):
# Test the new _record_function ops work
# Note: Remove once record_function uses these directly
x = torch.randn(10, 10)
with profile(use_kineto=kineto_available()) as p:
record = torch.ops.profiler._record_function_enter_new("bar", None)
try:
y = x * 2 + 4
finally:
torch.ops.profiler._record_function_exit(record)
function_events = p.function_events
        bar_event = [event for event in function_events if "bar" in event.name][0]
        self.assertEqual(bar_event.count, 1)
def test_profiler_aggregation_fake(self):
events = EventList()
id = [0]
def get_id():
id[0] = id[0] + 1
return id[0]
# [[thread_id, [(start, end, id), ....]], ...]
# Using list instead of a dict so order is guaranteed for any Python
# version
threads = [
[1, [(0, 1, get_id()), (1, 2, get_id())]],
[0, [(0, 2, get_id()), (1, 2, get_id()), (1, 3, get_id())]],
]
for thread, ranges in threads:
            for rng in ranges:
                assert len(rng) == 3
                events.append(
                    FunctionEvent(
                        id=rng[2],
                        node_id=0,
                        name="",
                        thread=thread,
                        start_us=rng[0],
                        end_us=rng[1],
)
)
events._populate_cpu_children()
# Note that [1, 3] pushes out [0, 2] first. Then we record [1, 2]
# as a child of [1, 3]
res = [[], [], [], [], [4]]
def get_children_ids(event):
return [child.id for child in event.cpu_children]
assert([get_children_ids(event) for event in events] == res)
def test_profiler_aggregation_table(self):
"""
Test if the profiling result is aggregated for `str(prof)`
See: https://github.com/pytorch/pytorch/issues/37500
"""
x = torch.randn(1024)
with torch.autograd.profiler.profile(use_kineto=kineto_available()) as prof:
torch.einsum("i->", x)
prof_str = str(prof)
prof_table = prof.table()
self.assertEqual(prof_table, prof_str)
def test_profiler_function_event_avg(self):
avg = FunctionEventAvg()
avg.add(FunctionEvent(id=0, node_id=0, name="foo", thread=0, start_us=10, end_us=15))
avg.add(FunctionEvent(id=1, node_id=0, name="foo", thread=0, start_us=20, end_us=30))
avg.add(avg)
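        # the two events last 5us and 10us; add(avg) then doubles the aggregate,
        # giving count 4 and cpu_time_total 30, hence cpu_time 30 / 4 = 7.5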
self.assertEqual(avg.key, "foo")
# aggregate stats
self.assertEqual(avg.count, 4)
self.assertEqual(avg.cpu_time_total, 30)
self.assertEqual(avg.self_cpu_time_total, 30)
self.assertEqual(avg.cuda_time_total, 0)
# average stats
self.assertEqual(avg.cpu_time, 7.5)
self.assertEqual(avg.cuda_time_total, 0)
def test_profiler_shapes(self):
print("")
layer1 = torch.nn.Linear(20, 30)
layer2 = torch.nn.Linear(30, 40)
input = torch.randn(128, 20)
with profile(record_shapes=True, use_kineto=kineto_available()) as prof:
layer2(layer1(input))
print(prof.function_events)
linear_expected_shapes = [
[[128, 20], [30, 20], [30]],
[[128, 30], [40, 30], [40]],
]
found_indices = set()
for event in prof.function_events:
if event.name == "aten::linear":
self.assertTrue(event.input_shapes in linear_expected_shapes)
found_indices.add(linear_expected_shapes.index(event.input_shapes))
self.assertEqual(len(found_indices), len(linear_expected_shapes))
def test_profiler_aggregation_lstm(self):
print("")
rnn = torch.nn.LSTM(10, 20, 2)
total_time_s = 0
with profile(record_shapes=True, use_kineto=kineto_available()) as prof:
for i in range(20):
input = torch.randn(5, 3, 10)
h = torch.randn(2, 3, 20)
c = torch.randn(2, 3, 20)
start = time.time()
rnn(input, (h, c))
end = time.time()
total_time_s += end - start
print(prof.table(
sort_by="self_cpu_time_total", row_limit=10, header="TEST"))
print(prof.key_averages(group_by_input_shape=True).table(
sort_by="self_cpu_time_total", row_limit=10))
print(prof.table(
sort_by="self_cpu_time_total", row_limit=10, max_src_column_width=300, header="TEST", top_level_events_only=True))
print(prof.key_averages(group_by_input_shape=True).table(
sort_by="self_cpu_time_total", row_limit=10, top_level_events_only=True))
        total_time_us = total_time_s * 1000.0 * 1000.0  # convert to us, which is the profiler's default unit
print(
"Total time based on python measurements: ",
_format_time(total_time_us)
)
print(
"CPU time measurement python side overhead: {:.2f}%".format(
(total_time_us / prof.self_cpu_time_total - 1.0) * 100.0
)
)
if sys.platform != "win32":
with tempfile.NamedTemporaryFile() as trace_file:
prof.export_chrome_trace(trace_file.name)
def test_record_function(self):
x = torch.randn(10, 10)
def forward(x):
with record_function("outer"):
y = x * 2 + 4
with record_function("inner"):
y = y - 1
y = y / 1
forward(x)
with profile(use_kineto=kineto_available()) as p:
forward(x)
events = p.function_events
important_events = [
'outer',
'aten::mul',
'aten::add',
'inner',
'aten::sub',
'aten::div'
]
idx = 0
for info in events:
if info.name == important_events[idx]:
idx = idx + 1
if idx == len(important_events):
break
self.assertEqual(idx, len(important_events))
# We can also use record_function to decorate arbitrary function
@record_function('my_func')
def f(x, y):
return x + y
with profile(use_kineto=kineto_available()) as p:
f(1, 2)
self.assertTrue('my_func' in str(p))
def test_record_function_multithreaded(self):
rf = record_function("outer")
rf.__enter__()
with record_function("inner"):
# test that exiting the record function after starting another one
# doesn't throw.
rf.__exit__(None, None, None)
with record_function("inner"):
rf.__enter__()
# test that exiting the record function after ending another one
# doesn't throw.
rf.__exit__(None, None, None)
def test_dir(self):
x = torch.randn(10, 10)
keys = dir(x)
self.assertIn('shape', keys)
# real and imag are only implemented for complex tensors.
y = torch.randn(10, 10, dtype=torch.cfloat)
imag_key = 'imag'
self.assertRaises(RuntimeError, lambda: hasattr(x, imag_key))
self.assertTrue(hasattr(y, imag_key))
keys.remove(imag_key)
for key in keys:
self.assertTrue(hasattr(x, key))
def test_inplace_on_view_saved_output(self):
# Test an in-place operation on a view in which the in-place op saves
# its output. Previously, this created a reference cycle.
dealloc = [0]
class IncrementOnDelete(object):
def __del__(self):
dealloc[0] += 1
def test():
root = torch.randn(3, 3, requires_grad=True)
copy = root.clone()
copy.grad_fn.register_hook(IncrementOnDelete())
view = copy.view(9)
torch.nn.functional.relu(view, inplace=True)
test()
self.assertEqual(dealloc[0], 1)
def test_inplace_on_view_leaf_errors(self):
# Issue #21875: Fail faster (when we try to modify the view vs. in backward())
x = torch.zeros(1, requires_grad=True)
y = x.view_as(x)
with self.assertRaisesRegex(RuntimeError,
"a view of a leaf Variable that "
"requires grad is being used in "
"an in-place operation."):
y.add_(1)
def test_inplace_on_view_backward(self):
# Issue #10532: Make sure that this does not raise RuntimeError.
net = nn.Sequential(
nn.InstanceNorm2d(2),
nn.ReLU(True)
)
x = torch.tensor([[[[1.0, 1.0]]]], requires_grad=True)
        g, = torch.autograd.grad(net(x).pow(2), [x], grad_outputs=x.new_ones(x.shape), create_graph=True)
torch.autograd.grad(g.sum(), [x])
self.assertEqual(x, torch.tensor([[[[1.0, 1.0]]]]))
# https://discuss.pytorch.org/t/freeing-buffer-strange-behavior/31955/8
inputs = torch.ones((1, 3, 256, 256), requires_grad=True)
tmp1 = (inputs + 1).view_as(inputs)
tmp2 = torch.nn.functional.threshold(tmp1, 0., 0., True)
prob_interpolated = torch.sigmoid(tmp2)
gradients = torch.autograd.grad(outputs=prob_interpolated, inputs=inputs,
grad_outputs=torch.ones(prob_interpolated.size()),
create_graph=True, retain_graph=True)[0]
gradient_penalty = gradients.sum()
gradient_penalty.backward()
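        # inspect the double-backward graph to check that a threshold backward-of-backward node is present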
fn = gradient_penalty.grad_fn.next_functions[0][0].next_functions[1][0]
self.assertEqual(fn.name(), "ThresholdBackwardBackward0")
def test_inplace_on_view_weak_grad_fn(self):
# Issue 23502: Test that b's grad_fn is preserved.
a = torch.arange(10.0, requires_grad=True)
b = a.narrow(0, 0, 2).clone().view(-1)
b.relu_()
c = b.clone()
del b
gc.collect()
s = c.sum()
s.backward()
self.assertEqual(s, torch.tensor(1.0))
# Issue #21875: Fail faster (when we try to modify the view vs. in backward())
a = torch.rand(10, requires_grad=True).narrow(0, 0, 10)
with self.assertRaises(RuntimeError):
b = a.relu_()
def test_out_variant_raises_when_inputs_require_grad(self):
a = torch.randn(2, 2, requires_grad=True)
b = torch.randn(2, 2, requires_grad=True)
x = torch.zeros_like(a)
# out=... functions don't support automatic differentiation currently
self.assertRaisesRegex(RuntimeError, 'out=', lambda: torch.mul(a, b, out=x))
# the inputs can require grad if we're in no_grad() mode
with torch.no_grad():
torch.mul(a, b, out=x)
self.assertEqual(x, a * b)
a = torch.randn(2, 2)
b = torch.randn(2, 2)
x = torch.zeros(2, 2, requires_grad=True)
# we should throw an exception if the output requires grad
self.assertRaisesRegex(RuntimeError, 'out=', lambda: torch.mul(a, b, out=x))
# TODO: see if this test can be OpInfo'd or moved to diagonal's test suite
def test_diagonal_derivative_requires_grad(self):
# test that the backward requires grad
        # we do this because diagonal_backward uses in-place
        # operations and gradgradcheck does not catch whether
        # they work as expected (it will succeed even if
        # the gradient has requires_grad == False)
a = torch.randn(5, 6, requires_grad=True)
b = torch.diagonal(a)**2
c = b.sum()
d, = torch.autograd.grad(c, a, retain_graph=True, create_graph=True)
self.assertTrue(d.requires_grad)
def test_anomaly_detect_nan(self):
size = 10
class MyFunc(Function):
@staticmethod
def forward(ctx, inp1, inp2, fail_0th):
ctx.fail_0th = fail_0th
return inp1.sum(0, keepdim=True)
@staticmethod
def backward(ctx, gO):
gI = gO.clone().expand(size)
gI[0] = 0
gI[0] /= 0 # Generate a nan
if ctx.fail_0th:
return gI, None, None
else:
return None, gI, None
inp = torch.rand(size, requires_grad=True)
out = MyFunc.apply(inp, inp, True)
out.backward() # Should not fail
inp = torch.rand(size, requires_grad=True)
out = MyFunc.apply(inp, inp, True)
with self.assertRaisesRegex(RuntimeError, "Function 'MyFuncBackward' returned nan values in its 0th output."):
with warnings.catch_warnings(record=True) as w:
with detect_anomaly():
out.backward()
self.assertIn('No forward pass information', str(w[0].message))
inp = torch.rand(size, requires_grad=True)
with self.assertRaisesRegex(RuntimeError, "Function 'MyFuncBackward' returned nan values in its 1th output."):
with warnings.catch_warnings(record=True) as w:
with detect_anomaly():
out = MyFunc.apply(inp, inp, False)
out.backward()
self.assertIn('MyFunc.apply', str(w[0].message))
def test_nested_anomaly_detect_nan(self):
size = 10
class MyFunc(Function):
@staticmethod
def forward(ctx, inp1, fail_0th):
ctx.fail_0th = fail_0th
ctx.save_for_backward(inp1)
return inp1.sum(0, keepdim=True)
@staticmethod
def backward(ctx, gO):
inp, = ctx.saved_tensors
fail_0th = ctx.fail_0th
g = gO.clone().expand(size)
gI = MyFunc2.apply(g * inp, g + inp, fail_0th)
return gI, None
class MyFunc2(Function):
@staticmethod
def forward(ctx, inp1, inp2, fail_0th):
ctx.fail_0th = fail_0th
return inp1 * 2.0 + inp2
@staticmethod
def backward(ctx, gO):
fail_0th = ctx.fail_0th
g1 = gO.clone()
g2 = gO.clone()
g1[0] = 0
g2[0] = 0
# generate a nan
if fail_0th:
g1[0] /= 0
else:
g2[0] /= 0
return g1, g2, None
inp = torch.rand(size, requires_grad=True)
out = MyFunc.apply(inp, True)
ginp, = torch.autograd.grad(out, (inp,), create_graph=True)
gsum = ginp.sum()
gsum.backward() # should not fail
inp = torch.rand(size, requires_grad=True)
out = MyFunc.apply(inp, True)
ginp, = torch.autograd.grad(out, (inp,), create_graph=True)
gsum = ginp.sum()
with warnings.catch_warnings(record=True) as w:
with self.assertRaisesRegex(RuntimeError, "Function 'MyFunc2Backward' returned nan values in its 0th output."):
with detect_anomaly():
gsum.backward()
self.assertIn('No forward pass information', str(w[1].message))
inp = torch.rand(size, requires_grad=True)
with warnings.catch_warnings(record=True) as w:
with self.assertRaisesRegex(RuntimeError, "Function 'MyFunc2Backward' returned nan values in its 1th output."):
with detect_anomaly():
out = MyFunc.apply(inp, False)
ginp, = torch.autograd.grad(out, (inp,), create_graph=True)
gsum = ginp.sum()
gsum.backward()
self.assertIn('MyFunc2.apply', str(w[1].message))
self.assertIn('MyFunc.apply', str(w[2].message))
def test_anomaly_grad_warnings(self):
# PyTorch won't throw warnings if there is an error,
# but we want to at least see them in stderr
class StdErrDiverter:
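# Temporarily swaps sys.stderr for a StringIO so that anything printed to
# stderr inside the `with` block can be inspected via self.captured afterwards.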
def __enter__(self):
self.stderr_orig = sys.stderr
self.stderr_new = io.StringIO()
sys.stderr = self.stderr_new
return self
def __exit__(self, *args):
self.captured = self.stderr_new.getvalue()
sys.stderr = self.stderr_orig
# if the warnings don't throw, they will be handled as regular warnings
with self.assertRaisesRegex(RuntimeError,
"one of the variables needed for gradient computation has been "
"modified by an inplace operation"):
with warnings.catch_warnings(record=True) as w:
with detect_anomaly():
a = torch.randn(5, requires_grad=True)
d1 = a + 1
d2 = d1 ** 2
d1 += 1
torch.autograd.grad(d2.sum(), a)
self.assertEqual(len(w), 2)
self.assertIn('Anomaly Detection has been enabled', str(w[0].message))
self.assertIn('Error detected in PowBackward0', str(w[1].message))
# if the warning throws, it will be printed to sys.stderr
with self.assertRaisesRegex(RuntimeError,
"one of the variables needed for gradient computation has been "
"modified by an inplace operation"):
with warnings.catch_warnings(record=True) as w:
with detect_anomaly():
warnings.simplefilter("error")
with StdErrDiverter() as s:
a = torch.randn(5, requires_grad=True)
d1 = a + 1
d2 = d1 ** 2
d1 += 1
torch.autograd.grad(d2.sum(), a)
self.assertEqual(len(w), 1)
self.assertIn('Anomaly Detection has been enabled', str(w[0].message))
self.assertIn('Error detected in PowBackward0', s.captured)
def test_anomaly_assign_parent_cleanup(self):
# Test that python objects created are properly cleaned up when assign_parent is called
import weakref
def get_ref():
# we use torch.exp here but any function that will construct a new node in its
# backward call in grad mode will work
x = torch.randn(2, 2, requires_grad=True)
t = x.exp()
# ExpBackward calls mul, creating the MulBackward node when create_graph=True.
# In anomaly mode, a PyObject referencing MulBackward's "parent" ExpBackward is added to
# MulBackward's anomaly metadata dict, creating the following reference chain:
#
# grad -> MulBackward -> PyObject -> ExpBackward
#
with detect_anomaly():
grad = torch.autograd.grad(t, x, torch.ones_like(t), create_graph=True)
# We add a weak reference to a new Foo object, which we insert into ExpBackward's metadata dict
#
# (PyObject) -> ExpBackward -> dict -> *Foo*
# t ----^ WeakRef ---^
#
# We want to test that the PyObject is destroyed when grad goes out of scope at the end of this function
# We can test this by checking that Foo is no longer kept alive once t is destroyed
class Foo(object):
pass
my_obj = Foo()
meta_dict = t.grad_fn.metadata
meta_dict[0] = my_obj
ref = weakref.ref(my_obj)
return t, ref
t, ref = get_ref()
self.assertIsNotNone(ref())
del t
self.assertIsNone(ref())
def test_nested_anomaly_printstack_cleanup(self):
# Test if metadata dict PyObject is properly destroyed
import weakref
def get_ref():
# This is similar to the construction in test_anomaly_assign_parent_cleanup:
#
# MyFunc2Backward -> PyObject -> MyFuncBackward -> dict -> Foo
# out ---^ WeakRef ---^
#
# We want to check that Foo is still properly destroyed even when MyFunc2Backward's
# AnomalyMetadata calls printstack, which does some python object manipulation.
#
# You might be wondering why we still need test_anomaly_assign_parent_cleanup,
# since if the PyObject were not destroyed here, wouldn't this test detect that too?
# The answer is that the custom function's PyObject (THPFunction) actually only holds
# a weak reference to the c++ node!
class MyFunc(Function):
@staticmethod
def forward(ctx, x):
ctx.save_for_backward(x)
return x
@staticmethod
def backward(ctx, gO):
x, = ctx.saved_tensors
return MyFunc2.apply(x)
class MyFunc2(Function):
@staticmethod
def forward(ctx, x):
return x
@staticmethod
def backward(ctx, gO):
return gO + float("NaN")
inp = torch.rand(1, requires_grad=True)
out = MyFunc.apply(inp)
ginp, = torch.autograd.grad(out, (inp,), create_graph=True)
with warnings.catch_warnings(record=True) as w:
with self.assertRaisesRegex(RuntimeError, "Function 'MyFunc2Backward' returned nan values in its 0th output."):
with detect_anomaly():
ginp.backward()
class Foo(object):
pass
my_obj = Foo()
meta_dict = out.grad_fn.metadata
meta_dict[0] = my_obj
ref = weakref.ref(my_obj)
return out, ref
t, ref = get_ref()
self.assertIsNotNone(ref())
del t
self.assertIsNone(ref())
# TODO: update these tests to use the linalg module and move to test_linalg.py
@skipIfNoLapack
def test_eig_no_eigenvectors(self):
A = torch.tensor([[1., 2.], [2., 4.]], dtype=torch.float32, requires_grad=True)
w, v = torch.eig(A, eigenvectors=False)
with self.assertRaisesRegex(RuntimeError, 'is not differentiable'):
torch.autograd.backward([w, v], [torch.ones_like(w), torch.ones_like(v)])
@skipIfNoLapack
def test_eig_complex_eigenvalues(self):
A = torch.tensor([[0., -1.], [1., 0.]], dtype=torch.float32, requires_grad=True)
w, v = torch.eig(A, eigenvectors=True)
with self.assertRaisesRegex(RuntimeError, 'does not support complex eigenvalues'):
torch.autograd.backward([w, v], [torch.ones_like(w), torch.ones_like(v)])
@skipIfNoLapack
def test_symeig_no_eigenvectors(self):
A = torch.tensor([[1., 2.], [2., 4.]], dtype=torch.float32, requires_grad=True)
w, v = torch.symeig(A, eigenvectors=False)
with self.assertRaisesRegex(RuntimeError, 'is not differentiable'):
torch.autograd.backward([w, v], [torch.ones_like(w), torch.ones_like(v)])
def test_no_grad_copy(self):
# create autograd function that saves grad pointer as class static
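# The engine may hand the incoming gradient buffer directly to one of the
# accumulating leaves instead of copying it; recording grad.data_ptr() in
# backward lets the asserts below check which of a.grad / b.grad (if any)
# aliases that buffer.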
class MyFunc(Function):
static_grad_ptr = None
@staticmethod
def forward(ctx, inp1, inp2):
return inp1 + inp2
@staticmethod
def backward(ctx, grad):
MyFunc.static_grad_ptr = grad.data_ptr()
return grad, grad
class NonContGradFunc(Function):
@staticmethod
def forward(ctx, inp1):
ctx.size = inp1.size()
return torch.tensor([1.])
@staticmethod
def backward(ctx, grad):
return torch.ones(1).expand(ctx.size)
a = torch.randn(5, 6, requires_grad=True)
b = torch.randn(5, 6, requires_grad=True)
# non-contiguous grad should be copied
NonContGradFunc.apply(MyFunc.apply(a, b)).backward()
self.assertFalse(a.grad.data_ptr() == MyFunc.static_grad_ptr)
self.assertFalse(b.grad.data_ptr() == MyFunc.static_grad_ptr)
# test case that should trigger no copy for one of a,b
a.grad = b.grad = None
MyFunc.apply(a, b)[1][0].backward()
p_g = MyFunc.static_grad_ptr
p_a = a.grad.data_ptr()
p_b = b.grad.data_ptr()
# check a, b use different grad buffers
self.assertFalse(p_a == p_b)
# check one of them is using the computed buffer
self.assertTrue(p_a == p_g or p_b == p_g)
def test_no_grad_copy_sparse(self):
# create autograd function that saves grad pointer as class static
class MyFunc(Function):
static_grad_ptr = None
@staticmethod
def forward(ctx, inp1, inp2):
return inp1 + inp2
@staticmethod
def backward(ctx, grad):
MyFunc.static_grad_ptr = grad._values().data_ptr()
return grad, grad
class NonContGradFunc(Function):
static_grad_ptr = None
@staticmethod
def forward(ctx, inp1, inp2):
return inp1 + inp2
@staticmethod
def backward(ctx, grad):
# Create a sparse tensor with non-contiguous indices and values
# and return as grad.
v = torch.rand(1, 3)
i = torch.ones(1, 1, dtype=torch.long)
nv = v.expand(8, 3)
ni = i.expand(1, 8)
ngrad = torch.sparse.FloatTensor(ni, nv, torch.Size([10, 3]))
NonContGradFunc.static_grad_ptr = ngrad._values().data_ptr()
return ngrad, ngrad
a = torch.randn(10, 3, requires_grad=True)
b = torch.randn(10, 3, requires_grad=True)
input = torch.tensor([1, 2, 4, 5, 4, 3, 2, 9])
offsets = torch.tensor([0, 4])
import torch.nn.functional as F
# test case that should trigger no copy for one of a,b
emb_matrix = MyFunc.apply(a, b)
loss = F.embedding_bag(emb_matrix, input, offsets, sparse=True).sum()
loss.backward(retain_graph=True)
p_g = MyFunc.static_grad_ptr
p_a = a.grad._values().data_ptr()
p_b = b.grad._values().data_ptr()
# check a, b use different grad buffers
self.assertFalse(p_a == p_b)
# check one of them is using the computed buffer
self.assertTrue(p_a == p_g or p_b == p_g)
# Run backwards multiple times to ensure accumulation works.
for i in range(10):
loss.backward(retain_graph=True)
# non-contiguous indices and values should trigger a copy.
a.grad = b.grad = None
emb_matrix = NonContGradFunc.apply(a, b)
loss = F.embedding_bag(emb_matrix, input, offsets, sparse=True).sum()
loss.backward(retain_graph=True)
p_g = NonContGradFunc.static_grad_ptr
p_a = a.grad._values().data_ptr()
p_b = b.grad._values().data_ptr()
# check a, b use different grad buffers
self.assertFalse(p_a == p_b)
# Verify we cloned both grads.
self.assertFalse(p_a == p_g)
self.assertFalse(p_b == p_g)
# Run backwards multiple times to ensure accumulation works.
for i in range(10):
loss.backward(retain_graph=True)
def test_gradcheck_single_input(self):
def check(fast_mode):
def f(inp):
return inp.mul(5)
gradcheck(f, torch.rand(10, dtype=torch.float64, requires_grad=True), fast_mode=fast_mode)
gradgradcheck(f, torch.rand(10, dtype=torch.float64, requires_grad=True), fast_mode=fast_mode)
check(fast_mode=True)
check(fast_mode=False)
def test_gradcheck_sparse_input(self):
def check(fast_mode):
def fn(sparse):
return torch.sparse.sum(sparse)
gradcheck(fn, torch.rand(10, dtype=torch.double).to_sparse().requires_grad_(True), check_sparse_nnz=True,
check_batched_grad=False, fast_mode=fast_mode)
with self.assertRaisesRegex(RuntimeError, 'gradcheck expects all tensor inputs are dense'):
gradcheck(fn, torch.rand(10, dtype=torch.double).to_sparse().requires_grad_(True), check_sparse_nnz=False,
check_batched_grad=False, fast_mode=fast_mode)
check(fast_mode=True)
check(fast_mode=False)
@unittest.expectedFailure
def test_gradcheck_sparse_csr_input(self):
def check(fast_mode):
def fn(sparse_csr):
return torch.clone(sparse_csr).to_dense()
# Fails because gradcheck can't work with sparse csr inputs yet
gradcheck(fn, torch.rand(2, 2, dtype=torch.double).to_sparse_csr().requires_grad_(True), check_sparse_nnz=True,
check_batched_grad=False, fast_mode=fast_mode)
with self.assertRaisesRegex(RuntimeError, 'gradcheck expects all tensor inputs are dense'):
gradcheck(fn, torch.rand(2, 2, dtype=torch.double).to_sparse_csr().requires_grad_(True), check_sparse_nnz=False,
check_batched_grad=False, fast_mode=fast_mode)
# check(fast_mode=True) # Segmentation fault
check(fast_mode=False)
def test_gradcheck_nondeterministic(self):
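# gradcheck runs backward more than once and compares the results; the jitter
# below makes backward non-reentrant (results differ between runs), which
# should be flagged unless nondet_tol is large enough to absorb the difference.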
class NonDetFunc(Function):
@staticmethod
def forward(ctx, x, jitter=0.0):
ctx._jitter = jitter
return x
@staticmethod
def backward(ctx, grad_out):
return NonDetFunc.apply(grad_out, ctx._jitter) * (1 + torch.rand_like(grad_out) * ctx._jitter), None
def check(fast_mode):
inp = torch.randn(5, 5, dtype=torch.double, requires_grad=True)
gradcheck(lambda x: NonDetFunc.apply(x, 0.0), inp, check_batched_grad=False, fast_mode=fast_mode)
with self.assertRaisesRegex(RuntimeError, 'Backward is not reentrant'):
gradcheck(lambda x: NonDetFunc.apply(x, 1e-6), inp, check_batched_grad=False, fast_mode=fast_mode)
with self.assertRaisesRegex(RuntimeError, 'Backward is not reentrant'):
gradgradcheck(lambda x: NonDetFunc.apply(x, 1e-12), inp, check_batched_grad=False, fast_mode=fast_mode)
gradcheck(lambda x: NonDetFunc.apply(x, 0.0), inp, nondet_tol=1e-5, check_batched_grad=False,
fast_mode=fast_mode)
gradcheck(lambda x: NonDetFunc.apply(x, 1e-6), inp, nondet_tol=1e-5, check_batched_grad=False,
fast_mode=fast_mode)
gradgradcheck(lambda x: NonDetFunc.apply(x, 1e-12), inp, nondet_tol=1e-5, check_batched_grad=False,
fast_mode=fast_mode)
check(fast_mode=True)
check(fast_mode=False)
def test_gradcheck_validates_inputs(self):
def check(fast_mode):
# when inputs are not dense, but check_sparse_nnz is false
x = torch.rand(10, requires_grad=True).to_sparse()
with self.assertRaisesRegex(RuntimeError, 'dense when check_sparse_nnz is set to False.'):
gradcheck(lambda x: x.to_dense(), (x,), check_sparse_nnz=False, check_batched_grad=False,
fast_mode=fast_mode)
self.assertFalse(gradcheck(lambda x: x.to_dense(), (x,), check_sparse_nnz=False,
check_batched_grad=False, raise_exception=False, fast_mode=fast_mode))
# when none of the inputs require grad (always raises even if raise_exception=False)
x = torch.rand(10, requires_grad=False)
with self.assertRaisesRegex(ValueError, 'at least one input tensor to require gradient'):
gradcheck(lambda x: x, (x,), raise_exception=False, fast_mode=fast_mode)
# (warning) when inputs are not double precision
x = torch.ones(1, dtype=torch.float32, requires_grad=True)
with self.assertWarnsRegex(UserWarning, "Input #0 requires gradient and is not a double precision"):
self.assertTrue(gradcheck(lambda x: x, (x,), atol=1e-1, fast_mode=fast_mode))
# when layout is not mkldnn(aka has strides) and input has a dimension with stride 0. (always raises
# even if raise_exception=False)
x = torch.ones(1, dtype=torch.float64, requires_grad=True)
x = x.expand((2, 2))
with self.assertRaisesRegex(RuntimeError, 'The 0th input has a dimension with stride 0'):
gradcheck(lambda x: x, (x,), raise_exception=False, fast_mode=fast_mode)
check(fast_mode=True)
check(fast_mode=False)
@unittest.skipIf(not torch._C.has_mkldnn, "MKL-DNN build is disabled")
def test_gradcheck_validates_input_mkldnn(self):
# with mkldnn inputs, forward mode testing is not allowed
# Update the tolerances below to make sure the gradients match even in single-precision floats
# Use the warning assert to hide the float32 warning
x = torch.ones(1).to_mkldnn().requires_grad_()
with self.assertWarnsRegex(UserWarning, "Input #0 requires gradient and is not a double precision"):
with self.assertRaisesRegex(ValueError, 'MKLDNN inputs are not support for forward AD gradcheck.'):
gradcheck(lambda x: x.to_dense(), (x,), raise_exception=False, fast_mode=False, check_forward_ad=True,
atol=1e-1, rtol=1e-1)
with self.assertWarnsRegex(UserWarning, "Input #0 requires gradient and is not a double precision"):
with self.assertRaisesRegex(ValueError, 'MKLDNN inputs are not support for forward AD gradcheck.'):
gradcheck(lambda x: x.to_dense(), (x,), raise_exception=False, fast_mode=True, check_forward_ad=True,
atol=1e-1, rtol=1e-1)
@unittest.skipIf(not torch._C.has_mkldnn, "MKL-DNN build is disabled")
def test_gradcheck_test_outputs(self):
def check(fast_mode):
# when sparse outputs (always raise even if raise_exception=False)
x = torch.rand(10, requires_grad=True).to_sparse()
with self.assertRaisesRegex(ValueError, 'Sparse output is not supported at gradcheck yet'):
gradcheck(lambda x: x, (x,), check_sparse_nnz=True, check_batched_grad=False, raise_exception=False,
fast_mode=fast_mode)
# when mkldnn outputs (always raise even if raise_exception=False)
root = torch.randn(4, 5, dtype=torch.float32, requires_grad=True)
with self.assertRaisesRegex(ValueError, 'MKLDNN output is not supported at gradcheck yet'):
gradcheck(lambda x: x.to_mkldnn(), (root,), check_batched_grad=False, raise_exception=False, fast_mode=fast_mode)
check(fast_mode=True)
check(fast_mode=False)
def test_gradcheck_check_no_differentiable_outputs(self):
def check(fast_mode):
# When none of the outputs are differentiable, but numerical gradient is not zero
x = torch.ones((1,), requires_grad=True)
with self.assertRaisesRegex(RuntimeError, 'Numerical gradient for function expected to be zero'):
gradcheck(lambda x: torch.tensor([x]), x)
self.assertFalse(gradcheck(lambda x: torch.tensor([x]), x, raise_exception=False, fast_mode=fast_mode))
# succeed when no outputs at all
self.assertTrue(gradcheck(lambda x: (), (x,), fast_mode=fast_mode))
check(fast_mode=True)
check(fast_mode=False)
def test_gradcheck_check_batched_grad(self):
def check(fast_mode):
x = torch.rand(10, dtype=torch.double, requires_grad=True).to_sparse()
# runtime error while computing batched grad (prints a big error)
with self.assertRaisesRegex(RuntimeError, 'gradcheck or gradgradcheck failed while testing batched gradient'):
gradcheck(lambda x: x.to_dense(), (x,), check_sparse_nnz=True, check_batched_grad=True, fast_mode=fast_mode)
self.assertFalse(gradcheck(lambda x: x.to_dense(), (x,), check_sparse_nnz=True, check_batched_grad=True,
raise_exception=False, fast_mode=fast_mode))
check(fast_mode=True)
check(fast_mode=False)
def test_gradcheck_backward_mul_by_grad_output(self):
# when grad_input is sparse and has incorrect sparse_dim/dense_dim
def check(fast_mode):
def fn(x):
def hook(grad):
if grad is not None:
return grad.to_dense().to_sparse(1)
return grad
y = x.clone()
y.register_hook(hook)
return y.to_dense()
x = torch.ones((2, 2), dtype=torch.double, requires_grad=True).to_sparse()
with self.assertRaisesRegex(RuntimeError, 'grad is sparse tensor, but has incorrect sparse_dim'):
gradcheck(fn, (x,), atol=1e-1, check_sparse_nnz=True, check_batched_grad=False, fast_mode=fast_mode)
self.assertFalse(gradcheck(fn, (x,), atol=1e-1, check_sparse_nnz=True, check_batched_grad=False,
raise_exception=False, fast_mode=fast_mode))
# when backward not multiplied by grad_output (non-sparse case)
def fn2(x):
y = x.clone()
y.register_hook(lambda x: x + 1e-2)
return y
x = torch.ones(1, dtype=torch.double, requires_grad=True)
with self.assertRaisesRegex(RuntimeError, 'backward not multiplied by grad_output'):
gradcheck(fn2, (x,), atol=1e-1, fast_mode=fast_mode)
self.assertFalse(gradcheck(fn2, (x,), atol=1e-1, raise_exception=False, fast_mode=fast_mode))
# when backward not multiplied by grad_output (sparse case)
def fn3(x):
y = x.clone().to_dense()
y.register_hook(lambda x: x + 1e-2)
return y
x = torch.ones(1, dtype=torch.double, requires_grad=True).to_sparse()
with self.assertRaisesRegex(RuntimeError, 'backward not multiplied by grad_output'):
gradcheck(fn3, (x,), atol=1e-1, check_sparse_nnz=True, check_batched_grad=False, fast_mode=fast_mode)
self.assertFalse(gradcheck(fn3, (x,), atol=1e-1, check_sparse_nnz=True, check_batched_grad=False,
raise_exception=False, fast_mode=fast_mode))
# when layout of grad_input is not the same as input
class Test(Function):
@staticmethod
def forward(ctx, x):
return x
@staticmethod
def backward(ctx, x):
return x.to_sparse()
x = torch.ones(1, dtype=torch.double, requires_grad=True)
with self.assertRaisesRegex(RuntimeError, 'grad is incorrect layout'):
gradcheck(Test.apply, (x,), check_batched_grad=False, fast_mode=fast_mode)
self.assertFalse(gradcheck(Test.apply, (x,), check_batched_grad=False, raise_exception=False, fast_mode=fast_mode))
check(fast_mode=True)
check(fast_mode=False)
def test_gradcheck_undefined_grad(self):
def check(fast_mode):
# when encounter runtime error while running backward
def fn(x):
def hook(x):
if x is None:
raise RuntimeError("x is undefined")
y = x.clone()
y.register_hook(hook)
return y
x = torch.ones(1, dtype=torch.double, requires_grad=True)
with self.assertWarnsRegex(UserWarning, "Backwards compatibility: New undefined gradient support checking feature"):
with self.assertRaisesRegex(RuntimeError, 'Expected backward function to handle undefined output grads'):
gradcheck(fn, (x,), fast_mode=fast_mode)
self.assertFalse(gradcheck(fn, (x,), raise_exception=False, fast_mode=fast_mode))
check(fast_mode=True)
check(fast_mode=False)
def test_gradcheck_jacobian_mismatch(self):
def check(fast_mode):
def fn(x): # R -> R, C -> C
y = x.clone()
y.register_hook(lambda x: x + 1e-2)
return y
x = torch.ones(2, 2, requires_grad=True)
with self.assertRaisesRegex(RuntimeError, 'Jacobian mismatch for output 0 with respect to input 0'):
gradcheck(fn, (x,), fast_mode=fast_mode)
self.assertFalse(gradcheck(fn, (x,), raise_exception=False, fast_mode=fast_mode))
x_c = torch.ones(2, 2, requires_grad=True, dtype=torch.complex128)
with self.assertRaisesRegex(RuntimeError, 'While considering the imaginary part of complex outputs only'):
gradcheck(fn, (x_c,), fast_mode=False)
self.assertFalse(gradcheck(fn, (x_c,), raise_exception=False, fast_mode=False))
def fn2(x): # R -> C
y = torch.complex(x, x)
y.register_hook(lambda x: x + 1e-2)
return y
x = torch.ones(2, 2, requires_grad=True)
with self.assertRaisesRegex(RuntimeError, 'While considering the imaginary part of complex outputs only'):
gradcheck(fn2, (x,), fast_mode=False)
self.assertFalse(gradcheck(fn2, (x,), raise_exception=False, fast_mode=False))
def fn3(x): # C -> R
y = torch.real(x)
y.register_hook(lambda x: x + 1e-2)
return y
with self.assertRaisesRegex(RuntimeError, 'Jacobian mismatch for output 0 with respect to input 0'):
gradcheck(fn3, (x_c,), fast_mode=False)
self.assertFalse(gradcheck(fn3, (x_c,), raise_exception=False, fast_mode=False))
check(fast_mode=True)
check(fast_mode=False)
def test_gradcheck_dense_and_sparse_inputs(self):
def check(fast_mode):
def fn(x, y):
return x * y.coalesce().to_dense()
a = torch.rand(2, 2, dtype=torch.double, requires_grad=True)
b = torch.rand(2, 2, dtype=torch.double,).to_sparse().requires_grad_(True)
self.assertTrue(gradcheck(fn, (a, b), check_sparse_nnz=True, check_batched_grad=False, fast_mode=fast_mode))
check(fast_mode=True)
check(fast_mode=False)
@unittest.skipIf(not torch._C.has_mkldnn, "MKL-DNN build is disabled")
def test_gradcheck_multiple_mkldnn_inputs(self):
def check(fast_mode):
def fn(x, y):
return x + y.to_dense()
a = torch.rand(10, requires_grad=True)
b = torch.rand(10, dtype=torch.float32).to_mkldnn().requires_grad_(True)
self.assertTrue(gradcheck(fn, (a, b), atol=1e-1, check_batched_grad=False, fast_mode=fast_mode))
def fn2(x, y):
return x.to_dense() + y.to_dense()
c = torch.rand(10, dtype=torch.float32).to_mkldnn().requires_grad_(True)
self.assertTrue(gradcheck(fn, (a, c), atol=1e-1, check_batched_grad=False, fast_mode=fast_mode))
check(fast_mode=True)
check(fast_mode=False)
def test_gradcheck_output_shape_or_dtype_depend_on_values(self):
def check(fast_mode):
def fn(x):
if torch.all(x >= 1):
return torch.cat([x, x])
else:
return x
a = torch.ones(1, dtype=torch.double, requires_grad=True)
with self.assertRaisesRegex(AssertionError, 'return outputs with the same shape when inputs are perturbed'):
self.assertTrue(gradcheck(fn, (a,), fast_mode=fast_mode))
def fn2(x):
if torch.all(x >= 1):
return x.to(torch.float32)
else:
return x
with self.assertRaisesRegex(AssertionError, 'return outputs with the same dtype when inputs are perturbed'):
self.assertTrue(gradcheck(fn2, (a,), fast_mode=fast_mode))
check(fast_mode=True)
check(fast_mode=False)
def test_gradcheck_complex_non_complex_outputs(self):
def fn(x, y):
z = torch.complex(x, y)
return z, x + 1
a = torch.ones(2, 2, requires_grad=True, dtype=torch.float64)
b = torch.ones(2, 2, requires_grad=True, dtype=torch.float64)
self.assertTrue(gradcheck(fn, (a, b)))
def fn2(z):
return z, torch.real(z)
c = torch.ones(2, 2, requires_grad=True, dtype=torch.complex128)
self.assertTrue(gradcheck(fn2, (c)))
def test_gradcheck_get_numerical_jacobian(self):
# get_numerical_jacobian is deprecated and no longer used internally by gradcheck
from torch.autograd.gradcheck import get_numerical_jacobian
def fn(inputs):
# get_numerical_jacobian requires fn to take inputs as a tuple
# and returns the jacobian wrt the first output
x = inputs[0]
y = inputs[1]
return 2 * x + y, x + 2 * y
a = torch.rand(2, 2, requires_grad=True, dtype=torch.float64)
b = torch.rand(2, 2, requires_grad=True, dtype=torch.float64)
with self.assertWarnsRegex(UserWarning, "get_numerical_jacobian was part of PyTorch's private API"):
jacobian = get_numerical_jacobian(fn, (a, b), target=a, eps=1e-6)
self.assertEqual(jacobian[0], 2 * torch.eye(4, dtype=torch.double))
with self.assertWarnsRegex(UserWarning, "get_numerical_jacobian was part of PyTorch's private API"):
jacobian = get_numerical_jacobian(fn, (a, b), eps=1e-6)
self.assertEqual(jacobian[0], 2 * torch.eye(4, dtype=torch.double))
self.assertEqual(jacobian[1], 1 * torch.eye(4, dtype=torch.double))
with self.assertRaisesRegex(ValueError, "Expected grad_out to be 1.0"):
jacobian = get_numerical_jacobian(fn, (a, b), eps=1e-6, grad_out=2.0)
def test_gradcheck_get_analytical_jacobian(self):
from torch.autograd.gradcheck import get_analytical_jacobian
def fn(x, y):
return 2 * x + y, x + 2 * y
a = torch.rand(2, 2, requires_grad=True, dtype=torch.float64)
b = torch.rand(2, 2, requires_grad=True, dtype=torch.float64)
outputs = fn(a, b)
with self.assertWarnsRegex(UserWarning, "get_analytical_jacobian was part of PyTorch's private API"):
jacobians, reentrant, correct_grad_sizes, correct_grad_types = get_analytical_jacobian((a, b), outputs[0])
self.assertEqual(jacobians[0], 2 * torch.eye(4, dtype=torch.double))
self.assertEqual(jacobians[1], 1 * torch.eye(4, dtype=torch.double))
self.assertTrue(reentrant)
class NonDetFunc(Function):
@staticmethod
def forward(ctx, x, jitter=0.0):
ctx._jitter = jitter
return x
@staticmethod
def backward(ctx, grad_out):
return NonDetFunc.apply(grad_out, ctx._jitter) * (1 + torch.rand_like(grad_out) * ctx._jitter), None
outputs = NonDetFunc.apply(a, 1e-6)
with self.assertWarnsRegex(UserWarning, "get_analytical_jacobian was part of PyTorch's private API"):
jacobians, reentrant, correct_grad_sizes, correct_grad_types = get_analytical_jacobian((a,), outputs)
self.assertFalse(reentrant)
with self.assertRaisesRegex(ValueError, "Expected grad_out to be 1.0"):
jacobians, _, _, _ = get_analytical_jacobian((a,), outputs, grad_out=2.0)
def test_gradcheck_custom_error(self):
from torch.autograd.gradcheck import GradcheckError
def check(fast_mode):
def fn(x):
y = x.clone()
y.register_hook(lambda x: x + 1e-2)
return y
x = torch.ones(2, 2, requires_grad=True)
with self.assertRaisesRegex(GradcheckError, 'Jacobian mismatch for output 0 with respect to input 0'):
gradcheck(fn, (x,), fast_mode=fast_mode)
with self.assertRaisesRegex(RuntimeError, 'Jacobian mismatch for output 0 with respect to input 0'):
gradcheck(fn, (x,), fast_mode=fast_mode)
self.assertFalse(gradcheck(fn, (x,), raise_exception=False, fast_mode=fast_mode))
def fn2(x):
raise RuntimeError("Not a GradcheckError!")
# Checks that when raise_exception=False, non-GradcheckErrors are not caught by gradcheck
with self.assertRaisesRegex(RuntimeError, "Not a GradcheckError!"):
gradcheck(fn2, (x,), fast_mode=fast_mode, raise_exception=False)
check(fast_mode=True)
check(fast_mode=False)
def test_gradcheck_forward_ad(self):
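# With check_forward_ad=True, gradcheck also compares a forward-mode (JVP)
# Jacobian against the numerical one; bad_fn rescales the tangent of y so that
# only the forward-mode Jacobian with respect to input 1 is wrong.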
def fn(x, y):
return x + y, y
def bad_fn(x, y):
# Hacky way to check if we're currently inside a forward ad level
is_running_forward_ad = fwAD._current_level >= 0
if is_running_forward_ad:
y_p, y_d = fwAD.unpack_dual(y)
y = fwAD.make_dual(y_p, y_d * 1.1)
return x + y, y
err_msg = "Jacobian computed with forward mode mismatch for output 0 with respect to input 1"
for fast_mode in [True, False]:
# Test for all inputs and outputs being real
x = torch.rand(2, dtype=torch.double, requires_grad=True)
y = torch.rand(2, dtype=torch.double, requires_grad=True)
gradcheck(fn, (x, y), check_forward_ad=True, fast_mode=fast_mode)
with self.assertRaisesRegex(RuntimeError, err_msg):
gradcheck(bad_fn, (x, y), check_forward_ad=True, fast_mode=fast_mode)
def basic_mul(x):
return torch.view_as_real(torch.resolve_conj(x * 1j))
gradcheck(basic_mul, x, check_forward_ad=True, fast_mode=fast_mode)
# Test for one input and one output being complex
x = torch.rand(2, dtype=torch.cdouble, requires_grad=True)
gradcheck(fn, (x, y), check_forward_ad=True, fast_mode=fast_mode)
with self.assertRaisesRegex(RuntimeError, err_msg):
gradcheck(bad_fn, (x, y), check_forward_ad=True, fast_mode=fast_mode)
# Test for all inputs and outputs being complex
y = torch.rand(2, dtype=torch.cdouble, requires_grad=True)
gradcheck(fn, (x, y), check_forward_ad=True, fast_mode=fast_mode)
with self.assertRaisesRegex(RuntimeError, err_msg):
gradcheck(bad_fn, (x, y), check_forward_ad=True, fast_mode=fast_mode)
def test_gradcheck_forward_ad_runs_with_no_requires_grad(self):
# Currently requires_grad is used as an easy way for gradcheck to know
# which inputs of the function are meant to be differentiable
# This test checks that the inputs passed to the function do not have
# requires_grad=True, even though they may have requires_grad=True when
# passed to gradcheck
class UserFn(Function):
@staticmethod
def forward(ctx, x, y):
if fwAD._current_level >= 0:
self.assertFalse(x.requires_grad)
self.assertFalse(y.requires_grad)
return x.clone(), y.clone()
@staticmethod
def jvp(ctx, x_t, y_t):
return x_t, y_t
x = torch.rand(2, dtype=torch.double, requires_grad=True)
y = torch.rand(2, dtype=torch.double, requires_grad=True)
gradcheck(UserFn.apply, (x, y), check_forward_ad=True, check_undefined_grad=False, check_backward_ad=False,
check_batched_grad=False, check_batched_forward_grad=False)
gradcheck(UserFn.apply, (x, y), check_forward_ad=True, check_undefined_grad=True, check_backward_ad=False,
check_batched_grad=False, check_batched_forward_grad=False)
gradcheck(UserFn.apply, (x, y), check_forward_ad=True, check_undefined_grad=True, check_backward_ad=False,
check_batched_grad=False, check_batched_forward_grad=True)
x = torch.rand(2, dtype=torch.double, requires_grad=True)
y = torch.rand(2, dtype=torch.double, requires_grad=False)
gradcheck(UserFn.apply, (x, y), check_forward_ad=True, check_undefined_grad=True, check_backward_ad=False,
check_batched_grad=False, check_batched_forward_grad=True)
def test_gradcheck_forward_ad_respects_requires_grad(self):
# Currently requires_grad is used as an easy way for gradcheck to know
# which inputs of the function are meant to be differentiable
jvp_count = [0]
class UserFn(Function):
@staticmethod
def forward(ctx, x, y):
return x.clone(), y.clone()
@staticmethod
def jvp(ctx, x_t, y_t):
jvp_count[0] += 1
return x_t, y_t
x = torch.rand(2, dtype=torch.double, requires_grad=True)
y = torch.rand(2, dtype=torch.double, requires_grad=True)
gradcheck(UserFn.apply, (x, y), check_forward_ad=True, check_undefined_grad=False, check_backward_ad=False,
check_batched_grad=False, check_batched_forward_grad=False)
self.assertEqual(jvp_count[0], 2) # (2) once per input
jvp_count = [0]
gradcheck(UserFn.apply, (x, y), check_forward_ad=True, check_undefined_grad=True, check_backward_ad=False,
check_batched_grad=False, check_batched_forward_grad=False)
self.assertEqual(jvp_count[0], 6) # (+4): (once with normal ZT (+1), once with efficient ZT (+1)) for each input (x2)
jvp_count = [0]
gradcheck(UserFn.apply, (x, y), check_forward_ad=True, check_undefined_grad=True, check_backward_ad=False,
check_batched_grad=False, check_batched_forward_grad=True)
self.assertEqual(jvp_count[0], 12) # (+6): (compute batch of 2 with vmap (+1), with a loop (+2)) for each input (x2)
jvp_count = [0]
# Repeat the previous test except we mark one input with requires_grad=False
# NB: _test_undefined_forward_mode only adds (+1) when the function has a single differentiable input, not (+2)!
# Otherwise, the other counts are halved.
x = torch.rand(2, dtype=torch.double, requires_grad=True)
y = torch.rand(2, dtype=torch.double, requires_grad=False)
gradcheck(UserFn.apply, (x, y), check_forward_ad=True, check_undefined_grad=True, check_backward_ad=False,
check_batched_grad=False, check_batched_forward_grad=True)
self.assertEqual(jvp_count[0], 5) # 1 + 1 + 3
def test_gradcheck_check_forward_or_backward_only(self):
"""Depending on settings for check_forward_ad and check_backward_ad, the
correct codepaths should be reached (or not reached)
"""
fwd_fail_err_msg = "FAIL FWD"
bwd_fail_err_msg = "FAIL BWD"
class UserFn(Function):
@staticmethod
def forward(ctx, foo, fwd_bad, bwd_bad):
ctx.fwd_bad = fwd_bad
ctx.bwd_bad = bwd_bad
return foo * 2
@staticmethod
def vjp(ctx, gO):
if ctx.bwd_bad:
raise RuntimeError(bwd_fail_err_msg)
else:
return 2 * gO, None, None
@staticmethod
def jvp(ctx, gI, _1, _2):
if ctx.fwd_bad:
raise RuntimeError(fwd_fail_err_msg)
else:
return 2 * gI
for fast_mode in (True, False):
for check_forward_ad in (True, False):
for check_backward_ad in (True, False):
for fwd_bad in (True, False):
for bwd_bad in (True, False):
fwd_should_fail = fwd_bad and check_forward_ad
bwd_should_fail = bwd_bad and check_backward_ad
def run():
gradcheck(UserFn.apply, (x, fwd_bad, bwd_bad), check_forward_ad=check_forward_ad,
check_backward_ad=check_backward_ad, check_undefined_grad=check_backward_ad,
check_batched_grad=check_backward_ad, fast_mode=fast_mode)
x = torch.rand(2, dtype=torch.double, requires_grad=True)
if not check_forward_ad and not check_backward_ad:
with self.assertRaisesRegex(AssertionError, "Expected at least one of"):
run()
continue
if not fwd_should_fail and not bwd_should_fail:
run()
else:
# If both fail, backward AD failure "hides" forward AD failure
if fwd_should_fail:
fail_msg = fwd_fail_err_msg
if bwd_should_fail:
fail_msg = bwd_fail_err_msg
with self.assertRaisesRegex(RuntimeError, fail_msg):
run()
def test_gradcheck_forward_ad_batched_grad(self):
x = torch.rand(2, dtype=torch.double, requires_grad=True)
# multiple inputs and outputs with non-tensor inputs
def fn1(a: torch.Tensor, b: int):
return a.clone(), a + 1
gradcheck(fn1, (x, 1), check_forward_ad=True, check_backward_ad=False, check_batched_grad=False,
check_undefined_grad=False, check_batched_forward_grad=True)
# unrelated inputs: tangent for c is None
def fn2(a: torch.Tensor, c: torch.Tensor):
return a.clone()
gradcheck(fn2, (x, x.clone()), check_forward_ad=True, check_backward_ad=False, check_batched_grad=False,
check_undefined_grad=False, check_batched_forward_grad=True)
class Fn(Function):
@staticmethod
def forward(ctx, foo):
return foo * 2
@staticmethod
def vjp(ctx, gO):
return gO * 2
@staticmethod
def jvp(ctx, gI):
torch.randn_like(gI)
return gI * 2
msg = "vmap: We do not yet support calling random operations inside of vmap"
with self.assertRaisesRegex(RuntimeError, msg):
gradcheck(Fn.apply, (x,), check_forward_ad=True, check_batched_forward_grad=True)
def test_version_counter(self):
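# _version is the tensor's version counter: in-place ops bump it, views share
# it with their base, and autograd uses it to detect saved tensors that were
# modified in place.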
x = torch.randn(1, 2)
# In-place op bumps version
x_saved_version = x._version
x.add_(1).add_(1)
self.assertTrue(x._version > x_saved_version)
# Differentiable view shares version counter
xz = x[:]
self.assertTrue(x._version == xz._version)
xz.add_(1)
self.assertTrue(x._version == xz._version)
# `x.data = y` preserves version counter of `x`
x_saved_version = x._version
x.data = torch.randn(2, 3)
self.assertTrue(x._version == x_saved_version)
x.add_(1)
self.assertTrue(x._version > x_saved_version)
# Make sure `x` is still using the same version counter it shares with `xz`
self.assertTrue(x._version == xz._version)
# In-place op on `xz` also updates version of `x`,
# because they share the version counter
xz.add_(1)
self.assertTrue(x._version == xz._version)
def test_set_data_tensorimpl_type(self):
# Dense tensor has impl of type `TensorImpl`, while sparse tensor has impl
# of type `SparseTensorImpl`.
x = torch.randn(1, 2)
x_s = torch.sparse_coo_tensor(torch.zeros([1, 1]), torch.ones([1]))
with self.assertRaisesRegex(RuntimeError, 'incompatible tensor type'):
x.data = x_s
def test_set_data_preserve_pyobj(self):
a = torch.randn(1, 2)
b = torch.randn(1, 2)
b_id_saved = id(b)
b.data = a
self.assertTrue(b_id_saved == id(b))
@unittest.skipIf(IS_WINDOWS, "Skipping because doesn't work for windows")
def test_thread_shutdown(self):
code = """import torch
from torch.autograd import Function
class MyFunction(Function):
@staticmethod
def forward(ctx, x):
return x
@staticmethod
def backward(ctx, grad):
return grad
for shape in [(1,), ()]:
v = torch.ones(shape, requires_grad=True)
MyFunction.apply(v).backward()
"""
s = TestCase.runWithPytorchAPIUsageStderr(code)
# The autograd engine creates worker threads only when GPU devices are present.
# So make sure that we do shut down threads when we're testing CUDA, and that
# there is no thread to shut down when we're not using CUDA.
if TEST_CUDA:
self.assertRegex(s, "PYTORCH_API_USAGE torch.autograd.thread_shutdown")
else:
self.assertNotRegex(s, "PYTORCH_API_USAGE torch.autograd.thread_shutdown")
@unittest.skipIf(IS_MACOS, "Fails with SIGBUS on macOS; https://github.com/pytorch/pytorch/issues/25941")
def test_deep_reentrant(self):
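# Each backward() inside DeepReentrant.backward is a reentrant backward call.
# Roughly: past a certain depth the engine hands reentrant tasks to worker
# threads from a pool instead of recursing on the current stack, which is the
# escape mechanism exercised here.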
class DeepReentrant(Function):
@staticmethod
def forward(ctx, x):
with torch.enable_grad():
ctx.x = Variable(x.detach(), requires_grad=True)
ctx.x = ctx.x - 1
return ctx.x.detach()
@staticmethod
def backward(ctx, x):
if ctx.x < 0:
return x
with torch.enable_grad():
DeepReentrant.apply(ctx.x).sum().backward()
return x
# Test stack overflow escape mechanism
v = torch.tensor(2000.0, requires_grad=True)
# This will cause stack overflow if reentrant calls are handled
# in the same thread recursively
DeepReentrant.apply(v).sum().backward()
# Test stack overflow escape mechanism multiple times
# to ensure reusing workers in the pool works fine
v2 = torch.tensor(200.0, requires_grad=True)
DeepReentrant.apply(v2).sum().backward()
def test_reentrant_priority(self):
order = []
class MyFunction(Function):
@staticmethod
def forward(ctx, x):
return x
@staticmethod
def backward(ctx, x):
order.append("MyFunction")
return x
class Reentrant(Function):
@staticmethod
def forward(ctx, x):
with torch.enable_grad():
ctx.x = Variable(x.detach(), requires_grad=True)
ctx.x = ctx.x - 1
return ctx.x.detach()
@staticmethod
def backward(ctx, x):
order.append("Reentrant")
if ctx.x < 0:
return x
with torch.enable_grad():
Reentrant.apply(ctx.x).backward()
return x
a = MyFunction.apply(torch.tensor(6.0, requires_grad=True))
b = Reentrant.apply(torch.tensor(9.0, requires_grad=True))
v = a * b
v.backward()
# The tasks for the Reentrant and MyFunction backward() will be added
# to the queue in the autograd engine at the same time. The backward
# for Reentrant will be executed first, which will then add other
# backward tasks to the queue. We want to ensure all the reentrant tasks
# are prioritized over the MyFunction backward task regardless of their
# sequence numbers
self.assertEqual(len(order), 11)
self.assertEqual(order.count("Reentrant"), 10)
self.assertEqual(order[-1], "MyFunction")
@slowTest
def test_checkpointing(self):
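# torch.utils.checkpoint trades compute for memory: activations inside the
# checkpointed module are not stored during forward and are recomputed when
# backward reaches the checkpointed segment.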
num_inp = 2000
nz_inp = 10
nz_out = 10
nz_bottleneck = 1000
# small proxy network for some complex reasoning we want to do per input
module = nn.Sequential(
nn.Linear(nz_inp, nz_bottleneck),
nn.ReLU(),
nn.Linear(nz_bottleneck, nz_inp)
)
feat_combined = []
for r in range(num_inp):
data_r = torch.empty(1, nz_inp)
data_r.uniform_()
data_r.requires_grad = True
feat_r = checkpoint(module, data_r)
feat_combined.append(feat_r)
# compute mean as a proxy for some joint reasoning
mean_combined = torch.stack(feat_combined).mean()
mean_combined.backward()
@slowTest
@parametrize("input_requires_grad", [True, False])
def test_checkpointing_without_reentrant(self, input_requires_grad):
"""
Basic test for checkpoint without reentrant autograd.
"""
num_inp = 2000
nz_inp = 10
nz_out = 10
nz_bottleneck = 1000
# small proxy network for some complex reasoning we want to do per input
module = nn.Sequential(
nn.Linear(nz_inp, nz_bottleneck),
nn.ReLU(),
nn.Linear(nz_bottleneck, nz_inp)
)
# Run model with and without checkpointing and verify gradients are
# equivalent, regardless of if inputs require grads or not.
module_copy = deepcopy(module)
feat_combined = []
feat_combined_no_checkpoint = []
for r in range(num_inp):
data_r = torch.empty(1, nz_inp)
data_r.uniform_()
data_r.requires_grad = input_requires_grad
data_r_copy = data_r.clone()
feat_r = checkpoint(module, data_r, use_reentrant=False)
feat_combined.append(feat_r)
feat_r_no_checkpoint = module_copy(data_r)
feat_combined_no_checkpoint.append(feat_r_no_checkpoint)
# compute mean as a proxy for some joint reasoning
mean_combined = torch.stack(feat_combined).mean()
mean_combined.backward()
mean_combined_no_checkpoint = torch.stack(feat_combined_no_checkpoint).mean()
mean_combined_no_checkpoint.backward()
for checkpoint_param, param in zip(module.parameters(), module_copy.parameters()):
self.assertEqual(checkpoint_param.grad, param.grad)
def test_checkpoint_valid_reset_on_error(self):
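# The reentrant checkpoint only supports .backward(); torch.autograd.grad() is
# rejected. This test makes sure the internal "checkpoint valid" state is reset
# after that error so a subsequent backward() still succeeds.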
a = torch.randn(2, 2, requires_grad=True)
with self.assertRaisesRegex(Exception, "Checkpointing is not compatible with .grad()"):
b = checkpoint(torch.exp, a).sum()
torch.autograd.grad(b, (a,))
c = checkpoint(torch.exp, a).sum()
c.backward()
@parametrize("use_reentrant", [True, False])
def test_checkpointing_without_reentrant_detached_tensor(self, use_reentrant):
class NoGradModule(torch.nn.Module):
def __init__(self):
super().__init__()
self.linear = nn.Linear(2, 2, bias=False)
self.lin2 = nn.Linear(2, 2, bias=False)
def forward(self, x):
with torch.no_grad():
return self.lin2(self.linear(x))
module = NoGradModule()
err_ctx = (
self.assertRaisesRegex(
RuntimeError,
"none of output has requires_grad=True"
)
if use_reentrant
else contextlib.suppress()
)
a = torch.randn(2, 2, requires_grad=True)
for _ in range(3):
with err_ctx:
# out does not require grad
out = checkpoint(module, a, use_reentrant=use_reentrant)
# Make loss require grad, otherwise we would run into
# "element 0 of tensors does not require grad and does not have a grad_fn"
out += a
out.sum().backward()
def test_checkpointing_without_reentrant_correct_grad(self):
"""
Verifies that correct gradients are calculated for checkpoint
without reentrant autograd, for both backward() and autograd.grad().
"""
a = torch.randn(2, 2, requires_grad=True)
b = torch.exp(a).sum()
b.backward()
b_grad = a.grad
a.grad = None
c = checkpoint(torch.exp, a, use_reentrant=False).sum()
c.backward()
c_grad = a.grad
a.grad = None
d = checkpoint(torch.exp, a, use_reentrant=False).sum()
d_grad, = torch.autograd.grad(d, (a,))
self.assertEqual(b_grad, c_grad)
self.assertEqual(b_grad, d_grad)
def test_checkpointing_without_reentrant_dataparallel(self):
"""
Verifies gradient correctness when checkpoint without reentrant autograd
is used in conjunction with DataParallel.
"""
class LinearModule(torch.nn.Module):
def __init__(self):
super().__init__()
self.linear = nn.Linear(2, 2, bias=False)
def forward(self, inp):
return self.linear(inp)
a = torch.randn(2, 2, requires_grad=True)
if torch.cuda.is_available():
a = a.cuda()
model = LinearModule()
if torch.cuda.is_available():
model = model.cuda()
b = deepcopy(model)(a).sum()
b.backward()
b_grad = a.grad
a.grad = None
module = torch.nn.DataParallel(deepcopy(model))
c = checkpoint(module, a, use_reentrant=False).sum()
c.backward()
c_grad = a.grad
self.assertEqual(b_grad, c_grad)
def test_checkpointing_without_reentrant_parameter_used_in_an_out(self):
"""
Ensures that gradient hooks are only called once per tensor.
"""
w = torch.randn(10, 10, requires_grad=True)
count = 0
def hook(grad):
nonlocal count
count += 1
w.register_hook(hook)
x = torch.rand(10, 10, requires_grad=True)
h = w * x # Using w outside the checkpoint
out = checkpoint(lambda x: w * x, h, use_reentrant=False) # Using w inside the checkpoint
out.sum().backward()
# should only call hook once
self.assertEqual(count, 1)
def test_checkpointing_without_reentrant_arbitrary_input_output(self):
"""
Ensures checkpointing without reentrant autograd works with functions
with arbitrary input/output structures.
"""
class MyModel(torch.nn.Module):
def __init__(self):
super().__init__()
self.layer = torch.nn.Linear(5, 5, bias=False)
def forward(self, dict_input):
tensor = dict_input["tensor"]
return {
"result": self.layer(tensor)
}
model_no_checkpoint = MyModel()
model_checkpoint_without_reentrant = deepcopy(model_no_checkpoint)
inp = {
"tensor": torch.randn(5, 5)
}
out_no_checkpoint = model_no_checkpoint(inp)["result"].sum()
out_checkpoint = checkpoint(
model_checkpoint_without_reentrant,
inp,
use_reentrant=False
)["result"].sum()
self.assertEqual(out_checkpoint, out_no_checkpoint)
out_no_checkpoint.backward()
out_checkpoint.backward()
for param, checkpoint_param in zip(model_no_checkpoint.parameters(), model_checkpoint_without_reentrant.parameters()):
self.assertEqual(param.grad, checkpoint_param.grad)
def test_callback_adds_callback(self):
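# queue_callback registers a callable to run once the current backward pass
# finishes; this test checks that a callback queued from inside backward
# (here, from MyFunc.backward) is also executed.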
called = [0]
def callback_final():
called[0] += 1
def callback_adds_callback():
called[0] += 1
Variable._execution_engine.queue_callback(callback_final)
class MyFunc(Function):
@staticmethod
def forward(ctx, input):
return input
@staticmethod
@once_differentiable
def backward(ctx, grad):
Variable._execution_engine.queue_callback(callback_adds_callback)
return grad
a = torch.rand((3, 3), requires_grad=True)
b = MyFunc.apply(a)
b.sum().backward()
self.assertEqual(called[0], 2)
def _test_reentrant_with_callbacks(self, install_callbacks_in_depths):
counter = {}
counter["inner"] = 0
counter["outer"] = 0
def inc_inner_counter():
counter["inner"] += 1
def inc_outer_counter():
counter["outer"] += 1
class MyFunc(Function):
@staticmethod
def forward(ctx, input):
return input
@staticmethod
@once_differentiable
def backward(ctx, input):
if 1 in install_callbacks_in_depths:
# Add a callback to execute.
Variable._execution_engine.queue_callback(inc_inner_counter)
return input
class MyReentrantFunc(Function):
@staticmethod
def forward(ctx, input):
return input
@staticmethod
@once_differentiable
def backward(ctx, input):
if 0 in install_callbacks_in_depths:
# Add a callback to execute.
Variable._execution_engine.queue_callback(inc_outer_counter)
# Reentrant backward call.
tmp_inp = input.detach().requires_grad_()
with torch.enable_grad():
tmp_out = (MyFunc.apply(tmp_inp)).sum()
tmp_out.backward()
return input
t1 = torch.rand((3, 3), requires_grad=True)
t2 = MyReentrantFunc.apply(t1)
t3 = t2.sum()
torch.autograd.backward([t3])
return counter
def test_reentrant_with_callbacks_depth_0(self):
# Verify callback is called only once.
ret = self._test_reentrant_with_callbacks([0])
self.assertEqual(1, ret["outer"])
self.assertEqual(0, ret["inner"])
def test_reentrant_with_callbacks_depth_1(self):
# Verify callback is called only once.
ret = self._test_reentrant_with_callbacks([1])
self.assertEqual(0, ret["outer"])
self.assertEqual(1, ret["inner"])
def test_reentrant_with_callbacks_both_depths(self):
# Verify callback is called twice.
ret = self._test_reentrant_with_callbacks([0, 1])
self.assertEqual(1, ret["outer"])
self.assertEqual(1, ret["inner"])
def test_reentrant_with_leaf_variable_hook(self):
handle = None
param = torch.rand(10, requires_grad=True)
def add_gradient_penalty_to_grad(grad):
handle.remove()
old_param_grad = grad
param.grad = None
# Add some sort of gradient penalty by directly updating the gradients
with torch.enable_grad():
g = grad.detach().requires_grad_()
new_param = param.detach().requires_grad_()
out = ((g * 2) + new_param).sum()
out.backward()
res = g.grad + grad
param.grad = old_param_grad
return res
handle = param.register_hook(add_gradient_penalty_to_grad)
# Forward pass
tmp = (param * param)
loss = tmp.sum()
# Compute the gradients
loss.backward()
def test_reentrant_with_non_leaf_variable_hook(self):
handle = None
param = torch.rand(10, requires_grad=True)
def manual_increase_gradient(grad):
handle.remove()
# Add some sort of gradient penalty by directly updating the gradients
with torch.enable_grad():
g = grad.detach().requires_grad_()
out = ((g * 2) + 5).sum()
out.backward()
res = g.grad + grad
return res
# Forward pass
tmp = (param * param)
handle = tmp.register_hook(manual_increase_gradient)
loss = tmp.sum()
# Compute the gradients
loss.backward()
self.assertEqual(param.grad, 6 * param)
def test_grad_fn_attr_bindings(self):
# Check that the getter of each type returns what we want
# See `gen_autograd_functions.py` for how the getters are generated
#
# This test is only meant to check if the codegen'd bindings work
# Please help update this test if you update the names of any of the fields we check!
#
a = torch.ones(1, requires_grad=True)
b = torch.ones(1, requires_grad=True)
out = torch.stack([a, b], dim=0)
self.assertEqual(out.grad_fn._saved_tensors, (a, b)) # TensorList -> Tuple[Tensor]
self.assertIsInstance(out.grad_fn._saved_tensors[0], torch.Tensor)
self.assertIsInstance(out.grad_fn._raw_saved_tensors[0], torch._C._autograd.SavedTensor)
self.assertEqual(out.grad_fn._saved_dim, 0) # int64_t -> int
self.assertIsInstance(out.grad_fn._saved_dim, int)
out.grad_fn._raw_saved_tensors[0].register_hooks(lambda x: x, lambda x: x)
out.sum().backward()
with self.assertRaisesRegex(RuntimeError, "after they have already been freed"):
out.grad_fn._saved_tensors
with self.assertRaisesRegex(RuntimeError, "after they have already been freed"):
out.grad_fn._raw_saved_tensors
self.assertEqual(out.grad_fn._saved_dim, 0)
a = torch.ones(2, 2, requires_grad=True)
indices = torch.tensor([0, 1])
out = a[:, indices]
self.assertEqual(out.grad_fn._saved_indices, (None, indices)) # c10::List<c10::optional<Tensor>> -> Tuple[Tensor?]
self.assertIsInstance(out.grad_fn._saved_indices[1], torch.Tensor)
self.assertIsInstance(out.grad_fn._raw_saved_indices[1], torch._C._autograd.SavedTensor)
self.assertEqual(out.grad_fn._saved_self_sizes, a.shape) # IntArrayRef -> Tuple[int]
self.assertIsInstance(out.grad_fn._saved_self_sizes[0], int)
out.grad_fn._raw_saved_indices[1].register_hooks(lambda x: x, lambda x: x)
with self.assertRaisesRegex(RuntimeError, "None is forbidden"):
out.grad_fn._raw_saved_indices[0].register_hooks(lambda x: x, lambda x: x)
a = torch.ones(2, 2, requires_grad=True)
out = a * a
out.grad_fn._raw_saved_self.register_hooks(lambda x: x, lambda x: x)
out.sum().backward()
with self.assertRaisesRegex(RuntimeError, "after it has been freed"):
out.grad_fn._raw_saved_self.register_hooks(lambda x: x, lambda x: x)
a = torch.ones(1, 1, 2, requires_grad=True)
out = torch.nn.functional.interpolate(a, 4, mode="linear")
self.assertEqual(out.grad_fn._saved_output_size, (4,)) # c10::optional<IntArrayRef> -> int[]?
self.assertIsInstance(out.grad_fn._saved_output_size[0], int)
self.assertEqual(out.grad_fn._saved_align_corners, False) # bool -> bool
self.assertIsInstance(out.grad_fn._saved_align_corners, bool)
if hasattr(out.grad_fn, '_saved_scale_factors'):
self.assertIsNone(out.grad_fn._saved_scale_factors) # c10::optional<ArrayRef<double>> -> float[]?
else:
self.assertIsNone(out.grad_fn._saved_scales) # c10::optional<ArrayRef<double>> -> float[]?
out = torch.nn.functional.interpolate(a, scale_factor=0.5, mode="linear")
self.assertIsNone(out.grad_fn._saved_output_size)
self.assertEqual(out.grad_fn._saved_scale_factors, (0.5,))
self.assertIsInstance(out.grad_fn._saved_scale_factors[0], float)
a = torch.ones(2, 2, requires_grad=True)
out = torch.pdist(a, p=1)
self.assertEqual(out.grad_fn._saved_p, 1.) # double -> float
self.assertIsInstance(out.grad_fn._saved_p, float)
a = torch.ones(1, 1, 2, requires_grad=True)
out = torch.logit(a, 1.)
self.assertEqual(out.grad_fn._saved_eps, 1.) # c10:optional<double> -> float?
self.assertIsInstance(out.grad_fn._saved_eps, float)
out = torch.logit(a)
self.assertIsNone(out.grad_fn._saved_eps)
if torch._C.has_lapack:
a = torch.ones(1, 1, requires_grad=True)
q, r = torch.linalg.qr(a, mode="reduced")
self.assertEqual(q.grad_fn._saved_mode, "reduced") # std::string -> str
a = torch.tensor([1.], requires_grad=True)
out = torch.div(a, 2., rounding_mode="trunc")
self.assertEqual(out.grad_fn._saved_rounding_mode, "trunc") # c10::optional<std::string> -> str?
out = torch.div(a, 2., rounding_mode=None)
self.assertIsNone(out.grad_fn._saved_rounding_mode) # c10::optional<std::string> -> str?
x = torch.zeros(5, requires_grad=True)
out = torch.threshold(x, threshold=(1 + 0j), value=(1 + 0j))
self.assertIsInstance(out.grad_fn._saved_threshold, complex) # Scalar(complex double) -> complex
cfloat = torch.tensor(1 + 0j, dtype=torch.complex64)
out = torch.threshold(x, threshold=cfloat, value=(1 + 0j))
self.assertIsInstance(out.grad_fn._saved_threshold, complex) # Scalar(complex float) -> complex
out = torch.threshold(x, threshold=1., value=1.)
self.assertIsInstance(out.grad_fn._saved_threshold, float) # Scalar(floating point) -> float
out = torch.threshold(x, threshold=1, value=1)
self.assertIsInstance(out.grad_fn._saved_threshold, int) # Scalar(integral) -> int
out = torch.threshold(x, threshold=False, value=False)
self.assertIsInstance(out.grad_fn._saved_threshold, bool) # Scalar(bool) -> bool
a = torch.ones(2, 2, requires_grad=True)
out = a.as_strided((3,), (1,), 1)
self.assertEqual(out.grad_fn._saved_storage_offset, 1) # c10:optional<int64_t> -> int?
self.assertIsInstance(out.grad_fn._saved_storage_offset, int)
out = a.as_strided((3,), (1,))
self.assertIsNone(out.grad_fn._saved_storage_offset)
a = torch.ones(2, requires_grad=True)
out = torch.tanh(a)
self.assertEqual(out, out.grad_fn._saved_result) # saved variable when output
a = torch.randn(3, 5, requires_grad=True)
b = torch.tensor([1, 0, 4])
loss = nn.NLLLoss()
out = loss(a, b)
self.assertIsNone(out.grad_fn._saved_weight)
loss = nn.NLLLoss(weight=torch.ones((5,)))
out = loss(a, b)
self.assertEqual(out.grad_fn._saved_weight, torch.ones((5,))) # c10:optional<Tensor> -> Tensor?
out.sum().backward()
with self.assertRaisesRegex(RuntimeError, "after they have already been freed"):
out.grad_fn._saved_weight
def test_cant_create_saved_tensors(self):
with self.assertRaisesRegex(RuntimeError, "Trying to create a SavedTensor object from Python is forbidden"):
torch.autograd.SavedTensor()
def test_custom_function_saved_tensors(self):
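# _raw_saved_tensors exposes the underlying SavedTensor objects.
# register_hooks(pack, unpack) attaches pack/unpack hooks to one of them; it can
# only be called once per SavedTensor and only while the saved state is alive.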
def getFn(save=True):
class MyFn(Function):
@staticmethod
def forward(ctx, x):
if save:
ctx.save_for_backward(x, None)
return x
@staticmethod
def backward(ctx, g):
return g
return MyFn
a = torch.randn(5, requires_grad=True)
y = getFn(True).apply(a)
self.assertEqual((a, None), y.grad_fn.saved_tensors)
saved = y.grad_fn._raw_saved_tensors
self.assertIsInstance(saved[0], torch._C._autograd.SavedTensor)
# We can't tell the underlying tensor is None without unpacking it
self.assertIsInstance(saved[1], torch._C._autograd.SavedTensor)
# We catch that error when the user calls register_hooks on it
with self.assertRaisesRegex(RuntimeError, "None is forbidden"):
saved[1].register_hooks(lambda x: x, lambda x: x)
with self.assertRaisesRegex(TypeError, "incompatible function arguments"):
saved[0].register_hooks(lambda x: x)
with self.assertRaisesRegex(TypeError, "incompatible function arguments"):
saved[0].register_hooks(1, 1)
saved[0].register_hooks(lambda x: x, lambda x: x)
with self.assertRaisesRegex(RuntimeError, "already been set"):
saved[0].register_hooks(lambda x: x, lambda x: x)
y.sum().backward()
# Using a reference to the SavedTensor object after the
# saved variables have been released can lead to undefined behavior
del saved
with self.assertRaisesRegex(RuntimeError, "after they have already been freed"):
y.grad_fn._raw_saved_tensors
with self.assertRaisesRegex(RuntimeError, "after they have already been freed"):
y.grad_fn.saved_tensors
y = getFn(False).apply(a)
self.assertEqual(y.grad_fn.saved_tensors, ())
self.assertEqual(y.grad_fn._raw_saved_tensors, ())
def test_autograd_views_codegen(self):
# This is not necessarily the absolute correct behavior, but this is the current
# one. This test is here to make sure that any change to this behavior is detected
# and not silent. The TODOs below mark the places with unexpected behavior.
# Note that any change in these tests will be BC-breaking and should be done carefully.
# This test checks the behavior of two codegen functions (view_as and unbind)
# with respect to view tracking and inplace operation on the output.
def run_test(grad_mode, requires_grad, is_view, should_raise_tuple):
def maybe_check_raise(fn, should_raise):
self.assertTrue(should_raise is None or isinstance(should_raise, str))
if should_raise is not None:
with self.assertRaisesRegex(RuntimeError, should_raise):
fn()
else:
fn()
inp = torch.rand(2, requires_grad=requires_grad).clone()
with torch.set_grad_enabled(grad_mode):
out = inp.view_as(inp)
# Are they differentiable views?
self.assertTrue(out._is_view() == is_view)
# Are inplace allowed?
maybe_check_raise(lambda: out.add_(1), should_raise_tuple[0])
inp = torch.rand(2, requires_grad=requires_grad).clone()
with torch.set_grad_enabled(grad_mode):
out = inp.unbind()
# Are they differentiable views?
self.assertTrue(out[0]._is_view() == is_view)
self.assertTrue(out[1]._is_view() == is_view)
# Are inplace allowed?
maybe_check_raise(lambda: out[0].add_(1), should_raise_tuple[1])
maybe_check_raise(lambda: out[1].add_(1), should_raise_tuple[2])
# should_raise contains None if it should not raise
# should_raise contains a string of the error if it should raise
# The 3 elements are for view_as, first output of unbind and second output of unbind
run_test(grad_mode=True, requires_grad=False, is_view=True,
should_raise_tuple=(None, None, None))
inp_change_err = "Output {} of UnbindBackward0 is a view and is being modified inplace."
run_test(grad_mode=True, requires_grad=True, is_view=True,
should_raise_tuple=(None, inp_change_err.format("0"), inp_change_err.format("1")))
leaf_grad_err = "A view was created in no_grad mode and is being modified inplace"
run_test(grad_mode=False, requires_grad=True, is_view=True,
should_raise_tuple=(leaf_grad_err, leaf_grad_err, leaf_grad_err))
run_test(grad_mode=False, requires_grad=False, is_view=True,
should_raise_tuple=(None, None, None))
def test_inplace_not_requires_grad(self):
class MyFn(torch.autograd.Function):
@staticmethod
def forward(ctx, inp):
return inp.view_as(inp)
@staticmethod
def backward(ctx, grad):
return grad
# Original Tensor does not require grad
a = torch.rand(1, 2)
# Tensor being written does require grad
b = torch.rand(1, requires_grad=True)
# Take an invalid view on 'a' that should raise an error (warns during deprecation)
view_a = MyFn.apply(a)
with self.assertRaisesRegex(RuntimeError, "This view was created inside a custom Function"):
view_a += b
# Extra test for copy_ that is a manual implementation and could be easily
# forgotten when the codegen is updated (warns during deprecation)
a = torch.rand(1, 2)
b = torch.rand(1, requires_grad=True)
view_a = MyFn.apply(a)
with self.assertRaisesRegex(RuntimeError, "This view was created inside a custom Function"):
view_a.copy_(b)
# Functions that should throw must properly throw
a = torch.rand(1, 2)
b = torch.rand(1, requires_grad=True)
view_a = a.unbind()[0]
with self.assertRaisesRegex(RuntimeError, "This view is the output of a function that returns "
"multiple views."):
view_a.copy_(b)
# Sanity check that views that should work still work
a = torch.rand(1, 2)
b = torch.rand(1, requires_grad=True)
a.select(1, 0).copy_(b)
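# Descriptive note: the three error cases above correspond to views produced by a custom
# Function (view_a = MyFn.apply(a)) and by a multi-output view op (a.unbind()[0]); the final
# select() case is a plain single-output view op, so writing into it is allowed.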
def _do_test_autograd_simple_views_python(self, dtype):
# This is not necessarily the absolute correct behavior, but this is the current
# one. This test is here to make sure that any change to this behavior is detected
# and not silent. The TODOs below mark the places with unexpected behavior.
# Note that any change in these tests will be BC-breaking and should be done carefully.
# This checks the autograd.Function behavior when we return one or multiple outputs
# while one of these is an input, a view of an input or of a temporary tensor.
# This indicator is used to track how many times the backward function was called
bw_called = [0]
# This indicator is used to check if the argument `ga` contains non-zero values
ga_nz = [False]
class IdOneOutput(Function):
@staticmethod
def forward(ctx, a, b, make_view):
if make_view:
a = a.narrow(0, 0, 2)
else:
a = a.clone()
return a
@staticmethod
def backward(ctx, ga):
bw_called[0] += 1
return ga, None, None
class IdTwoOutput(Function):
@staticmethod
def forward(ctx, a, b, make_view):
if make_view:
a = a.narrow(0, 0, 2)
else:
a = a.clone()
return a, a + b
@staticmethod
def backward(ctx, ga, gab):
bw_called[0] += 1
if ga.eq(0).all():
ga_nz[0] = False
else:
ga_nz[0] = True
return ga + gab, gab, None
class ViewOfTemp(Function):
@staticmethod
def forward(ctx, a, make_view):
ctx.save_for_backward(a)
if make_view:
a = a.narrow(0, 0, 2)
else:
a = a.clone()
b = a.clone()
return b.select(0, 0)
@staticmethod
def backward(ctx, grad):
bw_called[0] += 1
a, = ctx.saved_tensors
res = torch.zeros_like(a)
res.select(0, 0).copy_(grad)
return res, None
fn_id_to_inplace_on_view_err_msg = {
"one_output": ("Output 0 of IdOneOutputBackward is a view and is being "
"modified inplace. This view was created inside a custom Function"),
"two_output": ("Output 0 of IdTwoOutputBackward is a view and is being modified inplace."
" This view is the output of a function that returns multiple views."),
"view_of_temp": ("Output 0 of ViewOfTempBackward is a view and is being "
"modified inplace. This view was created inside a custom Function")
}
for fn_id in ["one_output", "two_output", "view_of_temp"]:
for inplace in [True, False]:
for make_view in [True, False]:
# Used for special casing the tests below
output_is_a_view = (make_view or fn_id == "view_of_temp")
def fn(a, b):
# never modify a, b inplace for gradcheck
a = a.clone()
b = b.clone()
if fn_id == "two_output":
tmp1, tmp2 = IdTwoOutput.apply(a, b, make_view)
if inplace:
tmp1 += 3
tmp2 += 3
else:
tmp1 = tmp1 + 3
tmp2 = tmp2 + 3
tmp = tmp1 * tmp2
else:
if fn_id == "one_output":
tmp = IdOneOutput.apply(a, b, make_view)
else:
tmp = ViewOfTemp.apply(a + b, make_view)
if inplace:
tmp += 3
else:
tmp = tmp + 3
return tmp.sum()
a = torch.ones(2, dtype=dtype, requires_grad=True)
b = torch.ones(2, dtype=dtype, requires_grad=True)
err_msg = fn_id_to_inplace_on_view_err_msg[fn_id]
if not inplace or not output_is_a_view:
gradcheck(fn, (a, b), check_batched_grad=False)
# Was the custom backward called properly
bw_called[0] = 0
ga_nz[0] = True # For the case where the backward is called
if inplace and output_is_a_view:
with self.assertRaisesRegex(RuntimeError, err_msg):
fn(a, b)
else:
fn(a, b).backward()
expected_called = 1
expected_ga_nz = True
if output_is_a_view and inplace:
expected_called = 0
self.assertTrue(bw_called[0] == expected_called)
self.assertTrue(ga_nz[0] == expected_ga_nz)
def test_autograd_simple_views_python(self):
self._do_test_autograd_simple_views_python(torch.double)
self._do_test_autograd_simple_views_python(torch.cdouble)
def test_autograd_inplace_views_creation_meta(self):
# Tests creation_meta properly handled for inplace views
class Func(torch.autograd.Function):
@staticmethod
def forward(ctx, x):
return x.view_as(x)
@staticmethod
def backward(ctx, x):
return x
view_custom = Func.apply
def run_test(fn, fn_type, grad_mode_view, grad_mode_iview, requires_grad, error1, error2):
# This test checks the behavior of inplace-view functions when
# the views are created in grad mode or not
base = torch.rand(2, 3, requires_grad=requires_grad).clone()
# 1. Create a view with `grad_mode=grad_mode_view`
with torch.set_grad_enabled(grad_mode_view):
if fn_type == "multi_view":
inp = base.unbind()[0]
elif fn_type == "custom" :
inp = view_custom(base)
else:
inp = base.view_as(base)
# 2. Perform inplace view with `grad_mode=grad_mode_iview`
with torch.set_grad_enabled(grad_mode_iview):
if error1 is not None:
with self.assertRaisesRegex(RuntimeError, error1):
fn(inp)
return
else:
# If error is None, check that runs without error
fn(inp)
# 3. Do inplace on the (new) view
if error2 is not None:
with self.assertRaisesRegex(RuntimeError, error2):
inp.add_(1)
else:
# If error is None, check that runs without error
inp.add_(1)
no_grad_err = "A view was created in no_grad mode"
multi_view_err = "function that returns multiple views"
custom_err = "view was created inside a custom Function"
def run_tests(fn):
for fn_type in ("normal", "multi_view", "custom"):
for grad_mode_view in (True, False):
for grad_mode_iview in (True, False):
for requires_grad in (True, False):
error1 = None # expected error when we do inplace_view on original view
error2 = None # expected error when we do inplace on the resulting view
if requires_grad:
if not grad_mode_view and grad_mode_iview:
error1 = no_grad_err
if not grad_mode_view and not grad_mode_iview:
error2 = no_grad_err
if fn_type == "multi_view":
if grad_mode_view and grad_mode_iview:
error1 = multi_view_err
if grad_mode_view and not grad_mode_iview:
error2 = multi_view_err
if fn_type == "custom":
if grad_mode_view and grad_mode_iview:
error1 = custom_err
if grad_mode_view and not grad_mode_iview:
error2 = custom_err
run_test(fn, fn_type, grad_mode_view, grad_mode_iview, requires_grad, error1, error2)
# This list was created by logging gen_inplace_or_view_type.py
# detach_ is excluded for this test because it cannot be applied to
# views and thus does not return a view
run_tests(lambda v: v.as_strided_((1, 0), (2, 2)))
run_tests(lambda v: v.transpose_(0, 0))
run_tests(lambda v: v.t_())
run_tests(lambda v: v.squeeze_(0))
run_tests(lambda v: v.unsqueeze_(0))
run_tests(lambda v: v.swapdims_(0, 0))
run_tests(lambda v: v.swapaxes_(0, 0))
# TODO This is not the correct behavior -
# See https://github.com/pytorch/pytorch/issues/49825#issuecomment-794466627
def test_autograd_inplace_views_cross_dtype(self):
# This test is here to make sure that any change to this behavior is detected
# and not silent. The TODOs below mark the places with unexpected behavior.
a_orig = torch.rand(3, 3, requires_grad=True, dtype=torch.complex64)
a = a_orig.clone()
b = torch.view_as_real(a)
b = b.transpose(0, 1)
b += 1
b.backward(torch.arange(0, 18, dtype=torch.float).view(3, 3, 2))
non_inplace_grad = a_orig.grad
a_orig = torch.rand(3, 3, requires_grad=True, dtype=torch.complex64)
a = a_orig.clone()
b = torch.view_as_real(a)
b.transpose_(0, 1)
b += 1
b.backward(torch.arange(0, 18, dtype=torch.float).view(3, 3, 2))
inplace_grad = a_orig.grad
# TODO: this is a bug!
# once this is fixed, it should have the transpose removed:
# self.assertEqual(non_inplace_grad, inplace_grad)
self.assertEqual(non_inplace_grad.T, inplace_grad)
def test_autograd_multiple_views_python(self):
# This is not necessarily the absolute correct behavior, but this is the current
# one. This test is here to make sure that any change to this behavior is detected
# and not silent. The TODOs below mark the places with unexpected behavior.
# Note that any change in these tests will be BC-breaking and should be done carefully.
# This checks that multiple views in the forward are properly traced and how they
# behave with respect to inplace operations.
# This indicator is used to track how many times the backward function was called
bw_called = [0]
class ComplexView(Function):
@staticmethod
def forward(ctx, a, idx):
res = a.narrow(0, idx, 1)
res = a.select(0, idx)
ctx.save_for_backward(a)
ctx.idx = idx
return res
@staticmethod
def backward(ctx, grad):
bw_called[0] += 1
a, = ctx.saved_tensors
res = torch.zeros_like(a)
res.select(0, ctx.idx).copy_(grad)
return res, None
a = torch.ones(2, requires_grad=True)
idx = 1
bw_called[0] = 0
out = ComplexView.apply(a.clone(), idx)
out.sum().backward()
self.assertTrue(bw_called[0] == 1)
out = ComplexView.apply(a.clone(), idx)
with self.assertRaisesRegex(RuntimeError,
"Output 0 of ComplexViewBackward is a view and is being modified inplace"):
out += 1
def test_autograd_python_custom_function_inplace(self):
# This is not necessarily the absolute correct behavior, but this is the current
# one. This test is here to make sure that any change to this behavior is detected
# and not silent. The TODOs below mark the places with unexpected behavior.
# Note that any change in these tests will be BC-breaking and should be done carefully.
# This test checks custom autograd.Functions that perform inplace operations
bw_called = [0]
# I) Single output
class MyAdder(Function):
@staticmethod
def forward(ctx, a, b):
a.add_(b)
ctx.mark_dirty(a)
return a
@staticmethod
def backward(ctx, grad):
bw_called[0] += 1
return grad, grad
a = torch.ones(2, requires_grad=True)
b = torch.ones(2, requires_grad=True)
# No extra inplace
c = MyAdder.apply(a.clone(), b)
c.sum().backward()
self.assertTrue(bw_called[0] == 1)
# With extra inplace on the output
bw_called[0] = 0
c = MyAdder.apply(a.clone(), b)
c += 2
c.sum().backward()
self.assertTrue(bw_called[0] == 1)
# The input is a view
bw_called[0] = 0
c = MyAdder.apply(a.clone().view_as(a), b)
c.sum().backward()
self.assertTrue(bw_called[0] == 1)
# Should not give non-inputs to mark_dirty
class MyAdderBad(Function):
@staticmethod
def forward(ctx, a, b):
c = 3 * a
c.add_(b)
ctx.mark_dirty(c)
return c
@staticmethod
def backward(ctx, grad):
bw_called[0] += 1
grad = 3 * grad
return grad, grad
a = torch.ones(2, requires_grad=True)
b = torch.ones(2, requires_grad=True)
with warnings.catch_warnings(record=True) as w:
MyAdderBad.apply(a.clone(), b)
self.assertEqual(len(w), 1)
# II) Multiple outputs
class MyBadAdder(Function):
@staticmethod
def forward(ctx, a, b):
a.add_(b)
ctx.mark_dirty(a)
return a, a + b
@staticmethod
def backward(ctx, ga, gab):
bw_called[0] += 1
return ga + gab, ga + gab
# No extra inplace
bw_called[0] = 0
c, d = MyBadAdder.apply(a.clone(), b)
(c * d).sum().backward()
self.assertTrue(bw_called[0] == 1)
# With extra inplace on the output
bw_called[0] = 0
c, d = MyBadAdder.apply(a.clone(), b)
c += 2
(c * d).sum().backward()
self.assertTrue(bw_called[0] == 1)
# The input is a view
inplace_on_view_err = "your Function modifies inplace an input that is a view of another Tensor"
with self.assertRaisesRegex(RuntimeError, inplace_on_view_err):
c, d = MyBadAdder.apply(a.clone().view_as(a), b)
# III) Inplace + other op
class MyOutPlaceAdder(Function):
@staticmethod
def forward(ctx, a, b):
a.add_(b)
ctx.mark_dirty(a)
return a.clone(), a + b
@staticmethod
def backward(ctx, ga, gab):
bw_called[0] += 1
return ga + gab, ga + 2 * gab
# We don't reuse the input
def fn(a, b):
orig_a = a.clone().view_as(a)
c, d = MyOutPlaceAdder.apply(orig_a, b)
return (c * d).sum()
bad_mark_dirty_err = "Some elements marked as dirty during the forward method were not returned as output."
with self.assertRaisesRegex(RuntimeError, bad_mark_dirty_err):
fn(a, b)
def test_named_tensor_for_complex_views(self):
names = ["batch", "height", "width", "complex"]
z = torch.ones((5, 12, 14, 2), requires_grad=True)
z_named = z.refine_names(*names)
z_complex = torch.view_as_complex(z_named.rename(None)).refine_names(*names[:-1])
z_complex.sum().backward()
self.assertEqual(z.grad, torch.view_as_real(torch.ones_like(z_complex).rename(None)))
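# Descriptive note: the gradient of the sum w.r.t. z_complex is a complex all-ones tensor,
# and torch.view_as_real maps each (1 + 0j) entry to the pair [1., 0.] in the trailing
# "complex" dimension, which is exactly what the assertion above compares against.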
def test_custom_function_return_view_in_nograd(self):
class Alias(Function):
@staticmethod
def forward(ctx, x):
return x[:]
@staticmethod
def backward(ctx, gx):
return gx
inp = torch.rand(2, requires_grad=True)
with torch.no_grad():
output = Alias.apply(inp)
with torch.no_grad():
expected_output = inp[:]
# Calling the custom function should operate as if we called an equivalent op
self.assertEqual(output.requires_grad, expected_output.requires_grad)
# Check that in-place modification on view throws
leaf_grad_err = "A view was created in no_grad mode and is being modified inplace"
with self.assertRaisesRegex(RuntimeError, leaf_grad_err):
output.zero_()
def test_grad_mode_restored_reentrant(self):
class MyFunction(Function):
@staticmethod
def forward(ctx, inp):
return inp.clone()
@staticmethod
def backward(ctx, go):
original = torch._C.is_grad_enabled()
with torch.enable_grad():
self.assertTrue(torch._C.is_grad_enabled())
foo = torch.rand(go.size(), requires_grad=True)
grad, = torch.autograd.grad(
foo ** 3, foo, grad_outputs=go
)
self.assertTrue(torch._C.is_grad_enabled())
self.assertTrue(torch._C.is_grad_enabled() == original)
return grad
inp = torch.rand(3, requires_grad=True)
# Case where original==False
MyFunction.apply(inp).sum().backward()
# Case where original==True
MyFunction.apply(inp).sum().backward(create_graph=True)
def test_power_function(self):
a = torch.tensor([0., 0., 0.])
b = torch.tensor([-1., 0., 1.], requires_grad=True)
c = torch.sum(a**b)
c.backward()
self.assertEqual(b.grad, torch.tensor([-inf, 0., 0.]))
s = 0
b = torch.tensor([-1., 0., 1.], requires_grad=True)
c = torch.sum(s**b)
c.backward()
self.assertEqual(b.grad, torch.tensor([-inf, 0., 0.]))
def test_custom_function_error(self):
class BadFw(Function):
@staticmethod
def backward(ctx, foo):
return foo
class BadBw(Function):
@staticmethod
def forward(ctx, foo):
return foo.clone()
class BadBw2(Function):
@staticmethod
def forward(ctx, foo):
return foo.clone()
@staticmethod
def backward(ctx, foo):
return foo
@staticmethod
def vjp(ctx, foo):
return foo
class BadJvp(Function):
@staticmethod
def forward(ctx, foo):
return foo.clone()
inp = torch.rand(1, requires_grad=True)
with self.assertRaisesRegex(NotImplementedError, "must implement the forward"):
BadFw.apply(inp)
with self.assertRaisesRegex(RuntimeError, "must implement either the backward"):
BadBw.apply(inp).sum().backward()
with self.assertRaisesRegex(RuntimeError, "Implementing both 'backward' and 'vjp'"):
BadBw2.apply(inp).sum().backward()
with self.assertRaisesRegex(RuntimeError, "must implement the jvp function"):
with fwAD.dual_level():
d = fwAD.make_dual(inp, torch.rand_like(inp))
res = BadJvp.apply(d)
def test_custom_function_forward_mode_view_checks(self):
flag_to_error = {
"ok": None,
"not_a_view": "jvp is not returning a view",
"not_a_view_of_inp": "jvp is not returning a view of the given",
"not_a_view_of_inp_base": "jvp is not returning a view of the same base",
}
class ViewFn(Function):
@staticmethod
def forward(ctx, foo, flag):
ctx.flag = flag
ctx.size = foo.size()
return foo.narrow(0, 0, 2)
@staticmethod
def vjp(ctx, gO):
gI = gO.new_zeros(ctx.size)
gI.narrow(0, 0, 2).copy_(gO)
return gI, None
@staticmethod
def jvp(ctx, gI, _):
res = gI.narrow(0, 0, 2)
if ctx.flag != "ok":
# Break the view in the gradients!
res = res.clone()
if ctx.flag in ["not_a_view_of_inp", "not_a_view_of_inp_base"]:
# Result should be a view, just of the wrong thing
res = res.view_as(res)
return res
inp = torch.rand(4, 4, dtype=torch.double, requires_grad=True)
for flag, msg in flag_to_error.items():
def test_fn(inp):
if flag == "not_a_view_of_inp_base":
inp = inp.view_as(inp)
return ViewFn.apply(inp, flag)
if msg is None:
gradcheck(test_fn, inp, check_forward_ad=True)
else:
with self.assertRaisesRegex(RuntimeError, msg):
gradcheck(test_fn, inp, check_forward_ad=True)
def test_custom_function_forward_mode_inplace_checks(self):
class InplaceFn(Function):
@staticmethod
def forward(ctx, foo, flag):
ctx.mark_dirty(foo)
ctx.flag = flag
foo.mul_(2)
return foo
@staticmethod
def vjp(ctx, gO):
return 2 * gO, None
@staticmethod
def jvp(ctx, gI, _):
if ctx.flag:
# Don't do the change inplace
return 2 * gI
else:
gI.mul_(2)
return gI
inp = torch.rand(4, 4, dtype=torch.double, requires_grad=True)
def test_fn(inp, flag):
inp = inp.clone()
return InplaceFn.apply(inp, flag)
gradcheck(test_fn, (inp, False), check_forward_ad=True)
with self.assertRaisesRegex(RuntimeError, "inplace custom Function is not modifying the forward mode gradients inplace"):
gradcheck(test_fn, (inp, True), check_forward_ad=True)
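# Descriptive note: when forward marks an input dirty (an in-place custom Function), the jvp
# is expected to update the corresponding tangent in place as well; returning a fresh tensor
# instead triggers the error checked above.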
def test_custom_function_forward_mode_wrong_formula(self):
class UserFn(Function):
@staticmethod
def forward(ctx, foo, should_fail):
ctx.should_fail = should_fail
return foo * 2
@staticmethod
def vjp(ctx, gO):
return 2 * gO, None
@staticmethod
def jvp(ctx, gI, _):
if ctx.should_fail:
# Wrong gradient formula
return 3 * gI
else:
return 2 * gI
inp = torch.rand(10, dtype=torch.double, requires_grad=True)
gradcheck(UserFn.apply, (inp, False), check_forward_ad=True)
with self.assertRaisesRegex(RuntimeError, "Jacobian computed with forward mode mismatch for output 0"):
gradcheck(UserFn.apply, (inp, True), check_forward_ad=True)
def test_custom_function_forward_mode_non_tensor_before_tensor_args(self):
class MyFn(torch.autograd.Function):
@staticmethod
def forward(ctx, nt, x, nt2, y):
return x * 2 + y * 3
@staticmethod
def jvp(ctx, nt, x_t, nt2, y_t):
self.assertIsNone(nt)
self.assertIsNone(nt2)
return x_t * 2 + y_t * 3
x = torch.tensor(1., dtype=torch.double)
t = torch.tensor(1., dtype=torch.double)
y = torch.tensor(1., dtype=torch.double)
with fwAD.dual_level():
dual_x = fwAD.make_dual(x, t)
MyFn.apply(1, dual_x, 1, y)
gradcheck(MyFn.apply, (1, x.requires_grad_(True), 1, y.requires_grad_(True)), check_forward_ad=True,
check_backward_ad=False, check_batched_grad=False)
def test_custom_function_forward_mode_forward_is_no_op(self):
error_regex = "A custom Function's forward is returning a view \\(or an input as-is\\)"
return_lambdas = {
# If we return an input as-is in forward, that is treated
# as if self.view_as(self) is performed. If jvp returns x.view_as(x),
# this is OK.
"view_as": lambda x: x.view_as(x),
# Expect this to raise an error
"self": lambda x: x,
# Expect this to raise the same error
"mul_by_2": lambda x: x * 2,
}
for k, fn in return_lambdas.items():
class MyFn(torch.autograd.Function):
@staticmethod
def forward(ctx, x, y):
return x + y, x
@staticmethod
def vjp(ctx, gO1, gO2):
return gO1 + gO2, gO1
@staticmethod
def jvp(ctx, x_t, y_t):
return x_t + y_t, fn(x_t)
a = torch.tensor(1., dtype=torch.double, requires_grad=True)
t = torch.tensor(1., dtype=torch.double)
b = torch.tensor(1., dtype=torch.double, requires_grad=True)
c = torch.tensor(1., dtype=torch.double)
t2 = torch.tensor(1., dtype=torch.double)
d = torch.tensor(1., dtype=torch.double)
with fwAD.dual_level():
a_dual = fwAD.make_dual(a, t)
c_dual = fwAD.make_dual(c, t2)
if k == "view_as":
_, out2 = MyFn.apply(a_dual, b)
self.assertTrue(fwAD.unpack_dual(out2).tangent._base is t)
_, out2 = MyFn.apply(c_dual, d)
self.assertTrue(fwAD.unpack_dual(out2).tangent._base is t2)
else:
with self.assertRaisesRegex(RuntimeError, error_regex):
MyFn.apply(a_dual, b)
with self.assertRaisesRegex(RuntimeError, error_regex):
MyFn.apply(c_dual, d)
if k == "view_as":
gradcheck(MyFn.apply, (a, c), check_forward_ad=True)
else:
with self.assertRaisesRegex(RuntimeError, error_regex):
gradcheck(MyFn.apply, (a, c), check_forward_ad=True)
def test_custom_function_save_for_forward(self):
class Func(torch.autograd.Function):
@staticmethod
def forward(ctx, x: torch.Tensor, y: torch.Tensor, z: int):
ctx.save_for_backward(x, y)
ctx.save_for_forward(x, y)
ctx.z = z
ctx.prod = x * y
return z * ctx.prod
@staticmethod
def jvp(ctx, x_t, y_t, _):
x_p, y_p = ctx.saved_tensors
z = ctx.z
return z * (y_p * x_t + x_p * y_t)
@staticmethod
def vjp(ctx, grad_out):
x, y = ctx.saved_tensors
z = ctx.z
return z * grad_out * y, z * grad_out * x, None
a = torch.tensor(1., requires_grad=True, dtype=torch.double)
t = torch.tensor(1., dtype=torch.double)
b = torch.tensor(2., requires_grad=True, dtype=torch.double)
c = 4
with fwAD.dual_level():
a_dual = fwAD.make_dual(a, t)
out = Func.apply(a_dual, b, c)
out.backward()
gradcheck(Func.apply, (a, b, c), check_forward_ad=True)
# When saved for backward, but not saved for forward
class Func(torch.autograd.Function):
@staticmethod
def forward(ctx, x: torch.Tensor):
ctx.save_for_backward(x)
return x.clone()
@staticmethod
def jvp(ctx, x_t):
self.assertEqual(len(ctx.saved_tensors), 0)
return x_t
@staticmethod
def vjp(ctx, grad_out):
x, = ctx.saved_tensors
self.assertEqual(len(ctx.saved_tensors), 1)
return grad_out
with fwAD.dual_level():
a_dual = fwAD.make_dual(a, t)
out = Func.apply(a_dual)
out.backward()
gradcheck(Func.apply, (a,), check_forward_ad=True)
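# Usage note (a sketch of the convention exercised above, not an additional test):
# tensors passed to ctx.save_for_forward are the ones visible as ctx.saved_tensors inside
# jvp, while ctx.save_for_backward feeds vjp/backward. A Function that supports both modes
# therefore typically calls both, e.g.:
#
#   ctx.save_for_backward(x, y)   # consumed by vjp/backward
#   ctx.save_for_forward(x, y)    # consumed by jvp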
def test_custom_function_local_inplace(self):
class MyFn(torch.autograd.Function):
@staticmethod
def forward(ctx, inp, inplace):
view = inp.clone()[:3]
if inplace:
view += 2
return view
@staticmethod
def backward(ctx, grad):
return grad, None
base = torch.rand(10, requires_grad=True)
foo = MyFn.apply(base, False)
self.assertEqual(foo.grad_fn.__class__.__name__, "MyFnBackward")
foo = MyFn.apply(base, True)
self.assertEqual(foo.grad_fn.__class__.__name__, "MyFnBackward")
def test_integer_outputs(self):
inp = torch.rand(4, requires_grad=True)
out = inp.argmax()
self.assertFalse(out.dtype.is_floating_point)
self.assertFalse(out.requires_grad)
out = inp.argmin()
self.assertFalse(out.dtype.is_floating_point)
self.assertFalse(out.requires_grad)
out = inp.argsort()
self.assertFalse(out.dtype.is_floating_point)
self.assertFalse(out.requires_grad)
val = torch.rand((), requires_grad=True)
out = torch.searchsorted(inp, val)
self.assertFalse(out.dtype.is_floating_point)
self.assertFalse(out.requires_grad)
bins = torch.linspace(0, 1.0, steps=100, requires_grad=True)
vals = torch.rand(5, 5, requires_grad=True)
out = torch.bucketize(vals, bins)
self.assertFalse(out.dtype.is_floating_point)
self.assertFalse(out.requires_grad)
val = torch.empty(5).requires_grad_()
out = val.count_nonzero()
self.assertFalse(out.requires_grad)
def assert_only_first_requires_grad(res):
if not isinstance(res, tuple):
res = (res,)
self.assertTrue(res[0].requires_grad)
for out in res[1:]:
if out is not None:
self.assertFalse(out.requires_grad)
for sort in [True, False]:
for return_inverse in [True, False]:
for return_counts in [True, False]:
res = torch.unique(inp, sorted=sort, return_inverse=return_inverse,
return_counts=return_counts)
assert_only_first_requires_grad(res)
res = torch.unique(inp, sorted=sort, return_inverse=return_inverse,
return_counts=return_counts, dim=0)
assert_only_first_requires_grad(res)
res = torch.unique_consecutive(inp, return_inverse=return_inverse,
return_counts=return_counts)
assert_only_first_requires_grad(res)
res = torch.unique_consecutive(inp, return_inverse=return_inverse,
return_counts=return_counts, dim=0)
assert_only_first_requires_grad(res)
# Here we test the internal functions to make sure all of them are
# covered on top of the public API
res = torch._unique(inp, sorted=sort, return_inverse=return_inverse)
assert_only_first_requires_grad(res)
# This looks public but is actually manually deleted from the
# torch namespace in torch/functional.py
res = torch._VF.unique_dim(inp, dim=0, sorted=sort, return_inverse=return_inverse,
return_counts=return_counts)
assert_only_first_requires_grad(res)
# We don't test `unique_dim_consecutive` here.
# It looks public but the python binding is actually manually disabled in
# tools/autograd/gen_python_functions.py
res = torch._unique2(inp, sorted=sort, return_inverse=return_inverse,
return_counts=return_counts)
assert_only_first_requires_grad(res)
def test_custom_function_cycle(self):
class MyFn(Function):
@staticmethod
def forward(ctx, x, metadata):
x = x.clone()
ctx.meta = metadata
ctx.save_for_backward(x)
return x
@staticmethod
def backward(ctx, gO):
x, = ctx.saved_tensors
self.assertEqual(x, 3.14)
self.assertEqual(ctx.meta["foo"], 3.14)
return gO * x, None
def get_refs(with_backward):
a = torch.tensor(3.14, requires_grad=True)
metadata = {}
out = MyFn.apply(a, metadata)
metadata["foo"] = out
if with_backward:
out.sum().backward()
self.assertEqual(a.grad, a)
return torch._C._WeakTensorRef(out)
with disable_gc():
ref = get_refs(False)
self.assertFalse(ref.expired())
gc.collect()
self.assertTrue(ref.expired())
# The backward clears the saved_variables but not the __dict__
with disable_gc():
ref = get_refs(True)
self.assertFalse(ref.expired())
gc.collect()
self.assertTrue(ref.expired())
def test_input_buffer_accum(self):
leaf = torch.rand(2, 2, requires_grad=True)
# An op that returns sparse gradients
ind = torch.tensor([[0, 0]], dtype=torch.long)
out2 = leaf.gather(0, ind, sparse_grad=True)
# An op that returns the gradients as-is
out1 = leaf.clone()
grad_out1_original = torch.rand_like(out1)
grad_out1 = grad_out1_original.clone()
grad_out2 = torch.rand_like(out2)
torch.autograd.backward((out1, out2), (grad_out1, grad_out2))
# Given gradients should not be modified inplace
self.assertEqual(grad_out1, grad_out1_original)
def test_no_unnecessary_unwrapping(self):
a = torch.randn(5, requires_grad=True)
a_orig = a.detach().clone()
b = a * a
c = a * b
d = torch.exp(a)
# a is leaf
self.assertIs(b.grad_fn._saved_self, a)
self.assertIs(b.grad_fn._saved_other, a)
self.assertIs(c.grad_fn._saved_self, a)
# b is not an output
self.assertIs(c.grad_fn._saved_other, b)
# d is an output
self.assertEqual(d.grad_fn._saved_result, d)
self.assertIsNot(d.grad_fn._saved_result, d)
c.sum().backward()
with self.assertRaisesRegex(RuntimeError, "after they have already been freed"):
c.grad_fn._saved_self
# a is left untouched
self.assertEqual(a, a_orig)
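# Descriptive note: saved tensors are released as soon as backward() consumes them (unless
# retain_graph=True is passed), which is why accessing c.grad_fn._saved_self after the
# backward call above raises the "already been freed" error rather than returning a tensor.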
def test_saved_variable_version_counter(self):
a = torch.rand(2, requires_grad=True)
b = torch.exp(a)
b_unpacked = b.grad_fn._saved_result
self.assertEqual(b, b_unpacked)
self.assertEqual(b._version, b_unpacked._version)
with torch.no_grad():
b += 1
self.assertEqual(b, b_unpacked)
self.assertEqual(b._version, b_unpacked._version)
def test_saved_variable_packing_unpacking_saved_original_with_hooks(self):
# Tests that packing/unpacking a SavedVariable works correctly with user-defined hooks
# The saved_original / did_not_save_original distinction corresponds to the `save_original`
# attribute of `SavedVariable`.
def test(get_input, is_leaf):
a = get_input()
grad_fn = a.grad_fn
y = a * a
y.grad_fn._raw_saved_self.register_hooks(lambda x: 2 * x, lambda x: x / 2)
self.assertEqual(a, y.grad_fn._saved_self)
if not is_leaf:
self.assertIs(grad_fn, y.grad_fn._saved_self.grad_fn)
y.sum().backward()
else:
y.sum().backward()
self.assertEqual(2 * a, a.grad)
a = get_input()
grad_fn = a.grad_fn
y = a * a
y.grad_fn._raw_saved_self.register_hooks(lambda x: 2 * x, lambda x: x)
self.assertEqual(2 * a, y.grad_fn._saved_self)
if not is_leaf:
self.assertIs(grad_fn, y.grad_fn._saved_self.grad_fn)
y.sum().backward()
else:
y.sum().backward()
self.assertEqual(3 * a, a.grad)
# double backward
a = get_input()
grad_fn = a.grad_fn
y = a ** 3
y.grad_fn._raw_saved_self.register_hooks(lambda x: x, lambda x: x)
s = torch.sum(y)
g, = torch.autograd.grad(s, (a, ), create_graph=True)
if not is_leaf:
self.assertIs(grad_fn, y.grad_fn._saved_self.grad_fn)
g.sum().backward()
else:
g.sum().backward()
self.assertEqual(6 * a, a.grad)
a = get_input()
y = a * a
y.grad_fn._raw_saved_self.register_hooks(lambda x: x, lambda x: 1)
with self.assertRaisesRegex(TypeError, "Output of saved tensor unpack_hook expected to be a Tensor"):
print(y.grad_fn._saved_self)
a = get_input()
y = a * a
with self.assertRaisesRegex(TypeError, "missing 1 required positional argument"):
y.grad_fn._raw_saved_self.register_hooks(lambda x, b: x, lambda x: x)
a = get_input()
y = a * a
with self.assertRaisesRegex(TypeError, "missing 1 required positional argument"):
y.grad_fn._raw_saved_self.register_hooks(lambda x, b: (x, b), lambda x: x)
def inplace_double(x):
x *= 2
return x
a = get_input()
t = a * a
with self.assertRaisesRegex(RuntimeError, "A saved tensor pack hook is modifying its input in place."):
t.grad_fn._raw_saved_self.register_hooks(inplace_double, lambda x: x / 2)
# leaf
test(lambda: torch.randn(5, requires_grad=True), True)
# not leaf, not output
test(lambda: (1 + torch.randn(5, requires_grad=True)), False)
def test_saved_variable_packing_unpacking_did_not_save_original_with_hooks(self):
# Tests that packing/unpacking a SavedVariable works correctly with user-defined hooks
# The saved_original / did_not_save_original distinction corresponds to the `save_original`
# attribute of `SavedVariable`.
a = torch.randn(5, requires_grad=True)
y = torch.exp(a)
y.grad_fn._raw_saved_result.register_hooks(lambda x: x, lambda x: x)
self.assertEqual(y, y.grad_fn._saved_result)
self.assertIs(y.grad_fn, y.grad_fn._saved_result.grad_fn)
y.sum().backward()
self.assertEqual(a.grad, y)
def test_saved_variable_packing_unpacking_saved_original_with_default_hooks(self):
# Tests that default hooks are properly registered, used and reset
# The saved_original / did_not_save_original distinction corresponds to the `save_original`
# attribute of `SavedVariable`.
# See also:
# - test_saved_variable_packing_unpacking_saved_original_with_hooks
def pack(x):
warnings.warn("pack")
return x
with torch.autograd.graph.saved_tensors_hooks(pack, lambda x: x):
a = torch.ones(5, requires_grad=True)
warnings.simplefilter('always')
with warnings.catch_warnings(record=True) as w:
y = a * a
# should raise two warnings from a being saved twice
self.assertEqual(len(w), 2)
with torch.autograd.graph.saved_tensors_hooks(lambda x: x, lambda x: x):
a = torch.randn(5, requires_grad=True)
y = a * a
self.assertEqual(a, y.grad_fn._saved_self)
self.assertEqual(a, y.grad_fn._saved_other)
y.sum().backward()
self.assertEqual(2 * a, a.grad)
with torch.autograd.graph.saved_tensors_hooks(lambda x: 2 * x, lambda x: x / 2):
a = torch.randn(5, requires_grad=True)
y = a * a
self.assertEqual(a, y.grad_fn._saved_self)
self.assertEqual(a, y.grad_fn._saved_other)
y.sum().backward()
self.assertEqual(2 * a, a.grad)
with torch.autograd.graph.saved_tensors_hooks(lambda x: 2 * x, lambda x: x):
a = torch.randn(5, requires_grad=True)
y = a * a
self.assertEqual(2 * a, y.grad_fn._saved_self)
self.assertEqual(2 * a, y.grad_fn._saved_other)
y.sum().backward()
self.assertEqual(4 * a, a.grad)
# Hooks are correctly reset after exiting the context
a = torch.randn(5, requires_grad=True)
y = a * a
self.assertEqual(a, y.grad_fn._saved_self)
self.assertEqual(a, y.grad_fn._saved_other)
y.sum().backward()
self.assertEqual(2 * a, a.grad)
def test_saved_variable_packing_unpacking_did_not_save_original_with_default_hooks(self):
# See also test_saved_variable_packing_unpacking_did_not_save_original_with_hooks
with torch.autograd.graph.saved_tensors_hooks(lambda x: x, lambda x: x):
a = torch.randn(5, requires_grad=True)
y = torch.exp(a)
self.assertEqual(y, y.grad_fn._saved_result)
y.sum().backward()
self.assertEqual(a.grad, y)
def test_setting_default_saved_variable_hooks_twice_should_not_fail(self):
with torch.autograd.graph.saved_tensors_hooks(lambda x: x, lambda x: x):
with torch.autograd.graph.saved_tensors_hooks(lambda x: x, lambda x: x):
pass
def test_setting_default_saved_variable_hooks_twice_should_use_inner(self):
with torch.autograd.graph.saved_tensors_hooks(lambda x: 3 * x, lambda x: 3 * x):
b = torch.randn(5, requires_grad=True)
with torch.autograd.graph.saved_tensors_hooks(lambda x: 5 * x, lambda x: 5 * x):
a = torch.randn(5, requires_grad=True)
y = a * a
z = b * b
y.sum().backward()
z.sum().backward()
self.assertEqual(2 * 5 * 5 * a, a.grad)
self.assertEqual(2 * 3 * 3 * b, b.grad)
def test_save_on_cpu_and_checkpoint(self):
a = torch.randn(2, 2, requires_grad=True)
b = a.pow(2).pow(2).pow(2).pow(2)
b.sum().backward()
b_grad = a.grad.clone()
a.grad.zero_()
with torch.autograd.graph.save_on_cpu():
h = a.pow(2)
h = checkpoint(lambda x: x.pow(2).pow(2), h, use_reentrant=False)
c = h.pow(2)
c.sum().backward()
c_grad = a.grad.clone()
a.grad.zero_()
def f(a):
h = a.pow(2)
with torch.autograd.graph.save_on_cpu():
h = h.pow(2).pow(2)
return h.pow(2)
d = checkpoint(f, a, use_reentrant=False)
d.sum().backward()
d_grad = a.grad.clone()
self.assertEqual(b_grad, c_grad)
self.assertEqual(b_grad, d_grad)
def test_pack_hook_with_inplace_modification_should_fail(self):
a = torch.randn(5, requires_grad=True)
def inc(x):
x += 1
return x
with torch.autograd.graph.saved_tensors_hooks(inc, lambda x: x):
with self.assertRaisesRegex(RuntimeError, "A saved tensor pack hook is modifying its input in place."):
y = torch.exp(a)
y = torch.exp(a)
with self.assertRaisesRegex(RuntimeError, "A saved tensor pack hook is modifying its input in place."):
y.grad_fn._raw_saved_result.register_hooks(inc, lambda x: x)
def test_saving_variable_to_disk(self):
with tempfile.TemporaryDirectory() as tmp_dir:
def pack(x):
name = os.path.join(tmp_dir, str(uuid.uuid4()))
torch.save(x, name)
return name
def unpack(name):
return torch.load(name)
with torch.autograd.graph.saved_tensors_hooks(pack, unpack):
a = torch.ones(5, requires_grad=True)
y = a * a
self.assertEqual(a, y.grad_fn._saved_self)
y.sum().backward()
self.assertEqual(2 * a, a.grad)
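# Descriptive note: the pack hook may return any Python object (here a file path), as long
# as the matching unpack hook can turn it back into a tensor with the same content; autograd
# only ever hands the packed object back to unpack and does not interpret it.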
def test_default_saved_variable_hooks_double_backward(self):
with torch.autograd.graph.saved_tensors_hooks(lambda x: x, lambda x: x):
a = torch.randn(5, requires_grad=True)
y = a ** 3
s = torch.sum(y)
g, = torch.autograd.grad(s, (a, ), create_graph=True)
g.sum().backward()
self.assertEqual(6 * a, a.grad)
with torch.autograd.graph.saved_tensors_hooks(lambda x: 2 * x, lambda x: x):
a = torch.randn(5, requires_grad=True)
y = a ** 3
s = torch.sum(y)
g, = torch.autograd.grad(s, (a, ), create_graph=True)
g.sum().backward()
# factor 2 because only a is saved once
self.assertEqual(6 * 2 * a, a.grad)
a = torch.randn(5, requires_grad=True)
y = a ** 3
s = torch.sum(y)
with torch.autograd.graph.saved_tensors_hooks(lambda x: 2 * x, lambda x: x):
g, = torch.autograd.grad(s, (a, ), create_graph=True)
g.sum().backward()
# factor 4 because pow_backward is grad * (exp * self.pow(exp - 1))
# so grad is saved and self (i.e. a) is saved
self.assertEqual(6 * 4 * a, a.grad)
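# Worked out: the graph built while computing g under the hooks (pow_backward) saves both
# the incoming grad and a; each unpacks to twice its value through the 2 * x pack hook,
# contributing a factor 2 apiece, hence 6 * (2 * 2) * a above.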
with torch.autograd.graph.saved_tensors_hooks(lambda x: 2 * x, lambda x: x):
a = torch.randn(5, requires_grad=True)
y = a ** 3
s = torch.sum(y)
g, = torch.autograd.grad(s, (a, ), create_graph=True)
g.sum().backward()
# combining the two above blocks: 2 * 4 = 8
# note that in that sense, a is saved twice
self.assertEqual(6 * 8 * a, a.grad)
def test_graph_save_on_cpu(self):
def test(get_input, cuda, pin_memory):
with torch.autograd.graph.save_on_cpu(pin_memory):
a = get_input()
if cuda:
a.cuda()
y = a * a
self.assertEqual(a, y.grad_fn._saved_self)
self.assertEqual(a, y.grad_fn._saved_other)
self.assertEqual(a.dtype, y.grad_fn._saved_self.dtype)
self.assertEqual(a.layout, y.grad_fn._saved_self.layout)
if y.is_sparse:
y = y.to_dense()
y.sum().backward()
actual = 2 * a
expected = a.grad
if a.is_sparse:
actual = actual.coalesce()
expected = expected.coalesce()
self.assertEqual(actual, expected)
for cuda in [False] + ([True] if torch.cuda.is_available() else []):
for pin_memory in [True, False]:
# FloatTensor
test(lambda: torch.randn(5, requires_grad=True), cuda, pin_memory)
# DoubleTensor
test(lambda: torch.randn(5, requires_grad=True, dtype=torch.double), cuda, pin_memory)
# Sparse tensor
x = torch.sparse_coo_tensor(torch.tensor([[1, 1]]).long(), torch.tensor([1., 1.]), requires_grad=True)
test(lambda: x, cuda, pin_memory)
@unittest.skipIf(not TEST_CUDA, "test requires CUDA")
def test_graph_save_on_cpu_cuda(self):
def f(x):
a = x + 1
return a * a
# with grad
a = torch.ones(1, requires_grad=True, device="cuda")
y = f(a)
memory_with_grad = torch.cuda.memory_allocated()
del a
del y
# without grad
a = torch.ones(1, requires_grad=True, device="cuda")
with torch.no_grad():
y = f(a)
memory_without_grad = torch.cuda.memory_allocated()
self.assertGreater(memory_with_grad, memory_without_grad)
del a
del y
# with hooks
with torch.autograd.graph.save_on_cpu():
a = torch.ones(1, requires_grad=True, device="cuda")
y = f(a)
memory_with_hooks = torch.cuda.memory_allocated()
self.assertEqual(memory_with_hooks, memory_without_grad)
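# A minimal usage sketch of save_on_cpu (assumes a CUDA device is available; pin_memory is
# optional and only affects transfer speed):
#
#   with torch.autograd.graph.save_on_cpu(pin_memory=True):
#       x = torch.randn(1024, 1024, device="cuda", requires_grad=True)
#       loss = (x * x).sum()      # activations saved for backward live on the CPU
#   loss.backward()               # they are copied back to the GPU when backward needs them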
def test_pynode_destruction_deadlock(self):
script = """
import torch
class Foo(torch.autograd.Function):
@staticmethod
def forward(ctx, x):
return x.clone()
@staticmethod
def backward(ctx, gO):
return gO.clone()
def get_out():
inp = torch.rand(2, requires_grad=True)
# The python function is first so that it runs
# last in the backward pass
right = Foo.apply(inp)
# An op that creates new memory
left1 = inp.clone()
# An op that saves its input
left2 = left1 ** 2
# Inplace modify so that the backward for
# left2 always raises an error
left1 += 1
# An op that takes both sides as input.
# After running, the last op of each side will be in
# the ready queue,
# and the op for the left side will run first as it was
# executed last during the forward.
out = left2 + right
return out
# Nothing here should be a global variable because, as far as
# I can tell, Python leaks all the global objects.
get_out().sum().backward()
# This used to deadlock when the PyNode is being destroyed after
# the error is raised.
"""
try:
subprocess.check_output(
[sys.executable, '-c', script],
stderr=subprocess.STDOUT,
# On Windows, opening the subprocess with the default CWD makes `import torch`
# fail, so just set CWD to this script's directory
cwd=os.path.dirname(os.path.realpath(__file__)),
# It is ok to have an extra long timeout here as a timeout means the test failed
timeout=20)
except subprocess.TimeoutExpired as e:
self.fail(msg="Example code timed out! See the code sample in the test for details.")
except subprocess.CalledProcessError as e:
err_msg = "RuntimeError: one of the variables needed for gradient computation"
self.assertTrue(err_msg in e.output.decode("utf-8"))
def index_perm_variable(shape, max_indices):
if not isinstance(shape, tuple):
shape = (shape,)
index = torch.randperm(max_indices).narrow(0, 0, reduce(mul, shape)).view(shape)
return index
def bernoulli_scalar():
return torch.tensor(0, dtype=torch.uint8).bernoulli_()
class TestAutogradForwardModeBatchedGrad(TestCase):
def test_out_of_place_basic(self):
a = torch.rand(4, 4, dtype=torch.double, requires_grad=True)
b = torch.rand(4, 4, dtype=torch.double, requires_grad=True)
self.assertTrue(gradcheck(torch.sin, a, check_forward_ad=True, check_batched_grad=True,
check_batched_forward_grad=True))
self.assertTrue(gradcheck(torch.add, (a, b), check_forward_ad=True, check_batched_grad=True,
check_batched_forward_grad=True))
def test_out_of_place_not_same_layout(self):
input = torch.zeros([2, 2]).transpose(0, 1)
tangent = torch.zeros([2, 2, 2])
def jvp(tangent):
with fwAD.dual_level():
x = fwAD.make_dual(input, tangent)
return fwAD.unpack_dual(x)[1]
x_tangent = torch._vmap_internals._vmap(jvp, 0, 0)(tangent)
self.assertIsNot(x_tangent, tangent)
def test_inplace_on_view_same_layout(self):
input = torch.zeros([2, 2])
tangent = torch.zeros([2, 2, 2])
base = torch.zeros([2, 2])
view = base.view_as(base)
def jvp(tangent):
with fwAD.dual_level():
x = fwAD.make_dual(input, tangent)
view.copy_(x)
return fwAD.unpack_dual(x)[1], fwAD.unpack_dual(view)[1], fwAD.unpack_dual(view._base)[1]
x_tangent, view_tangent, base_tangent = torch._vmap_internals._vmap(jvp, 0, 0)(tangent)
self.assertFalse(view_tangent._is_view()) # Optimization to share the same tensor!
self.assertIs(view_tangent, base_tangent)
self.assertIs(x_tangent, tangent)
self.assertIs(view_tangent, tangent)
def test_inplace_on_view_not_same_layout(self):
input = torch.zeros([2, 2])
tangent = torch.zeros([2, 2, 2])
view = torch.zeros([2, 2]).transpose(0, 1)
def jvp(tangent):
with fwAD.dual_level():
x = fwAD.make_dual(input, tangent)
view.copy_(x)
return fwAD.unpack_dual(x)[1], fwAD.unpack_dual(view)[1], fwAD.unpack_dual(view._base)[1]
x_tangent, view_tangent, base_tangent = torch._vmap_internals._vmap(jvp, 0, 0)(tangent)
self.assertIs(view_tangent._base, base_tangent)
self.assertIs(x_tangent, tangent)
self.assertIsNot(view_tangent, tangent)
def test_metadata_check_for_storage_numel_skipped(self):
# See: test_metadata_check_checks_storage_numel for the reverse of this test
primal = torch.randn(5)[:4].detach()
self.assertEqual(len(primal.storage()), 5)
tangent = torch.randn(10, 4)
def jvp(tangent):
with fwAD.dual_level():
dual = fwAD.make_dual(primal, tangent)
_, unpacked_tangent = fwAD.unpack_dual(dual)
# No copy is made
self.assertIs(tangent, unpacked_tangent)
# as_strided raises
with self.assertRaisesRegex(RuntimeError, "can access memory outside of `tensor`"):
dual.as_strided((5,), (1,), 0)
return unpacked_tangent
torch._vmap_internals._vmap(jvp, 0, 0)(tangent)
class TestAutogradForwardMode(TestCase):
def tearDown(self):
# Ensure that a failing test won't make others fail
while fwAD._current_level >= 0:
fwAD.exit_dual_level()
super().tearDown()
def test_forward_level_cleanup(self):
def get_tensor_and_weak_ref():
# Create a new Tensor and weak reference
t = torch.rand(2, requires_grad=True)
return t, torch._C._WeakTensorRef(t)
# Sanity check that the helper function works as expected
t, t_ref = get_tensor_and_weak_ref()
self.assertFalse(t_ref.expired())
del t
self.assertTrue(t_ref.expired())
# Main test code
foo = torch.rand(2)
with fwAD.dual_level():
tangent, tangent_ref = get_tensor_and_weak_ref()
self.assertFalse(tangent_ref.expired())
dual = fwAD.make_dual(foo, tangent)
self.assertFalse(tangent_ref.expired())
# Make sure that the tangent we provided has been re-used as is
self.assertTrue(fwAD.unpack_dual(dual)[1] is tangent)
# Make sure that dual is keeping the tangent alive
del tangent
self.assertFalse(tangent_ref.expired())
# Make sure that the dual level does not keep the c++
# version of the tangent alive
del dual
self.assertTrue(tangent_ref.expired())
def test_size_check(self):
foo = torch.rand(2)
tangent = torch.rand(3)
with fwAD.dual_level():
with self.assertRaisesRegex(RuntimeError, "Trying to set a forward gradient that has a different size"):
dual = fwAD.make_dual(foo, tangent)
dual = fwAD.make_dual(foo, tangent[1:])
def test_metadata_check_checks_storage_numel(self):
primal = torch.randn(5)[:4].detach()
self.assertEqual(len(primal.storage()), 5)
tangent = torch.randn(4)
with fwAD.dual_level():
dual = fwAD.make_dual(primal, tangent)
_, unpacked_tangent = fwAD.unpack_dual(dual)
# Verify that mutating the unpacked tangent does not affect the original tangent
tangent_clone = tangent.clone()
unpacked_tangent *= 2
self.assertTrue(torch.allclose(tangent_clone, tangent))
# as_strided runs without error
dual.as_strided((5,), (1,), 0)
def test_metadata_check_checks_ignores_size_zero(self):
a = torch.ones(0).as_strided((0, 1,), (1, 1,), 0)
b = torch.ones(0).as_strided((0, 1,), (1, 0,), 0)
with fwAD.dual_level():
dual = fwAD.make_dual(a, b)
torch.diagonal(dual, offset=0)
input = torch.rand([0, 1], dtype=torch.complex128, requires_grad=True)
func = partial(torch.diagonal, offset=0)
torch.autograd.gradcheck(func, (input,), check_forward_ad=True)
def test_metadata_check_when_primal_has_conj_bit(self):
# Make sure the _has_same_storage_numel is a fallthrough, so that
# conj bit does not materialize. If it materializes it would
# cause the layout check to fail for views that do not index
# the entire storage.
a = torch.randn(2, 2, dtype=torch.cdouble).conj()
b = torch.rand_like(a)
self.assertTrue(torch.is_conj(a))
self.assertEqual(len(a.storage()), len(b.storage()))
with fwAD.dual_level():
dual = fwAD.make_dual(a, b)
dual[1:]
def test_metadata_check_when_primal_has_neg_bit(self):
# Make sure the _has_same_storage_numel is a fallthrough, so that
# neg bit does not materialize. If it materializes it would
# cause the layout check to fail for views that do not index
# the entire storage.
a = torch.randn(2, 2, dtype=torch.cdouble).conj().imag
b = torch.randn(2, 2, dtype=torch.cdouble).imag
self.assertTrue(torch.is_neg(a))
self.assertEqual(len(a.storage()), len(b.storage()))
with fwAD.dual_level():
dual = fwAD.make_dual(a, b)
dual[1:]
# The following test functions want to ensure all the following behaviors:
# - Ensure that default level system in the python binding works
# - Ensure that only level 0 exists and nesting is properly disabled
# - Ensure that printing works fine
# - Ensure that basic packing/unpacking works
# - Ensure that advanced packing/unpacking works
# - For memory / version counter share
# - For backward AD (regular ops)
# - Ensure that view + inplace for both modes work fine
# - Ensure we do proper cleanup on exit of a level
def test_default_level(self):
foo = torch.rand(2)
bar = torch.rand(2)
with fwAD.dual_level():
baz = fwAD.make_dual(foo, bar)
baz_primal, baz_tangent = fwAD.unpack_dual(baz)
self.assertEqual(baz_primal, foo)
# We don't actually need to enforce that these two are the exact same python
# object, feel free to relax in the future
self.assertIs(baz_tangent, bar)
baz_primal, baz_tangent = fwAD.unpack_dual(baz)
self.assertEqual(baz_primal, foo)
self.assertEqual(baz_tangent, None)
def test_nested_level(self):
with fwAD.dual_level() as level:
# For now only level 0 exists
self.assertEqual(level, 0)
with fwAD.dual_level():
with self.assertRaisesRegex(RuntimeError, "Nested forward mode AD is not supported at the moment"):
nest_level = fwAD.enter_dual_level()
def test_set_fw_grad_having_own_fw_grad_at_same_level(self):
foo = torch.rand(2)
bar = torch.rand(2)
baz = torch.rand(2)
with fwAD.dual_level():
dual = fwAD.make_dual(foo, bar)
with self.assertRaisesRegex(RuntimeError, "has a forward gradient at the same level"):
fwAD.make_dual(baz, dual)
def test_make_dual_inference_tensor_in_inference_mode(self):
with torch.inference_mode():
foo = torch.rand(2)
bar = torch.rand(2)
foo_copy = foo.clone()
with fwAD.dual_level():
dual = fwAD.make_dual(foo, bar)
self.assertFalse(dual._is_view())
dual += 1
self.assertFalse(torch.allclose(foo, foo_copy))
def test_make_dual_torch_dispatch(self):
counter = [0]
class MySubclass(torch.Tensor):
def __new__(cls, data=None):
return torch.Tensor._make_subclass(cls, data)
__torch_function__ = torch._C._disabled_torch_function_impl
@classmethod
def __torch_dispatch__(cls, func, types, args=(), kwargs=None):
if func.overloadpacket == torch.ops.aten.alias:
counter[0] += 1
# Make sure we can re-enable autograd here
with torch.overrides.enable_reentrant_dispatch():
foo = torch.rand(1, requires_grad=True)
self.assertIsNotNone(foo.exp().grad_fn)
with no_dispatch():
return func(*args, **kwargs)
a = torch.tensor(1.)
s = MySubclass(a)
with fwAD.dual_level():
# Only the primal has "alias" called on it
fwAD.make_dual(s, torch.rand_like(s))
self.assertEqual(counter[0], 1)
fwAD.make_dual(torch.rand_like(s), s)
self.assertEqual(counter[0], 1)
def test_print(self):
with fwAD.dual_level() as level:
a = torch.rand(3)
self.assertFalse("tangent=" in str(a))
b = fwAD.make_dual(a, torch.rand(3))
self.assertFalse("tangent=" in str(a))
self.assertTrue("tangent=" in str(b))
b_primal, b_tangent = fwAD.unpack_dual(b)
self.assertFalse("tangent=" in str(b_primal))
self.assertFalse("tangent=" in str(b_tangent))
def test_basic_packing_unpacking(self):
foo = torch.rand(2)
bar = torch.rand(2)
with fwAD.dual_level():
baz = fwAD.make_dual(foo, bar)
baz_primal, baz_tangent = fwAD.unpack_dual(baz)
self.assertEqual(baz_primal, foo)
self.assertIs(baz_tangent, bar)
# Check unpacked dual is returned as a named tuple
# NB: Every invocation of unpack_dual returns a new tensor view
self.assertIsNot(baz_primal, fwAD.unpack_dual(baz).primal)
self.assertEqual(baz_primal, fwAD.unpack_dual(baz).primal)
self.assertIs(baz_tangent, fwAD.unpack_dual(baz).tangent)
# Check that packing/unpacking did not change the input
foo_primal, foo_tangent = fwAD.unpack_dual(foo)
self.assertEqual(foo_primal, foo)
self.assertIsNone(foo_tangent)
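# A minimal forward-mode sketch (same fwAD APIs as above) showing how a JVP is read off a
# dual tensor:
#
#   primal, tangent = torch.rand(2), torch.rand(2)
#   with fwAD.dual_level():
#       dual = fwAD.make_dual(primal, tangent)
#       out = dual.exp()
#       jvp = fwAD.unpack_dual(out).tangent   # equals primal.exp() * tangent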
def test_advanced_packing_unpacking(self):
foo = torch.rand(2)
bar = torch.ones(2)
# Memory and version counter check
with fwAD.dual_level():
dual = fwAD.make_dual(foo, bar)
# Ensure that they are sharing memory and version counter
self.assertEqual(dual.storage().data_ptr(), foo.storage().data_ptr())
# Ensure we properly share the version counter
self.assertEqual(foo._version, dual._version)
foo.add_(1)
self.assertEqual(foo._version, dual._version)
# Unpacking should only create aliases as well
dual_primal, dual_tangent = fwAD.unpack_dual(dual)
self.assertEqual(dual_primal.storage().data_ptr(), foo.storage().data_ptr())
self.assertEqual(dual_tangent.storage().data_ptr(), bar.storage().data_ptr())
# And the tangent is actually re-used as-is so it is still the same Tensor
self.assertIs(dual_tangent, bar)
# Ensure we properly share the version counter
self.assertEqual(foo._version, dual_primal._version)
foo.add_(1)
self.assertEqual(foo._version, dual_primal._version)
self.assertEqual(bar._version, dual_tangent._version)
bar.add_(1)
self.assertEqual(bar._version, dual_tangent._version)
# backward mode check
with fwAD.dual_level():
foo.requires_grad_()
bar.requires_grad_()
# Check that backward gradients properly propagates through packing/unpacking
dual = fwAD.make_dual(foo, bar)
p, t = fwAD.unpack_dual(dual)
gfoo, gbar = torch.autograd.grad(p.sum(), (foo, bar), retain_graph=True, allow_unused=True)
self.assertEqual(gfoo, torch.ones_like(foo))
self.assertIsNone(gbar)
gfoo, gbar = torch.autograd.grad(t.sum(), (foo, bar), retain_graph=True, allow_unused=True)
self.assertIsNone(gfoo)
self.assertEqual(gbar, torch.ones_like(bar))
# Check that forward gradients are impacted by detach()
detached_dual = dual.detach()
out = detached_dual * 2
p, t = fwAD.unpack_dual(out)
self.assertFalse(p.requires_grad)
self.assertEqual(p, foo * 2)
self.assertIsNone(t)
# Check that forward gradients are not impacted by no_grad
with torch.no_grad():
out = dual * 3
p, t = fwAD.unpack_dual(out)
self.assertFalse(p.requires_grad)
self.assertFalse(t.requires_grad)
self.assertEqual(p, foo * 3)
self.assertEqual(t, bar * 3)
# Check that forward gradients are not impacted by inplace detach
dual = dual.clone()
dual.detach_()
out = dual * 2
p, t = fwAD.unpack_dual(out)
self.assertFalse(p.requires_grad)
self.assertEqual(p, foo * 2)
self.assertIsNone(t)
def test_view_inplace_non_differentiable_views(self):
original_foo = torch.rand(2, dtype=torch.double)
original_bar = torch.ones(2, dtype=torch.double)
# Do clones to be able to compare the values updated inplace
# with the original content of these Tensors
foo = original_foo.clone()
bar = original_bar.clone()
with fwAD.dual_level():
# Note that in this test, we use "update" to mean computing the right tangent for the dual
# All the inplace operations here are expected to update the primal value of the Tensors but
# not always their tangents.
# Also, all mentions of "non differentiable view" here mean non forward differentiable view
# unless specified otherwise.
# See note [Forward Grad View/inplace] for more details on how these views work.
# Check that inplace ops do not update non-differentiable views
# Non differentiable view
dual = fwAD.make_dual(foo, bar)
dual *= 2
# Check that non differentiable view's tangent was not updated
self.assertIsNone(fwAD.unpack_dual(foo)[1])
# Check that the computed result is correct
self.assertEqual(bar, original_bar * 2)
self.assertEqual(fwAD.unpack_dual(dual)[1], original_bar * 2)
self.assertEqual(foo, original_foo * 2)
self.assertEqual(fwAD.unpack_dual(dual)[0], original_foo * 2)
# Other non differentiable view
dual_primal, dual_tangent = fwAD.unpack_dual(dual)
self.assertIsNone(fwAD.unpack_dual(dual_primal)[1])
self.assertIsNone(fwAD.unpack_dual(dual_tangent)[1])
dual_primal *= 2
# Ensure dual's tangent did not change
self.assertEqual(fwAD.unpack_dual(dual)[0], original_foo * 4)
self.assertEqual(fwAD.unpack_dual(dual)[1], original_bar * 2)
dual_tangent *= 2
# Ensure dual's primal did not change
self.assertEqual(fwAD.unpack_dual(dual)[0], original_foo * 4)
self.assertEqual(fwAD.unpack_dual(dual)[1], original_bar * 4)
def test_view_inplace_differentiable_views(self):
original_foo = torch.rand(2)
original_bar = torch.ones(2)
# Do clones to be able to compare the values updated inplace
# with the original content of these Tensors
foo = original_foo.clone()
bar = original_bar.clone()
with fwAD.dual_level():
# Check that inplace ops do update differentiable view but stop at non differentiable ones
# A non differentiable view
dual = fwAD.make_dual(foo, bar)
# A differentiable view
view = dual.narrow(0, 0, 1)
view *= 2
# Check that non differentiable view was not updated
self.assertIsNone(fwAD.unpack_dual(foo)[1])
# Check that differentiable view was updated
self.assertEqual(fwAD.unpack_dual(dual)[1], torch.tensor([2., 1.]))
self.assertEqual(fwAD.unpack_dual(view)[1], torch.tensor([2.]))
# Check that we track differentiable view even for Tensors that are not dual
baz = torch.rand(2)
baz += dual
self.assertEqual(fwAD.unpack_dual(baz)[1], fwAD.unpack_dual(dual)[1])
# Updates through a view should propagate the tangent as well
baz = torch.rand(2)
baz[0] = dual[0]
self.assertEqual(fwAD.unpack_dual(baz)[1][0], fwAD.unpack_dual(dual)[1][0])
# Unused values get a gradient of 0
self.assertEqual(fwAD.unpack_dual(baz)[1][1], 0.)
# Check that forward non-differentiable views do prevent gradient update
baz = torch.rand(2)
view = baz.detach()
view += dual
self.assertIsNone(fwAD.unpack_dual(baz)[1])
def test_view_inplace_always_creates_a_view(self):
# See https://github.com/pytorch/pytorch/issues/67800
# The codepath may depend on the op. At the time of writing, when self is not a dual tensor
# the resulting forward grad for self for...
# - add_ has the same layout as self
# - mul_ has the same layout as other
# This is kind of fragile because the above depends on how the forward grad expression
# is written. For add and mul at least, the output inherits the layout of LHS.
# We want to handle at least these two cases.
inplace_binary_ops = ( # Add more to this list?
lambda x, y: x.add_(y),
lambda x, y: x.mul_(y),
lambda x, y: x.copy_(y),
)
for inplace_binary_op in inplace_binary_ops:
base = torch.randn(2, 2)
view = base.transpose(0, 1)
primal = torch.randn(2, 2)
tangent = torch.randn(2, 2)
with fwAD.dual_level():
dual = fwAD.make_dual(primal, tangent)
inplace_binary_op(view, dual)
# Verify that a view relationship is created for both the primal and tangent
p, t = fwAD.unpack_dual(base)
p_clone = p.clone()
t_clone = t.clone()
view *= 2
p, t = fwAD.unpack_dual(base)
self.assertTrue(torch.allclose(p_clone * 2, p))
self.assertTrue(torch.allclose(t_clone * 2, t))
def test_grad_cleanup(self):
foo = torch.rand(2)
bar = torch.rand(2)
baz = torch.rand(2)
with fwAD.dual_level():
dual = fwAD.make_dual(foo, bar)
self.assertIsNone(fwAD.unpack_dual(foo)[1])
self.assertIs(fwAD.unpack_dual(dual)[1], bar)
self.assertIsNone(fwAD.unpack_dual(dual)[1])
with fwAD.dual_level():
self.assertIsNone(fwAD.unpack_dual(foo)[1])
new_dual = fwAD.make_dual(foo, baz)
dual_primal, dual_tangent = fwAD.unpack_dual(dual)
new_dual_primal, new_dual_tangent = fwAD.unpack_dual(new_dual)
self.assertEqual(dual_primal, new_dual_primal)
self.assertIsNone(dual_tangent)
self.assertEqual(new_dual_tangent, baz)
def test_detach_view_tracking(self):
# Default detach is both forward and backward non-differentiable
foo = torch.rand(2)
foo_weak = torch._C._WeakTensorRef(foo)
out = foo.detach()
del foo
self.assertTrue(foo_weak.expired())
def test_out_variant(self):
with fwAD.dual_level():
foo = fwAD.make_dual(torch.rand(2), torch.rand(2))
bar = torch.rand(2)
with self.assertRaisesRegex(RuntimeError, "out= function"):
torch.add(bar, bar, out=foo)
with self.assertRaisesRegex(RuntimeError, "out= function"):
torch.add(foo, bar, out=bar)
def test_non_differentiable(self):
with fwAD.dual_level():
foo = fwAD.make_dual(torch.rand(2), torch.rand(2))
bar = torch.rand(2)
# No differentiable outputs, shouldn't error
eq = foo == bar
# Inplace
foo.eq_(bar)
def test_create_new_zeros_with_same_meta(self):
new_zeroes_fn = torch.ops.aten._new_zeros_with_same_feature_meta
def check(a, b):
def assert_same_meta(t, target):
for num_bdim in range(t.dim()):
result = new_zeroes_fn(t, target, self_num_batch_dims=num_bdim)
self.assertEqual(result.dim(), target.dim() + num_bdim)
# Check size/strides match for feature dims only
for i in range(num_bdim, result.dim()):
self.assertEqual(result.size()[i], target.size()[i - num_bdim])
self.assertEqual(result.stride()[i], target.stride()[i - num_bdim])
# Check that we generate strides reasonably
if target.is_contiguous():
self.assertTrue(result.is_contiguous())
self.assertEqual(result.storage_offset(), target.storage_offset())
prod_of_t_bdims = reduce(operator.mul, t.size()[:num_bdim], 1)
self.assertEqual(len(result.storage()), len(target.storage()) * prod_of_t_bdims)
# TensorOptions is same
self.assertEqual(result.dtype, target.dtype)
assert_same_meta(a, b)
assert_same_meta(b, a)
a = torch.randn(5, dtype=torch.float)
b = torch.randn(2, 3, 4, dtype=torch.double)
check(a, b)
# non-contiguous case
a = torch.randn(2, 3, 4).transpose(0, 1).contiguous().transpose(0, 1)
b = torch.randn(2, 3, 4)
check(a, b)
a = torch.randn(5).narrow(0, 1, 2)
b = torch.randn(2)
check(a, b)
# tensor is not a view, but still does not index entirety of storage
a = torch.randn(5).resize_(4)
b = torch.randn(4)
check(a, b)
# Zero-numel tensors
a = torch.randn(1, 0, 2)
b = torch.randn(1, 2)
check(a, b)
# Scalar tensor
a = torch.tensor(1.)
b = torch.randn(1, 2)
check(a, b)
def test_backward_graph_destruction(self):
def fn():
a = torch.rand(10, requires_grad=True)
da = fwAD.make_dual(torch.rand_like(a), a)
# Create an object with a c++ cycle as:
# db -> AutogradMeta -> ForwardGrad -> db's grad
# db's grad -> AutogradMeta -> MulBackward
# MulBackward -> SavedVariable -> db
db = da.exp()
with fwAD.dual_level():
fn()
        # This test makes sure that we don't deadlock on exit of this
        # context manager. If we do, something is most likely wrong with the
        # locking of the forward AD level.
# Generic device type autograd tests.
class TestAutogradDeviceType(TestCase):
def test_min_max_median_backprops_to_all_values(self, device):
for f in [torch.min, torch.max, torch.median, torch.nanmedian]:
x1 = torch.tensor([1., 0., 1., 0., 1., 0.], device=device, requires_grad=True)
x2 = torch.tensor([float('nan'), float('nan'), float('nan')], requires_grad=True)
for x in [x1, x2]:
y = f(x)
y.backward()
self.assertEqual(x.grad.sum(), 1.)
self.assertEqual((x.grad == 1 / 3).sum(), 3)
def test_scatter_index_reduce_amin_amax_backprops_to_all_values(self, device):
# tests that gradients are evenly distributed when there are multiple max/min values
# tested here instead of adding a SampleInput as the backward for this case is non-differentiable for gradgrad
# as is the case for test_min_max_median_backprops_to_all_values above
fns = (torch.scatter_reduce, torch.index_reduce)
reduces = ('amin', 'amax')
for fn, reduction in product(fns, reduces):
input = torch.randn((2, 3), device=device, dtype=torch.float64, requires_grad=True)
src = input.clone().detach_().requires_grad_(True)
idx = torch.arange(2).to(dtype=torch.long, device=device)
if fn == torch.scatter_reduce:
idx = idx.unsqueeze(-1).expand((2, 3))
gradcheck(fn, (input, 0, idx, src, reduction), check_batched_grad=False)
def test_scatter_index_reduce_prod_gradgrad_error(self, device):
# test that double backward raises an error for the case where 2 zeros in src
# are scattered to the same position in self
input = torch.tensor([1.], device=device, dtype=torch.float64, requires_grad=True)
src = torch.tensor([0., 0.], device=device, dtype=torch.float64, requires_grad=True)
idx = torch.tensor([0, 0], device=device, dtype=torch.long)
for fn in (torch.scatter_reduce, torch.index_reduce):
# check that this case passes on gradcheck
gradcheck(fn, (input, 0, idx, src, 'prod'), check_batched_grad=False)
with self.assertRaisesRegex(RuntimeError, "Double backward is unsupported for"):
gradgradcheck(fn, (input, 0, idx, src, 'prod'))
@skipIfMps # the test doesn't work on MPS as double types are not supported
def test_parameter_resize(self, device):
asd = torch.nn.Parameter(torch.ones(16, dtype=torch.double, device=device))
for i in range(2):
with torch.no_grad():
asd.set_(asd[1:])
asd.grad = None
m = torch.cat((asd, asd))
m.sum().backward()
@skipIfMps # the test doesn't work on MPS as double types are not supported
@dtypes(torch.double, torch.cdouble)
def test_sparse_ctor_getter_backward(self, device, dtype):
# See NOTE [ Sparse: autograd and API ] on the expected behavior of this test
def _test(size, sparse_dim, nnz, device):
v_size = [nnz] + list(size[sparse_dim:])
i = torch.rand(sparse_dim, nnz)
i.mul_(torch.tensor(size[:sparse_dim]).unsqueeze(1).to(i))
i = i.to(torch.long)
inp = torch.randn(v_size, dtype=torch.double, device=device, requires_grad=True)
other = self.genSparseTensor(size, sparse_dim, nnz, is_uncoalesced=True, device=device,
dtype=dtype)[0]
def fn(v):
x = torch.sparse_coo_tensor(i, v, size, dtype=dtype, device=device)
y = (x + other).coalesce()
yv = y.values()
new_v = yv.tanh()
z = torch.sparse_coo_tensor(y.indices(), new_v, y.size())
return z.coalesce().values()
gradcheck(fn, (inp,), check_batched_grad=False)
# FIXME: make gradgradcheck work.
# gradgradcheck(fn, (inp,), check_batched_grad=False)
# assert that _values is non-differentiable
with self.assertRaisesRegex(RuntimeError, "does not have a grad_fn"):
other.detach().requires_grad_()._values().backward(torch.ones_like(other._values()))
for empty_i, empty_v, empty_nnz in product([True, False], repeat=3):
sparse_size = [] if empty_i else [2, 1]
dense_size = [1, 0, 2] if empty_v else [1, 2]
nnz = 0 if empty_nnz else 5
_test(sparse_size + dense_size, len(sparse_size), nnz, device)
@skipMeta
@skipIfMps
@dtypes(torch.double, torch.cdouble)
def test_sparse_backward(self, device, dtype):
class FixedGradientFunction(Function):
@staticmethod
def forward(ctx, x, grad_x):
ctx.save_for_backward(grad_x)
return x
@staticmethod
def backward(ctx, grad_x):
saved_grad_x, = ctx.saved_tensors
return saved_grad_x, None
size = torch.Size([6, 3, 2])
i1 = torch.tensor([
[0, 3, 4],
[0, 2, 2],
], dtype=torch.long)
v1 = make_tensor([3, 2], dtype=dtype, device=device)
sparse_grad1 = torch.sparse_coo_tensor(i1, v1, size, dtype=dtype, device=device)
i2 = torch.tensor([
[0, 1, 3, 4],
[0, 1, 2, 2],
], dtype=torch.long)
v2 = make_tensor([4, 2], dtype=dtype, device=device)
sparse_grad2 = torch.sparse_coo_tensor(i2, v2, size, dtype=dtype, device=device)
dense_grad = torch.rand(size, device=device, dtype=dtype)
fn = FixedGradientFunction
# sparse first
x = torch.randn(size, dtype=dtype, device=device, requires_grad=True)
(fn.apply(x, sparse_grad1) + fn.apply(x, dense_grad) + fn.apply(x, sparse_grad2)).sum().backward()
self.assertEqual(x.grad, dense_grad + sparse_grad1 + sparse_grad2)
# dense first
x = torch.randn(size, dtype=dtype, device=device, requires_grad=True)
(fn.apply(x, dense_grad) + fn.apply(x, sparse_grad1) + fn.apply(x, sparse_grad2)).sum().backward()
self.assertEqual(x.grad, dense_grad + sparse_grad1 + sparse_grad2)
# sparse only
x = torch.randn(size, dtype=dtype, device=device, requires_grad=True)
(fn.apply(x, sparse_grad1) + fn.apply(x, sparse_grad2)).sum().backward()
self.assertEqual(x.grad, sparse_grad1 + sparse_grad2)
# autograd tests via common_method_invocations don't allow input tensors to
# be sparse (RuntimeError: gradcheck expects all tensor inputs are dense when
# check_sparse_nnz is set to False.)
@skipIfMps
def test_sparse_mask_autograd(self, device):
tensor = torch.randn(3, requires_grad=True, device=device)
mask = torch.ones(3, device=device)
mask[1] = 0
mask = mask.to_sparse()
converted = tensor.sparse_mask(mask).to_dense()
converted.sum().backward()
self.assertEqual(tensor.grad, mask.to_dense())
@skipIfMps # the test doesn't work on MPS as double types are not supported
def test_pyscalar_conversions(self, device):
def _test_pyscalar_conversions(t, integral_conv):
# integral -> integral
l = t(torch.zeros(1, 1, 1, dtype=torch.long))
pyscalar = -12345
l[0] = pyscalar
self.assertEqual(integral_conv(l), pyscalar)
# floating point -> floating point
f = Variable(t(torch.randn(1, 1, dtype=torch.double)))
pyscalar = -12345.1
f[0] = pyscalar
self.assertEqual(float(f), pyscalar)
f[0] = nan
self.assertTrue(math.isnan(float(f)))
f[0] = inf
self.assertEqual(float(f), inf)
f[0] = -inf
self.assertEqual(float(f), -inf)
# integral -> floating point
# check we can convert something that loses precision
pyscalar = 1234567890123456789
self.assertNotEqual(pyscalar, integral_conv(float(pyscalar)))
l[0] = pyscalar
self.assertEqual(float(l), float(pyscalar))
# floating point -> integral
f[0] = nan
self.assertRaises(ValueError, lambda: integral_conv(f[0]))
f[0] = inf
self.assertRaises(OverflowError, lambda: integral_conv(f[0]))
f[0] = -inf
self.assertRaises(OverflowError, lambda: integral_conv(f[0]))
f[0] = sys.float_info.max
self.assertEqual(integral_conv(f), sys.float_info.max)
# bool, nonzero
def test_nonzero(tensor, value, expected):
tensor[0] = value
self.assertEqual(expected, bool(tensor))
self.assertEqual(expected, True if tensor else False)
test_nonzero(l, 0, False)
test_nonzero(l, -2, True)
test_nonzero(f, 0.0, False)
test_nonzero(f, sys.float_info.min, True)
test_nonzero(f, nan, bool(nan))
test_nonzero(f, inf, bool(inf))
test_nonzero(f, -inf, bool(-inf))
_test_pyscalar_conversions(lambda x: x.to(device), lambda x: int(x))
@dtypesIfMPS(torch.float32)
@dtypesIfCUDA(torch.half, torch.float, torch.double, torch.int8, torch.int16, torch.int32, torch.int64)
@dtypes(torch.float, torch.double, torch.int8, torch.int16, torch.int32, torch.int64)
def test_set_requires_grad_only_for_floats(self, device, dtype):
def f1():
a = torch.ones(1, dtype=dtype, device=device)
a.requires_grad_()
def f2():
a = torch.ones(1, dtype=dtype, device=device)
a.requires_grad = True
def f3():
torch.ones(1, dtype=dtype, device=device, requires_grad=True)
a = torch.ones(1, dtype=dtype, device=device)
a.requires_grad = False # should always work
a.requires_grad_(False)
for f in [f1, f2, f3]:
if dtype.is_floating_point:
f()
else:
with self.assertRaisesRegex(RuntimeError, 'floating point', msg="dt: {} device: {}".format(a.dtype, a.device)):
f()
@onlyCUDA
def test_advanced_indexing_backwards_large(self, device):
# See https://github.com/pytorch/pytorch/issues/22843
n = (1 << 16)
x = torch.rand(n, 1, device=device, requires_grad=True)
a = x[:, [0]]
a.sum().backward()
self.assertEqual(x.grad, torch.ones(n, 1, device=device))
def test_advanced_indexing_backwards_memory_format(self, device):
# See https://github.com/pytorch/pytorch/issues/36956
shape = (2, 8, 1, 2)
i = torch.randint(1, shape, device=device).contiguous(memory_format=torch.channels_last)
x = torch.randn(shape, requires_grad=True, device=device)
x[i].sum().backward()
def _test_reentrant_parent_error_on_cpu(self, device):
t1 = torch.rand([3, 3], requires_grad=True)
t2 = torch.rand([3, 3], device=device, requires_grad=True)
t3 = torch.rand([3, 3], device=device, requires_grad=True)
        # Parent graph: CPU graph.
t4 = t1 * t1
t5 = TestAutograd.SimulateBackwardError.apply(t4)
# Child gpu graph (much longer than parent graph).
prev = t2 * t2
for i in range(10):
prev = prev * t2
reentrant_root = prev
class ReentrantFunc(Function):
@staticmethod
def forward(ctx, inp):
return inp.clone()
@staticmethod
def backward(ctx, grad):
# Reentrant backward in child will take much longer.
reentrant_root.backward()
return grad
# Parent gpu graph.
t6 = ReentrantFunc.apply(t3)
t7 = t6 * t6
# Parent graph will error out first, while child graph will continue executing.
with self.assertRaisesRegex(Exception, "Simulate error"):
torch.autograd.backward([t5.sum(), t7.sum()])
# No grads should be accumulated since child graph will stop execution
# after parent receives error.
self.assertIsNone(t2.grad)
self.assertIsNone(t1.grad)
self.assertIsNone(t3.grad)
@onlyCUDA
def test_reentrant_parent_error_on_cpu(self, device):
def _get_cuda_memory_usage():
# we don't need CUDA synchronize because the statistics are not tracked at
            # actual freeing, but when the block is marked as free.
num_devices = torch.cuda.device_count()
gc.collect()
return tuple(torch.cuda.memory_allocated(i) for i in range(num_devices))
before = _get_cuda_memory_usage()
# Run as separate function so that gc can clean up everything when we
# check for memory usage.
self._test_reentrant_parent_error_on_cpu(device)
# Wait for autograd thread to cleanup failed tasks.
after = _get_cuda_memory_usage()
start = time.time()
while before != after and time.time() - start < 30:
time.sleep(0.1)
after = _get_cuda_memory_usage()
self.assertEqual(before, after)
@skipIfMps # the test doesn't work on MPS
# TODO: see if these tests can be ported to OpInfos or moved to where's test suite
def test_where_functional(self, device):
x = torch.randn(5, 5, dtype=torch.double, device=device, requires_grad=True)
y = torch.randn(5, 5, dtype=torch.double, device=device, requires_grad=True)
cond = mask_not_all_zeros((5, 5)).to(device=device)
def where(cond, x, y):
return torch.where(cond, x, y)
gradcheck(where, [cond, x, y], raise_exception=True)
gradgradcheck(where, [cond, x, y], [torch.randn(5, 5, device=device)])
x = torch.randn(5, 1, 5, dtype=torch.double, device=device, requires_grad=True)
y = torch.randn(5, 5, 1, dtype=torch.double, device=device, requires_grad=True)
gradcheck(where, [cond, x, y], raise_exception=True)
gradgradcheck(where, [cond, x, y], [torch.randn(5, 5, 5, device=device)])
@skipIfMps # the test doesn't work on MPS
def test_where_scalar(self, device):
x = torch.randn(5, 5, dtype=torch.double, device=device, requires_grad=True)
scalar = 4.
cond = mask_not_all_zeros((5, 5)).to(device=device)
def where_scalar_first(cond, x):
return torch.where(cond, scalar, x)
def where_scalar_second(cond, x):
return torch.where(cond, x, scalar)
gradcheck(where_scalar_first, (cond, x))
gradgradcheck(where_scalar_first, (cond, x))
gradcheck(where_scalar_second, (cond, x))
gradgradcheck(where_scalar_second, (cond, x))
@onlyCUDA
def test_free_unneeded_tensor(self, device):
x = torch.randn(2, 3, 10, 10, device=device, requires_grad=True)
m = torch.randn(1, 3, 1, 1, device=device)
z = x.sum()
base_mem = torch.cuda.memory_allocated()
z = ((x + 2) * m).sum()
end_mem = torch.cuda.memory_allocated()
# In the end the memory usage should remain equal, because neither of
# (x + 2) and ((x + 2) * m) should be kept alive for backward, while the
# previous allocation of z had the same size as the current one.
self.assertEqual(base_mem, end_mem)
@onlyCUDA
def test_pin_memory(self, device):
x = torch.randn(2, 2, dtype=torch.double, requires_grad=True)
self.assertEqual(x, x.pin_memory())
self.assertIsNot(x, x.pin_memory())
self.assertTrue(x.pin_memory().requires_grad)
gradcheck(lambda x: x.pin_memory(), [x])
gradgradcheck(lambda x: x.pin_memory(), [x])
@skipCUDAIfRocm
@onlyCUDA
def test_profiler_emit_nvtx(self, device):
# This test is not intended to ensure correctness of nvtx ranges.
# That would require something a great deal more complex (you'd have to create a
# profile in a subprocess, open it, and parse the sql somehow).
# This test is merely intended to catch if emit_nvtx breaks on construction.
a = torch.tensor([1, 2, 3], dtype=torch.float32, device=device)
with torch.cuda.profiler.profile():
with emit_nvtx():
a.add(1.0)
@onlyCUDA
def test_rnn_backward_to_input_but_not_parameters(self, device):
# this checks whether it is possible to not require
# weight parameters, but require inputs, see #7722
l = torch.nn.LSTM(2, 3).to(device)
for p in l.parameters():
p.requires_grad = False
s = torch.randn(1, 1, 2, requires_grad=True, device=device)
out, _ = l(s)
out.sum().backward()
self.assertFalse(s.grad is None or s.grad.abs().sum().item() == 0)
@skipIfMps # the test doesn't work as randn is not supported with type long
@deviceCountAtLeast(1)
def test_grad_assignment(self, devices):
x = torch.randn(5, 5, device=devices[0])
# Tests that the wrong type raises
with self.assertRaisesRegex(TypeError, "expected to be a Tensor or None"):
x.grad = 0
# Tests that the wrong shape raises
with self.assertRaises(RuntimeError):
x.grad = torch.randn(2, 2, device=devices[0])
# Tests that the wrong dtype raises
with self.assertRaises(RuntimeError):
x.grad = torch.randn(5, 5, dtype=torch.long, device=devices[0])
# Tests that self-assignment raises
with self.assertRaises(RuntimeError):
x.grad = x
# Tests device -> cpu grad assignment raises
if self.device_type != 'cpu':
with self.assertRaises(RuntimeError):
t_cpu = torch.rand(5, 5)
t_cpu.grad = torch.randn(5, 5, device=devices[0])
# Tests half type on CUDA
if self.device_type == 'cuda':
x = x.to(dtype=torch.half, device=devices[0])
x.grad = torch.zeros_like(x)
# Tests cross-device assignment raises
if len(devices) > 1:
x = torch.randn(5, 5, device=devices[0])
with self.assertRaises(RuntimeError):
x.grad = torch.randn(5, 5, device=devices[1])
@dtypesIfMPS(torch.float32)
@deviceCountAtLeast(1)
@dtypes(torch.float, torch.double)
def test_requires_grad_factory(self, devices, dtype):
fns = [torch.ones_like, torch.randn_like]
x = torch.randn(2, 3, dtype=dtype, device=devices[0])
for fn in fns:
for requires_grad in [True, False]:
output = fn(x, dtype=dtype, device=devices[0], requires_grad=requires_grad)
self.assertEqual(requires_grad, output.requires_grad)
self.assertIs(dtype, output.dtype)
self.assertEqual(devices[0], str(x.device))
@deviceCountAtLeast(2)
def test_unused_output_device(self, devices):
from torch.nn.parallel._functions import Broadcast
x = torch.randn(5, 5, dtype=torch.float, device=devices[0], requires_grad=True)
outputs = Broadcast.apply(list(range(len(devices))), x)
y = outputs[-1] * 2
y.sum().backward()
# TODO(#38095): Replace assertEqualIgnoreType. See issue #38095
self.assertEqualIgnoreType(x.grad, torch.ones(5, 5) * 2)
@deviceCountAtLeast(2)
def test_backward_device(self, devices):
# check that current device matches the variable's device
device = [None]
class Identity(torch.autograd.Function):
@staticmethod
def forward(ctx, x):
return x.clone()
@staticmethod
def backward(ctx, grad_output):
device[0] = grad_output.device
return grad_output.clone()
v = torch.randn(1, device=devices[1], requires_grad=True)
Identity.apply(v).backward()
self.assertEqual(str(device[0]), devices[1])
@deviceCountAtLeast(2)
def test_inputbuffer_add_multidevice(self, devices):
input = torch.randn(1, device=devices[0], requires_grad=True)
output = input.to(device=devices[1]) + input.to(device=devices[1])
output.backward()
@onlyCPU
def test_copy_(self, device):
        # At the time of writing this test, copy_ is not generated from native_functions.yaml,
        # and there was a bug where bfloat16 was not recognized as floating.
x = torch.randn(10, device=device, requires_grad=True)
floating_dt = floating_types_and(torch.half, torch.bfloat16)
for dt in floating_dt:
y = torch.empty(10, device=device, dtype=dt)
y.copy_(x)
self.assertTrue(y.requires_grad)
z = x.to(torch.bfloat16)
self.assertTrue(z.requires_grad)
def test_copy_forward_ad_broadcasting(self, device):
# copy_ allows the src to have a different shape from self as long as src is
# broadcastable to self. Make sure forward AD handles this case.
primal = torch.rand(3, 3, device=device)
tangent = torch.rand(3, 3, device=device)
non_dual = torch.rand(1, 3, 3, device=device)
with fwAD.dual_level():
dual = fwAD.make_dual(primal, tangent)
non_dual.copy_(dual)
@onlyCUDA
def test_simple_reentrant_cross_device(self, device):
class ReentrantFunc(Function):
_cpu_mode = True
@staticmethod
def forward(ctx, x):
return x * (x + 2)
@staticmethod
def backward(ctx, grad_output):
with torch.enable_grad():
if ReentrantFunc._cpu_mode:
new_param = torch.randn(2, 2, requires_grad=True)
(new_param ** 2).sum().backward()
else:
new_param = torch.randn(2, 2, device=device, requires_grad=True)
(new_param ** 2).sum().backward()
return grad_output
        # Reentrant starts on GPU thread, finishes on GPU thread
x = torch.randn(2, 2, device=device, requires_grad=True)
out = ReentrantFunc.apply(x)
out.sum().backward()
        # Reentrant starts on CPU thread, finishes on GPU thread
x = torch.randn(2, 2, requires_grad=True)
# set ReentrantFunc node to GPU to emit tasks to GPU queue
ReentrantFunc._cpu_mode = False
out = ReentrantFunc.apply(x)
out.sum().backward()
        # Reentrant starts on GPU thread, finishes on CPU thread
x = torch.randn(2, 2, device=device, requires_grad=True)
# set ReentrantFunc node to CPU to emit tasks to CPU queue
ReentrantFunc._cpu_mode = True
out = ReentrantFunc.apply(x)
out.sum().backward()
@onlyCUDA
def test_cross_device_reentrant_autograd(self, device):
# Output on gpu so that this task will be associated with the gpu thread
def fn_on_gpu(inp):
# Artificially increase the priority of the next op to make sure it runs
# as soon as we reach it before the ops of branch1.
dummy = inp * 2 * 2 * 2 * 2
return inp.to(device=device)
def parent_on_cpu(inp):
# Slow branch of ops on gpu so that the work queue for the gpu thread
# won't empty too quickly. They also have smaller priorities than the
# ones created by fn_on_gpu
branch1 = inp.to(device=device)
branch1 = branch1 / branch1
branch1 = branch1 / branch1
branch1 = branch1 / branch1
# Perform checkpoint on cpu tensors. So the last op performed in the reentrant
# autograd is an AccumulateGrad that runs on the cpu thread for the gpu thread.
# So the cpu thread will notify the gpu thread with an empty NodeTask.
branch2 = checkpoint(fn_on_gpu, inp)
out = branch2 + branch1
return out
inp = torch.rand(2, requires_grad=True)
out = parent_on_cpu(inp)
# This will segfault if the empty NodeTask is not handled properly in the
# gpu thread ReadyQueue
out.sum().backward()
def test_inplace_on_view_backprop_base(self, device):
# modify view and back-prop through base
root = torch.randn(2, 2, device=device, requires_grad=True)
x = root.clone()
v1 = x.narrow(0, 0, 1)
v1.mul_(2)
x.sum().backward()
self.assertEqual(root.grad.tolist(), [[2, 2], [1, 1]])
def test_inplace_on_view_backprop_view_of_view(self, device):
# modify view and backprop through view-of-view
root = torch.randn(2, 2, device=device, requires_grad=True)
x = root.clone()
v1 = x.narrow(0, 0, 1)
v2 = x.narrow(0, 0, 1)
v1.mul_(2)
v2.sum().backward()
self.assertEqual(root.grad.tolist(), [[2, 2], [0, 0]])
def test_inplace_on_view_of_view(self, device):
# modify view-of-view and backprop through base
root = torch.randn(2, 2, device=device, requires_grad=True)
x = root.clone()
v1 = x.narrow(0, 0, 1)
v2 = v1.narrow(1, 1, 1)
v2.mul_(2)
x.sum().backward()
self.assertEqual(root.grad.tolist(), [[1, 2], [1, 1]])
@skipIfMps # the test doesn't work on MPS as double types are not supported
def test_inplace_on_view_then_no_grad(self, device):
# Perform an in-place operation on a view of a non-leaf variable.
a = torch.ones(3, 1, dtype=torch.double, device=device, requires_grad=True)
b = a * 2
c = b.view_as(b)
c[0][0] = 3
# Force a graph update with grad disabled.
with torch.no_grad():
c.grad_fn
c.sum().backward()
@skipIfMps # the test doesn't work on MPS as double types are not supported
def test_inplace_on_view_gradcheck(self, device):
# gradcheck modifications to views
a = torch.randn(4, 4, dtype=torch.double, device=device, requires_grad=True)
b = torch.randn(2, 2, dtype=torch.double, device=device, requires_grad=True)
def func(root, b):
x = root.clone()
x.narrow(1, 2, 2).narrow(0, 1, 2).mul_(b)
x.narrow(1, 0, 2).narrow(0, 1, 2).mul_(b)
return x
gradcheck(func, [a, b], raise_exception=True)
go = torch.randn(a.size(), dtype=torch.double, device=device, requires_grad=True)
gradgradcheck(func, (a, b), (go,))
def test_inplace_on_view_multiple_outputs(self, device):
root = torch.arange(9., dtype=torch.double).reshape(3, 3).requires_grad_()
x = root.clone()
v1 = x.unbind()
with self.assertRaises(RuntimeError):
v1[0].mul_(2)
@skipIfMps # the test doesn't work on MPS as double types are not supported
def test_inplace_on_view_of_multiple_output_view(self, device):
a = torch.rand(10, dtype=torch.double, device=device, requires_grad=True).clone()
b = a.unbind(0)
c = b[0].view_as(b[0])
with self.assertRaises(RuntimeError):
c.mul_(2)
@skipIfMps # MPS backend doesn't support double types
def test_inplace_multiple_output_view_of_view(self, device):
a = torch.rand(10, dtype=torch.double, device=device, requires_grad=True).clone()
b = a.view_as(a)
c = b.unbind(0)
with self.assertRaises(RuntimeError):
c[0].mul_(2)
@skipIfMps # MPS backend doesn't support double types
def test_inplace_on_view_makes_base_require_grad(self, device):
# in-place modification to view makes base require grad
a = torch.randn(4, 4, dtype=torch.double, device=device, requires_grad=False)
b = torch.randn(4, 2, dtype=torch.double, device=device, requires_grad=True)
def func(root, b):
x = root.clone()
self.assertFalse(x.requires_grad)
x.narrow(1, 2, 2).mul_(b)
self.assertTrue(x.requires_grad)
return x
gradcheck(func, [a, b], raise_exception=True)
go = torch.randn(a.size(), dtype=torch.double, device=device, requires_grad=True)
gradgradcheck(func, (a, b), (go,))
def test_inplace_on_view_backprop_view(self, device):
# modify view and backprop through view
a = torch.tensor([2., 5.], device=device, requires_grad=False)
b = torch.tensor([3.], device=device, requires_grad=True)
res = a.narrow(0, 1, 1).mul_(b)
res.sum().backward()
self.assertEqual(b.grad.tolist(), [5])
self.assertIsNone(a.grad)
@skipIfMps # the test doesn't work on MPS as double types are not supported
def test_inplace_on_view_modify_base(self, device):
# Test that an in-place operation on a base that forced it to require
# grad also forces any previous views to require grad and backprop
# correctly
r = torch.ones(1, dtype=torch.double, device=device, requires_grad=True)
def fn(r):
x = torch.ones(5, dtype=torch.double, device=device)
v = x.select(0, 1)
self.assertFalse(v.requires_grad)
self.assertIsNone(v.grad_fn)
x.add_(r) # v is now dependent on r due to the in-place op on x
self.assertTrue(v.requires_grad)
return v
gradcheck(fn, [r])
gradgradcheck(fn, [r])
@skipIfMps # the test doesn't work on MPS as double types are not supported
def test_inplace_on_view_python(self, device):
# in-place modifications of Python-autograd created view
a = torch.randn(4, 4, dtype=torch.double, device=device, requires_grad=True)
b = torch.randn(2, 2, dtype=torch.double, device=device, requires_grad=True)
class PyAdd(torch.autograd.Function):
@staticmethod
def forward(ctx, x, y):
ctx.mark_dirty(x)
x.add_(y)
return x
@staticmethod
def backward(ctx, grad):
return grad, grad
def func(root, b):
x = root.clone()
PyAdd.apply(x.narrow(1, 2, 2).narrow(0, 1, 2), b)
PyAdd.apply(x.narrow(1, 0, 2).narrow(0, 1, 2), b)
return x
gradcheck(func, [a, b], raise_exception=True)
go = torch.randn(a.size(), dtype=torch.double, device=device, requires_grad=True)
gradgradcheck(func, (a, b), (go,))
def test_inplace_on_view_non_contig(self, device):
root = torch.ones(2, 3, 2, device=device).select(2, 1).t().requires_grad_(True)
x = root.clone()
v1 = x.narrow(0, 0, 1)
v2 = v1.narrow(1, 1, 1)
v2.mul_(2)
x.sum().backward()
self.assertEqual(root.grad.tolist(), [[1, 2], [1, 1], [1, 1]])
def test_inplace_on_view_multi_output_unsafe(self, device):
for f in [lambda t: t.unsafe_split(1),
lambda t: t.unsafe_split_with_sizes((1, 1, 1)),
lambda t: t.unsafe_chunk(3)]:
a = torch.randn(3, 3, device=device, requires_grad=True)
b = a + a
s1, s2, s3 = f(b)
s1.mul_(s2)
s1.sum().backward()
def test_inplace_on_view_multi_output_safe(self, device):
for f in [lambda t: t.split(1),
lambda t: t.split_with_sizes((1, 1, 1)),
lambda t: t.chunk(3)]:
a = torch.randn(3, 3, device=device, requires_grad=True)
b = a + a
s1, s2, s3 = f(b)
error_msg = 'This view is the output of a function that returns multiple views.'
with self.assertRaisesRegex(RuntimeError, error_msg):
s1.mul_(s2)
@skipIfMps # the test doesn't work on MPS as double types are not supported
def test_mv_grad_stride_0(self, device):
# Reference: https://github.com/pytorch/pytorch/issues/38315
mat = torch.randn(2, 2, dtype=torch.double, device=device)
vec = torch.randn(1, dtype=torch.double, device=device).requires_grad_(True)
def fn(vec):
# Expand inside the function to make sure the input to
# gradcheck does not have overlapping memory
vec = vec.expand(2)
return (mat @ vec).sum()
gradcheck(fn, (vec))
gradgradcheck(fn, (vec))
@onlyCUDA
def test_gradcheck_input_output_different_device(self, device):
x = torch.ones((1,), dtype=torch.double, device="cuda", requires_grad=True)
gradcheck(lambda x: x.to("cpu"), (x,))
x = torch.ones((1,), dtype=torch.double, device="cpu", requires_grad=True)
gradcheck(lambda x: x.to("cuda"), (x,))
def test_strided_leaf_grad_layout(self, device):
# (1) If leaf is non-overlapping and dense, grad's layout should match its leaf.
for fmt_a in (torch.contiguous_format, torch.channels_last):
for fmt_b in (torch.contiguous_format, torch.channels_last):
a = torch.rand((2, 3, 4, 5), device=device).to(memory_format=fmt_a)
b = torch.rand((2, 3, 4, 5), device=device).to(memory_format=fmt_b)
a.requires_grad_()
b.requires_grad_()
# checks (1) for broadcasted gradients
a.sum().backward()
self.assertEqual(a.grad.stride(), a.stride())
b.sum().backward()
self.assertEqual(b.grad.stride(), b.stride())
# checks (1) for non-broadcasted gradients
a.grad = None
b.grad = None
(a * b).sum().backward()
self.assertEqual(a.grad.stride(), a.stride())
self.assertEqual(b.grad.stride(), b.stride())
# (2) If leaf isn't dense, checks that grads are rowmajor contiguous.
c = torch.empty_strided((2, 2), (4, 2), device=device).copy_(torch.rand((2, 2), device=device))
c.requires_grad_()
d = torch.rand((2, 2), device=device)
# checks (2) for broadcasted gradients
c.sum().backward()
self.assertEqual(c.grad.stride(), (2, 1))
# checks (2) for non-broadcasted gradients
c.grad = None
(c * d).sum().backward()
self.assertEqual(c.grad.stride(), (2, 1))
@skipIfMps
def test_copy_r_to_c(self, device):
out_c = torch.empty(3, 2, dtype=torch.cdouble, device=device)
inp_r = torch.randn(3, 2, dtype=torch.double, device=device,
requires_grad=True)
def do_test():
out_c.copy_(inp_r)
out_c.sum().backward()
self.assertEqual(inp_r.grad, torch.ones_like(inp_r))
self.assertNotWarn(do_test)
def test_to_r_to_c(self, device):
def do_test():
inp_r = torch.randn(3, 2, dtype=torch.double, device=device,
requires_grad=True)
out = inp_r.to(torch.complex128)
out.sum().backward()
self.assertEqual(inp_r.grad, torch.ones_like(inp_r))
self.assertNotWarn(do_test)
def test_non_differentiable_ops(self, device):
# Just make sure the op doesn't raise an error
# and resulting tensor has requires_grad=False.
x = torch.tensor([[1, 2], [3, 4.]], requires_grad=True, device=device)
out = torch.isin(x, torch.tensor([2, 3], device=device))
self.assertFalse(out.requires_grad)
x = torch.randn(3, 3, requires_grad=True)
out = torch.signbit(x)
self.assertFalse(out.requires_grad)
def test_warning_in_backward(self, device):
# Test warning during backward are always propagated as python warnings (gh-50209)
# NOTE: For device=cuda, warning gets propagated from a worker thread
a = torch.zeros((), device=device, requires_grad=True)
b = torch._C._nn._test_warn_in_autograd(a)
with self.assertWarnsRegex(UserWarning, "Warn from backward"):
b.backward()
class TestAutogradInferenceMode(TestCase):
def _is_inference_tensor(self, tensor):
try:
err_msg = "Inference tensors do not track version counter"
with self.assertRaisesRegex(RuntimeError, err_msg):
tensor._version
return True
except AssertionError as e:
return False
def test_inference_mode_context_manager(self):
self.assertFalse(torch.is_inference_mode_enabled())
with torch.inference_mode():
self.assertTrue(torch.is_inference_mode_enabled())
with torch.inference_mode(False):
self.assertFalse(torch.is_inference_mode_enabled())
self.assertTrue(torch.is_inference_mode_enabled())
self.assertFalse(torch.is_inference_mode_enabled())
def test_inference_mode_decorator(self):
for mode in (True, False):
@torch.inference_mode(mode)
def func(x):
self.assertEqual(torch.is_inference_mode_enabled(), mode)
return x * x
for requires_grad in (True, False):
c = torch.ones(1, 2, 3, requires_grad=requires_grad)
d = func(c)
self.assertTrue(not mode or torch.is_inference(d))
self.assertEqual(d.requires_grad, requires_grad and not mode)
def test_inference_mode_tensor_creation(self):
with torch.inference_mode():
# new tensors created through constructors are inference tensors
c = torch.ones(1, 2, 3)
self.assertFalse(c.requires_grad)
self.assertTrue(torch.is_inference(c))
# requires_grad doesn't change inference tensor behavior in InferenceMode
tmp = torch.ones(1, 2, 3, requires_grad=True)
self.assertTrue(tmp.requires_grad)
self.assertTrue(torch.is_inference(tmp))
tmp = torch.ones(1, 2, 3).requires_grad_(False)
self.assertFalse(tmp.requires_grad)
self.assertTrue(torch.is_inference(tmp))
def test_inference_mode_existing_autograd_session(self):
s = torch.ones(1, 2, 3, requires_grad=True)
a = s.clone()
# `a` gets saved outside of inference mode
out = a * a
with torch.inference_mode():
a.add_(2)
self.assertFalse(torch.is_inference(a))
# tensors created outside of inference mode aren't
# inference tensors, so they will still have their
# version counters tracked
err_msg = ("one of the variables needed for gradient computation has been "
"modified by an inplace operation")
with self.assertRaisesRegex(RuntimeError, err_msg):
out.backward(torch.ones_like(out))
def test_inference_mode_inf_tensor_in_inf_mode_functional_op(self):
def functional_op(x):
return x * x
with torch.inference_mode():
for requires_grad in (True, False):
c = torch.ones(1, 2, 3, requires_grad=requires_grad)
                # performing a non-view operation produces an inference tensor
# that does not require grad
func_out = functional_op(c)
self.assertTrue(torch.is_inference(func_out))
self.assertFalse(func_out.requires_grad)
def test_inference_mode_inf_tensor_in_inf_mode_inplace_op(self):
@torch.inference_mode()
def run_test(fn):
for requires_grad in (True, False):
c = torch.ones(1, 2, 3, requires_grad=requires_grad)
# after performing inplace operation, tensor is still
# an inference tensor
fn(c)
self.assertTrue(torch.is_inference(c))
self.assertEqual(c.requires_grad, requires_grad)
run_test(lambda x: x.add_(2))
run_test(lambda x: x.transpose_(0, 1))
def test_inference_mode_inf_tensor_in_inf_mode_view_op(self):
with torch.inference_mode():
for requires_grad in (True, False):
c = torch.ones(1, 2, 3, requires_grad=requires_grad)
                # performing a view operation produces an inference tensor
# that does not require grad
view_out = c.view(-1)
self.assertTrue(torch.is_inference(view_out))
self.assertFalse(view_out.requires_grad)
def test_inference_mode_inf_tensor_in_normal_mode_functional_op(self):
def functional_op(x):
return x * x
for requires_grad in (True, False):
with torch.inference_mode():
c = torch.ones(1, 2, 3, requires_grad=requires_grad)
func_out = functional_op(c)
self.assertFalse(torch.is_inference(func_out))
self.assertFalse(func_out.requires_grad)
self.assertTrue(func_out.is_leaf)
def test_inference_mode_inf_tensor_in_normal_mode_inplace_op(self):
def run_test(fn):
for requires_grad in (False, True):
with torch.inference_mode():
c = torch.ones(1, 2, 3, requires_grad=requires_grad)
if requires_grad:
# leaf variable that requires grad is being used in an inplace
# operation when requires_grad=True
pass
else:
err_msg = "Inplace update to inference tensor outside InferenceMode"
with self.assertRaisesRegex(RuntimeError, err_msg):
fn(c)
run_test(lambda x: x.add_(2))
run_test(lambda x: x.transpose_(0, 1))
def test_inference_mode_inf_tensor_in_normal_mode_view_op(self):
for requires_grad in (True, False):
with torch.inference_mode():
c = torch.ones(1, 2, 3, requires_grad=requires_grad)
out = c.view(-1)
self.assertTrue(torch.is_inference(out))
self.assertFalse(out.requires_grad)
self.assertFalse(out._is_view())
self.assertTrue(out.is_leaf)
def test_normal_tensor_inplace_output_in_inference_mode(self):
def run_test(fn):
for requires_grad in (True, False):
s = torch.ones(1, 2, 3, requires_grad=requires_grad)
a = s.clone()
with torch.inference_mode():
fn(a)
self.assertFalse(torch.is_inference(a))
self.assertEqual(a.requires_grad, requires_grad)
# inplace -> inplace
fn(a)
self.assertFalse(torch.is_inference(a))
self.assertEqual(a.requires_grad, requires_grad)
# inplace -> inplace -> view
view_out = a.view(-1)
self.assertFalse(torch.is_inference(view_out))
self.assertEqual(view_out.requires_grad, requires_grad)
run_test(lambda x: x.add_(2))
run_test(lambda x: x.transpose_(0, 1))
def test_normal_tensor_inplace_output_in_normal_mode(self):
def run_test(fn):
for requires_grad in (True, False):
s = torch.ones(1, 2, 3, requires_grad=requires_grad)
a = s.clone()
with torch.inference_mode():
fn(a)
self.assertFalse(torch.is_inference(a))
self.assertEqual(a.requires_grad, requires_grad)
fn(a)
self.assertFalse(torch.is_inference(a))
self.assertEqual(a.requires_grad, requires_grad)
# inplace -> inplace
fn(a)
self.assertFalse(torch.is_inference(a))
self.assertEqual(a.requires_grad, requires_grad)
# inplace -> inplace -> view
view_out = a.view(-1)
self.assertFalse(torch.is_inference(view_out))
self.assertEqual(view_out.requires_grad, requires_grad)
run_test(lambda x: x.add_(2))
run_test(lambda x: x.transpose_(0, 1))
def test_normal_tensor_view_output_in_inference_mode(self):
for requires_grad in (True, False):
s = torch.ones(1, 2, 3, requires_grad=requires_grad)
a = s.clone()
with torch.inference_mode():
out = a.view(-1)
self.assertFalse(torch.is_inference(out))
self.assertEqual(out.requires_grad, requires_grad)
self.assertTrue(out._is_view())
# view -> view
tmp = out.view(-1)
self.assertFalse(torch.is_inference(tmp))
self.assertEqual(tmp.requires_grad, requires_grad)
self.assertTrue(tmp._is_view())
self.assertTrue(tmp.is_leaf)
# view -> view -> inplace
self.assertTrue(torch.is_inference_mode_enabled())
tmp.add_(2)
self.assertFalse(torch.is_inference(tmp))
self.assertEqual(tmp.requires_grad, requires_grad)
# Accessing is_leaf in python tries to update grad_fn and raises:
# A view was created in inference mode and its base or
# another view of its base has been modified inplace in normal mode
# tmp.is_leaf
self.assertEqual(a._version, tmp._version)
def test_normal_tensor_view_output_in_normal_mode(self):
def functional_op(x):
return x * x
for requires_grad in (True, False):
s = torch.ones(1, 2, 3, requires_grad=requires_grad)
a = s.clone()
with torch.inference_mode():
out = a.view(-1)
self.assertFalse(torch.is_inference(out))
self.assertEqual(out.requires_grad, requires_grad)
self.assertTrue(out._is_view())
self.assertTrue(out.is_leaf)
tmp = functional_op(out)
self.assertFalse(torch.is_inference(tmp))
self.assertEqual(tmp.requires_grad, requires_grad)
if requires_grad:
err_msg = "A view was created in inference mode and is being modified inplace"
with self.assertRaisesRegex(RuntimeError, err_msg):
out.add_(2)
pass
else:
out.add_(2)
tmp = out.view(2, 3)
self.assertFalse(torch.is_inference(tmp))
self.assertEqual(tmp.requires_grad, requires_grad)
def test_mix_inference_and_normal_tensor_functional_op(self):
for requires_grad in (True, False):
s = torch.ones(1, 2, 3, requires_grad=requires_grad)
with torch.inference_mode():
c = torch.ones(1, 2, 3, requires_grad=requires_grad)
# add is safe since it doesn't save any variable for backward
out = c.add(s)
self.assertFalse(torch.is_inference(out))
self.assertEqual(out.requires_grad, requires_grad)
if requires_grad:
# leaf inference tensor with requires_grad=True can still have gradient
out.backward(torch.ones_like(out))
self.assertEqual(c.grad, torch.ones_like(c))
if requires_grad:
err_msg = "Inference tensors cannot be saved for backward"
with self.assertRaisesRegex(RuntimeError, err_msg):
c * s
# inference tensor in TensorList input
inputs = [s, c]
with self.assertRaisesRegex(RuntimeError, err_msg):
torch.stack(inputs)
def test_mix_inference_and_normal_tensor_inplace_op(self):
for requires_grad in (True, False):
s = torch.ones(1, 2, 3, requires_grad=requires_grad)
a = s.clone()
with torch.inference_mode():
c = torch.ones(1, 2, 3)
self.assertTrue(torch.is_inference(c))
if requires_grad:
err_msg = "Inference tensors cannot be saved for backward"
with self.assertRaisesRegex(RuntimeError, err_msg):
a.mul_(c)
# inference tensor in TensorList input
err_msg = ("out=... arguments don't support automatic differentiation, "
"but one of the arguments requires grad")
with self.assertRaisesRegex(RuntimeError, err_msg):
torch.mul(s, s, out=c)
else:
a.mul_(c)
err_msg = "Inplace update to inference tensor outside InferenceMode is not allowed"
with self.assertRaisesRegex(RuntimeError, err_msg):
torch.mul(s, s, out=c)
def test_mix_inference_and_normal_tensor_view_op(self):
for requires_grad in (True, False):
s = torch.ones(1, 2, 3, requires_grad=requires_grad)
with torch.inference_mode():
c = torch.ones(1, 2, 3)
# view_as is a composite op which calls view with only one
                # tensor argument. So there are no mixed inference and normal
                # tensor inputs for view ops
tmp1 = c.view_as(s)
self.assertTrue(torch.is_inference(tmp1))
self.assertFalse(tmp1.requires_grad)
                # this is fine since it's equivalent to s.view(c.sizes()), which
# isn't a mixed input scenario
tmp2 = s.view_as(c)
self.assertFalse(torch.is_inference(tmp2))
self.assertEqual(tmp2.requires_grad, requires_grad)
def test_inference_mode_handle_direct_view_on_rebase(self):
def run_test(fn):
for requires_grad in (True, False):
s = torch.ones(1, 2, 3, requires_grad=requires_grad)
a = s.clone()
with torch.inference_mode():
view_out = a.view_as(a)
if requires_grad:
err_msg = "A view was created in inference mode and is being modified inplace"
with self.assertRaisesRegex(RuntimeError, err_msg):
fn(view_out)
pass
else:
fn(view_out)
run_test(lambda x: x.add_(2))
run_test(lambda x: x.transpose_(0, 1))
def test_inference_mode_handle_indirect_view_on_rebase(self):
def run_test(fn):
for requires_grad in (True, False):
s = torch.ones(1, 2, 3, requires_grad=requires_grad)
a = s.clone()
with torch.inference_mode():
view_out = a.view(-1)
fn(a)
if requires_grad:
err_msg = "A view was created in inference mode and its base or another view "
with self.assertRaisesRegex(RuntimeError, err_msg):
view_out.grad_fn
pass
else:
view_out.grad_fn
run_test(lambda x: x.add_(2))
run_test(lambda x: x.transpose_(0, 1))
class TestMultithreadAutograd(TestCase):
def _run_py_multithread_fn(self, fn, args=(), num_threads=10, kwargs=None):
class PropagatingThread(threading.Thread):
'''Helper class to propagate exception from child
thread to main thread on join.
Reference: https://stackoverflow.com/a/31614591/5602957
'''
def run(self):
self.exception = None
try:
self.ret = super(PropagatingThread, self).run()
except Exception as e:
self.exception = e
def join(self, timeout=None):
super(PropagatingThread, self).join(timeout)
if self.exception:
raise self.exception from self.exception
return self.ret
threads = []
for _ in range(num_threads):
p = PropagatingThread(target=fn, args=args)
p.start()
threads.append(p)
for p in threads:
p.join()
def test_multithreaded_exception_propagation(self):
# Test whether exception in child thread
# are propagated to main thread.
def fn():
self.assertTrue(False)
with self.assertRaises(AssertionError):
self._run_py_multithread_fn(fn)
def test_simple_backward(self):
        # simple multithreaded backward that creates threads at the beginning of training;
        # each thread then trains separately with its own inputs, operations, etc.
def train_fn():
x = torch.ones(5, 5, requires_grad=True)
y = (x + 3) * (x + 4) * 0.5
y.sum().backward()
self.assertEqual(x.grad, x + 3.5)
self._run_py_multithread_fn(train_fn)
def test_simple_backward_same_input(self):
        # simple multithreaded backward with only shared inputs (this is common
# for things like Hogwild multithreaded training with multiple CPU threads)
def train_fn_backward(x):
y = (x + 3) * (x + 4) * 0.5
y.sum().backward()
x = torch.ones(5, 5, requires_grad=True)
self._run_py_multithread_fn(train_fn_backward, (x,))
# Since we are calling backward from multiple threads
# and all threads share the same input, when we do backward
# concurrently, different backwards will all accumulate to
# the same .grad for each input, and the gradients should
# be equal to num_threads * gradient
self.assertEqual(x.grad, 10 * (x + 3.5))
def train_fn_grad(x):
y = (x + 3) * (x + 4) * 0.5
grads = torch.autograd.grad(y.sum(), x)
self.assertEqual(len(grads), 1)
self.assertEqual(grads[0], x + 3.5)
        # since we use the functional grad() api, gradients will not
        # be accumulated into the same .grad, and each call should return the same value
self._run_py_multithread_fn(train_fn_grad, (x,))
def test_multithread_saved_tensors_hooks(self):
def pack(x):
warnings.warn("pack")
return x
def registers_hooks_for_each_thread():
with torch.autograd.graph.saved_tensors_hooks(pack, lambda x: x):
x = torch.ones(5, 5, requires_grad=True)
with warnings.catch_warnings(record=True) as w:
y = x * x
# should raise two warnings from x being saved twice
self.assertEqual(len(w), 2)
y.sum().backward()
def test_dataparallel_saved_tensors_hooks(self):
def pack(x):
warnings.warn("pack")
return x
_self = self
class Model(torch.nn.Module):
def forward(self, x):
with warnings.catch_warnings(record=True) as w:
y = x * x
if torch.cuda.device_count() >= 2:
# DataParallel is calling the forward in different threads
                        # without propagating TLS, so hooks should not be called here
_self.assertEqual(len(w), 0)
else:
# DataParallel only uses one thread
# so hooks should be called here
_self.assertGreater(len(w), 0)
x = torch.ones(5, 5, requires_grad=True)
model = torch.nn.DataParallel(Model())
with torch.autograd.graph.saved_tensors_hooks(pack, lambda x: x):
model(x)
with warnings.catch_warnings(record=True) as w:
y = x * x
# hooks should be called here
_self.assertGreater(len(w), 0)
def test_python_thread_in_middle(self):
# User might write a network that starts on one CPU thread, then runs its second half
# concurrently with other threads (either via python threading or fork/join calls),
# then calls backward()/grad() on BOTH threads, like a Y pattern from input at the
# bottom to output at the top. This way part of the GraphTask is being shared across
        # different threads and we need to ensure the user specifies retain_graph=True, otherwise
        # we error out with the correct error message
# Case 1: multiple backward with python threads, retain_graph=False
# should throw error in some threads with no retain_graph.
success_vs_raises = [0, 0]
def train_fn_no_retain_graph(x):
y = x + x ** 2
try:
y.sum().backward()
success_vs_raises[0] += 1
except RuntimeError as error:
success_vs_raises[1] += 1
self.assertRegex(str(error), "Specify retain_graph=True")
x_no_retain = torch.ones(5, 5, requires_grad=True)
y_no_retain = x_no_retain + x_no_retain ** 2
self._run_py_multithread_fn(train_fn_no_retain_graph, (y_no_retain,), num_threads=5)
        # at least one thread will succeed in this case; all other threads should raise
        # an error recommending that the user specify retain_graph=True
self.assertTrue(success_vs_raises[0] >= 1)
# multiple backward with python threads, no error with retain_graph=True
def train_fn_retain_graph(x):
y = x + x ** 2
y.sum().backward(retain_graph=True)
x_retain = torch.ones(5, 5, requires_grad=True)
y_retain = x_retain + x_retain ** 2
self._run_py_multithread_fn(train_fn_retain_graph, (y_retain,), num_threads=5)
        # result should equal num_threads * gradient
self.assertEqual(x_retain.grad, 5 * (4 * x_retain ** 3 + 6 * (x_retain ** 2) + 4 * x_retain + 1))
def test_fork_join_in_middle(self):
# multiple backward with jit threads (fork/join primitive)
# similar to test_python_thread_in_middle, we test with retain_graph=False/True
# Case 1: multiple grad() calls with jit threads, retain_graph=False
# should throw error in some threads with no retain_graph.
@torch.jit.script
def train_fn_jit_no_retain(middle, orig_x):
y = middle + middle ** 2
return torch.autograd.grad([y.sum()], [orig_x])
@torch.jit.script
def train_fn_fork_join_calls_no_retain(x):
y_no_retain = (x + 3) * (x + 4) * 0.5
fut = torch.jit._fork(train_fn_jit_no_retain, y_no_retain, x)
grad_hat = train_fn_jit_no_retain(y_no_retain, x)
grad = torch.jit._wait(fut)
return grad, grad_hat
try:
train_fn_fork_join_calls_no_retain(torch.randn(5, 5, requires_grad=True))
except RuntimeError as error:
self.assertRegex(str(error), "Specify retain_graph=True")
# Case 2: no error with retain_graph=True
@torch.jit.script
def train_fn_jit_retain(middle, orig_x):
y = middle + middle ** 2
return torch.autograd.grad([y.sum()], [orig_x], retain_graph=True)
@torch.jit.script
def train_fn_fork_join_calls_retain(x):
y_retain = (x + 3) * (x + 4) * 0.5
fut1 = torch.jit._fork(train_fn_jit_retain, y_retain, x)
fut2 = torch.jit._fork(train_fn_jit_retain, y_retain, x)
grad = train_fn_jit_retain(y_retain, x)
grad1 = torch.jit._wait(fut1)
grad2 = torch.jit._wait(fut2)
return grad, grad1, grad2
grad, grad1, grad2 = train_fn_fork_join_calls_retain(torch.randn(5, 5, requires_grad=True))
self.assertEqual(grad, grad1)
self.assertEqual(grad, grad2)
def test_preserve_backtrace(self):
class Foo(torch.autograd.Function):
@staticmethod
def forward(ctx, input):
return input
@staticmethod
def backward(ctx, *grad):
raise ValueError("something")
t = torch.rand(10, requires_grad=True)
try:
Foo.apply(t).sum().backward()
except Exception:
import traceback
tb = sys.exc_info()[2]
tb_str = "\n".join(traceback.format_tb(tb))
self.assertTrue('raise ValueError("something")' in tb_str)
# TODO(@anjali411): add an OpInfo based test for torch.cat
# Issue: https://github.com/pytorch/pytorch/issues/51627
def test_cat_r_to_c(self):
inp_c = torch.rand(3, 2, dtype=torch.cdouble, requires_grad=True)
inp_r = torch.randn(3, 2, dtype=torch.double, requires_grad=True)
def fn(x1, x2):
return torch.cat((x1, x2), dim=-1)
torch.autograd.gradcheck(fn, [inp_r, inp_c], check_forward_ad=True)
torch.autograd.gradcheck(fn, [inp_c, inp_r], check_forward_ad=True)
# Import test cases from below autograd/ here. These are found
# implicitly by the loader, so Flake8 thinks they are unused, hence
# the suppressions.
from autograd.test_complex import TestAutogradComplex # noqa: F401
from autograd.test_functional import TestAutogradFunctional # noqa: F401
# e.g., TestAutogradDeviceTypeCPU and TestAutogradDeviceTypeCUDA
instantiate_device_type_tests(
TestAutogradDeviceType,
globals(),
except_for=None
)
instantiate_parametrized_tests(TestAutograd)
if __name__ == '__main__':
run_tests()
|
rotation_calc_addon.py
|
# this file defines the addon that the user uses to control the rotation calculator in the viewport
import bpy
from bpy.props import IntProperty, FloatProperty, StringProperty, BoolProperty, EnumProperty
from math import pi
from time import sleep
from mathutils import Matrix
import threading
import os
import pip
# python script to rotate the vector by the input args
class Rot_vec_blend():
path_rotated = "./rotated.png" # path to where the renders will be saved
path_init = "./init.png"
# boolean that determines if the vector will be reset after being rendered
reset_after = True
def __init__(self, context): # default constructor
        # check that OpenCV is installed, as it is needed for the image processing;
        # the PyPI package is "opencv-python" but the importable module is cv2
        try:
            __import__('cv2')
except ImportError: # if it is not installed, install it
pip.main(['install', 'opencv-python'])
global cv2
import cv2
self.context = context
# define the vector object in the scene
self.vector = context.scene.objects["Vector"]
def rot_vec (self, rot_x, rot_y, rot_z):
# function must be run on a separate thread if the user needs to be able to see the rotation
print("Rotating the vector by: " + str([rot_x, rot_y, rot_z]) + "...")
# loop to interpolate the rotation so that it plays out in real time
steps = 50
time = 3 # time in seconds
for i in range(steps):
# rotate the vector by the inputs
mat_rot_x = Matrix.Rotation(rot_x/steps, 4, 'X')
mat_rot_y = Matrix.Rotation(rot_y/steps, 4, 'Y')
mat_rot_z = Matrix.Rotation(rot_z/steps, 4, 'Z')
self.vector.matrix_world = mat_rot_z @ self.vector.matrix_world
self.vector.matrix_world = mat_rot_y @ self.vector.matrix_world
self.vector.matrix_world = mat_rot_x @ self.vector.matrix_world
self.vector.rotation_euler = self.vector.matrix_world.to_euler()
sleep(time/steps)
def reset (self, init_x, init_y, init_z): # function to reset the vector
print("Resetting vector...")
self.vector.rotation_euler = (init_x, init_y, init_z)
def capture (self, rot_x, rot_y, rot_z): # function that automatically captures an image of the rotated vector
# render the initial orientation
bpy.context.scene.render.filepath = self.path_init
bpy.ops.render.render(write_still=True, use_viewport=True)
# rotate the vector
self.rot_vec(rot_x, rot_y, rot_z)
# render the rotated vector
bpy.context.scene.render.filepath = self.path_rotated
bpy.ops.render.render(write_still=True, use_viewport=True)
        # stack the rendered image with the render of the initial orientation on top
rotated = cv2.imread(self.path_rotated)
init = cv2.imread(self.path_init)
comparison = cv2.vconcat([init, rotated])
# remove temp renders
os.remove(self.path_rotated)
os.remove(self.path_init)
cv2.imwrite('./rotation.png', comparison)
def operation (self, rot_x, rot_y, rot_z, reset, init_x, init_y, init_z):
try:
self.vector.rotation_euler = (init_x, init_y, init_z)
self.capture(rot_x, rot_y, rot_z)
if reset:
self.reset(init_x, init_y, init_z)
        except Exception:
            # just end the function if an exception happens; it is likely caused by
            # Blender being closed before the thread running this function has finished
            pass
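# Minimal usage sketch for the class above, assuming Blender is running and the scene
# contains an object named "Vector" (as the constructor expects). Angles are in radians,
# and the work is pushed onto a background thread so the interpolated rotation in
# rot_vec stays visible in the viewport; the operator below does the same thing, driven
# by its dialog properties. This helper is illustrative only and is not called on import.
def _example_rotation(context):
    r = Rot_vec_blend(context)
    t = threading.Thread(
        target=r.operation,
        args=(pi / 2, 0.0, 0.0,  # rotate 90 degrees about the x axis
              True,              # reset the vector to its initial orientation afterwards
              0.0, 0.0, 0.0))    # initial orientation (x, y, z)
    t.start()
    return t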
bl_info = {
"name": "Rotation Calculator",
"category": "Object",
"blender": (2, 80, 0),
}
class RotationCalculator(bpy.types.Operator):
bl_idname = "object.rotation_calc"
bl_label = "Rotate Vector Object By [X, Y, Z]"
bl_options = {'REGISTER', 'UNDO'}
radians = BoolProperty(name="Radians: ", description="reads angle values in radians if set to True, and degrees if set to False", default=True)
init_orientation_x = FloatProperty(name="Initial Orientation X: ", description="the initial rotation of the vector around the x axis relative to the z axis", default=0, min=-180, max=180)
init_orientation_y = FloatProperty(name="Initial Orientation Y: ", description="the initial rotation of the vector around the y axis relative to the z axis", default=0, min=-180, max=180)
init_orientation_z = FloatProperty(name="Initial Orientation Z: ", description="the initial rotation of the vector around the z axis relative to the z axis", default=0, min=-180, max=180)
rot_as_str_list = StringProperty(name="Rotation List [X, Y, Z] as String: ", description="string storing the rotations of the vector as a list of form [X, Y, Z]", default="[0, 0, 0]")
    rot_x = FloatProperty(name="Rotation of Vector around X Axis: ", description="rotation around x axis", default=0, min=-180, max=180)
    rot_y = FloatProperty(name="Rotation of Vector around Y Axis: ", description="rotation around y axis", default=0, min=-180, max=180)
    rot_z = FloatProperty(name="Rotation of Vector around Z Axis: ", description="rotation around z axis", default=0, min=-180, max=180)
reset_after = BoolProperty(name="Reset Vector after Rotation: ", description="reset the orientation of the vector to the initial orientation after the rotation", default=True)
def invoke(self, context, event):
        # Pop up a dialog the user can interact with.
wm = context.window_manager
return wm.invoke_props_dialog(self)
def draw(self,context):
layout = self.layout
layout.prop(self,"radians")
layout.separator()
layout.prop(self,"init_orientation_x")
layout.separator()
layout.prop(self,"init_orientation_y")
layout.separator()
layout.prop(self,"init_orientation_z")
layout.separator()
layout.prop(self,"rot_as_str_list")
layout.separator()
layout.prop(self,"rot_x")
layout.separator()
layout.prop(self,"rot_y")
layout.separator()
layout.prop(self,"rot_z")
layout.separator()
layout.prop(self,"reset_after")
layout.separator()
def execute(self, context): # called when operator is called
# check to see if rot_as_str_list has been modified
if (self.rot_as_str_list != '[0, 0, 0]'):
rots = [float(i) for i in self.rot_as_str_list.replace('[', '').replace(']', '').split(",")]
else:
rots = [self.rot_x, self.rot_y, self.rot_z]
inits = [self.init_orientation_x, self.init_orientation_y, self.init_orientation_z]
        if (not self.radians):
            # inputs were given in degrees, so convert them to radians
            rots = [i * pi / 180 for i in rots]
            inits = [i * pi / 180 for i in inits]
r = Rot_vec_blend(context)
t = threading.Thread(target=r.operation, args=(rots[0], rots[1], rots[2], self.reset_after, inits[0], inits[1], inits[2],))
t.start()
return {'FINISHED'}
def menu_func(self, context):
self.layout.operator(RotationCalculator.bl_idname)
def register():
bpy.utils.register_class(RotationCalculator)
bpy.types.VIEW3D_MT_object.append(menu_func)
def unregister():
    bpy.utils.unregister_class(RotationCalculator)
    bpy.types.VIEW3D_MT_object.remove(menu_func)
if __name__ == "__main__":
register()
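    # Example (a sketch): once registered, the operator can also be invoked from
    # Blender's Python console via its bl_idname:
    #   bpy.ops.object.rotation_calc('INVOKE_DEFAULT')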
|
epi_serial.py
|
'''
epi_serial.py is part of the micro:bit epidemic project.
It handles communication between the management GUI, and
the master micro:bit via the serial port.
The MIT License (MIT)
Copyright (c) 2019 Wes Hinsley
MRC Centre for Global Infectious Disease Analysis
Department of Infectious Disease Epidemiology
Imperial College London
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
Created on 6 Jun 2018
@author: Wes Hinsley
'''
from threading import Thread
import time
import pip
try:
from pip import main as pipmain
except:
from pip._internal import main as pipmain
try:
import serial.tools.list_ports
except ImportError:
pipmain(['install', 'pySerial'])
import serial.tools.list_ports
import re
import csv
import os.path
from serial.serialutil import SerialException
class EpiSerial:
MSG_IN_VERSION = 'VER:'
MSG_IN_REGISTER = 'REG:'
MSG_IN_INF = 'INF:'
MSG_IN_RECOV = 'REC:'
MSG_IN_DEBUG = 'DEB:'
MSG_IDENTIFY_YOURSELF = '1#'
MSG_REG = '2'
MSG_OUT_PARAMS = '3'
MSG_SEED_EPI = '4'
MSG_RESET_EPI = '5#'
MSG_POWER_OFF = '6#'
MSG_SCREEN_ON = '7#'
MSG_SCREEN_OFF = '8#'
# Output Path is relative to bin/python-gui
OUTPUT_PATH = '../../data/'
MICROBIT_PID = 516
MICROBIT_VID = 3368
input_buffer = ""
latest_minion_buildno = '14'
current_epi_t0 = 0
RECENT_TIME_S = 900
def get_friendly_id(self, sid):
result = '-1'
for i in range(len(self.serials)):
if (self.serials[i]['serial'] == sid):
result = self.serials[i]['id']
break
# Serial not found - add to file if there are blanks...
if (result=='-1'):
for i in range(len(self.serials)):
if (self.serials[i]['serial'] == ''):
self.serials[i]['serial'] = sid
result = str(i)
with open('serials.csv', 'w') as f:
f.write('serial,id\n')
for i in range(len(self.serials)):
s = "{},{}\n".format(self.serials[i]['serial'],self.serials[i]['id'])
f.write(s)
return result
# Loop to read from the port while there is data to be
# read. This executes continually in its own thread, and sleeps
# for 1/10 s when there is no data to process.
    # Python appeared to intermittently insert newlines mid-message, in ways
    # not apparent when using PuTTY. Therefore, all newlines are ignored, and
    # all valid incoming messages must end with '#'.
def read_from_port(self):
while True:
did_work = False
if (self.serial_port!=0):
if (self.serial_port.port!=""):
try:
reading = self.serial_port.readline().decode()
except:
reading = ''
if (len(reading)!=0):
did_work = True
self.input_buffer = self.input_buffer + reading
self.input_buffer = re.sub('[\n]', '', self.input_buffer)
if (self.input_buffer.endswith('#')):
self.input_buffer = re.sub('[#]','', self.input_buffer)
self.handle_serial_data(self.input_buffer)
self.input_buffer = ''
if (did_work == False):
time.sleep(0.1)
    # Find all serial ports with a device ID matching a micro:bit.
    # If a micro:bit serial port was previously selected, remember it and
    # reselect it (if still available) after rescan.
# For reasons I don't yet understand, double-clicking the combo-box causes
# the app to lock up totally. First and last line therefore force ignoring
# the double-click.
def refresh_microbit_comports(self):
self.gui_link.cb_masters_double_click = self.gui_link.cb_masters_double_click + 1
if (self.gui_link.cb_masters_double_click == 1):
old_val = self.gui_link.cb_masters.get()
microbits = []
ports = serial.tools.list_ports.comports()
for p in ports:
if (p.pid == self.MICROBIT_PID) and (p.vid == self.MICROBIT_VID):
microbits.append(p.device)
microbits.sort()
self.gui_link.cb_masters['values'] = microbits
try:
reselect = microbits.index(old_val)
except ValueError:
reselect = 0
if (len(microbits)>0):
self.gui_link.cb_masters.current(reselect)
self.get_master_info(self)
self.gui_link.cb_masters_double_click = self.gui_link.cb_masters_double_click - 1
# Send a message to the serial port asking the micro:bit to identify itself.
def get_master_info(self, event):
port = self.gui_link.cb_masters.get()
if (self.serial_port != 0):
self.serial_port.close()
self.serial_port = 0
try:
self.serial_port = serial.Serial(port, 115200, timeout=1, xonxoff=True)
self.serial_port.write(self.MSG_IDENTIFY_YOURSELF+"\n")
except SerialException:
self.gui_link.sv_software.set(self.gui_link.lang.serial_error)
# Process incoming serial data
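    # Expected message shapes (a sketch, inferred from the parsing below; the
    # trailing '#' has already been stripped before this method is called):
    #   VER:<software-version>:<serialno>:<microbit-version>
    #   REG:<serialno>:<buildno>
    #   INF:<infector-id>:<victim-id>:<time-ms>:<ncontacts>
    #   REC:<id>:<time-ms>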
def handle_serial_data(self, data):
if (len(data)>4):
if (data[0:4] == self.MSG_IN_DEBUG):
                print(data)
if (data[0:4] == self.MSG_IN_VERSION):
self.gui_link.sv_software.set(data.split(":")[1])
self.gui_link.sv_serialno.set(data.split(":")[2])
self.gui_link.sv_mbitver.set(data.split(":")[3])
elif (data[0:4] == self.MSG_IN_REGISTER):
serialno = data.split(":")[1]
buildno = data.split(":")[2]
friendlyid = self.get_friendly_id(serialno)
if (buildno != self.latest_minion_buildno):
                    print(self.gui_link.lang.mb_ood.format(serialno,
                        friendlyid, buildno, self.latest_minion_buildno))
if (friendlyid == '-1'):
                    print(self.gui_link.lang.serial_lookup_err.format(serialno))
else:
msg = "{}{},{},#".format(self.MSG_REG, serialno, friendlyid)
self.serial_port.write(msg+"\n")
self.gui_link.set_minion_status(friendlyid, self.gui_link.STATUS_SUSCEPTIBLE)
self.write_xml_params()
elif (data[0:4] == self.MSG_IN_INF):
#Incoming is INF:ID:VICTIM:TIME:NCONTACTS
bits = data.split(":")
m_id = int(bits[2])
# Check whether inf was already reported. If it was, then no need to
# do anything here. If not:
if (self.gui_link.inf_reported[m_id] == 0):
fn = self.OUTPUT_PATH + self.gui_link.sv_serialno.get() + "_" + self.gui_link.sv_epidno.get() + ".csv"
if (not os.path.isfile(fn)):
with open(fn, "w") as f:
f.write("Event,TimeH,Mins,Infectedby,Seeding,Recency,Category,ID,NoContacts\n")
with open(fn, "a") as f:
inf_time_epoch = self.current_epi_t0 + (float(bits[3]) / 1000.0)
inf_time = time.gmtime(inf_time_epoch)
mins = inf_time.tm_min + (inf_time.tm_sec/60.0)
seeding = 'N'
if (bits[1] == '32767'):
seeding = 'S'
bits[1] = 'NA'
recency = 'Old'
if (time.time() - inf_time_epoch < self.RECENT_TIME_S):
recency = 'Recent'
f.write("I,{},{},{},{},{},{},{},{}\n".format(
inf_time.tm_hour, mins, bits[1], seeding, recency, 0, bits[2], bits[4]))
# Check whether recovery was already reported. If it was, then minion
# icon should remain blue; otherwise, it's a new infection, so red.
if (self.gui_link.rec_reported[m_id]==0):
self.gui_link.set_minion_status(m_id, self.gui_link.STATUS_INFECTED)
self.gui_link.inf_reported[m_id] = 1
elif (data[0:4] == self.MSG_IN_RECOV):
bits = data.split(":")
m_id = int(bits[1])
# Check whether recovery was already reported. If it was, then no need to
# do anything here. Otherwise...
if (self.gui_link.rec_reported[m_id] == 0):
self.gui_link.set_minion_status(m_id, self.gui_link.STATUS_RECOVERED)
fn = self.OUTPUT_PATH + self.gui_link.sv_serialno.get() + "_" + self.gui_link.sv_epidno.get() + ".csv"
if (not os.path.isfile(fn)):
with open(fn, "w") as f:
f.write("Event,TimeH,Mins,Infectedby,Seeding,Recency,Category,ID,NoContacts\n")
rec_time_epoch = self.current_epi_t0 + (float(bits[2]) / 1000.0)
rec_time = time.gmtime(rec_time_epoch)
mins = rec_time.tm_min + (rec_time.tm_sec/60.0)
with open(fn, "a") as f:
f.write("R,{},{},NA,NA,NA,NA,{},NA\n".format(rec_time.tm_hour, mins, bits[1]))
self.gui_link.rec_reported[m_id] = 1
else:
self.gui_link.sv_software.set(self.gui_link.lang.unrecog_serial)
self.gui_link.sv_serialno.set("")
self.gui_link.sv_mbitver.set("")
else:
self.gui_link.sv_software.set(self.gui_link.lang.unrecog_serial)
self.gui_link.sv_serialno.set("")
self.gui_link.sv_mbitver.set("")
# Allow serial class to talk to gui
def set_gui_link(self, gui_link):
self.gui_link = gui_link
def write_xml_params(self):
fn = self.OUTPUT_PATH + self.gui_link.sv_serialno.get() + "_" + self.gui_link.sv_epidno.get() + ".xml"
players = ""
for x in range(100):
            col = self.gui_link.minions[x % 10][x // 10]['bg']
if ((col == self.gui_link.STATUS_SUSCEPTIBLE) or (col == self.gui_link.STATUS_INFECTED) or (col == self.gui_link.STATUS_RECOVERED)):
if (players != ""):
players = players + ","
players = players + str(x)
par = (self.gui_link.sv_epidno.get() + "," +
self.gui_link.sv_r0.get() + "," +
str(self.gui_link.cb_rtype.current()) + "," +
self.gui_link.cb_poimin.get() + "," +
self.gui_link.cb_poimax.get() + "," +
self.gui_link.cb_rpower.get() + "," +
self.gui_link.cb_exposure.get() + "," +
str(self.gui_link.cb_btrans.current()) + "," +
str(self.gui_link.cb_brec.current()) + "," +
str(self.gui_link.cb_icons.current()))
with open(fn, "w") as f:
f.write("<?xml version=\"1.0\" encoding=\"ISO-8859-1\" ?>\n")
f.write("<meta>\n")
f.write(" <params>" + par + "</params>\n")
f.write(" <time>" + str(self.current_epi_t0) + "</time>\n")
f.write(" <players>" + players + "</players>\n")
f.write(" <game>" + str(self.gui_link.cb_paramset.get()) + "</game>\n")
f.write("</meta>")
# Also check that a stub .csv exists; Slideshow wants to load a CSV file.
fn = self.OUTPUT_PATH + self.gui_link.sv_serialno.get() + "_" + self.gui_link.sv_epidno.get() + ".csv"
if (not os.path.isfile(fn)):
with open(fn, "w") as f:
f.write("Event,TimeH,Mins,Infectedby,Seeding,Recency,Category,ID,NoContacts\n")
# Send the parameters to the micro:bit master.
def send_params(self):
msg = (self.MSG_OUT_PARAMS+
self.gui_link.sv_epidno.get() + "," +
self.gui_link.sv_r0.get() + "," +
str(self.gui_link.cb_rtype.current()) + "," +
self.gui_link.cb_poimin.get() + "," +
self.gui_link.cb_poimax.get() + "," +
self.gui_link.cb_rpower.get() + "," +
self.gui_link.cb_exposure.get() + "," +
str(self.gui_link.cb_btrans.current()) + "," +
str(self.gui_link.cb_brec.current()) + "," +
str(self.gui_link.cb_icons.current()) + ",#")
self.serial_port.write(msg+"\n")
self.current_epi_t0 = time.time()
# Also write a meta file for the viewer.
# It won't have all the hosts yet, but it's better
# if slideshow has something to show before seeding,
# especially on Saviour game.
self.write_xml_params()
# Send seeding information to master, who forwards it by radio to minion.
def seed_epidemic(self):
forcer = 0
if (self.gui_link.iv_forcer.get()==1):
forcer = 1 + self.gui_link.cb_forcer.current()
msg = self.MSG_SEED_EPI + self.gui_link.sv_seedid.get() + "," + str(forcer) + ",#"
self.serial_port.write(msg+"\n")
self.write_xml_params()
def reset_epidemic(self):
self.serial_port.write(self.MSG_RESET_EPI+"\n")
def screens_on(self):
self.serial_port.write(self.MSG_SCREEN_ON+"\n")
def screens_off(self):
self.serial_port.write(self.MSG_SCREEN_OFF+"\n")
def poweroff_minions(self):
self.serial_port.write(self.MSG_POWER_OFF+"\n")
# Initialise serial port listener thread
def __init__(self):
self.gui = 0
self.serial_port = 0
self.serial_port_thread = Thread(target = self.read_from_port)
self.serial_port_thread.setDaemon(True)
self.serial_port_thread.start()
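        # serials.csv maps micro:bit serial numbers to friendly ids; a sketch of
        # its expected layout, matching what get_friendly_id writes back out:
        #   serial,id
        #   <microbit-serial>,0
        #   <microbit-serial>,1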
with open("serials.csv") as f:
reader = csv.DictReader(f)
self.serials = [r for r in reader]
|
relay_integration.py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=unused-variable,invalid-name
"""
Integrate auto_scheduler into relay. It implements the following items:
1. Extract search tasks from a relay program
2. Provide auto-scheduling for all TOPI compute functions
"""
import logging
import threading
from copy import deepcopy
import tvm
from tvm import autotvm, transform
from tvm.ir.transform import PassContext
from tvm.runtime import convert_to_object
from tvm.te.tensor import ComputeOp, PlaceholderOp, Tensor
from tvm.tir import Reduce
from tvm.tir import expr as _expr
from tvm.target import Target
from . import _ffi_api
from .compute_dag import ComputeDAG, LayoutRewriteOption
from .dispatcher import DispatchContext
from .search_task import SearchTask
from .utils import get_const_tuple
from .workload_registry import register_workload_tensors
logger = logging.getLogger("auto_scheduler")
def call_all_topi_funcs(mod, params, target, opt_level=3):
"""Call all TOPI compute to extract auto_scheduler tasks in a Relay program"""
# pylint: disable=import-outside-toplevel
from tvm import relay
from tvm.relay.backend import graph_executor_codegen
# Turn off AutoTVM config not found warnings
old_autotvm_silent = autotvm.GLOBAL_SCOPE.silent
autotvm.GLOBAL_SCOPE.silent = True
with transform.PassContext(
opt_level=opt_level,
config={
"relay.backend.use_auto_scheduler": True,
"relay.backend.disable_compile_engine_cache": True,
},
disabled_pass={"AutoSchedulerLayoutRewrite"},
):
try:
# TODO(jwfromm) Remove this once AlterOpLayout bug that mutates
# source module is fixed. Until then, create a clone.
mod_clone = deepcopy(mod)
opt_mod, _ = relay.optimize(mod_clone, target, params)
grc = graph_executor_codegen.GraphExecutorCodegen(None, target)
grc.codegen(opt_mod["main"])
except tvm.TVMError:
            print(
                "Got errors with GraphExecutorCodegen for task extraction. "
                "Falling back to VMCompiler."
            )
mod_clone = deepcopy(mod)
compiler = relay.vm.VMCompiler()
if params:
compiler.set_params(params)
mod_clone = (
tvm.IRModule.from_expr(mod_clone)
if isinstance(mod_clone, relay.Function)
else mod_clone
)
compiler.lower(mod_clone, target)
autotvm.GLOBAL_SCOPE.silent = old_autotvm_silent
def extract_tasks(
mod,
params,
target,
target_host=None,
hardware_params=None,
include_simple_tasks=False,
opt_level=3,
):
"""Extract tuning tasks from a relay program.
Parameters
----------
mod: tvm.IRModule or relay.function.Function
The module or function to tune
params: dict of str to numpy array
The associated parameters of the program
target: Union[tvm.target.Target, str]
The compilation target
target_host: Optional[Union[tvm.target.Target, str]]
The host compilation target
hardware_params : Optional[HardwareParams]
Hardware parameters used for the search tasks
include_simple_tasks: bool
Whether to extract simple tasks that do not include complicated ops.
opt_level : Optional[int]
The optimization level of the task extractions.
Returns
-------
tasks: List[SearchTask]
The tasks in this network
weights: List[int]
        The weight (i.e. the number of appearances) of each extracted task
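
    Examples
    --------
    A minimal sketch (assuming ``mod`` and ``params`` come from a Relay
    frontend importer and that "llvm" is a usable target in this setup)::

        tasks, task_weights = extract_tasks(mod["main"], params, "llvm")
        for task, weight in zip(tasks, task_weights):
            print(task.workload_key, weight)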
"""
# pylint: disable=import-outside-toplevel
target, target_host = Target.check_and_update_host_consist(target, target_host)
# Run the compiler to collect all TOPI calls during compilation.
env = TracingEnvironment(
TracingMode.EXTRACT_TASK if include_simple_tasks else TracingMode.EXTRACT_COMPLEX_TASK_ONLY
)
dispatch_ctx = DispatchContext.current
old_verbose = dispatch_ctx.verbose
dispatch_ctx.verbose = 0
with env:
# Wrap build call in a new thread to avoid the conflict
# between python's multiprocessing and tvm's thread pool
build_thread = threading.Thread(
target=call_all_topi_funcs, args=(mod, params, target, opt_level)
)
build_thread.start()
build_thread.join()
dispatch_ctx.verbose = old_verbose
# create search tasks
tasks = []
weights = []
for wkl_key, (weight, func_names) in env.wkl_key_to_weight.items():
tasks.append(
SearchTask(
workload_key=wkl_key,
target=target,
hardware_params=hardware_params,
# When auto scheduler is used in end to end network, try to apply layout rewrite
# to improve the overall performance
layout_rewrite_option=LayoutRewriteOption.get_target_default(target, True),
task_inputs=(
env.wkl_key_to_input_names[wkl_key]
if wkl_key in env.wkl_key_to_input_names
else None
),
task_inputs_save_to_file=True,
desc=",".join(func_names),
)
)
weights.append(weight)
return tasks, weights
class TracingMode:
"""Two modes for tracing"""
EXTRACT_TASK = 0 # trace all topi calls to extract tasks
EXTRACT_COMPLEX_TASK_ONLY = 1 # same as EXTRACT_TASK but ignore the task without complex ops
PREPARE_LAYOUT_REWRITE = 2 # trace topi calls to prepare layout rewrite
class TracingEnvironment:
"""Global environment for tracing all topi function calls"""
current = None
def __init__(self, tracing_mode):
self.tracing_mode = tracing_mode
self.relay_disable_build_cache = "false"
self.func_name_to_wkl_key = {}
self.wkl_key_to_weight = {}
self.wkl_key_to_input_names = {}
def __enter__(self):
TracingEnvironment.current = self
return self
def __exit__(self, exc_type, exc_val, exc_tb):
TracingEnvironment.current = None
def add_workload_key(self, func_name, workload_key):
"""Add the workload key of a search task.
Parameters
----------
func_name: str
The function name of the task.
workload_key: str
The workload key of a task.
"""
self.func_name_to_wkl_key[func_name] = workload_key
if workload_key not in self.wkl_key_to_weight:
self.wkl_key_to_weight[workload_key] = (0, set())
weight, func_names = self.wkl_key_to_weight[workload_key]
func_names.add(func_name)
self.wkl_key_to_weight[workload_key] = (weight + 1, func_names)
def add_workload_input_names(self, workload_key, input_names):
"""Add special task inputs to this workload.
Parameters
----------
workload_key : str
The workload key of a task.
input_names : List[str]
A list of input names.
"""
self.wkl_key_to_input_names[workload_key] = input_names
@tvm._ffi.register_func("auto_scheduler.enter_layout_rewrite")
def enter_layout_rewrite():
"""Enter layout rewrite tracing environment"""
env = TracingEnvironment(TracingMode.PREPARE_LAYOUT_REWRITE)
env.__enter__()
@tvm._ffi.register_func("auto_scheduler.exit_layout_rewrite")
def exit_layout_rewrite():
"""Exit layout rewrite tracing environment"""
env = TracingEnvironment.current
env.__exit__(None, None, None)
def traverse_to_get_io_tensors(outs):
"""Traverse from a list of output tensors to get input/output tensors and
other useful information.
Parameters
----------
outs: List[Tensor]
The output tensors
Returns
-------
io_tensors: List[Tensor]
The input and output tensors with static shape
has_layout_free: bool
Whether the compute DAG has layout_free placeholders
has_complex_op: bool
Whether the topi compute function includes at least one complex (reduce) op
"""
layout_free_ops = []
inputs = []
has_complex_op = False
visited = set()
def traverse(t):
nonlocal has_complex_op
# We cannot directly add tensors to the set, because the comparison of
# two tensors with ndim=0 is ambiguous.
assert t.handle is not None
if t.handle.value in visited:
return
if isinstance(t.op, PlaceholderOp):
inputs.append(t)
elif isinstance(t.op, ComputeOp):
has_complex_op = has_complex_op or any([isinstance(e, Reduce) for e in t.op.body])
if "layout_free_placeholders" in t.op.attrs:
layout_free_ops.append(t.op)
for x in t.op.input_tensors:
traverse(x)
visited.add(t.handle.value)
for t in outs:
traverse(t)
io_tensors = inputs + list(outs)
for tensor in io_tensors:
# Reject the compute if any of its I/O tensors has dynamic shape.
if any([not isinstance(v, int) for v in get_const_tuple(tensor.shape)]):
return ([], False, False)
return (io_tensors, len(layout_free_ops) > 0, has_complex_op)
@tvm._ffi.register_func("auto_scheduler.relay_integration.auto_schedule_topi_compute")
def auto_schedule_topi(func_name, outs):
"""Use auto-scheduler to schedule any topi compute function.
Note: This is used internally for relay integration. Do
not use this as a general user-facing API.
Parameters
----------
func_name: str
The name of the function being scheduled.
outs: List[Tensor]
The output tensors of topi compute functions
Returns
-------
sch: Optional[te.Schedule]
A tuned schedule or none (if not tuned) in the final build mode;
None in the tracing mode so that the fallback topi schedule will be used.
"""
# pylint: disable=import-outside-toplevel
from tvm.auto_scheduler.measure import (
prepare_input_map,
) # lazily import to avoid recursive dependency
io_tensors, has_layout_free, has_complex_op = traverse_to_get_io_tensors(outs)
if not io_tensors: # The compute includes dynamic shapes which are not supported yet.
return None
try:
dag = ComputeDAG(io_tensors)
except tvm.error.TVMError as err:
logger.info("Failed to create a ComputeDAG for auto_scheduler: %s", str(err))
return None
key = register_workload_tensors(dag.workload_key(), io_tensors)
target = tvm.target.Target.current()
dispatch_ctx = DispatchContext.current
state = dispatch_ctx.query(target, key, has_complex_op, dag, func_name)
schedule = None
env = TracingEnvironment.current
if env is None:
# in the final build mode
if state is None:
return None
schedule, _ = dag.apply_steps_from_state(state)
return schedule
if env.tracing_mode in [TracingMode.EXTRACT_TASK, TracingMode.EXTRACT_COMPLEX_TASK_ONLY]:
# in the task extraction mode
if has_complex_op or env.tracing_mode == TracingMode.EXTRACT_TASK:
env.add_workload_key(func_name, key)
input_map = prepare_input_map(io_tensors)
if input_map:
env.add_workload_input_names(key, list(input_map.values()))
elif env.tracing_mode == TracingMode.PREPARE_LAYOUT_REWRITE:
# in prepare_layout_rewrite mode
if (
LayoutRewriteOption.get_target_default(target, True) != LayoutRewriteOption.NO_REWRITE
and has_layout_free
):
if state is None:
return None
# rewrite the layout and update the context for the new dag
new_dag = dag.rewrite_layout_from_state(state)
new_key = new_dag.workload_key()
if new_key != key:
dispatch_ctx.update(target, new_key, state)
else:
raise ValueError("Invalid tracing mode: " + env.tracing_mode)
return schedule
@tvm._ffi.register_func("auto_scheduler.relay_integration.te_compiler_update_weights")
def te_compiler_update_weights(function_weights):
"""A callback for updating the weights of extracted tasks. When using the TE compiler
that avoids compiling the same function multiple times by caching, all extracted tasks
have weight 1, so the TE compiler invokes this callback at the end. In this case,
we override existing weights with the use_count in TE compiler cache.
Parameters
----------
function_weights: Dict[str, int]
Mapping from function names to their weights.
"""
env = TracingEnvironment.current
if env is not None:
# Override this map with the weights in the TE compiler.
env.wkl_key_to_weight = {}
for func_name, weight in function_weights.items():
# If the function name is not in the map, then it means we are not interested in
# this function during task extraction (e.g., a function without reduction).
if func_name not in env.func_name_to_wkl_key:
continue
workload_key = env.func_name_to_wkl_key[func_name]
if workload_key not in env.wkl_key_to_weight:
env.wkl_key_to_weight[workload_key] = (0, set())
            # Note that a function that appears multiple times in a model will be
            # renamed to keep function names unique, so we use the workload key
            # generated from the function's TE compute to determine its weight.
old_weight, func_names = env.wkl_key_to_weight[workload_key]
func_names.add(func_name)
env.wkl_key_to_weight[workload_key] = (old_weight + weight, func_names)
def tensor_no_check_call(self, *indices):
"""An indexing function without any check.
This is the same as `tvm.te.Tensor::__call__` except that the safety
check is removed.
"""
indices = convert_to_object(indices)
args = []
for x in indices:
if isinstance(x, _expr.PrimExpr):
args.append(x)
elif isinstance(x, _expr.IterVar):
args.append(x.var)
else:
raise ValueError("The indices must be expression")
return _expr.ProducerLoad(self, args)
def remove_index_check(tensor):
"""Remove the safety check in the indexing function for a tensor.
This is done by monkey patching its indexing function.
After removing the check, we are allowed to create a
temporary wrong IR and fix it later in other places.
Parameters
----------
tensor: Tensor
The tensor to remove index check.
"""
# Monkey patch the indexing function
tensor.__call__ = tensor_no_check_call.__get__(tensor, Tensor)
def rewrite_compute_body(compute_tensor, new_layout):
"""Rewrite the body of a ComputeOp according to a new layout of a placeholder"""
op = compute_tensor.op
# Get layout free placeholders
layout_free_placeholders = op.attrs["layout_free_placeholders"]
assert len(layout_free_placeholders) == 1, "Only support one layout free placeholder"
placeholder_op = layout_free_placeholders[0].op
# Rewrite the index expression in body
body = []
for b in op.body:
body.append(_ffi_api.RewriteIndexForNewLayout(placeholder_op, new_layout, b))
op_node = tvm.te._ffi_api.ComputeOp(op.name, op.tag, op.attrs, op.axis, body)
num = op_node.num_outputs
outputs = tuple(op_node.output(i) for i in range(num))
return outputs[0] if num == 1 else outputs
def is_auto_scheduler_enabled():
"""Return whether the auto-scheduler is enabled.
    Returns
    -------
    enabled: bool
        Whether the auto-scheduler is enabled
"""
return PassContext.current().config.get("relay.backend.use_auto_scheduler", False)
|
__init__.py
|
#!/usr/bin/python3
import argparse
import logging
import multiprocessing.dummy as mp
import os
import shutil
import sys
from ctypes import c_bool
from multiprocessing.context import TimeoutError
from time import sleep
import tbf.testcase_converter as testcase_converter
import tbf.tools.afl as afl
import tbf.tools.cpatiger as cpatiger
import tbf.tools.crest as crest
import tbf.tools.fshell as fshell
import tbf.tools.klee as klee
import tbf.tools.random_tester as random_tester
import tbf.tools.dummy as dummy
import tbf.utils as utils
import tbf.testcase_processing as testcase_processing
from tbf.testcase_processing import ProcessingConfig, ExecutionRunner
__VERSION__ = "0.2-dev"
XML_DIR = utils.get_output_path('test-suite')
class StopEvent(object):
def __init__(self, parent=None):
m = mp.Manager()
self.val = m.Value(c_bool, False)
self._parent = parent
def is_set(self):
return self.val.value or (self._parent and self._parent.val.value)
def set(self):
self.val.value = True
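# Example (a sketch): a child StopEvent also reports set once its parent is set.
#   parent = StopEvent()
#   child = StopEvent(parent)
#   parent.set()
#   assert child.is_set()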
def _create_cli_arg_parser():
parser = argparse.ArgumentParser(
description='An Automatic Test-Case Generation and Execution Framework',
add_help=False)
args = parser.add_mutually_exclusive_group()
run_args = args.add_argument_group()
input_generator_args = run_args.add_argument_group(
title="Input generation args",
description="arguments for input generation")
input_generator_args.add_argument(
"--input-generator",
'-i',
dest="input_generator",
action="store",
required=True,
choices=['afl', 'fshell', 'klee', 'crest', 'cpatiger', 'random', "dummy"],
help="input generator to use")
input_generator_args.add_argument(
"--use-existing-test-dir",
dest="existing_tests_dir",
action="store",
required=False,
type=str,
default=None,
help=
"don't create new test cases, but use test cases from the provided directory"
)
input_generator_args.add_argument(
"--ig-timelimit",
dest="ig_timelimit",
help="time limit (in s) for input generation.\n" +
"After this limit, input generation" +
" stops and analysis is performed\nwith the inputs generated up" +
" to this point.")
input_generator_args.add_argument(
"--svcomp-nondets",
dest="svcomp_nondets_only",
action="store_true",
default=False,
help=
"only expect methods to be non-deterministic according to sv-comp guidelines"
)
validation_args = run_args.add_argument_group('Validation')
validation_args.add_argument(
'--execution',
dest="execution_validation",
action="store_true",
default=False,
help="use test execution to find successful test vector")
validation_args.add_argument(
"--klee-replay",
dest="klee_replay_validation",
action="store_true",
default=False,
help=
"use klee-replay to execute test cases - only works when using klee.")
validation_args.add_argument(
"--naive-verification",
dest="naive_verification",
action="store_true",
default=False,
help=
"If no error was found and all test cases were handled, assume that the program under test is safe"
)
machine_model_args = run_args.add_mutually_exclusive_group()
machine_model_args.add_argument(
'-32',
dest="machine_model",
action="store_const",
const="32bit",
help="Use 32 bit machine model")
machine_model_args.add_argument(
'-64',
dest="machine_model",
action="store_const",
const="64bit",
help="Use 64 bit machine model")
run_args.add_argument(
'--timelimit',
dest="timelimit",
action="store",
default=None,
help="timelimit to use")
run_args.add_argument(
'--verbose',
'-v',
dest="log_verbose",
action='store_true',
default=False,
help="print verbose information")
run_args.add_argument(
'--no-parallel',
dest='run_parallel',
action='store_false',
default=True,
help="do not run input generation and tests in parallel")
run_args.add_argument(
'--keep-files',
dest='keep_files',
action='store_true',
default=False,
help=
"keep all created intermediate files (prepared C files, created inputs, etc.)"
)
run_args.add_argument(
'--no-coverage',
dest='report_coverage',
action='store_false',
default=True,
help="do not report coverage of the executed test cases")
run_args.add_argument(
'--stats',
dest='print_stats',
action='store_true',
default=False,
help="print statistics on stdout")
run_args.add_argument(
'--error-method',
dest='error_method',
action='store',
default='__VERIFIER_error',
help='name of error method to check for. If not specified, __VERIFIER_error is used'
)
run_args.add_argument(
'--no-error-method',
dest='use_error_method',
action='store_false',
default=True,
help='tells TBF not to look for a call to an error method, but just run all tests'
)
run_args.add_argument(
'--no-stop-after-success',
dest="stop_after_success",
action='store_false',
default=True,
help="do not terminate TBF after a test case covering the error method was found"
)
run_args.add_argument(
'--write-xml',
dest="write_xml",
action='store_true',
default=False,
help="write test-format XML files for created tests"
)
run_args.add_argument("file", type=str, help="file to verify")
args.add_argument(
"--version", action="version", version='{}'.format(__VERSION__))
args.add_argument('--help', '-h', action='help')
return parser
def _parse_cli_args(argv):
try:
end_idx = argv.index('--')
known_args = argv[:end_idx]
input_gen_args = argv[(end_idx + 1):]
except ValueError:
known_args = argv
input_gen_args = None
parser = _create_cli_arg_parser()
args = parser.parse_args(known_args)
args.ig_options = input_gen_args if input_gen_args else list()
args.timelimit = int(args.timelimit) if args.timelimit else None
args.ig_timelimit = int(args.ig_timelimit) if args.ig_timelimit else None
if not args.machine_model:
logging.info("No machine model specified. Assuming 32 bit")
args.machine_model = utils.MACHINE_MODEL_32
elif '32' in args.machine_model:
args.machine_model = utils.MACHINE_MODEL_32
elif '64' in args.machine_model:
args.machine_model = utils.MACHINE_MODEL_64
else:
raise AssertionError("Unhandled machine model arg: " +
args.machine_model)
if args.existing_tests_dir:
if not os.path.exists(args.existing_tests_dir):
sys.exit("Directory doesn't exist: " + args.existing_tests_dir)
else:
args.existing_tests_dir = os.path.abspath(args.existing_tests_dir)
args.file = os.path.abspath(args.file)
return args
def _get_input_generator(args):
input_generator = args.input_generator.lower()
if input_generator == 'afl':
return afl.InputGenerator(args.machine_model, args.log_verbose, args.ig_options)
elif input_generator == 'fshell':
return fshell.InputGenerator(args.machine_model, args.log_verbose, args.ig_options)
elif input_generator == 'klee':
return klee.InputGenerator(
args.ig_timelimit,
args.log_verbose,
args.ig_options,
machine_model=args.machine_model)
elif input_generator == 'crest':
return crest.InputGenerator(
args.log_verbose,
args.ig_options,
machine_model=args.machine_model)
elif input_generator == 'cpatiger':
return cpatiger.InputGenerator(
args.ig_timelimit,
args.log_verbose,
args.ig_options,
machine_model=args.machine_model)
elif input_generator == 'random':
return random_tester.InputGenerator(args.machine_model, args.log_verbose, args.ig_options)
elif input_generator == "dummy":
return dummy.InputGenerator(
args.machine_model,
args.log_verbose,
args.ig_options
)
else:
raise utils.ConfigError('Unhandled input generator: ' + input_generator)
def _get_test_processor(args, write_xml, nondet_methods):
generator = args.input_generator.lower()
processing_config = ProcessingConfig(args)
if generator == 'afl':
extractor = afl.AflTestConverter()
elif generator == "fshell":
extractor = fshell.FshellTestConverter(nondet_methods)
elif generator == 'klee':
extractor = klee.KleeTestConverter()
elif generator == 'crest':
extractor = crest.CrestTestConverter()
elif generator == 'cpatiger':
extractor = cpatiger.CpaTigerTestConverter()
elif generator == 'random':
extractor = random_tester.RandomTestConverter()
elif generator == "dummy":
extractor = dummy.DummyTestConverter()
else:
raise AssertionError('Unhandled validator: ' + generator)
if write_xml:
extractor = testcase_converter.XmlWritingTestConverter(extractor, XML_DIR)
return testcase_processing.TestProcessor(processing_config, extractor)
def run(args, stop_all_event=None):
"""
Runs tbf with the given arguments in the current working directory.
All created files are put in a directory `created_files`.
    :param args: the parsed command-line arguments (see _parse_cli_args)
    :param stop_all_event: a StopEvent that, when set, aborts the run early
    :return: None
"""
if args.use_error_method:
error_method = args.error_method
else:
error_method = None
default_err = "Unknown error"
processing_result = utils.VerdictUnknown()
filename = args.file
processing_stats = None
generator_stats = None
old_dir_abs = os.path.abspath('.')
if args.keep_files:
created_dir = utils.provide_directory(utils.get_output_path('created_files'))
work_dir = created_dir
else:
work_dir = utils.create_temp()
try:
_change_dir(work_dir)
if error_method:
error_method_exclude = [error_method]
specification = utils.get_error_spec(error_method)
else:
error_method_exclude = ()
specification = utils.get_coverage_spec()
nondet_methods = utils.find_nondet_methods(filename, args.svcomp_nondets_only, error_method_exclude)
input_generator = _get_input_generator(args)
test_processor = _get_test_processor(args, args.write_xml, nondet_methods)
if args.write_xml:
testcase_converter.write_metadata(
filename,
input_generator.get_name(),
specification,
args.machine_model,
directory=XML_DIR
)
assert not stop_all_event.is_set(
), "Stop event is already set before starting input generation"
stop_input_generator_event = StopEvent(stop_all_event)
generator_pool = mp.Pool(processes=1)
if args.existing_tests_dir is None:
# Define the methods for running test generation and test processing in parallel/sequentially
if args.run_parallel and _is_processing_necessary(args):
generator_function = generator_pool.apply_async
def get_generation_result(res):
return res.get(3)
def is_ready0(r):
return r.ready()
else:
generator_function = generator_pool.apply
def get_generation_result(res):
return res
def is_ready0(r):
return True
if args.ig_timelimit:
utils.set_stop_timer(args.ig_timelimit, stop_input_generator_event)
generation_result = generator_function(
input_generator.generate_input,
args=(filename, error_method, nondet_methods, stop_input_generator_event))
else:
generation_result = None
def get_generation_result(res):
return True, None
def is_ready0(r):
return True
        # Use a lambda so the readiness check closes over generation_result
        # defined above; it is handed to the test processor below.
is_ready = lambda: is_ready0(generation_result)
if stop_all_event.is_set():
stop_input_generator_event.set()
logging.info("Stop-all event is set, returning from execution")
return
processing_result, processing_stats = test_processor.process_inputs(
filename, error_method, nondet_methods, is_ready, stop_all_event, args.existing_tests_dir)
stop_input_generator_event.set()
stop_all_event.set()
logging.debug("Processing terminated and got results")
try:
generation_success, generator_stats = get_generation_result(
generation_result)
generation_done = True
except TimeoutError:
logging.warning("Couldn't' get result of input generation")
generation_done = False
generator_pool.terminate()
logging.debug("Input generation terminated and got results")
_change_dir(old_dir_abs)
if processing_result.is_positive():
test_name = os.path.basename(processing_result.test_vector.origin)
persistent_test = utils.get_output_path(test_name)
shutil.copy(processing_result.test_vector.origin, persistent_test)
if processing_result.harness is not None:
persistent_harness = utils.get_output_path('harness.c')
shutil.copy(processing_result.harness, persistent_harness)
# Create an ExecutionRunner only for the purpose of
# compiling the persistent harness
validator_for_compilation = ExecutionRunner(args.machine_model,
processing_result.test)
final_harness_name = utils.get_output_path('a.out')
validator_for_compilation.compile(filename, persistent_harness, final_harness_name)
elif not generation_done:
processing_result = utils.VerdictUnknown()
except utils.CompileError as e:
# This is a proper error because the program can't be compiled, so no tests can be executed
logging.error("Compile error: %s", e.msg if e.msg else default_err)
except utils.ParseError as e:
# This is a proper error because even parsing of the program failed, so preparation for the test execution
# was not possible
logging.error("Parse error: %s", e.msg if e.msg else default_err)
except FileNotFoundError as e:
logging.error("File not found: %s", e.filename)
finally:
# In case an exception occurred before we went back to the original directory
_change_dir(old_dir_abs)
statistics = ""
if generator_stats:
statistics += str(generator_stats)
if processing_stats:
if statistics: # If other statistics are there, add some spacing
statistics += "\n\n"
statistics += str(processing_stats)
if not error_method:
verdict = utils.DONE
else:
verdict = processing_result.verdict.upper()
verdict_str = "\nTBF verdict: " + verdict
with open(utils.get_output_path('Statistics.txt'),
'w+') as stats:
stats.write(statistics)
stats.write('\n')
stats.write(verdict_str)
stats.write('\n')
if args.print_stats:
print("Statistics:")
print(statistics)
print(verdict_str)
if not args.keep_files:
shutil.rmtree(work_dir, ignore_errors=True)
def _is_processing_necessary(arguments):
return arguments.execution_validation or arguments.klee_replay_validation \
or arguments.write_xml
def _change_dir(directory):
logging.debug("Changing to directory %s", directory)
os.chdir(directory)
def main():
timeout_watch = utils.Stopwatch()
timeout_watch.start()
args = _parse_cli_args(sys.argv[1:])
if args.log_verbose:
logging.getLogger().setLevel(level=logging.DEBUG)
else:
logging.getLogger().setLevel(level=logging.INFO)
stop_event = StopEvent()
main_run = mp.Process(target=run, args=(args, stop_event))
try:
main_run.start()
while main_run.is_alive() and (
not args.timelimit or timeout_watch.curr_s() < args.timelimit):
sleep(0.1)
finally:
timeout_watch.stop()
if args.timelimit and timeout_watch.sum() >= args.timelimit:
logging.info("Timelimit reached.\n")
logging.info("Time taken: " + str(timeout_watch.sum()))
stop_event.set()
if main_run.is_alive():
try:
main_run.join(5)
except mp.TimeoutError:
logging.info("Main run didn't terminate within acceptable limit. Killing it.")
main_run.terminate()
if __name__ == '__main__':
if sys.platform.startswith('cygwin'):
logging.warning(
"It seems you're running TBF on cygwin - this is not officially supported."
)
elif not sys.platform.startswith('linux'):
sys.exit("TBF currently only runs on Linux - exiting.")
main()
|
parallel.py
|
#!/usr/bin/env python
# coding=utf-8
"""
Utility functions for easy parallel processing, thanks go to:
http://stackoverflow.com/a/5792404/1467943
"""
from multiprocessing import Process, Pipe
def spawn(f):
def fun(pipe,x):
pipe.send(f(x))
pipe.close()
return fun
def parallel_map(f, X):
    # One (parent, child) Pipe pair per work item.
    pipe = [Pipe() for _ in X]
    proc = [Process(target=spawn(f), args=(c, x)) for x, (p, c) in zip(X, pipe)]
    [p.start() for p in proc]
    [p.join() for p in proc]
    # Note: results are read after join(); for very large results, reading
    # before join() avoids a potential pipe-buffer deadlock.
    return [p.recv() for (p, c) in pipe]
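# Example usage (a sketch; relies on the fork start method, since closures and
# lambdas are not picklable under the spawn start method):
#   squares = parallel_map(lambda v: v * v, range(8))
#   # -> [0, 1, 4, 9, 16, 25, 36, 49]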
|
start_capture.py
|
"""
Copyright (c) 2019 Cisco and/or its affiliates.
This software is licensed to you under the terms of the Cisco Sample
Code License, Version 1.0 (the "License"). You may obtain a copy of the
License at
https://developer.cisco.com/docs/licenses
All use of the material herein must be in accordance with the terms of
the License. All rights not expressly granted by the License are
reserved. Unless required by applicable law or agreed to separately in
writing, software distributed under the License is distributed on an "AS
IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
or implied.
ISR Monitoring via command
Created on Fri Feb 14 10:10:20 2020
Script to invoke a command in a remote shell at specified intervals and capture the data in log/CSV files.
"show platform hardware qfp active datapath utilization"
@author: rmalyava
"""
import os
import sys
import time
import signal
import threading
from datetime import datetime
from pathlib import Path
# Get the absolute path for the directory where this file is located
project_root = os.path.abspath(os.path.dirname(__file__))
sys.path.insert(0, project_root)
import channel
import devices
# Output Directory for the script
output_dir="logs"
command_to_execute_remotely = "show platform hardware qfp active datapath utilization"
# Headers for the CSV file
csv_header_input="Input:Priority(pps)-5 secs,Input:Priority(pps)-1 min,Input:Priority(pps)-5 min,Input:Priority(pps)-60 min,Input:Priority(bps)-5 secs,Input:Priority(bps)-1 min,Input:Priority(bps)-5 min,Input:Priority(bps)-60 min,Input:Non-Priority(pps)-5 secs,Input:Non-Priority(pps)-1 min,Input:Non-Priority(pps)-5 min,Input:Non-Priority(pps)-60 min,Input:Non-Priority(bps)-5 secs,Input:Non-Priority(bps)-1 min,Input:Non-Priority(bps)-5 min,Input:Non-Priority(bps)-60 min,Input:Total(pps)-5 secs,Input:Total(pps)-1 min,Input:Total(pps)-5 min,Input:Total(pps)-60 min,Input:Total(bps)-5 secs,Input:Total(bps)-1 min,Input:Total(bps)-5 min,Input:Total(bps)-60 min"
csv_header_output="Output:Priority(pps)-5 secs,Output:Priority(pps)-1 min,Output:Priority(pps)-5 min,Output:Priority(pps)-60 min,Output:Priority(bps)-5 secs,Output:Priority(bps)-1 min,Output:Priority(bps)-5 min,Output:Priority(bps)-60 min,Output:Non-Priority(pps)-5 secs,Output:Non-Priority(pps)-1 min,Output:Non-Priority(pps)-5 min,Output:Non-Priority(pps)-60 min,Output:Non-Priority(bps)-5 secs,Output:Non-Priority(bps)-1 min,Output:Non-Priority(bps)-5 min,Output:Non-Priority(bps)-60 min,Output:Total(pps)-5 secs,Output:Total(pps)-1 min,Output:Total(pps)-5 min,Output:Total(pps)-60 min,Output:Total(bps)-5 secs,Output:Total(bps)-1 min,Output:Total(bps)-5 min,Output:Total(bps)-60 min"
csv_header_processing="Processing:Load(pct)-5 secs,Processing:Load(pct)-1 min,Processing:Load(pct)-5 min,Processing:Load(pct)-60 min"
# Method for Appending the command output to the file
def write_to_file(filename,content,ext=".log"):
# Create the output folder if not available
Path(output_dir).mkdir(parents=True, exist_ok=True)
output_file=output_dir+"/"+filename+'-'+time.strftime('%Y-%m-%d')+ext
with open(output_file,'a') as f:
f.write(content)
def write_to_csv(filename,content,ext=".csv"):
# Create the output folder if not available
Path(output_dir).mkdir(parents=True, exist_ok=True)
output_file=output_dir+"/"+filename+'-'+time.strftime('%Y-%m-%d')+ext
if os.path.exists(output_file)==False:
# Writing the CSV Header for the first time only
with open(output_file,'a') as f:
f.write("{},{},{},{}\n".format('timestamp',csv_header_input,csv_header_output,csv_header_processing))
data_to_write=str(datetime.now())+","+content
with open(output_file,'a') as f:
f.write(data_to_write)
# Method to check whether any of the given words appears in the line
def wordInLine(words_list,line):
for word in words_list:
if word in line:
return True
return False
# Transforming the command output to CSV
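# The command output is assumed to be a fixed-width table; the character
# slices below (23:36, 37:50, 51:64, 65:78) pick out the 5 secs / 1 min /
# 5 min / 60 min columns of each row that is not a header line.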
def transformOutputToCSV(content):
contentLines=content.splitlines()
contentLines = list(filter(None, contentLines))
word_match_list=["datapath","CPP","terminal","show"]
csvContent=""
for line in contentLines:
if wordInLine(word_match_list,line) != True:
content_for_append="{},{},{},{}".format(line[23:36].strip(),line[37:50].strip(),line[51:64].strip(),line[65:78].strip())
if(content_for_append.strip(',')!=''):
if(csvContent!=""):
csvContent+=","
csvContent+=content_for_append
return csvContent+"\n"
# Method to create remote shell on the device
def remote_command_capture(device_info):
try:
print("{} : Trying SSH ...".format(device_info['alias']))
remote_ssh = channel.channel(str(device_info['host']),str(device_info['username']),str(device_info['password']))
print("{} : Remote Shell acquired.".format(device_info['alias']))
invoke_command_scheduled(remote_ssh,device_info)
except Exception as error:
print("{} : {}, not able to capture ".format(device_info['alias'],error))
# Method to invoke the command in specified intervals of time
def invoke_command_scheduled(ssh,device_info):
print("{} : Capture started ... with interval of {} seconds".format(device_info['alias'],device_info['interval']))
while(True):
output=ssh.sendCommand(command_to_execute_remotely)
write_to_file(device_info['alias'],output)
write_to_csv(device_info['alias'],transformOutputToCSV(output.strip()))
time.sleep(int(device_info['interval']))
def signal_handler(signal, frame):
print('stopping capture of utilization ...')
sys.exit(0)
# Entry point: steps to run when the script is invoked directly
if __name__ == "__main__":
signal.signal(signal.SIGINT,signal_handler)
pooling_length=len(devices.DEVICE_LIST)
print("Found {} devices\nfind logs at {}".format(pooling_length,os.path.abspath(output_dir)))
jobs = []
for device in devices.DEVICE_LIST:
p = threading.Thread(target=remote_command_capture,args=(device,))
jobs.append(p)
p.start()
|
composed_writer.py
|
#!/usr/bin/env python3
import logging
import sys
import threading
from os.path import dirname, realpath
sys.path.append(dirname(dirname(dirname(realpath(__file__)))))
from logger.writers.writer import Writer # noqa: E402
from logger.utils import formats # noqa: E402
class ComposedWriter(Writer):
############################
def __init__(self, transforms=[], writers=[], check_format=False):
"""
Apply zero or more Transforms (in series) to passed records, then
write them (in parallel threads) using the specified Writers.
```
transforms A single Transform, a list of Transforms, or None.
writers A single Writer or a list of Writers.
check_format If True, attempt to check that Transform/Writer formats
are compatible, and throw a ValueError if they are not.
If check_format is False (the default) the output_format()
of the whole reader will be formats.Unknown.
```
Example:
```
writer = ComposedWriter(transforms=[TimestampTransform(),
PrefixTransform('gyr1')],
writers=[NetworkWriter(':6221'),
LogfileWriter('/logs/gyr1')],
check_format=True)
```
NOTE: we make the rash assumption that transforms are thread-safe,
that is, that no mischief or corrupted internal state will result if
more than one thread calls a transform at the same time. To be
thread-safe, a transform must protect any changes to its internal
state with a non-re-entrant thread lock, as described in the threading
module. We do *not* make this assumption of our writers, and impose a
lock to prevent a writer's write() method from being called a second
time if the first has not yet completed.
"""
# Make transforms a list if it's not. Even if it's only one transform.
if not isinstance(transforms, type([])):
self.transforms = [transforms]
else:
self.transforms = transforms
# Make writers a list if it's not. Even if it's only one writer.
if not isinstance(writers, type([])):
self.writers = [writers]
else:
self.writers = writers
# One lock per writer, to prevent us from accidental re-entry if a
# new write is requested before the previous one has completed.
self.writer_lock = [threading.Lock() for w in self.writers]
self.exceptions = [None for w in self.writers]
# If they want, check that our writers and transforms have
# compatible input/output formats.
input_format = formats.Unknown
if check_format:
input_format = self._check_writer_formats()
if not input_format:
raise ValueError('ComposedWriter: No common format found '
'for passed transforms (%s) and writers (%s)'
% (self.transforms, self.writers))
super().__init__(input_format=input_format)
############################
def _run_writer(self, index, record):
"""Internal: grab the appropriate lock and call the appropriate
write() method. If there's an exception, save it."""
with self.writer_lock[index]:
try:
self.writers[index].write(record)
except Exception as e:
self.exceptions[index] = e
############################
def apply_transforms(self, record):
"""Internal: apply the transforms in series."""
if record:
for t in self.transforms:
record = t.transform(record)
if not record:
break
return record
############################
def write(self, record):
"""Transform the passed record and dispatch it to writers."""
# Transforms run in series
record = self.apply_transforms(record)
if record is None:
return
# No idea why someone would instantiate without writers, but it's
# plausible. Try to be accommodating.
if not self.writers:
return
# If we only have one writer, there's no point making things
# complicated. Just write and return.
if len(self.writers) == 1:
self.writers[0].write(record)
return
# Fire record off to write() requests for each writer.
writer_threads = []
for i in range(len(self.writers)):
t = threading.Thread(target=self._run_writer, args=(i, record),
name=str(type(self.writers[i])), daemon=True)
t.start()
writer_threads.append(t)
# Wait for all writes to complete
for t in writer_threads:
t.join()
# Were there any exceptions? Arbitrarily raise the first one in list
exceptions = [e for e in self.exceptions if e]
for e in exceptions:
logging.error(e)
if exceptions:
raise exceptions[0]
############################
def _check_writer_formats(self):
"""Check that Writer outputs are compatible with each other and with
Transform inputs. Return None if not."""
# Begin with output format of first transform and work way to end;
# the output of each is input of next one.
for i in range(1, len(self.transforms)):
transform_input = self.transforms[i].input_format()
previous_output = self.transforms[i - 1].output_format()
if not transform_input.can_accept(previous_output):
logging.error('Transform %s can not accept input format %s',
                              self.transforms[i], previous_output)
return None
# Make sure that all the writers can accept the output of the last
# transform.
if self.transforms:
transform_output = self.transforms[-1].output_format()
for writer in self.writers:
if not writer.input_format().can_accept(transform_output):
logging.error('Writer %s can not accept input format %s',
writer, transform_output)
return None
# Finally, return the input_format that we can take.
if self.transforms:
return self.transforms[0].input_format()
# If no transform, our input_format is the lowest common format of
# our writers. If no writers, then we've got nothing - right?
if not self.writers:
logging.error('ComposedWriter has no transforms or writers?!?')
return None
lowest_common = self.writers[0].input_format()
for writer in self.writers:
lowest_common = writer.input_format().common(lowest_common)
if not lowest_common:
logging.error('No common input format among writers')
return None
return lowest_common
|
locking.py
|
#!/usr/bin/env python
# ===- system_tests/locking/locking.py ------------------------------------===//
# * _ _ _ *
# * | | ___ ___| | _(_)_ __ __ _ *
# * | |/ _ \ / __| |/ / | '_ \ / _` | *
# * | | (_) | (__| <| | | | | (_| | *
# * |_|\___/ \___|_|\_\_|_| |_|\__, | *
# * |___/ *
# ===----------------------------------------------------------------------===//
#
# Part of the pstore project, under the Apache License v2.0 with LLVM Exceptions.
# See https://github.com/SNSystems/pstore/blob/master/LICENSE.txt for license
# information.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
#
# ===----------------------------------------------------------------------===//
from __future__ import print_function
import argparse
import logging
import os.path
import subprocess
import sys
import threading
import time
EXIT_SUCCESS = 0
EXIT_FAILURE = 1
class WatchdogTimer(threading.Thread):
"""Run *callback* in *timeout* seconds unless the timer is restarted."""
def __init__(self, timeout, callback, timer=time.time, args=(), kwargs=None, name=None):
threading.Thread.__init__(self, name=name)
self.daemon = True
self.__timeout = timeout
self.__callback = callback
self.__args = args
self.__kwargs = kwargs if kwargs is not None else {}
self.__timer = timer
self.__cancelled = threading.Event()
self.blocked = threading.Lock()
self.__deadline = self.__timer() + self.__timeout
def run(self):
self.restart() # don't start timer until `.start()` is called
# wait until timeout happens or the timer is canceled
while not self.__cancelled.wait(self.__deadline - self.__timer()):
# don't test the timeout while something else holds the lock
# allow the timer to be restarted while blocked
with self.blocked:
if self.__deadline <= self.__timer() and not self.__cancelled.is_set():
return self.__callback(*self.__args, **self.__kwargs) # on timeout
def restart(self):
"""Restart the watchdog timer."""
self.__deadline = self.__timer() + self.__timeout
def cancel(self):
self.__cancelled.set()
def start_lock_test_process(binaries, database):
args = [os.path.join(binaries, 'pstore-lock-test'), database]
logging.info('start process: %s', str(args))
process = subprocess.Popen(args,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
universal_newlines=True)
logging.debug('process ID=%d', process.pid)
return process
class State:
def __init__(self):
self.p1_holding_lock = threading.Event()
self.p2_was_blocked = threading.Event()
self.p1_released_lock = threading.Event()
self.exit_code = EXIT_SUCCESS
def check_companion_thread(self):
if self.exit_code != EXIT_SUCCESS:
raise RuntimeError('an error in the companion thread')
def get_process_output(state, process, watchdog):
watchdog.restart()
logging.debug('process readline')
state.check_companion_thread()
line = process.stdout.readline(1024)
state.check_companion_thread()
rc = process.poll()
if line == '' and rc is not None:
raise RuntimeError('process exited ({0})'.format(rc))
if line and line[-1] == '\n':
line = line[0:-1]
logging.info('process said: %s', line)
return line
def send(process):
logging.info('sending to stdin')
process.stdin.write('a\n')
process.stdin.flush()
logging.info('sent')
def kill(state, process):
logging.error('Watchdog timeout: killing process')
process.kill()
state.exit_code = EXIT_FAILURE
def wait(state, event):
state.check_companion_thread()
while not event.is_set() and state.exit_code == EXIT_SUCCESS:
event.wait(TIMEOUT)
logging.debug('iterate')
state.check_companion_thread()
state.check_companion_thread()
TIMEOUT = 60
PAUSE = 2
def run_p1(state, binaries, database):
try:
process = start_lock_test_process(binaries, database)
watchdog = WatchdogTimer(timeout=TIMEOUT, callback=kill, args=(state, process,), name='p1-watchdog')
try:
watchdog.start()
l1 = get_process_output(state, process, watchdog)
if l1 != 'start':
raise RuntimeError('process stdout should have been "start"')
out1 = get_process_output(state, process, watchdog)
if out1 != 'pre-lock':
raise RuntimeError('process stdout should have been "pre-lock", got "{0}"'.format(out1))
send(process)
out2 = get_process_output(state, process, watchdog)
while out2 == 'blocked':
out2 = get_process_output(state, process, watchdog)
if out2 != 'holding-lock':
raise RuntimeError('process stdout should have been "holding-lock", got "{0}"'.format(out2))
logging.info('notifying that we saw "holding-lock"')
state.p1_holding_lock.set()
# Wait for the other process to say "blocked"
logging.info('wait for p2 to see "blocked"')
wait(state, state.p2_was_blocked)
(stdout, stderr) = process.communicate("a\n")
if stdout != 'done\n':
raise RuntimeError('process stdout should have been "done", got "{0}"'.format(stdout))
if stderr is not None:
raise RuntimeError('stderr contained: "{0}"'.format(stderr))
logging.info('done')
state.p1_released_lock.set()
finally:
watchdog.cancel()
except Exception as ex:
logging.error(str(ex), exc_info=ex)
state.exit_code = EXIT_FAILURE
def run_p2(state, binaries, database):
try:
time.sleep(1)
process = start_lock_test_process(binaries, database)
watchdog = WatchdogTimer(timeout=TIMEOUT, callback=kill, args=(state, process,), name='p2-watchdog')
try:
watchdog.start()
l1 = get_process_output(state, process, watchdog)
if l1 != 'start':
raise RuntimeError('process stdout should have been "start"')
l1 = get_process_output(state, process, watchdog)
if l1 != 'pre-lock':
raise RuntimeError('process stdout should have been "pre-lock"')
# wait for the companion thread to see "holding lock"
logging.info('waiting for p1 to see "holding-lock"')
wait(state, state.p1_holding_lock)
logging.info('got it.')
send(process)
# We need to "blocked" indicating that another process has the lock.
out2 = get_process_output(state, process, watchdog)
while out2 == 'blocked':
state.p2_was_blocked.set()
out2 = get_process_output(state, process, watchdog)
logging.info('waiting for p1 to release the lock')
wait(state, state.p1_released_lock)
if out2 != 'holding-lock':
raise RuntimeError('process stdout should have been "holding-lock", got "{0}"'.format(out2))
(stdout, stderr) = process.communicate("a\n")
if stdout != 'done\n':
raise RuntimeError('process stdout should have been "done", got "{0}"'.format(stdout))
if stderr is not None:
raise RuntimeError('stderr contained: "{0}"'.format(stderr))
logging.info('done')
finally:
watchdog.cancel()
except Exception as ex:
logging.error(str(ex), exc_info=ex)
state.exit_code = EXIT_FAILURE
def test(binaries, database):
"""
Run the test.
:param binaries: The directory containing the pstore executables.
:param database: The path of the database file to be used for the test
:return: EXIT_SUCCESS if the test is successful, EXIT_FAILURE otherwise.
"""
state = State()
kwargs = {
'state': state,
'binaries': binaries,
'database': database
}
t1 = threading.Thread(target=run_p1, kwargs=kwargs, name='run_p1')
t2 = threading.Thread(target=run_p2, kwargs=kwargs, name='run_p2')
t1.start()
t2.start()
logging.debug('joining run_p1')
t1.join()
logging.debug('joining run_p2')
t2.join()
logging.debug('done')
return state.exit_code
if __name__ == '__main__':
logging.basicConfig(level=logging.DEBUG,
format='%(levelname)s - %(asctime)s - %(threadName)s - %(filename)s:%(lineno)d - %(message)s')
logging.debug('Starting')
parser = argparse.ArgumentParser('locking test tool')
parser.add_argument('binaries', help='Directory containing the pstore binaries')
parser.add_argument('database', help='Path of the database file used for the test')
options = parser.parse_args()
exit_code = test(options.binaries, options.database)
logging.info('Exit-code is %d', exit_code)
logging.shutdown()
sys.exit(exit_code)
|
test_storange_and_inference_scp.py
|
# Copyright (C) 2020 Matthew Cooper
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import random
import threading
import time
import images_storage_scu
def fuzz():
time.sleep(random.random())
root_path = "/media/matthew/secondary/rccc_prostate_dataset/"
series_paths = [
"000638/with_transfer_syntax/",
"002487/with_transfer_syntax/",
"003887/with_transfer_syntax/",
"011821/with_transfer_syntax/",
"012125/with_transfer_syntax/",
"012600/with_transfer_syntax/",
"013030/with_transfer_syntax/",
"013604/with_transfer_syntax/",
"013780/with_transfer_syntax/",
"013872/with_transfer_syntax/",
"013875/with_transfer_syntax/",
"014072/with_transfer_syntax/",
"014199/with_transfer_syntax/",
"014362/with_transfer_syntax/",
]
for series_path in series_paths:
threading.Thread(
target=images_storage_scu.main, args=(root_path + series_path,)
).start()
fuzz()
|
test_preload.py
|
import time
import pytest
import threading
from unittest.mock import Mock
from irrd.rpki.status import RPKIStatus
from irrd.utils.test_utils import flatten_mock_calls
from ..database_handler import DatabaseHandler
from ..preload import Preloader, PreloadStoreManager, PreloadUpdater, REDIS_KEY_ORIGIN_SOURCE_SEPARATOR
from ..queries import RPSLDatabaseQuery
# Use different Redis keys and channels for the tests
TEST_REDIS_ORIGIN_ROUTE4_STORE_KEY = 'TEST-irrd-preload-origin-route4'
TEST_REDIS_ORIGIN_ROUTE6_STORE_KEY = 'TEST-irrd-preload-origin-route6'
TEST_REDIS_PRELOAD_RELOAD_CHANNEL = 'TEST-irrd-preload-reload-channel'
TEST_REDIS_PRELOAD_COMPLETE_CHANNEL = 'TEST-irrd-preload-complete-channel'
@pytest.fixture()
def mock_preload_updater(monkeypatch, config_override):
mock_preload_updater = Mock(spec=PreloadUpdater)
monkeypatch.setattr('irrd.storage.preload.PreloadUpdater', mock_preload_updater)
yield mock_preload_updater
@pytest.fixture()
def mock_redis_keys(monkeypatch, config_override):
monkeypatch.setattr('irrd.storage.preload.REDIS_ORIGIN_ROUTE4_STORE_KEY', TEST_REDIS_ORIGIN_ROUTE4_STORE_KEY)
monkeypatch.setattr('irrd.storage.preload.REDIS_ORIGIN_ROUTE6_STORE_KEY', TEST_REDIS_ORIGIN_ROUTE6_STORE_KEY)
monkeypatch.setattr('irrd.storage.preload.REDIS_PRELOAD_RELOAD_CHANNEL', TEST_REDIS_PRELOAD_RELOAD_CHANNEL)
monkeypatch.setattr('irrd.storage.preload.REDIS_PRELOAD_COMPLETE_CHANNEL', TEST_REDIS_PRELOAD_COMPLETE_CHANNEL)
class TestPreloading:
def test_load_reload_thread_management(self, mock_preload_updater, mock_redis_keys):
preload_manager = PreloadStoreManager()
preload_manager_thread = threading.Thread(target=preload_manager.main)
preload_manager_thread.start()
time.sleep(1)
assert mock_preload_updater.mock_calls[0][0] == ''
assert mock_preload_updater.mock_calls[0][1][0] == preload_manager
assert mock_preload_updater.mock_calls[0][1][1] == preload_manager._reload_lock
assert mock_preload_updater.mock_calls[1][0] == '().start'
assert len(mock_preload_updater.mock_calls) == 2
assert len(preload_manager._threads) == 1
mock_preload_updater.reset_mock()
preload_manager._perform_reload()
assert mock_preload_updater.mock_calls[0][0] == '().is_alive'
assert mock_preload_updater.mock_calls[1][0] == ''
assert mock_preload_updater.mock_calls[1][1][0] == preload_manager
assert mock_preload_updater.mock_calls[2][0] == '().start'
assert len(mock_preload_updater.mock_calls) == 3
assert len(preload_manager._threads) == 2
mock_preload_updater.reset_mock()
# Two threads already running, do nothing
preload_manager._perform_reload()
assert mock_preload_updater.mock_calls[0][0] == '().is_alive'
assert mock_preload_updater.mock_calls[1][0] == '().is_alive'
assert len(mock_preload_updater.mock_calls) == 2
assert len(preload_manager._threads) == 2
mock_preload_updater.reset_mock()
# Assume all threads are dead
for thread in preload_manager._threads:
thread.is_alive = lambda: False
# Reload through the redis channel. First call is ignored, inetnums are not relevant.
Preloader().signal_reload({'inetnum'})
Preloader().signal_reload()
Preloader().signal_reload()
# As all threads are considered dead, a new thread should be started
assert mock_preload_updater.mock_calls[0][0] == ''
assert mock_preload_updater.mock_calls[1][0] == '().start'
# Listen() on redis is blocking, unblock it after setting terminate
preload_manager.terminate = True
Preloader().signal_reload()
def test_routes_for_origins(self, mock_redis_keys):
preloader = Preloader()
preload_manager = PreloadStoreManager()
preload_manager.update_route_store(
{
f'TEST2{REDIS_KEY_ORIGIN_SOURCE_SEPARATOR}AS65546': {'192.0.2.0/25'},
f'TEST1{REDIS_KEY_ORIGIN_SOURCE_SEPARATOR}AS65547': {'192.0.2.128/25', '198.51.100.0/25'},
},
{
f'TEST2{REDIS_KEY_ORIGIN_SOURCE_SEPARATOR}AS65547': {'2001:db8::/32'},
},
)
sources = ['TEST1', 'TEST2']
assert preloader.routes_for_origins([], sources) == set()
assert preloader.routes_for_origins(['AS65545'], sources) == set()
assert preloader.routes_for_origins(['AS65546'], []) == set()
assert preloader.routes_for_origins(['AS65546'], sources, 4) == {'192.0.2.0/25'}
assert preloader.routes_for_origins(['AS65547'], sources, 4) == {'192.0.2.128/25', '198.51.100.0/25'}
assert preloader.routes_for_origins(['AS65546'], sources, 6) == set()
assert preloader.routes_for_origins(['AS65547'], sources, 6) == {'2001:db8::/32'}
assert preloader.routes_for_origins(['AS65546'], sources) == {'192.0.2.0/25'}
assert preloader.routes_for_origins(['AS65547'], sources) == {'192.0.2.128/25', '198.51.100.0/25', '2001:db8::/32'}
assert preloader.routes_for_origins(['AS65547', 'AS65546'], sources, 4) == {'192.0.2.0/25', '192.0.2.128/25', '198.51.100.0/25'}
assert preloader.routes_for_origins(['AS65547', 'AS65546'], ['TEST1']) == {'192.0.2.128/25', '198.51.100.0/25'}
assert preloader.routes_for_origins(['AS65547', 'AS65546'], ['TEST2']) == {'192.0.2.0/25', '2001:db8::/32'}
with pytest.raises(ValueError) as ve:
preloader.routes_for_origins(['AS65547'], [], 2)
assert 'Invalid IP version: 2' in str(ve.value)
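# For reference (inferred from the calls above, so treat as a sketch of the interface):
# update_route_store() takes two dicts, one for IPv4 routes and one for IPv6 routes, each
# keyed by '<source><REDIS_KEY_ORIGIN_SOURCE_SEPARATOR><origin AS>' and mapping to the set
# of prefixes originated by that AS in that IRR source.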
class TestPreloadUpdater:
def test_preload_updater(self, monkeypatch):
mock_database_handler = Mock(spec=DatabaseHandler)
mock_database_query = Mock(spec=RPSLDatabaseQuery)
monkeypatch.setattr('irrd.storage.preload.RPSLDatabaseQuery',
lambda column_names, enable_ordering: mock_database_query)
mock_reload_lock = Mock()
mock_preload_obj = Mock()
mock_query_result = [
{
'ip_version': 4,
'ip_first': '192.0.2.0',
'prefix_length': 25,
'asn_first': 65546,
'source': 'TEST1',
},
{
'ip_version': 4,
'ip_first': '192.0.2.128',
'prefix_length': 25,
'asn_first': 65547,
'source': 'TEST1',
},
{
'ip_version': 4,
'ip_first': '198.51.100.0',
'prefix_length': 25,
'asn_first': 65547,
'source': 'TEST1',
},
{
'ip_version': 6,
'ip_first': '2001:db8::',
'prefix_length': 32,
'asn_first': 65547,
'source': 'TEST2',
},
]
mock_database_handler.execute_query = lambda query: mock_query_result
PreloadUpdater(mock_preload_obj, mock_reload_lock).run(mock_database_handler)
assert flatten_mock_calls(mock_reload_lock) == [['acquire', (), {}], ['release', (), {}]]
assert flatten_mock_calls(mock_database_query) == [
['object_classes', (['route', 'route6'],), {}],
['rpki_status', ([RPKIStatus.not_found, RPKIStatus.valid],), {}],
]
assert flatten_mock_calls(mock_preload_obj) == [
[
'update_route_store',
(
{
f'TEST1{REDIS_KEY_ORIGIN_SOURCE_SEPARATOR}AS65546': {'192.0.2.0/25'},
f'TEST1{REDIS_KEY_ORIGIN_SOURCE_SEPARATOR}AS65547': {'192.0.2.128/25', '198.51.100.0/25'},
},
{
f'TEST2{REDIS_KEY_ORIGIN_SOURCE_SEPARATOR}AS65547': {'2001:db8::/32'}
},
),
{}
]
]
def test_preload_updater_failure(self, caplog):
mock_database_handler = Mock()
mock_reload_lock = Mock()
mock_preload_obj = Mock()
PreloadUpdater(mock_preload_obj, mock_reload_lock).run(mock_database_handler)
assert 'Updating preload store failed' in caplog.text
assert flatten_mock_calls(mock_reload_lock) == [['acquire', (), {}], ['release', (), {}]]
|
main.py
|
##################################################################################
# #
# Copyright (c) 2020 AECgeeks #
# #
# Permission is hereby granted, free of charge, to any person obtaining a copy #
# of this software and associated documentation files (the "Software"), to deal #
# in the Software without restriction, including without limitation the rights #
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell #
# copies of the Software, and to permit persons to whom the Software is #
# furnished to do so, subject to the following conditions: #
# #
# The above copyright notice and this permission notice shall be included in all #
# copies or substantial portions of the Software. #
# #
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR #
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, #
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE #
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER #
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, #
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE #
# SOFTWARE. #
# #
##################################################################################
from __future__ import print_function
import os
import json
import threading
from collections import defaultdict, namedtuple
from flask_dropzone import Dropzone
from werkzeug.middleware.proxy_fix import ProxyFix
from flask import Flask, request, send_file, render_template, abort, jsonify, redirect, url_for, make_response
from flask_cors import CORS
from flask_basicauth import BasicAuth
from flasgger import Swagger
import utils
import worker
import database
application = Flask(__name__)
dropzone = Dropzone(application)
# application.config['DROPZONE_UPLOAD_MULTIPLE'] = True
# application.config['DROPZONE_PARALLEL_UPLOADS'] = 3
DEVELOPMENT = os.environ.get('environment', 'production').lower() == 'development'
if not DEVELOPMENT and os.path.exists("/version"):
PIPELINE_POSTFIX = "." + open("/version").read().strip()
else:
PIPELINE_POSTFIX = ""
if not DEVELOPMENT:
# In some setups this proved to be necessary for url_for() to pick up HTTPS
application.wsgi_app = ProxyFix(application.wsgi_app, x_proto=1)
CORS(application)
application.config['SWAGGER'] = {
'title': os.environ.get('APP_NAME', 'ifc-pipeline request API'),
'openapi': '3.0.2',
"specs": [
{
"version": "0.1",
"title": os.environ.get('APP_NAME', 'ifc-pipeline request API'),
"description": os.environ.get('APP_NAME', 'ifc-pipeline request API'),
"endpoint": "spec",
"route": "/apispec",
},
]
}
swagger = Swagger(application)
if not DEVELOPMENT:
from redis import Redis
from rq import Queue
q = Queue(connection=Redis(host=os.environ.get("REDIS_HOST", "localhost")), default_timeout=3600)
@application.route('/', methods=['GET'])
def get_main():
return render_template('index.html')
def process_upload(filewriter, callback_url=None):
id = utils.generate_id()
d = utils.storage_dir_for_id(id)
os.makedirs(d)
filewriter(os.path.join(d, id+".ifc"))
session = database.Session()
session.add(database.model(id, ''))
session.commit()
session.close()
if DEVELOPMENT:
t = threading.Thread(target=lambda: worker.process(id, callback_url))
t.start()
else:
q.enqueue(worker.process, id, callback_url)
return id
def process_upload_multiple(files, callback_url=None):
id = utils.generate_id()
d = utils.storage_dir_for_id(id)
os.makedirs(d)
file_id = 0
session = database.Session()
m = database.model(id, '')
session.add(m)
for file in files:
fn = file.filename
filewriter = lambda fn: file.save(fn)
filewriter(os.path.join(d, id+"_"+str(file_id)+".ifc"))
file_id += 1
m.files.append(database.file(id, ''))
session.commit()
session.close()
if DEVELOPMENT:
t = threading.Thread(target=lambda: worker.process(id, callback_url))
t.start()
else:
q.enqueue(worker.process, id, callback_url)
return id
@application.route('/', methods=['POST'])
def put_main():
"""
Upload model
---
requestBody:
content:
multipart/form-data:
schema:
type: object
properties:
ifc:
type: string
format: binary
responses:
'200':
description: redirect
"""
ids = []
files = []
for key, f in request.files.items():
if key.startswith('file'):
file = f
files.append(file)
id = process_upload_multiple(files)
url = url_for('check_viewer', id=id)
if request.accept_mimetypes.accept_json:
return jsonify({"url":url})
else:
return redirect(url)
@application.route('/p/<id>', methods=['GET'])
def check_viewer(id):
if not utils.validate_id(id):
abort(404)
return render_template('progress.html', id=id)
@application.route('/pp/<id>', methods=['GET'])
def get_progress(id):
if not utils.validate_id(id):
abort(404)
session = database.Session()
model = session.query(database.model).filter(database.model.code == id).all()[0]
session.close()
return jsonify({"progress": model.progress})
@application.route('/log/<id>.<ext>', methods=['GET'])
def get_log(id, ext):
log_entry_type = namedtuple('log_entry_type', ("level", "message", "instance", "product"))
if ext not in {'html', 'json'}:
abort(404)
if not utils.validate_id(id):
abort(404)
logfn = os.path.join(utils.storage_dir_for_id(id), "log.json")
if not os.path.exists(logfn):
abort(404)
if ext == 'html':
log = []
for ln in open(logfn):
l = ln.strip()
if l:
log.append(json.loads(l, object_hook=lambda d: log_entry_type(*(d.get(k, '') for k in log_entry_type._fields))))
return render_template('log.html', id=id, log=log)
else:
return send_file(logfn, mimetype='text/plain')
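# Illustrative: with the object_hook above, a hypothetical log line such as
#   {"level": "error", "message": "schema validation failed"}
# becomes log_entry_type(level='error', message='schema validation failed', instance='', product=''),
# so fields missing from the JSON default to empty strings before rendering log.html.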
@application.route('/v/<id>', methods=['GET'])
def get_viewer(id):
if not utils.validate_id(id):
abort(404)
d = utils.storage_dir_for_id(id)
ifc_files = [os.path.join(d, name) for name in os.listdir(d) if os.path.isfile(os.path.join(d, name)) and name.endswith('.ifc')]
if len(ifc_files) == 0:
abort(404)
failedfn = os.path.join(utils.storage_dir_for_id(id), "failed")
if os.path.exists(failedfn):
return render_template('error.html', id=id)
for ifc_fn in ifc_files:
glbfn = ifc_fn.replace(".ifc", ".glb")
if not os.path.exists(glbfn):
abort(404)
n_files = len(ifc_files) if "_" in ifc_files[0] else None
return render_template(
'viewer.html',
id=id,
n_files=n_files,
postfix=PIPELINE_POSTFIX
)
@application.route('/m/<fn>', methods=['GET'])
def get_model(fn):
"""
Get model component
---
parameters:
- in: path
name: fn
required: true
schema:
type: string
description: Model id and part extension
example: BSESzzACOXGTedPLzNiNklHZjdJAxTGT.glb
"""
id, ext = fn.split('.', 1)
if not utils.validate_id(id):
abort(404)
if ext not in {"xml", "svg", "glb", "unoptimized.glb"}:
abort(404)
path = utils.storage_file_for_id(id, ext)
if not os.path.exists(path):
abort(404)
if os.path.exists(path + ".gz"):
import mimetypes
response = make_response(
send_file(path + ".gz",
mimetype=mimetypes.guess_type(fn, strict=False)[0])
)
response.headers['Content-Encoding'] = 'gzip'
return response
else:
return send_file(path)
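# Example request (illustrative, reusing the id from the docstring above):
#   GET /m/BSESzzACOXGTedPLzNiNklHZjdJAxTGT.glb
# serves utils.storage_file_for_id(id, 'glb'); if a pre-compressed .glb.gz sits next to it,
# that file is returned instead with a Content-Encoding: gzip header.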
"""
# Create a file called routes.py with the following
# example content to add application-specific routes
from main import application
@application.route('/test', methods=['GET'])
def test_hello_world():
return 'Hello world'
"""
try:
import routes
except ImportError as e:
pass
|
forecast.py
|
# Copyright (C) 2013-2016 Martin Vejmelka, UC Denver
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
# of the Software, and to permit persons to whom the Software is furnished to do
# so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
# INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR
# A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
from wrf.wrf_cloner import WRFCloner
from wrf.wrf_exec import Geogrid, Ungrib, Metgrid, Real, WRF
from wrf.wps_domains import WPSDomainLCC, WPSDomainConf
from utils import utc_to_esmf, symlink_matching_files, symlink_unless_exists, update_time_control, \
update_namelist, timedelta_hours, esmf_to_utc, render_ignitions, make_dir, \
timespec_to_utc, round_time_to_hour, Dict, dump, save, load, check_obj, \
make_clean_dir, process_create_time, load_sys_cfg, ensure_dir, move, \
json_join, number_minutes, serial_json, link2copy, append2file, delete
from geo.write_geogrid import write_table
from geo.geodriver import GeoDriver
from vis.postprocessor import Postprocessor
from vis.timeseries import Timeseries
from vis.var_wisdom import get_wisdom_variables,_sat_prods
from clamp2mesh import fill_subgrid
from ingest.NAM218 import NAM218
from ingest.HRRR import HRRR
from ingest.NAM227 import NAM227
from ingest.CFSR import CFSR_P, CFSR_S
from ingest.NARR import NARR
from ingest.GFSA import GFSA
from ingest.GFSF import GFSF_P, GFSF_S
from ingest.MODIS import Terra, Aqua
from ingest.VIIRS import SNPP
from ingest.GOES import GOES16, GOES17
from fmda.fuel_moisture_da import assimilate_fm10_observations
from ssh_shuttle import send_product_to_server, ssh_command
import f90nml
from datetime import datetime, timedelta
import time, re, json, sys, logging
import os.path as osp
import os
import stat
from multiprocessing import Process, Queue
import glob
import netCDF4 as nc4
import shutil
import numpy as np
import smtplib
from email.mime.text import MIMEText
import hashlib
import traceback
import pprint
from cleanup import parallel_job_running, delete_visualization, local_rmdir
import six
from six.moves import range
class JobState(Dict):
"""
A coherent structure that holds information about the job.
"""
def __init__(self, args):
"""
Initialize the job state from the arguments dictionary.
:param args: the forecast job arguments
"""
super(JobState, self).__init__(args)
self.grib_source = self.resolve_grib_source(self.get('grib_source',None),args)
self.satellite_source_list = args.get('satellite_source',[])
self.satellite_source = self.resolve_satellite_source(args)
logging.info('Simulation requested from %s to %s' % (str(self.start_utc), str(self.end_utc)))
self.start_utc = round_time_to_hour(self.start_utc, up=False, period_hours=self.grib_source[0].period_hours)
self.end_utc = round_time_to_hour(self.end_utc, up=True, period_hours=self.grib_source[0].period_hours)
self.cycle_start_utc = round_time_to_hour(self.get('cycle_start_utc',None), period_hours=self.grib_source[0].cycle_hours)
logging.info('Simulation times rounded %s to %s' % (str(self.start_utc), str(self.end_utc)))
self.fc_hrs = timedelta_hours(self.end_utc - self.start_utc)
if 'job_id' in args:
logging.info('job_id %s given in the job description' % args['job_id'])
self.job_id = args['job_id']
else:
logging.warning('job_id not given, creating.')
self.job_id = 'wfc-' + self.grid_code + '-' + utc_to_esmf(self.start_utc) + '-{0:02d}'.format(int(self.fc_hrs))
if 'restart' in args:
logging.info('restart %s given in the job description' % args['restart'])
self.restart = args['restart']
else:
self.restart = False
logging.info('restart not in arguments, default restart option %s' % self.restart)
self.emails = self.parse_emails(args)
self.domains = args['domains']
self.ignitions = args.get('ignitions', None)
self.fmda = args.get('fuel_moisture_da', None)
self.postproc = args['postproc']
self.wrfxpy_dir = args['sys_install_path']
self.clean_dir = args.get('clean_dir', True)
self.run_wrf = args.get('run_wrf', True)
self.args = args
logging.debug('JobState initialized: ' + str(self))
def resolve_grib_source(self, gs_name, js):
"""
Creates the right GribSource object from the name.
:param gs_name: the name of the grib source
:param js: configuration json
"""
if gs_name == 'HRRR':
return [HRRR(js)]
elif gs_name == 'NAM' or gs_name == 'NAM218' :
return [NAM218(js)]
elif gs_name == 'NAM227':
return [NAM227(js)]
elif gs_name == 'NARR':
return [NARR(js)]
elif gs_name == 'CFSR':
return [CFSR_P(js),CFSR_S(js)]
elif gs_name == 'GFSA':
return [GFSA(js)]
elif gs_name == 'GFSF':
return [GFSF_P(js),GFSF_S(js)]
else:
sat_only = js.get('sat_only',False)
if not sat_only:
raise ValueError('Unrecognized grib_source %s' % gs_name)
else:
return [Dict({'period_hours': 1, 'cycle_hours': 1})]
def resolve_satellite_source(self, js):
"""
Creates all the satellite source objects from the list of names in the configuration.
:param js: configuration json
"""
sat_list = self.parse_satellite_source(js)
sat = []
if 'Terra' in sat_list:
terra=Terra(js)
sat.append(terra)
if 'Aqua' in sat_list:
aqua=Aqua(js)
sat.append(aqua)
if 'SNPP' in sat_list:
snpp=SNPP(js)
sat.append(snpp)
if 'G16' in sat_list:
g16=GOES16(js)
sat.append(g16)
if 'G17' in sat_list:
g17=GOES17(js)
sat.append(g17)
return sat
def parse_satellite_source(self, args):
"""
Parse information inside the satellite source, if any.
:param args: the forecast job argument dictionary
"""
if 'satellite_source' in args:
sats = args['satellite_source']
return sats
else:
return []
def parse_emails(self, args):
"""
Parse the definition of e-mail notifications
:param args: the forecast job argument dictionary
"""
if 'email_notifications' in args:
emails = args['email_notifications']
self.emails = Dict({'to' : emails['to'], 'events' : emails['events'],
'server' : emails.get('smtp_server', 'localhost'),
'origin' : emails.get('from', 'wrfxpy@gross.ucdenver.edu')})
else:
self.emails = None
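# For reference, a hedged example of the 'email_notifications' job argument that
# parse_emails() expects (addresses are placeholders; 'start', 'ungrib' and 'wrf_submit'
# are among the event names fired elsewhere in this file):
#
#   "email_notifications": {
#       "to": "user@example.com",
#       "events": ["start", "ungrib", "wrf_submit"],
#       "smtp_server": "localhost",
#       "from": "wrfxpy@example.com"
#   }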
def send_email(js, event, body):
"""
Sends an e-mail with body <body> according to the e-mail parameters (constructed in execute) if the stated <event>
is contained in the appropriate array.
:param js: the JobState structure containing configuration info
:param event: name of the event firing the e-mail, the e-mail will not be sent unless <event> appears in the events array
:param body: the body that will be placed into the e-mail
"""
if js.emails is not None:
if event in js.emails.events:
mail_serv = smtplib.SMTP(js.emails.server)
msg = MIMEText(body)
msg['Subject'] = 'Job %s event %s notification' % (js.job_id, event)
msg['From'] = js.emails.origin
msg['To'] = js.emails.to
mail_serv.sendmail(js.emails.origin, [js.emails.to], msg.as_string())
mail_serv.quit()
def retrieve_satellite(js, sat_source, q):
"""
This function retrieves required Satellite files.
It puts either 'SUCCESS' or 'FAILURE' into the queue on completion.
:param js: the JobState object containing the forecast configuration
:param sat_source: the SatSource object
:param q: the multiprocessing Queue into which we will send either 'SUCCESS' or 'FAILURE'
"""
try:
logging.info("retrieving satellite files from %s" % sat_source.id)
# retrieve satellite granules intersecting the last domain
manifest = sat_source.retrieve_data_sat(js.bounds[str(js.max_dom)], js.start_utc, js.end_utc)
# write a json file with satellite information
sat_file = sat_source.id+'.json'
json.dump(manifest, open(osp.join(js.jobdir,sat_file),'w'), indent=4, separators=(',', ': '))
send_email(js, 'satellite', 'Job %s - satellite retrieving complete.' % js.job_id)
logging.info('satellite retrieval complete for %s' % sat_source.id)
q.put('SUCCESS')
except Exception as e:
logging.error('satellite retrieving step failed with exception %s' % repr(e))
traceback.print_exc()
q.put('FAILURE')
def create_sat_manifest(js):
sat_manifest = Dict({})
sat_manifest.granules = json_join(js.jobdir, js.satellite_source_list)
sat_manifest.bounds = js.bounds
sat_manifest.time_interval = (utc_to_esmf(js.start_utc), utc_to_esmf(js.end_utc))
sat_manifest.dt = Dict({})
sat_manifest.sat_interval = Dict({})
for k in js.domains.keys():
sat_manifest.dt[k] = js.domains[k]['history_interval']
if 'sat_interval' in list(js.domains[k].keys()):
sat_manifest.sat_interval[k] = js.domains[k]['sat_interval']
else:
sat_manifest.sat_interval[k] = (js.domains[k]['history_interval'], js.domains[k]['history_interval'])
sat_manifest.satprod_satsource = js.satprod_satsource
satfile = osp.join(js.jobdir, 'sat.json')
if js.restart and osp.exists(satfile):
try:
hist_sats = osp.join(js.jobdir, 'sats')
jsat = Dict(json.load(open(satfile,'r')))
hist_jsat = osp.join(hist_sats, 'sat_{}_{}.json'.format(*jsat.time_interval))
ensure_dir(hist_jsat)
json.dump(jsat, open(hist_jsat, 'w'), indent=4, separators=(',', ': '))
except:
logging.warning('not able to recover previous satellite file')
json.dump(sat_manifest, open(osp.join(js.jobdir, 'sat.json'),'w'), indent=4, separators=(',', ': '))
return sat_manifest
def retrieve_gribs_and_run_ungrib(js, grib_source, q):
"""
This function retrieves required GRIB files and runs ungrib.
It puts either 'SUCCESS' or 'FAILURE' into the queue on completion.
:param js: the JobState object containing the forecast configuration
:param grib_source: the GribSource object containing ungrib configuration
:param q: the multiprocessing Queue into which we will send either 'SUCCESS' or 'FAILURE'
"""
wps_dir = osp.abspath(js.wps_dir)
grib_dir = osp.join(wps_dir,grib_source.id)
make_clean_dir(grib_dir)
wps_nml = js.wps_nml
try:
logging.info("retrieving GRIB files from %s" % grib_source.id)
download_whole_cycle = js.get('download_whole_cycle',False)
manifest = grib_source.retrieve_gribs(js.start_utc, js.end_utc, js.ref_utc, js.cycle_start_utc, download_whole_cycle)
logging.info('manifest: ' + str(manifest))
grib_file = grib_source.id+'.json'
json.dump(manifest, open(osp.join(js.jobdir,grib_file),'w'), indent=4, separators=(',', ': '), default=serial_json)
cache_colmet = len(manifest) > 1
have_all_colmet = False
if cache_colmet:
have_all_colmet = len(manifest.colmet_missing) == 0
colmet_dir = osp.join(grib_source.cache_dir, manifest.colmet_prefix)
logging.info('cache colmet %s, have all colmet %s' % (cache_colmet, have_all_colmet))
if not have_all_colmet:
# this branch also covers the case where we do not cache colmet files
grib_source.symlink_gribs(manifest.grib_files, grib_dir)
send_email(js, 'grib2', 'Job %s - %d GRIB2 files downloaded.' % (js.job_id, len(manifest)))
logging.info("running UNGRIB for %s" % grib_source.id)
logging.info("step 4: patch namelist for ungrib end execute ungrib on %s files" % grib_source.id)
update_namelist(wps_nml, grib_source.namelist_wps_keys())
if cache_colmet:
wps_nml['share']['start_date'] = [utc_to_esmf(manifest.colmet_files_utc[0])] * js.num_doms
wps_nml['share']['end_date'] = [utc_to_esmf(manifest.colmet_files_utc[-1])] * js.num_doms
# logging.info("namelist.wps for UNGRIB: %s" % json.dumps(wps_nml, indent=4, separators=(',', ': ')))
f90nml.write(wps_nml, osp.join(grib_dir, 'namelist.wps'), force=True)
grib_source.clone_vtables(grib_dir)
symlink_unless_exists(osp.join(wps_dir,'ungrib.exe'),osp.join(grib_dir,'ungrib.exe'))
print((grib_dir + ':'))
os.system('ls -l %s' % grib_dir)
Ungrib(grib_dir).execute().check_output()
print((grib_dir + ':'))
os.system('ls -l %s' % grib_dir)
if cache_colmet:
# move output to cache directory
make_dir(colmet_dir)
for f in manifest.colmet_files:
move(osp.join(grib_dir,f),osp.join(colmet_dir,f))
# now all colmet files should be in the cache
if cache_colmet:
for f in manifest.colmet_files:
symlink_unless_exists(osp.join(colmet_dir,f),osp.join(wps_dir,f))
else:
# move output
for f in glob.glob(osp.join(grib_dir,grib_source.prefix() + '*')):
move(f,wps_dir)
send_email(js, 'ungrib', 'Job %s - ungrib complete.' % js.job_id)
logging.info('UNGRIB complete for %s' % grib_source.id)
q.put('SUCCESS')
except Exception as e:
logging.error('GRIB2/UNGRIB step failed with exception %s' % repr(e))
traceback.print_exc()
q.put('FAILURE')
def run_geogrid(js, q):
"""
This function runs geogrid or links in precomputed grid files as required.
:param js: the JobState object containing the forecast configuration
:param q: the multiprocessing Queue into which we will send either 'SUCCESS' or 'FAILURE'
"""
try:
js.geo_cache = None
logging.info("running GEOGRID")
vars_add_to_geogrid(js)
Geogrid(js.wps_dir).execute().check_output()
logging.info('GEOGRID complete')
send_email(js, 'geogrid', 'GEOGRID complete.')
q.put('SUCCESS')
except Exception as e:
logging.error('GEOGRID step failed with exception %s' % repr(e))
q.put('FAILURE')
def find_wrfout(path, dom_id, esmf_time):
"""
Find wrfout for postprocessing.
:param path: the wrf path directory
:param dom_id: the domain for which we search wrfouts
:esmf_time: time string to match variable Times
:return: the path to the fresh (latest) wrfout
"""
logging.info('find_wrfout: looking for the first wrfout for domain %s time %s' % (dom_id,esmf_time))
wrfouts = sorted(glob.glob(osp.join(path, 'wrfout_d%02d*' % dom_id)),reverse=True) # reverse order
for wrfout in wrfouts:
wrfout_time = re.match(r'.*wrfout_d.._([0-9_\-:]{19})' ,wrfout).groups()[0]
if esmf_time >= wrfout_time:
logging.info('find_wrfout: found %s' % wrfout)
return wrfout
logging.warning('wrfout for time %s domain %s not found' % (esmf_time, dom_id))
logging.warning('Available wrfouts are: %s' % wrfouts)
return None
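# Illustrative example: with dom_id=2 and esmf_time='2020-06-01_12:00:00' (placeholder
# values), find_wrfout scans wrfout_d02_* files in reverse-sorted order and returns the
# newest wrfout whose timestamp is not later than the requested ESMF time, or None if
# no such file exists yet.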
def make_job_file(js):
"""
Create minimal dictionary for the job state
:param js: job state from JobState(args)
:return: the dictionary
"""
jsub=Dict({})
jsub.job_id = js.job_id
jsub.pid = os.getpid()
jsub.process_create_time = process_create_time(jsub.pid)
jsub.job_num = None
jsub.old_job_num = None
jsub.state = 'Preparing'
jsub.qsys = js.qsys
jsub.postproc = js.postproc
jsub.grid_code = js.grid_code
jsub.jobfile = osp.abspath(osp.join(js.workspace_path, js.job_id,'job.json'))
jsub.num_doms = js.num_doms
jsub.restart = js.restart
if 'tslist' in js.keys():
jsub.tslist = js.tslist
else:
jsub.tslist = None
return jsub
def make_kmz(args):
ssh_command('wrfxweb/make_kmz.sh ' + args)
def make_zip(args):
ssh_command('wrfxweb/make_zip.sh ' + args)
def read_namelist(path):
logging.info('Reading namelist %s' % path)
return f90nml.read(path)
def ensure_abs_path(path,js,max_char=20):
if len(path) > max_char:
hexhash = hashlib.sha224(js.job_id.encode()).hexdigest()[:6]
geo_path = osp.join(js.wrfxpy_dir,'cache/geo_data.{}'.format(hexhash))
js.geo_cache = geo_path
make_dir(geo_path)
new_path = osp.join(geo_path,osp.basename(path))
symlink_unless_exists(path,new_path)
return new_path
else:
return path
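# Illustrative note: when a GeoTIFF path exceeds max_char characters, ensure_abs_path()
# symlinks it under <wrfxpy_dir>/cache/geo_data.<6-hex-digit hash of the job id> and
# returns that shorter path (presumably to keep the abs_path entries written into
# GEOGRID.TBL short); otherwise the original path is returned unchanged.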
def vars_add_to_geogrid(js):
"""
Add variables datasets to geogrid if specified
"""
# add fmda datasets to geogrid if specified
fmda_add_to_geogrid(js)
# load the variables to process
geo_vars_path = 'etc/vtables/geo_vars.json'
geo_vars = None
try:
geo_vars = Dict(json.load(open(geo_vars_path)))
except:
logging.info('No {0} specified, defining default GeoTIFF files for NFUEL_CAT and ZSF from {1}.'.format(geo_vars_path,js.args['wps_geog_path']))
nfuel_path = osp.join(js.args['wps_geog_path'],'fuel_cat_fire','lf_data.tif')
topo_path = osp.join(js.args['wps_geog_path'],'topo_fire','ned_data.tif')
if osp.exists(nfuel_path) and osp.exists(topo_path):
geo_vars = Dict({'NFUEL_CAT': nfuel_path, 'ZSF': topo_path})
else:
logging.critical('No NFUEL_CAT and/or ZSF GeoTIFF path specified')
raise Exception('Failed to find GeoTIFF files, generate file {} with paths to your data'.format(geo_vars_path))
geo_data_path = osp.join(js.wps_dir, 'geo_data')
for var,tif_file in six.iteritems(geo_vars):
bbox = js.bounds[str(js.max_dom)]
logging.info('vars_add_to_geogrid - processing variable {0} from file {1} and bounding box {2}'.format(var,tif_file,bbox))
try:
GeoDriver.from_file(tif_file).to_geogrid(geo_data_path,var,bbox)
except Exception as e:
if var in ['NFUEL_CAT', 'ZSF']:
logging.critical('vars_add_to_geogrid - cannot process variable {}'.format(var))
logging.error('Exception: %s',e)
raise Exception('Failed to process GeoTIFF file for variable {}'.format(var))
else:
logging.warning('vars_add_to_geogrid - cannot process variable {}, will not be included'.format(var))
logging.warning('Exception: %s',e)
# update geogrid table
geogrid_tbl_path = osp.join(js.wps_dir, 'geogrid/GEOGRID.TBL')
link2copy(geogrid_tbl_path)
geogrid_tbl_json_path = osp.join(geo_data_path, 'geogrid_tbl.json')
logging.info('vars_add_to_geogrid - updating GEOGRID.TBL at {0} from {1}'.format(geogrid_tbl_path,geogrid_tbl_json_path))
geogrid_tbl_json = json.load(open(geogrid_tbl_json_path,'r'))
for varname,vartable in six.iteritems(geogrid_tbl_json):
logging.info('vars_add_to_geogrid - writing table for variable {}'.format(varname))
vartable['abs_path'] = 'default:'+ensure_abs_path(vartable['abs_path'],js)
logging.info('GEOGRID abs_path=%s' % vartable['abs_path'])
write_table(geogrid_tbl_path,vartable,mode='a',divider_after=True)
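# For reference, a hedged sketch of the expected etc/vtables/geo_vars.json content (paths
# are placeholders): a mapping from geogrid variable name to a GeoTIFF file, mirroring the
# default built above, e.g.
#   {"NFUEL_CAT": "/data/WPS_GEOG/fuel_cat_fire/lf_data.tif",
#    "ZSF": "/data/WPS_GEOG/topo_fire/ned_data.tif"}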
def fmda_add_to_geogrid(js):
"""
Add fmda datasets to geogrid if specified
"""
if 'fmda_geogrid_path' in js:
fmda_geogrid_path = osp.abspath(js['fmda_geogrid_path'])
logging.info('fmda_geogrid_path is %s' % fmda_geogrid_path)
fmda_geogrid_basename = osp.basename(fmda_geogrid_path)
sym_fmda_geogrid_path = osp.join(js.wps_dir,fmda_geogrid_basename)
symlink_unless_exists(fmda_geogrid_path,sym_fmda_geogrid_path)
logging.info('fmda_geogrid_path is linked to %s' % sym_fmda_geogrid_path)
else:
    logging.info('fmda_geogrid_path not given')
    return
try:
index_path = osp.join(fmda_geogrid_path,'index.json')
index = json.load(open(index_path,'r'))
logging.info('Loaded fmda geogrid index at %s' % index_path)
except:
logging.error('Cannot open %s' % index_path)
raise Exception('Failed opening index file {}'.format(index_path))
geo_path = osp.dirname(osp.dirname(fmda_geogrid_path))+'-geo.nc'
logging.info('fmda_add_to_geogrid reading longitudes and latitudes from NetCDF file %s' % geo_path )
with nc4.Dataset(geo_path,'r') as d:
lats = d.variables['XLAT'][:,:]
lons = d.variables['XLONG'][:,:]
ndomains = len(js['domains'])
lat,lon = js['domains'][str(ndomains)]['center_latlon'] if 'center_latlon' in js['domains'][str(ndomains)] else js['domains']['1']['center_latlon']
bbox = (np.min(lats), np.min(lons), np.max(lats), np.max(lons))
logging.info('fmda_add_to_geogrid: fmda bounding box is %s %s %s %s' % bbox)
i, j = np.unravel_index((np.abs(lats-lat)+np.abs(lons-lon)).argmin(),lats.shape)
if i<=1 or j<=1 or i >= lats.shape[0]-2 or j >= lats.shape[1]-2:
logging.error('fmda_add_to_geogrid: WRF domain center %s %s at %i %i is outside or near FMDA boundary' % (lat,lon,i,j) )
raise OSError('{} is not correctly geolocated compared to the WRF domain'.format(fmda_geogrid_path))
# update geogrid table
geogrid_tbl_path = osp.join(js.wps_dir, 'geogrid/GEOGRID.TBL')
link2copy(geogrid_tbl_path)
geogrid_tbl_json_path = osp.join(fmda_geogrid_path,'geogrid_tbl.json')
logging.info('fmda_add_to_geogrid: updating GEOGRID.TBL at %s from %s' %
(geogrid_tbl_path,geogrid_tbl_json_path))
geogrid_tbl_json = json.load(open(geogrid_tbl_json_path,'r'))
for varname,vartable in six.iteritems(geogrid_tbl_json):
vartable['abs_path'] = osp.join(js.wps_dir,fmda_geogrid_basename,osp.basename(vartable['abs_path']))
vartable['abs_path'] = 'default:'+ensure_abs_path(vartable['abs_path'],js)
logging.info('GEOGRID abs_path=%s' % vartable['abs_path'])
write_table(geogrid_tbl_path,vartable,mode='a',divider_after=True)
def execute(args,job_args):
"""
Executes a weather/fire simulation.
:param args: a dictionary with all of the following keys needed to start the simulation
:param job_args: the original json given to the forecast
Keys in args:
:param grid_code: the (unique) code of the grid that is used
:param sys_install_path: system installation directory
:param start_utc: start time of simulation in UTC
:param end_utc: end time of simulation in UTC
:param workspace_path: workspace directory
:param wps_install_path: installation directory of WPS that will be used
:param wrf_install_path: installation directory of WRF that will be used
:param grib_source: a string identifying a valid GRIB2 source
:param wps_namelist_path: the path to the namelist.wps file that will be used as template
:param wrf_namelist_path: the path to the namelist.input file that will be used as template
:param fire_namelist_path: the path to the namelist.fire file that will be used as template
:param wps_geog_path: the path to the geogrid data directory providing terrain/fuel data
:param email_notifications: dictionary containing keys 'to' and 'events' indicating where and when a mail should be fired off
"""
logging.info('step 0 initialize the job state from the arguments')
js = JobState(args)
jobdir = osp.abspath(osp.join(js.workspace_path, js.job_id))
js.jobdir = jobdir
if (js.clean_dir and not js.restart) or not osp.exists(osp.join(js.jobdir,'input.json')):
make_clean_dir(js.jobdir)
js.num_doms = len(js.domains)
json.dump(job_args, open(osp.join(js.jobdir,'input.json'),'w'), indent=4, separators=(',', ': '))
jsub = make_job_file(js)
json.dump(jsub, open(jsub.jobfile,'w'), indent=4, separators=(',', ': '))
logging.info("job %s starting [%d hours to forecast]." % (js.job_id, js.fc_hrs))
sys.stdout.flush()
send_email(js, 'start', 'Job %s started.' % js.job_id)
# Parse and setup the domain configuration
js.domain_conf = WPSDomainConf(js.domains)
js.num_doms = len(js.domain_conf)
logging.info("number of domains defined is %d." % js.num_doms)
js.bounds = Dict({})
for k,domain in enumerate(js.domain_conf.domains):
bbox = domain.bounding_box()
lons = [b[1] for b in bbox]
lats = [b[0] for b in bbox]
bounds = (min(lons),max(lons),min(lats),max(lats))
js.bounds[str(k+1)] = bounds
logging.info('satellite sources %s' % [s.id for s in js.satellite_source])
if js.sat_only:
if js.satellite_source:
logging.info('sat_only set, skipping everything else')
# retrieving satellite data by source in parallel
proc_q = Queue()
sat_proc = {}
for satellite_source in js.satellite_source:
sat_proc[satellite_source.id] = Process(target=retrieve_satellite, args=(js, satellite_source, proc_q))
for satellite_source in js.satellite_source:
sat_proc[satellite_source.id].start()
for satellite_source in js.satellite_source:
sat_proc[satellite_source.id].join()
proc_q.close()
# create satellite manifest
sat_manifest = create_sat_manifest(js)
# create satellite outputs
process_sat_output(js.job_id)
return
else:
logging.error('no available satellite source specified')
return
else:
# read in all namelists
js.wps_nml = read_namelist(js.args['wps_namelist_path'])
js.wrf_nml = read_namelist(js.args['wrf_namelist_path'])
js.fire_nml = read_namelist(js.args['fire_namelist_path'])
js.ems_nml = None
if 'emissions_namelist_path' in js.args:
js.ems_nml = read_namelist(js.args['emissions_namelist_path'])
js.wps_nml['share']['interval_seconds'] = js.grib_source[0].interval_seconds
# build directories in workspace
js.wps_dir = osp.abspath(osp.join(js.jobdir, 'wps'))
js.wrf_dir = osp.abspath(osp.join(js.jobdir, 'wrf'))
#check_obj(args,'args')
#check_obj(js,'Initial job state')
logging.info("step 1: clone WPS and WRF directories")
logging.info("cloning WPS into %s" % js.wps_dir)
cln = WRFCloner(js.args)
cln.clone_wps(js.wps_dir, [])
js.grib_source[0].clone_vtables(js.wps_dir)
logging.info("step 2: process domain information and patch namelist for geogrid")
js.wps_nml['share']['start_date'] = [utc_to_esmf(js.start_utc)] * js.num_doms
js.wps_nml['share']['end_date'] = [utc_to_esmf(js.end_utc)] * js.num_doms
js.wps_nml['geogrid']['geog_data_path'] = js.args['wps_geog_path']
js.domain_conf.prepare_for_geogrid(js.wps_nml, js.wrf_nml, js.wrfxpy_dir, js.wps_dir)
f90nml.write(js.wps_nml, osp.join(js.wps_dir, 'namelist.wps'), force=True)
# do steps 2 & 3 & 4 in parallel (three execution streams)
# -> Satellite retrieval ->
# -> GEOGRID ->
# -> GRIB2 download -> UNGRIB ->
proc_q = Queue()
if js.satellite_source:
sat_proc = {}
for satellite_source in js.satellite_source:
sat_proc[satellite_source.id] = Process(target=retrieve_satellite, args=(js, satellite_source, proc_q))
geogrid_proc = Process(target=run_geogrid, args=(js, proc_q))
grib_proc = {}
for grib_source in js.grib_source:
grib_proc[grib_source.id] = Process(target=retrieve_gribs_and_run_ungrib, args=(js, grib_source, proc_q))
logging.info('starting GEOGRID and GRIB2/UNGRIB')
if js.ungrib_only:
logging.info('ungrib_only set, skipping GEOGRID and SATELLITE, will exit after UNGRIB')
else:
geogrid_proc.start()
for satellite_source in js.satellite_source:
sat_proc[satellite_source.id].start()
for grib_source in js.grib_source:
grib_proc[grib_source.id].start()
# wait until all tasks are done
logging.info('waiting until all tasks are done')
for grib_source in js.grib_source:
grib_proc[grib_source.id].join()
if js.ungrib_only:
for grib_source in js.grib_source:
if proc_q.get() != 'SUCCESS':
return
return
else:
geogrid_proc.join()
for satellite_source in js.satellite_source:
sat_proc[satellite_source.id].join()
if js.satellite_source:
for satellite_source in js.satellite_source:
if proc_q.get() != 'SUCCESS':
return
for grib_source in js.grib_source:
if proc_q.get() != 'SUCCESS':
return
if proc_q.get() != 'SUCCESS':
return
proc_q.close()
logging.info('execute: finished parallel GEOGRID, GRIB2/UNGRIB, and Satellite')
if js.satellite_source:
# create satellite manifest
sat_manifest = create_sat_manifest(js)
logging.info("step 5: execute metgrid after ensuring all grids will be processed")
update_namelist(js.wps_nml, js.grib_source[0].namelist_wps_keys())
js.domain_conf.prepare_for_metgrid(js.wps_nml)
logging.info("namelist.wps for METGRID: %s" % json.dumps(js.wps_nml, indent=4, separators=(',', ': ')))
f90nml.write(js.wps_nml, osp.join(js.wps_dir, 'namelist.wps'), force=True)
logging.info("running METGRID")
Metgrid(js.wps_dir).execute().check_output()
send_email(js, 'metgrid', 'Job %s - metgrid complete.' % js.job_id)
logging.info("METGRID complete")
logging.info("cloning WRF into %s" % js.wrf_dir)
logging.info("step 6: clone wrf directory, symlink all met_em* files, make namelists")
cln.clone_wrf(js.wrf_dir, [])
symlink_matching_files(js.wrf_dir, js.wps_dir, "met_em*")
time_ctrl = update_time_control(js.start_utc, js.end_utc, js.num_doms)
js.wrf_nml['time_control'].update(time_ctrl)
js.wrf_nml['time_control']['interval_seconds'] = js.grib_source[0].interval_seconds
update_namelist(js.wrf_nml, js.grib_source[0].namelist_keys())
if 'ignitions' in js.args:
update_namelist(js.wrf_nml, render_ignitions(js, js.num_doms))
# if we have an emissions namelist, automatically turn on the tracers
if js.ems_nml is not None:
logging.debug('namelist.fire_emissions given, turning on tracers')
f90nml.write(js.ems_nml, osp.join(js.wrf_dir, 'namelist.fire_emissions'), force=True)
js.wrf_nml['dynamics']['tracer_opt'] = [2] * js.num_doms
f90nml.write(js.wrf_nml, osp.join(js.wrf_dir, 'namelist.input'), force=True)
f90nml.write(js.fire_nml, osp.join(js.wrf_dir, 'namelist.fire'), force=True)
# step 7: execute real.exe
logging.info("running REAL")
# try to run Real twice as it sometimes fails the first time
# it's not clear why this error happens
try:
Real(js.wrf_dir).execute().check_output()
except Exception as e:
logging.error('Real step failed with exception %s, retrying ...' % str(e))
Real(js.wrf_dir).execute().check_output()
# write subgrid coordinates in input files
subgrid_wrfinput_files = ['wrfinput_d{:02d}'.format(int(d)) for d,_ in args.domains.items() if (np.array(_.get('subgrid_ratio', [0, 0])) > 0).all()]
for in_path in subgrid_wrfinput_files:
fill_subgrid(osp.join(js.wrf_dir, in_path))
logging.info('step 7b: if requested, do fuel moisture DA')
logging.info('fmda = %s' % js.fmda)
if js.fmda is not None:
logging.info('running fuel moisture data assimilation')
for dom in js.fmda.domains:
logging.info('assimilate_fm10_observations for domain %s' % dom)
assimilate_fm10_observations(osp.join(js.wrf_dir, 'wrfinput_d%02d' % int(dom)), None, js.fmda.token)
logging.info('step 8: execute wrf.exe on parallel backend')
logging.info('run_wrf = %s' % js.run_wrf)
if js.run_wrf:
# step 8: execute wrf.exe on parallel backend
jobfile = wrf_execute(js.job_id)
# step 9: post-process results from wrf simulation
process_output(js.job_id)
else:
jobfile = jsub.jobfile
return jobfile
def wrf_execute(job_id):
sys_cfg = load_sys_cfg()
jobfile = osp.abspath(osp.join(sys_cfg.workspace_path, job_id,'input.json'))
logging.info('wrf_execute: loading job input from %s' % jobfile)
job_args = json.load(open(jobfile))
args = process_arguments(job_args,sys_cfg)
js = JobState(args)
jsubfile = osp.abspath(osp.join(sys_cfg.workspace_path, job_id,'job.json'))
logging.info('wrf_execute: loading job description from %s' % jsubfile)
try:
jsub = Dict(json.load(open(jsubfile,'r')))
except Exception as e:
logging.error('Cannot load the job description file %s' % jsubfile)
logging.error('%s' % e)
sys.exit(1)
js.jobdir = osp.abspath(osp.join(sys_cfg.workspace_path, job_id))
js.wrf_dir = osp.abspath(osp.join(js.jobdir, 'wrf'))
logging.info('submitting WRF job')
send_email(js, 'wrf_submit', 'Job %s - wrf job submitted.' % job_id)
js.task_id = "sim-" + js.grid_code + "-" + utc_to_esmf(js.start_utc)[:10]
jsub.job_num=WRF(js.wrf_dir, js.qsys).submit(js.task_id, js.num_nodes, js.ppn, js.wall_time_hrs)
send_email(js, 'wrf_exec', 'Job %s - wrf job starting now with id %s.' % (job_id, js.task_id))
logging.info("WRF job %s submitted with id %s, waiting for rsl.error.0000" % (jsub.job_num, js.task_id))
json.dump(jsub, open(jsubfile,'w'), indent=4, separators=(',', ': '))
return jsubfile
def process_output(job_id):
args = load_sys_cfg()
jobfile = osp.abspath(osp.join(args.workspace_path, job_id,'job.json'))
satfile = osp.abspath(osp.join(args.workspace_path, job_id,'sat.json'))
logging.info('process_output: loading job description from %s' % jobfile)
try:
js = Dict(json.load(open(jobfile,'r')))
except Exception as e:
logging.error('Cannot load the job description file %s' % jobfile)
logging.error('%s' % e)
sys.exit(1)
logging.info('process_output: loading satellite description from %s' % satfile)
try:
jsat = Dict(json.load(open(satfile,'r')))
available_sats = [sat.upper()+prod for sat in jsat.granules.keys() for prod in _sat_prods]
not_empty_sats = [sat.upper()+prod for sat in jsat.granules.keys() for prod in _sat_prods if jsat.granules[sat]]
except:
logging.warning('Cannot load the satellite data in satellite description file %s' % satfile)
available_sats = []
not_empty_sats = []
pass
logging.info('process_output: available satellite data %s' % available_sats)
logging.info('process_output: not empty satellite data %s' % not_empty_sats)
js.old_pid = js.pid
js.pid = os.getpid()
js.state = 'Processing'
js.restart = js.get('restart',False)
json.dump(js, open(jobfile,'w'), indent=4, separators=(',', ': '))
js.wrf_dir = osp.abspath(osp.join(args.workspace_path, js.job_id, 'wrf'))
pp = None
if js.postproc is None:
logging.info('No postprocessing specified, exiting.')
return
# set up postprocessing
if js.postproc.get('shuttle', None) != None and not js.restart:
delete_visualization(js.job_id)
js.pp_dir = osp.join(args.workspace_path, js.job_id, "products")
if not js.restart:
already_sent_files = []
make_clean_dir(js.pp_dir)
else:
already_sent_files = [x for x in os.listdir(js.pp_dir) if not (x.endswith('json') or x.endswith('csv') or x.endswith('html'))]
prod_name = 'wfc-' + js.grid_code
pp = Postprocessor(js.pp_dir, prod_name)
if 'tslist' in js.keys() and js.tslist is not None:
ts = Timeseries(js.pp_dir, prod_name, js.tslist, js.num_doms)
else:
ts = None
js.manifest_filename= 'wfc-' + js.grid_code + '.json'
logging.debug('Postprocessor created manifest %s',js.manifest_filename)
tif_proc = js.postproc.get('tif_proc', False)
if js.postproc.get('from', None) == 'wrfout':
logging.info('Postprocessing all wrfout files.')
failures = cases = 0
# postprocess all wrfouts
for wrfout_path in sorted(glob.glob(osp.join(js.wrf_dir,'wrfout_d??_????-??-??_??:??:??'))):
logging.info("Found %s" % wrfout_path)
domain_str,wrfout_esmf_time = re.match(r'.*wrfout_d(0[0-9])_([0-9_\-:]{19})',wrfout_path).groups()
dom_id = int(domain_str)
d = nc4.Dataset(wrfout_path,'r')
# extract ESMF string times
times = [''.join(x) for x in d.variables['Times'][:].astype(str)]
d.close()
for esmf_time in sorted(times):
logging.info("Processing domain %d for time %s." % (dom_id, esmf_time))
if js.postproc is not None and str(dom_id) in js.postproc:
cases += 1
if available_sats:
sat_list = [sat for sat in available_sats if sat in js.postproc[str(dom_id)]]
var_list = [str(x) for x in js.postproc[str(dom_id)] if not str(x) in sat_list]
sat_list = [sat for sat in sat_list if sat in not_empty_sats]
logging.info("Executing postproc instructions for sats %s for domain %d." % (str(sat_list), dom_id))
else:
sat_list = []
var_list = [str(x) for x in js.postproc[str(dom_id)]]
logging.info("Executing postproc instructions for vars %s for domain %d." % (str(var_list), dom_id))
try:
if sat_list:
pp.process_sats(jsat, dom_id, esmf_time, sat_list)
pp.process_vars(osp.join(js.wrf_dir,wrfout_path), dom_id, esmf_time, var_list, tif_proc = tif_proc, tslist = ts)
# in incremental mode, upload to server
if js.postproc.get('shuttle', None) == 'incremental':
desc = js.postproc['description'] if 'description' in js.postproc else js.job_id
tif_files = [x for x in os.listdir(js.pp_dir) if x.endswith('tif')]
sent_files_1 = send_product_to_server(args, js.pp_dir, js.job_id, js.job_id, js.manifest_filename, desc, already_sent_files+tif_files)
already_sent_files = [x for x in already_sent_files + sent_files_1 if not (x.endswith('json') or x.endswith('csv') or x.endswith('html'))]
except Exception as e:
logging.warning('Failed to postprocess for time %s with error %s.' % (esmf_time, str(e)))
failures += 1
if cases != failures:
logging.info('number of postprocessing steps is %d and number of postprocessing failures is %d' % (cases,failures))
# if we are to send out the postprocessed files after completion, this is the time
if js.postproc.get('shuttle', None) == 'on_completion':
desc = js.postproc['description'] if 'description' in js.postproc else js.job_id
tif_files = [x for x in os.listdir(js.pp_dir) if x.endswith('tif')]
send_product_to_server(args, js.pp_dir, js.job_id, js.job_id, js.manifest_filename, desc, tif_files)
else:
logging.error('All postprocessing steps failed')
js.state = 'Postprocessing failed'
json.dump(js, open(jobfile,'w'), indent=4, separators=(',', ': '))
return
# step 9: wait for appearance of rsl.error.0000 and open it
wrf_out = None
rsl_path = osp.join(js.wrf_dir, 'rsl.error.0000')
while wrf_out is None:
try:
wrf_out = open(rsl_path)
break
except IOError:
logging.info('process_output: waiting 5 seconds for rsl.error.0000 file')
time.sleep(5)
logging.info('process_output: Detected rsl.error.0000')
js.run_utc = time.ctime(os.path.getmtime(rsl_path))
js.processed_utc = time.asctime(time.gmtime())
# step 10: track log output and check for history writes from WRF
wait_lines = 0
wait_wrfout = 0
failures = cases = 0
while True:
line = wrf_out.readline().strip()
if not line:
if wait_lines > 10 and not parallel_job_running(js):
logging.warning('WRF did not run to completion.')
break
if not wait_lines:
logging.info('Waiting for more output lines')
wait_lines = wait_lines + 1
time.sleep(5)
continue
wait_lines = 0
if "SUCCESS COMPLETE WRF" in line:
# send_email(js, 'complete', 'Job %s - wrf job complete SUCCESS.' % js.job_id)
logging.info("WRF completion detected.")
js.old_job_num = js.job_num
js.job_num = None
json.dump(js, open(jobfile,'w'), indent=4, separators=(',', ': '))
break
if "Timing for Writing wrfout" in line:
wait_wrfout = 0
esmf_time,domain_str = re.match(r'.*wrfout_d.._([0-9_\-:]{19}) for domain\ +(\d+):' ,line).groups()
dom_id = int(domain_str)
logging.info("Detected history write for domain %d for time %s." % (dom_id, esmf_time))
if js.postproc is not None and str(dom_id) in js.postproc:
cases += 1
if available_sats:
sat_list = [sat for sat in available_sats if sat in js.postproc[str(dom_id)]]
var_list = [str(x) for x in js.postproc[str(dom_id)] if not str(x) in sat_list]
sat_list = [sat for sat in sat_list if sat in not_empty_sats]
logging.info("Executing postproc instructions for sats %s for domain %d." % (str(sat_list), dom_id))
else:
sat_list = []
var_list = [str(x) for x in js.postproc[str(dom_id)]]
logging.info("Executing postproc instructions for vars %s for domain %d." % (str(var_list), dom_id))
wrfout_path = find_wrfout(js.wrf_dir, dom_id, esmf_time)
try:
if sat_list:
pp.process_sats(jsat, dom_id, esmf_time, sat_list)
pp.process_vars(osp.join(js.wrf_dir,wrfout_path), dom_id, esmf_time, var_list, tif_proc = tif_proc, tslist = ts)
except Exception as e:
logging.warning('Failed to postprocess for time %s with error %s.' % (esmf_time, str(e)))
failures += 1
else:
try:
# in incremental mode, upload to server
if js.postproc.get('shuttle', None) == 'incremental':
desc = js.postproc['description'] if 'description' in js.postproc else js.job_id
tif_files = [x for x in os.listdir(js.pp_dir) if x.endswith('tif')]
sent_files_1 = send_product_to_server(args, js.pp_dir, js.job_id, js.job_id, js.manifest_filename, desc, already_sent_files+tif_files)
already_sent_files = [x for x in already_sent_files + sent_files_1 if not (x.endswith('json') or x.endswith('csv') or x.endswith('html'))]
except Exception as e:
logging.warning('Failed sending postprocess results to the server with error %s' % str(e))
else:
if not wait_wrfout:
logging.info('Waiting for wrfout')
time.sleep(5)
wait_wrfout = wait_wrfout + 1
# if we are to send out the postprocessed files after completion, this is the time
if cases != failures:
logging.info('number of postprocessing steps is %d and number of postprocessing failures is %d' % (cases,failures))
if js.postproc.get('shuttle', None) == 'on_completion':
desc = js.postproc['description'] if 'description' in js.postproc else js.job_id
tif_files = [x for x in os.listdir(js.pp_dir) if x.endswith('tif')]
send_product_to_server(args, js.pp_dir, js.job_id, js.job_id, js.manifest_filename, desc, tif_files)
if js.postproc.get('shuttle', None) is not None:
steps = ','.join(['1' for x in js.postproc.keys() if len(x) == 1])
args = ' '.join([js.job_id,steps,'inc'])
make_kmz(args)
args = ' '.join([js.job_id,steps,'ref'])
make_kmz(args)
js.state = 'Completed'
else:
logging.error('All postprocessing steps failed')
js.state = 'Postprocessing failed'
if ts is not None:
make_zip(js.job_id)
js.old_pid = js.pid
js.pid = None
json.dump(js, open(jobfile,'w'), indent=4, separators=(',', ': '))
def create_process_output_script(job_id):
cfg = load_sys_cfg()
script_path = osp.join(cfg.workspace_path, job_id,'job_process_output.sh')
log_path = osp.join(cfg.workspace_path, job_id,'job_process_output.log')
process_script = osp.join(cfg.sys_install_path,'process_output.sh')
with open(script_path,'w') as f:
f.write('#!/usr/bin/env bash\n')
f.write('cd ' + cfg.sys_install_path + '\n')
f.write('LOG=' + log_path + '\n')
f.write(process_script + ' ' + job_id + ' &> $LOG \n')
# make it executable
st = os.stat(script_path)
os.chmod(script_path, st.st_mode | stat.S_IEXEC)
def process_sat_output(job_id):
args = load_sys_cfg()
jobfile = osp.abspath(osp.join(args.workspace_path, job_id,'job.json'))
satfile = osp.abspath(osp.join(args.workspace_path, job_id,'sat.json'))
logging.info('process_sat_output: loading job description from %s' % jobfile)
try:
js = Dict(json.load(open(jobfile,'r')))
except Exception as e:
logging.error('Cannot load the job description file %s' % jobfile)
logging.error('%s' % e)
sys.exit(1)
logging.info('process_sat_output: loading satellite description from %s' % satfile)
try:
jsat = Dict(json.load(open(satfile,'r')))
available_sats = [sat.upper()+prod for sat in jsat.granules.keys() for prod in _sat_prods]
not_empty_sats = [sat.upper()+prod for sat in jsat.granules.keys() for prod in _sat_prods if jsat.granules[sat]]
except:
logging.warning('Cannot load the satellite data in satellite description file %s' % satfile)
available_sats = []
not_empty_sats = []
return
logging.info('process_sat_output: available satellite data %s' % available_sats)
logging.info('process_sat_output: not empty satellite data %s' % not_empty_sats)
if not not_empty_sats:
logging.warning('Do not have satellite data to postprocess')
return
js.old_pid = js.pid
js.pid = os.getpid()
js.state = 'Processing'
js.restart = js.get('restart',False)
json.dump(js, open(jobfile,'w'), indent=4, separators=(',', ': '))
pp = None
if js.postproc.get('shuttle', None) != None and not js.restart:
delete_visualization(js.job_id)
js.pp_dir = osp.join(args.workspace_path, js.job_id, "products")
if not js.restart:
already_sent_files = []
make_clean_dir(js.pp_dir)
else:
already_sent_files = [x for x in os.listdir(js.pp_dir) if not x.endswith('json')]
pp = Postprocessor(js.pp_dir, 'wfc-' + js.grid_code)
js.manifest_filename= 'wfc-' + js.grid_code + '.json'
logging.debug('Postprocessor created manifest %s',js.manifest_filename)
domains = sorted([int(x) for x in [x for x in js.postproc if len(x) == 1]])
for dom_id in domains:
logging.info('Processing domain %s' % str(dom_id))
dt = timedelta(minutes=jsat.dt[str(dom_id)])
logging.info('dt for satellite postprocessing = %s' % dt)
t_int = esmf_to_utc(jsat['time_interval'][0])
t_fin = esmf_to_utc(jsat['time_interval'][1])
ndts = number_minutes(t_int,t_fin,jsat.dt[str(dom_id)])
times = [t_int + tt*dt for tt in range(ndts)]
sat_list = [sat for sat in available_sats if sat in js.postproc[str(dom_id)]]
if sat_list:
for time in times:
try:
esmf_time = utc_to_esmf(time)
                    logging.info('Postprocessing satellite data for time %s' % esmf_time)
pp.process_sats(jsat, dom_id, esmf_time, sat_list)
except Exception as e:
logging.warning('Failed to postprocess for time %s with error %s.' % (esmf_time, str(e)))
else:
try:
if js.postproc.get('shuttle', None) == 'incremental':
desc = js.postproc['description'] if 'description' in js.postproc else js.job_id
tif_files = [x for x in os.listdir(js.pp_dir) if x.endswith('tif')]
sent_files_1 = send_product_to_server(args, js.pp_dir, js.job_id, js.job_id, js.manifest_filename, desc, already_sent_files+tif_files)
already_sent_files = [x for x in already_sent_files + sent_files_1 if not x.endswith('json')]
except Exception as e:
                        logging.warning('Failed sending postprocess results to the server with error %s' % str(e))
# if we are to send out the postprocessed files after completion, this is the time
if js.postproc.get('shuttle', None) == 'on_completion':
desc = js.postproc['description'] if 'description' in js.postproc else js.job_id
tif_files = [x for x in os.listdir(js.pp_dir) if x.endswith('tif')]
send_product_to_server(args, js.pp_dir, js.job_id, js.job_id, js.manifest_filename, desc, tif_files)
if js.postproc.get('shuttle', None) is not None:
steps = ','.join(['1' for x in js.postproc.keys() if len(x) == 1])
args = ' '.join([js.job_id,steps,'inc'])
make_kmz(args)
args = ' '.join([js.job_id,steps,'ref'])
make_kmz(args)
js.old_pid = js.pid
js.pid = None
js.state = 'Completed'
json.dump(js, open(jobfile,'w'), indent=4, separators=(',', ': '))
def verify_inputs(args,sys_cfg):
"""
Check if arguments (eventually) supplied to execute(...) are valid - if not exception is thrown.
Arguments:
args -- dictionary of arguments
"""
# dump(sys_cfg,'sys_cfg')
# dump(args,'args')
for key in sys_cfg:
if key in args:
if sys_cfg[key] != args[key]:
                logging.error('system configuration %s=%s attempted change to %s'
                     % (key, sys_cfg[key], args[key]))
raise ValueError('System configuration values may not be overwritten.')
# we don't check if job_id is a valid path
if 'sat_only' in args and args['sat_only']:
required_files = [('sys_install_path', 'Non-existent system installation directory %s')]
optional_files = []
elif 'ungrib_only' in args and args['ungrib_only']:
required_files = [('sys_install_path', 'Non-existent system installation directory %s'),
('workspace_path', 'Non-existent workspace directory %s'),
('wps_install_path', 'Non-existent WPS installation directory %s'),
('wps_namelist_path', 'Non-existent WPS namelist template %s')]
optional_files = []
else:
required_files = [('sys_install_path', 'Non-existent system installation directory %s'),
('workspace_path', 'Non-existent workspace directory %s'),
('wps_install_path', 'Non-existent WPS installation directory %s'),
('wrf_install_path', 'Non-existent WRF installation directory %s'),
('wps_namelist_path', 'Non-existent WPS namelist template %s'),
('wrf_namelist_path', 'Non-existent WRF namelist template %s'),
('fire_namelist_path', 'Non-existent fire namelist template %s'),
('wps_geog_path', 'Non-existent geogrid data (WPS-GEOG) path %s')]
optional_files = [('emissions_namelist_path', 'Non-existent namelist template %s')]
# check each path that should exist
for key, err in required_files:
if not osp.exists(args[key]):
raise OSError(err % args[key])
# check each path that should exist
for key, err in optional_files:
if key in args:
if not osp.exists(args[key]):
raise OSError(err % args[key])
# check for valid grib source
if 'grib_source' in args:
if args['grib_source'] not in ['HRRR', 'NAM','NAM218', 'NAM227', 'NARR','CFSR','GFSA','GFSF']:
            raise ValueError('Invalid grib source %s, must be one of HRRR, NAM, NAM218, NAM227, NARR, CFSR, GFSA, GFSF' % args['grib_source'])
# check for valid satellite source
if 'satellite_source' in args:
for sat in args['satellite_source']:
if sat not in ['Terra','Aqua','SNPP','G16','G17']:
raise ValueError('Invalid satellite source %s, must be one of Terra, Aqua, SNPP, G16, G17' % sat)
# if precomputed key is present, check files linked in
if 'precomputed' in args:
for key,path in six.iteritems(args['precomputed']):
if not osp.exists(path):
raise OSError('Precomputed entry %s points to non-existent file %s' % (key,path))
# check if the postprocessor knows how to handle all variables
wvs = get_wisdom_variables()
failing = False
if 'postproc' in args:
for dom in [x for x in list(args['postproc'].keys()) if len(x) == 1]:
for vname in args['postproc'][dom]:
if vname not in wvs:
logging.error('unrecognized variable %s in postproc key for domain %s.' % (vname, dom))
failing = True
if 'tslist' in args:
for vname in args['tslist']['vars']:
if vname not in wvs:
logging.error('unrecognized variable %s in tslist key.' % vname)
failing = True
if failing:
raise ValueError('One or more unrecognized variables in postproc.')
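# Minimal sketch of how verify_inputs is exercised (paths are hypothetical):
#   verify_inputs({'sat_only': True, 'sys_install_path': '/opt/wrfxpy'}, sys_cfg)
# A satellite-only job only needs an existing system installation directory,
# while a full forecast job must also point at existing workspace, WPS/WRF
# install paths and namelist templates as listed above.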
def process_arguments(job_args,sys_cfg):
"""
Convert arguments passed into program via the JSON configuration file and job json argument.
This is processed after the configuration is updated by the job json file.
Transforms unicode strings into standard strings.
:param args: the input arguments
"""
# note: the execution flow allows us to override anything in the etc/conf.json file
# dump(sys_cfg,'sys_cfg')
args = sys_cfg
keys = list(job_args.keys())
for key in keys:
if job_args[key] is None:
logging.warning('Job argument %s=None, ignoring' % key)
del job_args[key]
args.update(job_args)
# logging.info('updated args = %s' % json.dumps(args, indent=4, separators=(',', ': ')))
# resolve possible relative time specifications
start_utc = timespec_to_utc(args['start_utc'])
args['orig_start_utc'] = start_utc
args['start_utc'] = round_time_to_hour(start_utc)
args['end_utc'] = round_time_to_hour(timespec_to_utc(args['end_utc'], args['start_utc']), True)
args['cycle_start_utc'] = timespec_to_utc(args.get('cycle_start_utc', None))
args['max_dom'] = max([int(x) for x in [x for x in args['domains'] if len(x) == 1]])
args['max_dom_pp'] = max([int(x) for x in [x for x in args['postproc'] if len(x) == 1]])
args['satprod_satsource'] = Dict({})
# add postprocess satellite data
if 'satellite_source' in args:
if 'postproc' in args:
sats = args['satellite_source']
for sat in sats:
for prod in _sat_prods:
satprod = sat.upper()+prod
args['satprod_satsource'].update({satprod: sat})
# load tokens if etc/tokens.json exists
try:
tokens = json.load(open('etc/tokens.json'))
args.update({'tokens': tokens})
except:
        logging.warning('No etc/tokens.json found, proceeding without any access tokens.')
# defaults
if args['ref_utc'] is not None:
args['ref_utc'] = timespec_to_utc(args['ref_utc'], args['start_utc'])
    # sanity check, also that nothing in etc/conf got overridden
verify_inputs(args,sys_cfg)
return args
if __name__ == '__main__':
# configure the basic logger
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
# logging.basicConfig(level=logging.DEBUG)
# load configuration JSON
sys_cfg = load_sys_cfg()
# logging.info('sys_cfg = %s' % json.dumps(sys_cfg, indent=4, separators=(',', ': ')))
# load job JSON
job_args = json.load(open(sys.argv[1]))
# logging.info('job_args = %s' % json.dumps(job_args, indent=4, separators=(',', ': ')))
# process arguments
args = process_arguments(job_args,sys_cfg)
# logging.info('processed args = %s' % str(args))
# execute the job
logging.info('calling execute')
execute(args,job_args)
logging.info('forecast.py done')
|
traceroute.py
|
#!/usr/bin/env python3
import socket
import struct
import icmplib
import threading
R = '\033[31m' # red
G = '\033[32m' # green
C = '\033[36m' # cyan
W = '\033[0m' # white
Y = '\033[33m' # yellow
def icmp_trace(ip, tr_tout, output, collect):
result = icmplib.traceroute(ip, count=1, interval=0.05, timeout=tr_tout, id=icmplib.PID, max_hops=30, fast_mode=True)
print('\n\n' + R + 'HOPS'.ljust(7) + 'IP'.ljust(17) + 'HOST' + W + '\n')
for entry in result:
hop_index = str(entry._distance)
hop_addr = entry._address
try:
hop_host = socket.gethostbyaddr(hop_addr)[0]
except socket.herror:
hop_host = 'Unknown'
print(G + hop_index.ljust(7) + C + hop_addr.ljust(17) + W + hop_host)
if output != 'None':
collect.setdefault('Result', []).append([str(hop_index), str(hop_addr), str(hop_host)])
def udp_trace(ip, port, tr_tout, output, collect):
status = {'end': False}
rx = socket.socket(socket.AF_INET, socket.SOCK_RAW, socket.IPPROTO_ICMP)
rx.setblocking(0)
rx.settimeout(tr_tout)
rx.bind(('', port))
print('\n' + R + 'HOPS'.ljust(7) + 'IP'.ljust(17) + 'HOST' + W + '\n')
for ttl in range(1, 31):
udp_send(ip, port, ttl, rx, status, tr_tout, output, collect)
if status['end'] == True:
break
rx.close()
def udp_send(ip, port, ttl, rx, status, tr_tout, output, collect):
tx = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
tx.setsockopt(socket.SOL_IP, socket.IP_TTL, ttl)
tx.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
tx.setblocking(0)
tx.settimeout(tr_tout)
tx.sendto(''.encode(), (ip, port))
try:
data, curr_addr = rx.recvfrom(512)
curr_addr = curr_addr[0]
except socket.error as e:
curr_addr = '* * *'
finally:
tx.close()
hop_index = str(ttl)
hop_addr = curr_addr
if hop_addr != '* * *':
try:
hop_host = socket.gethostbyaddr(hop_addr)[0]
except socket.herror:
hop_host = 'Unknown'
else:
hop_addr = '* * *'
hop_host = ''
print(G + hop_index.ljust(7) + C + hop_addr.ljust(17) + W + hop_host)
if output != 'None':
collect.setdefault('Result', []).append([str(hop_index), str(hop_addr), str(hop_host)])
if curr_addr == ip:
status['end'] = True
def tcp_trace(ip, port, tr_tout, output, collect):
status = {'end': False}
rx = socket.socket(socket.AF_INET, socket.SOCK_RAW, socket.IPPROTO_ICMP)
rx.setblocking(0)
rx.settimeout(tr_tout)
rx.bind(('', 0))
print('\n' + R + 'HOPS'.ljust(7) + 'IP'.ljust(17) + 'HOST' + W + '\n')
for ttl in range(1,31):
        t = threading.Thread(target=tcp_send, args=(ip, port, ttl, rx, status, tr_tout, output, collect), daemon=True)
        t.start()
        t.join()
if status['end'] == True:
break
rx.close()
def tcp_send(ip, port, ttl, rx, status, tr_tout, output, collect):
tx = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
tx.setsockopt(socket.IPPROTO_IP, socket.IP_TTL, struct.pack('I', ttl))
tx.setblocking(0)
tx.settimeout(tr_tout)
while True:
try:
try:
tx.connect((ip, port))
hop_index = str(ttl)
try:
hop_host = socket.gethostbyaddr(ip)[0]
except socket.herror:
hop_host = 'Unknown'
print(G + hop_index.ljust(7) + C + ip.ljust(17) + W + hop_host)
status['end'] = True
if output != 'None':
collect.setdefault('Result', []).append([str(hop_index), str(ip), str(hop_host)])
except (socket.error, socket.timeout) as err:
try:
data, curr_addr = rx.recvfrom(512)
curr_addr = curr_addr[0]
except socket.timeout:
curr_addr = '* * *'
hop_index = str(ttl)
hop_addr = curr_addr
if hop_addr != '* * *':
try:
hop_host = socket.gethostbyaddr(hop_addr)[0]
except socket.herror:
hop_host = 'Unknown'
else:
hop_addr = '* * *'
hop_host = ''
print(G + hop_index.ljust(7) + C + hop_addr.ljust(17) + W + hop_host)
if output != 'None':
collect.setdefault('Result', []).append([str(hop_index), str(hop_addr), str(hop_host)])
continue
finally:
tx.close()
break
def troute(ip, mode, port, tr_tout, output, data):
collect = {}
print('\n\n' + G + '[+]' + C + ' Port : ' + W + str(port))
print(G + '[+]' + C + ' Timeout : ' + W + str(tr_tout))
if mode == 'ICMP':
print('\n' + Y + '[!]' + Y + ' Starting ICMP Traceroute...' + W)
icmp_trace(ip, tr_tout, output, collect)
elif mode == 'UDP':
print('\n' + Y + '[!]' + Y + ' Starting UDP Traceroute...' + W)
udp_trace(ip, port, tr_tout, output, collect)
elif mode == 'TCP':
print('\n' + Y + '[!]' + Y + ' Starting TCP Traceroute...' + W)
tcp_trace(ip, port, tr_tout, output, collect)
else:
print('\n' + R + '[-]' + C + ' Invalid Mode Selected!' + W)
if output != 'None':
collect['Protocol'] = mode
collect['Port'] = str(port)
collect['Timeout'] = str(tr_tout)
trace_output(output, data, collect)
print()
def trace_output(output, data, collect):
data['module-Traceroute'] = collect
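# Illustrative usage (target, port and output filename are hypothetical):
#   data = {}
#   troute('8.8.8.8', 'UDP', 33434, 2, 'scan.txt', data)
# When output != 'None', data['module-Traceroute'] ends up holding something like
#   {'Result': [['1', '192.168.1.1', 'gateway.lan'], ['2', '* * *', '']],
#    'Protocol': 'UDP', 'Port': '33434', 'Timeout': '2'}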
|
pyport.py
|
'''Author = Yatin Kalra
Website : yatinkalra.github.io
Github : www.github.com/yatinkalra
'''
from queue import Queue
import socket, threading, sys
from datetime import datetime
t1 = datetime.now()
print("-" * 60)
print(" ___ ___ _ ")
print(" / _ \_ _ / _ \___ _ __| |_ ")
print(" / /_)/ | | |/ /_)/ _ \| '__| __|")
print(" / ___/| |_| / ___/ (_) | | | |_ ")
print(" \/ \__, \/ \___/|_| \__|")
print(" |___/ ")
print("-" * 60)
print("What do you wanna scan?")
print("1. Scan Reserved Ports Only")
print("2. Scan All Ports")
print("3. Scan by Custom Range")
print("4. Scan Well Known Ports")
print("5. Scan Specific Port")
try:
scan_mode = int(input("Enter your option: "))
if(scan_mode not in [1,2,3,4,5]):
print("WRONG OPTION!")
sys.exit()
except ValueError:
print("WRONG OPTION!")
sys.exit()
target = input("Enter target: ")
if(len(target) == 0):
print("TARGET NOT FOUND!")
sys.exit()
print("-"*50)
print("TARGET: ", target)
print("STARTING TIME", t1)
print("-"*60)
def portscan(port):
try:
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
socket.setdefaulttimeout(0.25)
s.connect((target, port))
return True
except:
return False
queue = Queue()
def get_ports(mode):
if(mode == 1):
for port in range(0, 1024):
queue.put(port)
elif(mode == 2):
for port in range(0, 65536):
queue.put(port)
elif(mode == 3):
custom_range = input("Enter your custom range :")
star, end = custom_range.split
for port in range(int(start), int(end)):
queue.put(port)
elif(mode == 4):
        ports = [20, 21, 22, 23, 25, 53, 80, 110, 169, 443, 445]
for port in ports:
queue.put(port)
elif(mode == 5):
ports = input("Enter your ports")
ports = ports.split()
ports = list(map(int, ports))
for port in ports:
queue.put(port)
open_ports = []
def worker():
while not queue.empty():
port = queue.get()
if portscan(port):
print(f"Port {port} is open")
open_ports.append(port)
def run_scanner(thread, mode):
get_ports(mode)
thread_list = []
    for _ in range(thread):
        t = threading.Thread(target=worker)
        thread_list.append(t)
    for t in thread_list:
        t.start()
    for t in thread_list:
        t.join()
open_ports.sort()
print("Open Ports: ", open_ports)
print("-"*60)
run_scanner(500, scan_mode)
t2 = datetime.now()
print(f"Scanning completed in {t2-t1} seconds")
print("-"*60)
|
runOnLinux.py
|
#!/usr/bin/env python
"""This script is used by ../../runOnLinux
- It can be used for debugging by running a terminal-like interface with the FPGA
- And it can be used for running a program (or more) above linux on FPGA
"""
import threading
import warnings
import re
from test_gfe_unittest import *
class BaseGfeForLinux(BaseGfeTest):
def read_uart_out_until_stop (self,run_event,stop_event):
while (not stop_event.is_set()):
run_event.wait()
pending = self.gfe.uart_session.in_waiting
if pending:
data = self.gfe.uart_session.read(pending)
sys.stdout.write(data)
time.sleep(1)
return
def flush_uart_out (self,timeout=1):
while (True):
time.sleep (timeout)
pending = self.gfe.uart_session.in_waiting
if (not pending):
return
dump = self.gfe.uart_session.read(pending)
def interpreter_sput (self, sourceFilePath, destFilePath, riscv_ip, userTimeOut=0, linuxImage="busybox"):
###check sourceFileExist
sourceFilePath = os.path.expanduser(sourceFilePath)
if (not os.path.isfile(sourceFilePath)):
warnings.warn("%s: Cannot open or file does not exist. Press Enter to continue..." % (sourceFilePath), RuntimeWarning)
return
portNum = 1234 #arbitrarily chosen
if (linuxImage == "busybox"):
self.gfe.uart_session.write('nc -lp {0} > {1}\r'.format(portNum, destFilePath)) #use tcp
time.sleep (1)
try:
subprocess.Popen('busybox nc {0} {1} <{2}'.format(riscv_ip,portNum,sourceFilePath),shell=True) #use Popen to be non-blocking
except:
warnings.warn("%s: Sending failed. Please use --ctrlc if terminal not responding." % (sourceFilePath), RuntimeWarning)
return
fileSize = os.path.getsize(sourceFilePath)
if (fileSize > 400e6):
warnings.warn("%s: File size is too big; this might cause a crash." % (sourceFilePath), RuntimeWarning)
#The busybox netcat does not end the connection automatically, so we have to interrupt it
            #The ethernet theoretical speed is 1Gbps (=125MB/s), but the actual speed is sometimes way slower than that
#So we'll wait 10X (1 sec for each 100MB) (seems reasonable) .. Or you can force it by using the -ft option
if (userTimeOut > 0):
timeToWait = userTimeOut
else:
MBtoWaitPerSec = 100
timeToWait = 10 * (((fileSize-1) // (MBtoWaitPerSec*1e6)) + 1) #ceil division
time.sleep (timeToWait)
#This Ctrl+C is enough to cut the connection and kill the Popen process called above
self.gfe.uart_session.write(b'\x03\r')
print ("\nSending successful!")
elif (linuxImage == "debian"):
self.gfe.uart_session.write('nc -lp {0} > {1}\r'.format(portNum, destFilePath)) #use tcp
time.sleep (1)
try:
subprocess.call('nc {0} {1} <{2}'.format(riscv_ip,portNum,sourceFilePath),shell=True) #use blocking because it ends with new nc
except:
warnings.warn("%s: Sending failed. Please use --ctrlc if terminal not responding." % (sourceFilePath), RuntimeWarning)
return
fileSize = os.path.getsize(sourceFilePath)
if (fileSize > 400e6):
warnings.warn("%s: File size is too big; this might cause a crash." % (sourceFilePath), RuntimeWarning)
self.gfe.uart_session.write(b'\r')
print ("\nSending successful!")
else:
warnings.warn("LinuxImage not supported. Only [busybox|debian] are.", RuntimeWarning)
return
def interactive_terminal (self,riscv_ip,linuxImage):
print ("\nStarting interactive terminal...")
stopReading = threading.Event() #event to stop the reading process in the end
runReading = threading.Event() #event to run/pause the reading process
readThread = threading.Thread(target=self.read_uart_out_until_stop, args=(runReading,stopReading))
stopReading.clear()
runReading.set()
readThread.start() #start the reading
warnings.simplefilter ("always")
formatwarning_orig = warnings.formatwarning
warnings.formatwarning = lambda message, category, filename, lineno, line=None: \
formatwarning_orig(message, category, filename, lineno, line='')
exitTerminal = False
while (not exitTerminal):
instruction = raw_input ("")
if (len(instruction)>2 and instruction[0:2]=='--'): #instruction to the interpreter
if (instruction[2:6] == 'exit'): #exit terminal and end test
exitTerminal = True
elif (instruction[2:6] == 'sput'): #copy a file from local to linux
sputFTMatch = re.match(r'--sput -ft (?P<userTimeOut>[\d]+) (?P<sourceFilePath>[\w/.~-]+) (?P<destFilePath>[\w/.~-]+)\s*',instruction)
sputMatch = re.match(r'--sput (?P<sourceFilePath>[\w/.~-]+) (?P<destFilePath>[\w/.~-]+)\s*',instruction)
if (sputFTMatch != None):
self.interpreter_sput(sputFTMatch.group('sourceFilePath'), sputFTMatch.group('destFilePath'), riscv_ip, sputFTMatch.group('userTimeOut'),linuxImage=linuxImage)
elif (sputMatch != None):
self.interpreter_sput(sputMatch.group('sourceFilePath'), sputMatch.group('destFilePath'), riscv_ip,linuxImage=linuxImage)
else:
warnings.warn("Please use \"--sput [-ft SEC] sourceFilePath destFilePath\". Press Enter to continue...", SyntaxWarning)
elif (instruction[2:7] == 'ctrlc'): #ctrlC
self.gfe.uart_session.write(b'\x03\r')
else:
warnings.warn("Interpreter command not found. Press Enter to continue...", SyntaxWarning)
else:
self.gfe.uart_session.write(instruction + '\r')
time.sleep(1)
stopReading.set()
return
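    # Interpreter commands understood by interactive_terminal (paths are hypothetical):
    #   --sput ~/host/prog /root/prog       copy a local file onto the booted linux
    #   --sput -ft 30 ~/big.img /root/big   same, with a forced 30 second transfer timeout
    #   --ctrlc                             send Ctrl+C over the UART
    #   --exit                              leave the terminal and finish the test
    # Any other input is written verbatim to the UART followed by '\r'.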
class RunOnLinux (TestLinux, BaseGfeForLinux):
def boot_busybox (self):
""" This function boots the busybox and returns its ip address """
# Boot busybox
self.boot_linux()
linux_boot_timeout=60
print("Running elf with a timeout of {}s".format(linux_boot_timeout))
# Check that busybox reached activation screen
self.check_uart_out(
timeout=linux_boot_timeout,
expected_contents=["Please press Enter to activate this console"])
#self.gfe.uart_session.write(b'stty -echo\r')
self.gfe.uart_session.write(b'\r')
time.sleep(1)
# Run DHCP client
self.gfe.uart_session.write(b'ifconfig eth0 up\r')
self.check_uart_out(
timeout=10,
expected_contents=["xilinx_axienet 62100000.ethernet eth0: Link is Up - 1Gbps/Full - flow control rx/tx"])
self.gfe.uart_session.write(b'udhcpc -i eth0\r')
# Store and print all UART output while the elf is running
timeout = 10
print("Printing all UART output from the GFE...")
rx_buf = []
start_time = time.time()
while time.time() < (start_time + timeout):
pending = self.gfe.uart_session.in_waiting
if pending:
data = self.gfe.uart_session.read(pending)
rx_buf.append(data) # Append read chunks to the list.
sys.stdout.write(data)
print("Timeout reached")
# Get FPGA IP address
riscv_ip = 0
rx_buf_str = ''.join(rx_buf)
rx_buf_list = rx_buf_str.split('\n')
for line in rx_buf_list:
index = line.find('Setting IP address')
if index != -1:
ip_str = line.split()
riscv_ip = ip_str[3]
print("RISCV IP address is: " + riscv_ip)
                # break # keep reading till the end to get the latest IP assignment
# Ping FPGA
if riscv_ip == 0:
raise Exception("Could not get RISCV IP Address. Check that it was assigned in the UART output.")
ping_response = os.system("ping -c 1 " + riscv_ip)
self.assertEqual(ping_response, 0,
"Cannot ping FPGA.")
return riscv_ip
def test_busybox_terminal (self):
""" Boot busybox and start an interactive terminal """
#boot busybox and get ip
riscv_ip = self.boot_busybox()
#start interactive terminal
self.interactive_terminal(riscv_ip,"busybox")
return
def test_busybox_runAprog (self,returnIP=False):
""" Boot busybox and run a program """
#boot busybox and get ip
riscv_ip = self.boot_busybox()
stopReading = threading.Event() #event to stop the reading process in the end
runReading = threading.Event() #event to run/pause the reading process
readThread = threading.Thread(target=self.read_uart_out_until_stop, args=(runReading,stopReading))
stopReading.clear()
runReading.set()
readThread.start() #start the reading
#Transmitting the program
self.interpreter_sput("../../runOnLinux/binary2run", "binary2run", riscv_ip,linuxImage="busybox")
time.sleep(1)
self.gfe.uart_session.write('chmod +x binary2run\r')
time.sleep(1)
self.gfe.uart_session.write('./binary2run\r')
time.sleep(3)
stopReading.set()
if (returnIP):
return riscv_ip
return
def test_busybox_runANDterminal (self):
""" Boot busybox, run a program, then open the interactive terminal """
#boot, get ip, and runAprog
riscv_ip = self.test_busybox_runAprog()
#start interactive terminal
self.interactive_terminal(riscv_ip,"busybox")
return
def boot_debian (self):
""" This function boots the debian and returns its ip address """
# Boot Debian
self.boot_linux()
linux_boot_timeout=800
print("Running elf with a timeout of {}s".format(linux_boot_timeout))
# Check that Debian booted
self.check_uart_out(
timeout=linux_boot_timeout,
expected_contents=[ "Debian GNU/Linux 10",
"login:"
])
# Login to Debian
self.gfe.uart_session.write(b'root\r')
# Check for password prompt and enter password
self.check_uart_out(timeout=5, expected_contents=["Password"])
self.gfe.uart_session.write(b'riscv\r')
# Check for command line prompt
self.check_uart_out(
timeout=15,
expected_contents=["The programs included with the Debian GNU/Linux system are free software;",
":~#"
])
self.gfe.uart_session.write(b'ifup eth0\r')
self.gfe.uart_session.write(b'ip addr\r')
# Get RISC-V IP address and ping it from host
# Store and print all UART output while the elf is running
timeout = 60
print("Printing all UART output from the GFE...")
rx_buf = []
start_time = time.time()
while time.time() < (start_time + timeout):
pending = self.gfe.uart_session.in_waiting
if pending:
data = self.gfe.uart_session.read(pending)
rx_buf.append(data) # Append read chunks to the list.
sys.stdout.write(data)
print("Timeout reached")
# Get FPGA IP address
riscv_ip = 0
rx_buf_str = ''.join(rx_buf)
rx_buf_list = rx_buf_str.split('\n')
for line in rx_buf_list:
index1 = line.find('inet')
index2 = line.find('eth0')
if (index1 != -1) & (index2 != -1):
ip_str = re.split('[/\s]\s*', line)
riscv_ip = ip_str[2]
# Ping FPGA
print("RISCV IP address is: " + riscv_ip)
if (riscv_ip == 0) or (riscv_ip == "0.0.0.0"):
raise Exception("Could not get RISCV IP Address. Check that it was assigned in the UART output.")
ping_response = os.system("ping -c 1 " + riscv_ip)
self.assertEqual(ping_response, 0,
"Cannot ping FPGA.")
return riscv_ip
def test_debian_terminal(self):
""" Boot debian and start an interactive terminal """
#boot debian and get ip
riscv_ip = self.boot_debian()
#start interactive terminal
self.interactive_terminal(riscv_ip,"debian")
return
def test_debian_runAprog (self,returnIP=False):
""" Boot debian and run a program """
        #boot debian and get ip
riscv_ip = self.boot_debian()
stopReading = threading.Event() #event to stop the reading process in the end
runReading = threading.Event() #event to run/pause the reading process
readThread = threading.Thread(target=self.read_uart_out_until_stop, args=(runReading,stopReading))
stopReading.clear()
runReading.set()
readThread.start() #start the reading
#Transmitting the program
self.interpreter_sput("../../runOnLinux/binary2run", "binary2run", riscv_ip,linuxImage="debian")
time.sleep(1)
self.gfe.uart_session.write('chmod +x binary2run\r')
time.sleep(1)
self.gfe.uart_session.write('./binary2run\r')
time.sleep(3)
stopReading.set()
if (returnIP):
return riscv_ip
return
def test_debian_runANDterminal (self):
""" Boot debian, run a program, then open the interactive terminal """
#boot, get ip, and runAprog
riscv_ip = self.test_debian_runAprog()
#start interactive terminal
self.interactive_terminal(riscv_ip,"debian")
return
if __name__ == '__main__':
unittest.main()
|
helpers.py
|
"""
Helper functions file for OCS QE
"""
import logging
import re
import datetime
import statistics
import os
from subprocess import TimeoutExpired, run, PIPE
import tempfile
import time
import yaml
import threading
from ocs_ci.ocs.ocp import OCP
from uuid import uuid4
from ocs_ci.ocs.exceptions import (
TimeoutExpiredError,
UnexpectedBehaviour,
UnavailableBuildException
)
from concurrent.futures import ThreadPoolExecutor
from ocs_ci.ocs import constants, defaults, ocp, node
from ocs_ci.utility import templating
from ocs_ci.ocs.resources import pod, pvc
from ocs_ci.ocs.resources.ocs import OCS
from ocs_ci.ocs.exceptions import CommandFailed, ResourceWrongStatusException
from ocs_ci.utility.retry import retry
from ocs_ci.utility.utils import TimeoutSampler, ocsci_log_path, run_cmd
from ocs_ci.framework import config
logger = logging.getLogger(__name__)
def create_unique_resource_name(resource_description, resource_type):
"""
Creates a unique object name by using the object_description,
object_type and a random uuid(in hex) as suffix
Args:
resource_description (str): The user provided object description
resource_type (str): The type of object for which the unique name
will be created. For example: project, pvc, etc
Returns:
str: A unique name
"""
return f"{resource_type}-{resource_description[:23]}-{uuid4().hex}"
def create_resource(do_reload=True, **kwargs):
"""
Create a resource
Args:
do_reload (bool): True for reloading the resource following its creation,
False otherwise
kwargs (dict): Dictionary of the OCS resource
Returns:
OCS: An OCS instance
Raises:
AssertionError: In case of any failure
"""
ocs_obj = OCS(**kwargs)
resource_name = kwargs.get('metadata').get('name')
created_resource = ocs_obj.create(do_reload=do_reload)
assert created_resource, (
f"Failed to create resource {resource_name}"
)
return ocs_obj
def wait_for_resource_state(resource, state, timeout=60):
"""
Wait for a resource to get to a given status
Args:
resource (OCS obj): The resource object
state (str): The status to wait for
timeout (int): Time in seconds to wait
Raises:
ResourceWrongStatusException: In case the resource hasn't
reached the desired state
"""
if (
resource.name == constants.DEFAULT_STORAGECLASS_CEPHFS
or resource.name == constants.DEFAULT_STORAGECLASS_RBD
):
logger.info(f"Attempt to default default Secret or StorageClass")
return
try:
resource.ocp.wait_for_resource(
condition=state, resource_name=resource.name, timeout=timeout
)
except TimeoutExpiredError:
logger.error(f"{resource.kind} {resource.name} failed to reach {state}")
resource.reload()
raise ResourceWrongStatusException(resource.name, resource.describe())
logger.info(f"{resource.kind} {resource.name} reached state {state}")
def create_pod(
interface_type=None, pvc_name=None,
do_reload=True, namespace=defaults.ROOK_CLUSTER_NAMESPACE,
node_name=None, pod_dict_path=None, sa_name=None, dc_deployment=False,
raw_block_pv=False, raw_block_device=constants.RAW_BLOCK_DEVICE, replica_count=1,
pod_name=None, node_selector=None
):
"""
Create a pod
Args:
interface_type (str): The interface type (CephFS, RBD, etc.)
pvc_name (str): The PVC that should be attached to the newly created pod
do_reload (bool): True for reloading the object after creation, False otherwise
namespace (str): The namespace for the new resource creation
node_name (str): The name of specific node to schedule the pod
pod_dict_path (str): YAML path for the pod
sa_name (str): Serviceaccount name
dc_deployment (bool): True if creating pod as deploymentconfig
raw_block_pv (bool): True for creating raw block pv based pod, False otherwise
raw_block_device (str): raw block device for the pod
replica_count (int): Replica count for deployment config
pod_name (str): Name of the pod to create
node_selector (dict): dict of key-value pair to be used for nodeSelector field
eg: {'nodetype': 'app-pod'}
Returns:
Pod: A Pod instance
Raises:
AssertionError: In case of any failure
"""
if interface_type == constants.CEPHBLOCKPOOL:
pod_dict = pod_dict_path if pod_dict_path else constants.CSI_RBD_POD_YAML
interface = constants.RBD_INTERFACE
else:
pod_dict = pod_dict_path if pod_dict_path else constants.CSI_CEPHFS_POD_YAML
interface = constants.CEPHFS_INTERFACE
if dc_deployment:
pod_dict = pod_dict_path if pod_dict_path else constants.FEDORA_DC_YAML
pod_data = templating.load_yaml(pod_dict)
if not pod_name:
pod_name = create_unique_resource_name(
f'test-{interface}', 'pod'
)
pod_data['metadata']['name'] = pod_name
pod_data['metadata']['namespace'] = namespace
if dc_deployment:
pod_data['metadata']['labels']['app'] = pod_name
pod_data['spec']['template']['metadata']['labels']['name'] = pod_name
pod_data['spec']['replicas'] = replica_count
if pvc_name:
if dc_deployment:
pod_data['spec']['template']['spec']['volumes'][0][
'persistentVolumeClaim'
]['claimName'] = pvc_name
else:
pod_data['spec']['volumes'][0]['persistentVolumeClaim']['claimName'] = pvc_name
if interface_type == constants.CEPHBLOCKPOOL and raw_block_pv:
if pod_dict_path == constants.FEDORA_DC_YAML:
temp_dict = [
{'devicePath': raw_block_device, 'name': pod_data.get('spec').get(
'template').get('spec').get('volumes')[0].get('name')}
]
del pod_data['spec']['template']['spec']['containers'][0]['volumeMounts']
pod_data['spec']['template']['spec']['containers'][0]['volumeDevices'] = temp_dict
elif pod_dict_path == constants.NGINX_POD_YAML:
temp_dict = [
{'devicePath': raw_block_device, 'name': pod_data.get('spec').get(
'containers')[0].get('volumeMounts')[0].get('name')}
]
del pod_data['spec']['containers'][0]['volumeMounts']
pod_data['spec']['containers'][0]['volumeDevices'] = temp_dict
else:
pod_data['spec']['containers'][0]['volumeDevices'][0]['devicePath'] = raw_block_device
pod_data['spec']['containers'][0]['volumeDevices'][0]['name'] = pod_data.get('spec').get('volumes')[
0].get('name')
if node_name:
if dc_deployment:
pod_data['spec']['template']['spec']['nodeName'] = node_name
else:
pod_data['spec']['nodeName'] = node_name
if node_selector:
if dc_deployment:
pod_data['spec']['template']['spec']['nodeSelector'] = node_selector
else:
pod_data['spec']['nodeSelector'] = node_selector
if sa_name and dc_deployment:
pod_data['spec']['template']['spec']['serviceAccountName'] = sa_name
if dc_deployment:
ocs_obj = create_resource(**pod_data)
logger.info(ocs_obj.name)
assert (ocp.OCP(kind='pod', namespace=namespace)).wait_for_resource(
condition=constants.STATUS_COMPLETED,
resource_name=pod_name + '-1-deploy',
resource_count=0, timeout=180, sleep=3
)
dpod_list = pod.get_all_pods(namespace=namespace)
for dpod in dpod_list:
if '-1-deploy' not in dpod.name:
if pod_name in dpod.name:
return dpod
else:
pod_obj = pod.Pod(**pod_data)
pod_name = pod_data.get('metadata').get('name')
logger.info(f'Creating new Pod {pod_name} for test')
created_resource = pod_obj.create(do_reload=do_reload)
assert created_resource, (
f"Failed to create Pod {pod_name}"
)
return pod_obj
def create_project():
"""
Create a project
Returns:
OCP: Project object
"""
namespace = create_unique_resource_name('test', 'namespace')
project_obj = ocp.OCP(kind='Project', namespace=namespace)
assert project_obj.new_project(namespace), f"Failed to create namespace {namespace}"
return project_obj
def create_multilpe_projects(number_of_project):
"""
Create one or more projects
Args:
number_of_project (int): Number of projects to be created
Returns:
list: List of project objects
"""
project_objs = [create_project() for _ in range(number_of_project)]
return project_objs
def create_secret(interface_type):
"""
Create a secret
** This method should not be used anymore **
** This method is for internal testing only **
Args:
interface_type (str): The type of the interface
(e.g. CephBlockPool, CephFileSystem)
Returns:
OCS: An OCS instance for the secret
"""
secret_data = dict()
if interface_type == constants.CEPHBLOCKPOOL:
secret_data = templating.load_yaml(
constants.CSI_RBD_SECRET_YAML
)
secret_data['stringData']['userID'] = constants.ADMIN_USER
secret_data['stringData']['userKey'] = get_admin_key()
interface = constants.RBD_INTERFACE
elif interface_type == constants.CEPHFILESYSTEM:
secret_data = templating.load_yaml(
constants.CSI_CEPHFS_SECRET_YAML
)
del secret_data['stringData']['userID']
del secret_data['stringData']['userKey']
secret_data['stringData']['adminID'] = constants.ADMIN_USER
secret_data['stringData']['adminKey'] = get_admin_key()
interface = constants.CEPHFS_INTERFACE
secret_data['metadata']['name'] = create_unique_resource_name(
f'test-{interface}', 'secret'
)
secret_data['metadata']['namespace'] = defaults.ROOK_CLUSTER_NAMESPACE
return create_resource(**secret_data)
def default_ceph_block_pool():
"""
Returns default CephBlockPool
Returns:
default CephBlockPool
"""
return constants.DEFAULT_BLOCKPOOL
def create_ceph_block_pool(pool_name=None):
"""
Create a Ceph block pool
** This method should not be used anymore **
** This method is for internal testing only **
Args:
pool_name (str): The pool name to create
Returns:
OCS: An OCS instance for the Ceph block pool
"""
cbp_data = templating.load_yaml(constants.CEPHBLOCKPOOL_YAML)
cbp_data['metadata']['name'] = (
pool_name if pool_name else create_unique_resource_name(
'test', 'cbp'
)
)
cbp_data['metadata']['namespace'] = defaults.ROOK_CLUSTER_NAMESPACE
cbp_data['spec']['failureDomain'] = get_failure_domin()
cbp_obj = create_resource(**cbp_data)
cbp_obj.reload()
assert verify_block_pool_exists(cbp_obj.name), (
f"Block pool {cbp_obj.name} does not exist"
)
return cbp_obj
def create_ceph_file_system(pool_name=None):
"""
Create a Ceph file system
** This method should not be used anymore **
** This method is for internal testing only **
Args:
pool_name (str): The pool name to create
Returns:
OCS: An OCS instance for the Ceph file system
"""
cfs_data = templating.load_yaml(constants.CEPHFILESYSTEM_YAML)
cfs_data['metadata']['name'] = (
pool_name if pool_name else create_unique_resource_name(
'test', 'cfs'
)
)
cfs_data['metadata']['namespace'] = defaults.ROOK_CLUSTER_NAMESPACE
cfs_data = create_resource(**cfs_data)
cfs_data.reload()
assert validate_cephfilesystem(cfs_data.name), (
f"File system {cfs_data.name} does not exist"
)
return cfs_data
def default_storage_class(
interface_type,
):
"""
Return default storage class based on interface_type
Args:
interface_type (str): The type of the interface
(e.g. CephBlockPool, CephFileSystem)
Returns:
OCS: Existing StorageClass Instance
"""
if interface_type == constants.CEPHBLOCKPOOL:
base_sc = OCP(
kind='storageclass',
resource_name=constants.DEFAULT_STORAGECLASS_RBD
)
elif interface_type == constants.CEPHFILESYSTEM:
base_sc = OCP(
kind='storageclass',
resource_name=constants.DEFAULT_STORAGECLASS_CEPHFS
)
sc = OCS(**base_sc.data)
return sc
def create_storage_class(
interface_type, interface_name, secret_name,
reclaim_policy=constants.RECLAIM_POLICY_DELETE, sc_name=None,
provisioner=None
):
"""
Create a storage class
** This method should not be used anymore **
** This method is for internal testing only **
Args:
interface_type (str): The type of the interface
(e.g. CephBlockPool, CephFileSystem)
interface_name (str): The name of the interface
secret_name (str): The name of the secret
sc_name (str): The name of storage class to create
reclaim_policy (str): Type of reclaim policy. Defaults to 'Delete'
(eg., 'Delete', 'Retain')
Returns:
OCS: An OCS instance for the storage class
"""
sc_data = dict()
if interface_type == constants.CEPHBLOCKPOOL:
sc_data = templating.load_yaml(
constants.CSI_RBD_STORAGECLASS_YAML
)
sc_data['parameters'][
'csi.storage.k8s.io/node-stage-secret-name'
] = secret_name
sc_data['parameters'][
'csi.storage.k8s.io/node-stage-secret-namespace'
] = defaults.ROOK_CLUSTER_NAMESPACE
interface = constants.RBD_INTERFACE
sc_data['provisioner'] = (
provisioner if provisioner else defaults.RBD_PROVISIONER
)
elif interface_type == constants.CEPHFILESYSTEM:
sc_data = templating.load_yaml(
constants.CSI_CEPHFS_STORAGECLASS_YAML
)
sc_data['parameters'][
'csi.storage.k8s.io/node-stage-secret-name'
] = secret_name
sc_data['parameters'][
'csi.storage.k8s.io/node-stage-secret-namespace'
] = defaults.ROOK_CLUSTER_NAMESPACE
interface = constants.CEPHFS_INTERFACE
sc_data['parameters']['fsName'] = get_cephfs_name()
sc_data['provisioner'] = (
provisioner if provisioner else defaults.CEPHFS_PROVISIONER
)
sc_data['parameters']['pool'] = interface_name
sc_data['metadata']['name'] = (
sc_name if sc_name else create_unique_resource_name(
f'test-{interface}', 'storageclass'
)
)
sc_data['metadata']['namespace'] = defaults.ROOK_CLUSTER_NAMESPACE
sc_data['parameters'][
'csi.storage.k8s.io/provisioner-secret-name'
] = secret_name
sc_data['parameters'][
'csi.storage.k8s.io/provisioner-secret-namespace'
] = defaults.ROOK_CLUSTER_NAMESPACE
sc_data['parameters']['clusterID'] = defaults.ROOK_CLUSTER_NAMESPACE
sc_data['reclaimPolicy'] = reclaim_policy
try:
del sc_data['parameters']['userid']
except KeyError:
pass
return create_resource(**sc_data)
def create_pvc(
sc_name, pvc_name=None, namespace=defaults.ROOK_CLUSTER_NAMESPACE,
size=None, do_reload=True, access_mode=constants.ACCESS_MODE_RWO,
volume_mode=None
):
"""
Create a PVC
Args:
sc_name (str): The name of the storage class for the PVC to be
associated with
pvc_name (str): The name of the PVC to create
namespace (str): The namespace for the PVC creation
size (str): Size of pvc to create
do_reload (bool): True for wait for reloading PVC after its creation, False otherwise
access_mode (str): The access mode to be used for the PVC
volume_mode (str): Volume mode for rbd RWX pvc i.e. 'Block'
Returns:
PVC: PVC instance
"""
pvc_data = templating.load_yaml(constants.CSI_PVC_YAML)
pvc_data['metadata']['name'] = (
pvc_name if pvc_name else create_unique_resource_name(
'test', 'pvc'
)
)
pvc_data['metadata']['namespace'] = namespace
pvc_data['spec']['accessModes'] = [access_mode]
pvc_data['spec']['storageClassName'] = sc_name
if size:
pvc_data['spec']['resources']['requests']['storage'] = size
if volume_mode:
pvc_data['spec']['volumeMode'] = volume_mode
ocs_obj = pvc.PVC(**pvc_data)
created_pvc = ocs_obj.create(do_reload=do_reload)
assert created_pvc, f"Failed to create resource {pvc_name}"
return ocs_obj
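# Illustrative usage (storage class name is hypothetical): request a 5Gi RWO
# volume and wait for it to bind.
#   pvc_obj = create_pvc(sc_name='ocs-storagecluster-ceph-rbd', size='5Gi')
#   wait_for_resource_state(pvc_obj, constants.STATUS_BOUND)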
def create_multiple_pvcs(
sc_name, namespace, number_of_pvc=1, size=None, do_reload=False,
access_mode=constants.ACCESS_MODE_RWO
):
"""
Create one or more PVC
Args:
sc_name (str): The name of the storage class to provision the PVCs from
namespace (str): The namespace for the PVCs creation
number_of_pvc (int): Number of PVCs to be created
size (str): The size of the PVCs to create
do_reload (bool): True for wait for reloading PVC after its creation,
False otherwise
access_mode (str): The kind of access mode for PVC
Returns:
list: List of PVC objects
"""
if access_mode == 'ReadWriteMany' and 'rbd' in sc_name:
volume_mode = 'Block'
else:
volume_mode = None
return [
create_pvc(
sc_name=sc_name, size=size, namespace=namespace,
do_reload=do_reload, access_mode=access_mode, volume_mode=volume_mode
) for _ in range(number_of_pvc)
]
def verify_block_pool_exists(pool_name):
"""
Verify if a Ceph block pool exist
Args:
pool_name (str): The name of the Ceph block pool
Returns:
bool: True if the Ceph block pool exists, False otherwise
"""
logger.info(f"Verifying that block pool {pool_name} exists")
ct_pod = pod.get_ceph_tools_pod()
try:
for pools in TimeoutSampler(
60, 3, ct_pod.exec_ceph_cmd, 'ceph osd lspools'
):
logger.info(f'POOLS are {pools}')
for pool in pools:
if pool_name in pool.get('poolname'):
return True
except TimeoutExpiredError:
return False
def get_admin_key():
"""
Fetches admin key secret from Ceph
Returns:
str: The admin key
"""
ct_pod = pod.get_ceph_tools_pod()
out = ct_pod.exec_ceph_cmd('ceph auth get-key client.admin')
return out['key']
def get_cephfs_data_pool_name():
"""
Fetches ceph fs datapool name from Ceph
Returns:
str: fs datapool name
"""
ct_pod = pod.get_ceph_tools_pod()
out = ct_pod.exec_ceph_cmd('ceph fs ls')
return out[0]['data_pools'][0]
def validate_cephfilesystem(fs_name):
"""
Verify CephFileSystem exists at Ceph and OCP
Args:
fs_name (str): The name of the Ceph FileSystem
Returns:
bool: True if CephFileSystem is created at Ceph and OCP side else
will return False with valid msg i.e Failure cause
"""
cfs = ocp.OCP(
kind=constants.CEPHFILESYSTEM,
namespace=defaults.ROOK_CLUSTER_NAMESPACE
)
ct_pod = pod.get_ceph_tools_pod()
ceph_validate = False
ocp_validate = False
result = cfs.get(resource_name=fs_name)
if result.get('metadata').get('name'):
logger.info("Filesystem %s got created from Openshift Side", fs_name)
ocp_validate = True
else:
logger.info(
"Filesystem %s was not create at Openshift Side", fs_name
)
return False
try:
for pools in TimeoutSampler(
60, 3, ct_pod.exec_ceph_cmd, 'ceph fs ls'
):
for out in pools:
result = out.get('name')
if result == fs_name:
logger.info("FileSystem %s got created from Ceph Side", fs_name)
ceph_validate = True
break
else:
logger.error("FileSystem %s was not present at Ceph Side", fs_name)
ceph_validate = False
if ceph_validate:
break
except TimeoutExpiredError:
pass
return True if (ceph_validate and ocp_validate) else False
def get_all_storageclass_names():
"""
Function for getting all storageclass
Returns:
list: list of storageclass name
"""
sc_obj = ocp.OCP(
kind=constants.STORAGECLASS,
namespace=defaults.ROOK_CLUSTER_NAMESPACE
)
result = sc_obj.get()
sample = result['items']
storageclass = [
item.get('metadata').get('name') for item in sample if (
(item.get('metadata').get('name') not in constants.IGNORE_SC_GP2)
and (item.get('metadata').get('name') not in constants.IGNORE_SC_FLEX)
)
]
return storageclass
def delete_storageclasses(sc_objs):
""""
Function for Deleting storageclasses
Args:
sc_objs (list): List of SC objects for deletion
Returns:
bool: True if deletion is successful
"""
for sc in sc_objs:
logger.info("Deleting StorageClass with name %s", sc.name)
sc.delete()
return True
def get_cephblockpool_names():
"""
Function for getting all CephBlockPool
Returns:
list: list of cephblockpool name
"""
pool_obj = ocp.OCP(
kind=constants.CEPHBLOCKPOOL,
namespace=defaults.ROOK_CLUSTER_NAMESPACE
)
result = pool_obj.get()
sample = result['items']
pool_list = [
item.get('metadata').get('name') for item in sample
]
return pool_list
def delete_cephblockpools(cbp_objs):
"""
Function for deleting CephBlockPool
Args:
cbp_objs (list): List of CBP objects for deletion
Returns:
bool: True if deletion of CephBlockPool is successful
"""
for cbp in cbp_objs:
logger.info("Deleting CephBlockPool with name %s", cbp.name)
cbp.delete()
return True
def get_cephfs_name():
"""
    Function to retrieve CephFS name
Returns:
str: Name of CFS
"""
cfs_obj = ocp.OCP(
kind=constants.CEPHFILESYSTEM,
namespace=defaults.ROOK_CLUSTER_NAMESPACE
)
result = cfs_obj.get()
return result['items'][0].get('metadata').get('name')
def pull_images(image_name):
"""
Function to pull images on all nodes
Args:
image_name (str): Name of the container image to be pulled
Returns: None
"""
node_objs = node.get_node_objs(get_worker_nodes())
for node_obj in node_objs:
        logging.info(f'pulling image "{image_name}" on node {node_obj.name}')
assert node_obj.ocp.exec_oc_debug_cmd(
node_obj.name, cmd_list=[f'podman pull {image_name}']
)
def run_io_with_rados_bench(**kw):
""" A task for radosbench
Runs radosbench command on specified pod . If parameters are
not provided task assumes few default parameters.This task
runs command in synchronous fashion.
Args:
**kw: Needs a dictionary of various radosbench parameters.
ex: pool_name:pool
pg_num:number of pgs for pool
op: type of operation {read, write}
cleanup: True OR False
Returns:
ret: return value of radosbench command
"""
logger.info("Running radosbench task")
ceph_pods = kw.get('ceph_pods') # list of pod objects of ceph cluster
config = kw.get('config')
role = config.get('role', 'client')
clients = [cpod for cpod in ceph_pods if role in cpod.roles]
idx = config.get('idx', 0)
client = clients[idx]
op = config.get('op', 'write')
cleanup = ['--no-cleanup', '--cleanup'][config.get('cleanup', True)]
pool = config.get('pool')
block = str(config.get('size', 4 << 20))
time = config.get('time', 120)
time = str(time)
rados_bench = (
f"rados --no-log-to-stderr "
f"-b {block} "
f"-p {pool} "
f"bench "
f"{time} "
f"{op} "
f"{cleanup} "
)
try:
ret = client.exec_ceph_cmd(ceph_cmd=rados_bench)
except CommandFailed as ex:
logger.error(f"Rados bench failed\n Error is: {ex}")
return False
logger.info(ret)
logger.info("Finished radosbench")
return ret
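# Minimal sketch of the expected keyword structure (values are hypothetical):
#   run_io_with_rados_bench(
#       ceph_pods=ceph_pod_objs,   # pod objects whose .roles include 'client'
#       config={'role': 'client', 'idx': 0, 'pool': 'rbd',
#               'op': 'write', 'size': 4 << 20, 'time': 60, 'cleanup': True})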
def get_all_pvs():
"""
Gets all pv in openshift-storage namespace
Returns:
dict: Dict of all pv in openshift-storage namespace
"""
ocp_pv_obj = ocp.OCP(
kind=constants.PV, namespace=defaults.ROOK_CLUSTER_NAMESPACE
)
return ocp_pv_obj.get()
# TODO: revert counts of tries and delay,BZ 1726266
@retry(AssertionError, tries=20, delay=10, backoff=1)
def validate_pv_delete(pv_name):
"""
validates if pv is deleted after pvc deletion
Args:
pv_name (str): pv from pvc to validates
Returns:
bool: True if deletion is successful
Raises:
AssertionError: If pv is not deleted
"""
ocp_pv_obj = ocp.OCP(
kind=constants.PV, namespace=defaults.ROOK_CLUSTER_NAMESPACE
)
try:
if ocp_pv_obj.get(resource_name=pv_name):
msg = f"{constants.PV} {pv_name} is not deleted after PVC deletion"
raise AssertionError(msg)
except CommandFailed:
return True
def create_pods(pvc_objs, pod_factory, interface, pods_for_rwx=1, status=""):
"""
Create pods
Args:
pvc_objs (list): List of ocs_ci.ocs.resources.pvc.PVC instances
pod_factory (function): pod_factory function
interface (int): Interface type
pods_for_rwx (int): Number of pods to be created if access mode of
PVC is RWX
status (str): If provided, wait for desired state of each pod before
creating next one
Returns:
list: list of Pod objects
"""
pod_objs = []
for pvc_obj in pvc_objs:
volume_mode = getattr(
pvc_obj, 'volume_mode', pvc_obj.get()['spec']['volumeMode']
)
access_mode = getattr(
pvc_obj, 'access_mode', pvc_obj.get_pvc_access_mode
)
if volume_mode == 'Block':
pod_dict = constants.CSI_RBD_RAW_BLOCK_POD_YAML
raw_block_pv = True
else:
raw_block_pv = False
pod_dict = ''
if access_mode == constants.ACCESS_MODE_RWX:
pod_obj_rwx = [pod_factory(
interface=interface, pvc=pvc_obj, status=status,
pod_dict_path=pod_dict, raw_block_pv=raw_block_pv
) for _ in range(1, pods_for_rwx)]
pod_objs.extend(pod_obj_rwx)
pod_obj = pod_factory(
interface=interface, pvc=pvc_obj, status=status,
pod_dict_path=pod_dict, raw_block_pv=raw_block_pv
)
pod_objs.append(pod_obj)
return pod_objs
def create_build_from_docker_image(
image_name,
install_package,
namespace,
source_image='centos',
source_image_label='latest'
):
"""
Allows to create a build config using a Dockerfile specified as an argument
For eg., oc new-build -D $'FROM centos:7\nRUN yum install -y httpd',
creates a build with 'httpd' installed
Args:
image_name (str): Name of the image to be created
source_image (str): Source image to build docker image from,
Defaults to Centos as base image
namespace (str): project where build config should be created
source_image_label (str): Tag to use along with the image name,
Defaults to 'latest'
install_package (str): package to install over the base image
Returns:
OCP (obj): Returns the OCP object for the image
Fails on UnavailableBuildException exception if build creation
fails
"""
base_image = source_image + ':' + source_image_label
docker_file = (f"FROM {base_image}\n "
f"RUN yum install -y {install_package}\n "
f"CMD tail -f /dev/null")
command = f"new-build -D $\'{docker_file}\' --name={image_name}"
kubeconfig = os.getenv('KUBECONFIG')
oc_cmd = f"oc -n {namespace} "
if kubeconfig:
oc_cmd += f"--kubeconfig {kubeconfig} "
oc_cmd += command
logger.info(f'Running command {oc_cmd}')
result = run(
oc_cmd,
stdout=PIPE,
stderr=PIPE,
timeout=15,
shell=True
)
if result.stderr.decode():
raise UnavailableBuildException(
f'Build creation failed with error: {result.stderr.decode()}'
)
out = result.stdout.decode()
logger.info(out)
if 'Success' in out:
        # Build becomes ready once build pod goes into Completed state
pod_obj = OCP(kind='Pod', resource_name=image_name)
if pod_obj.wait_for_resource(
condition='Completed',
resource_name=f'{image_name}' + '-1-build',
timeout=300,
sleep=30
):
logger.info(f'build {image_name} ready')
set_image_lookup(image_name)
logger.info(f'image {image_name} can now be consumed')
image_stream_obj = OCP(
kind='ImageStream', resource_name=image_name
)
return image_stream_obj
else:
raise UnavailableBuildException('Build creation failed')
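# Illustrative usage (names are hypothetical): build a centos:latest based image
# with httpd installed and get the resulting ImageStream object back.
#   istream = create_build_from_docker_image(
#       image_name='httpd-test', install_package='httpd', namespace='my-project')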
def set_image_lookup(image_name):
"""
Function to enable lookup, which allows reference to the image stream tag
in the image field of the object. Example,
$ oc set image-lookup mysql
$ oc run mysql --image=mysql
Args:
image_name (str): Name of the image stream to pull
the image locally
Returns:
str: output of set image-lookup command
"""
ocp_obj = ocp.OCP(kind='ImageStream')
command = f'set image-lookup {image_name}'
logger.info(f'image lookup for image"{image_name}" is set')
status = ocp_obj.exec_oc_cmd(command)
return status
def get_worker_nodes():
"""
Fetches all worker nodes.
Returns:
list: List of names of worker nodes
"""
label = 'node-role.kubernetes.io/worker'
ocp_node_obj = ocp.OCP(kind=constants.NODE)
nodes = ocp_node_obj.get(selector=label).get('items')
worker_nodes_list = [node.get('metadata').get('name') for node in nodes]
return worker_nodes_list
def get_master_nodes():
"""
Fetches all master nodes.
Returns:
list: List of names of master nodes
"""
label = 'node-role.kubernetes.io/master'
ocp_node_obj = ocp.OCP(kind=constants.NODE)
nodes = ocp_node_obj.get(selector=label).get('items')
master_nodes_list = [node.get('metadata').get('name') for node in nodes]
return master_nodes_list
def get_start_creation_time(interface, pvc_name):
"""
Get the starting creation time of a PVC based on provisioner logs
Args:
interface (str): The interface backed the PVC
pvc_name (str): Name of the PVC for creation time measurement
Returns:
datetime object: Start time of PVC creation
"""
format = '%H:%M:%S.%f'
# Get the correct provisioner pod based on the interface
pod_name = pod.get_csi_provisioner_pod(interface)
# get the logs from the csi-provisioner containers
logs = pod.get_pod_logs(pod_name[0], 'csi-provisioner')
logs += pod.get_pod_logs(pod_name[1], 'csi-provisioner')
logs = logs.split("\n")
# Extract the starting time for the PVC provisioning
start = [
i for i in logs if re.search(f"provision.*{pvc_name}.*started", i)
]
start = start[0].split(' ')[1]
return datetime.datetime.strptime(start, format)
def get_end_creation_time(interface, pvc_name):
"""
Get the ending creation time of a PVC based on provisioner logs
Args:
interface (str): The interface backed the PVC
pvc_name (str): Name of the PVC for creation time measurement
Returns:
datetime object: End time of PVC creation
"""
format = '%H:%M:%S.%f'
# Get the correct provisioner pod based on the interface
pod_name = pod.get_csi_provisioner_pod(interface)
# get the logs from the csi-provisioner containers
logs = pod.get_pod_logs(pod_name[0], 'csi-provisioner')
logs += pod.get_pod_logs(pod_name[1], 'csi-provisioner')
logs = logs.split("\n")
# Extract the starting time for the PVC provisioning
end = [
i for i in logs if re.search(f"provision.*{pvc_name}.*succeeded", i)
]
end = end[0].split(' ')[1]
return datetime.datetime.strptime(end, format)
def measure_pvc_creation_time(interface, pvc_name):
"""
Measure PVC creation time based on logs
Args:
        interface (str): The interface backing the PVC
pvc_name (str): Name of the PVC for creation time measurement
Returns:
float: Creation time for the PVC
"""
start = get_start_creation_time(interface=interface, pvc_name=pvc_name)
end = get_end_creation_time(interface=interface, pvc_name=pvc_name)
total = end - start
return total.total_seconds()
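# Illustrative usage sketch (not part of the original module): logging how long a
# single PVC took to provision, using the creation-time helpers above. The PVC
# name 'my-pvc-1' is hypothetical; any interface constant already used in this
# module (e.g. constants.CEPHBLOCKPOOL) can be passed.
def _example_log_pvc_creation_time(pvc_name='my-pvc-1'):
    creation_time = measure_pvc_creation_time(
        interface=constants.CEPHBLOCKPOOL, pvc_name=pvc_name
    )
    logger.info(f"PVC {pvc_name} was provisioned in {creation_time:.2f} seconds")
    return creation_time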
def measure_pvc_creation_time_bulk(interface, pvc_name_list):
"""
Measure PVC creation time of bulk PVC based on logs.
Args:
        interface (str): The interface backing the PVC
pvc_name_list (list): List of PVC Names for measuring creation time
Returns:
pvc_dict (dict): Dictionary of pvc_name with creation time.
"""
# Get the correct provisioner pod based on the interface
pod_name = pod.get_csi_provisioner_pod(interface)
# get the logs from the csi-provisioner containers
logs = pod.get_pod_logs(pod_name[0], 'csi-provisioner')
logs += pod.get_pod_logs(pod_name[1], 'csi-provisioner')
logs = logs.split("\n")
pvc_dict = dict()
format = '%H:%M:%S.%f'
for pvc_name in pvc_name_list:
# Extract the starting time for the PVC provisioning
start = [
i for i in logs if re.search(f"provision.*{pvc_name}.*started", i)
]
start = start[0].split(' ')[1]
start_time = datetime.datetime.strptime(start, format)
# Extract the end time for the PVC provisioning
end = [
i for i in logs if re.search(f"provision.*{pvc_name}.*succeeded", i)
]
end = end[0].split(' ')[1]
end_time = datetime.datetime.strptime(end, format)
total = end_time - start_time
pvc_dict[pvc_name] = total.total_seconds()
return pvc_dict
def measure_pv_deletion_time_bulk(interface, pv_name_list):
"""
Measure PV deletion time of bulk PV, based on logs.
Args:
        interface (str): The interface backing the PV
pv_name_list (list): List of PV Names for measuring deletion time
Returns:
pv_dict (dict): Dictionary of pv_name with deletion time.
"""
# Get the correct provisioner pod based on the interface
pod_name = pod.get_csi_provisioner_pod(interface)
# get the logs from the csi-provisioner containers
logs = pod.get_pod_logs(pod_name[0], 'csi-provisioner')
logs += pod.get_pod_logs(pod_name[1], 'csi-provisioner')
logs = logs.split("\n")
pv_dict = dict()
format = '%H:%M:%S.%f'
for pv_name in pv_name_list:
# Extract the deletion start time for the PV
start = [
i for i in logs if re.search(f"delete \"{pv_name}\": started", i)
]
start = start[0].split(' ')[1]
start_time = datetime.datetime.strptime(start, format)
# Extract the deletion end time for the PV
end = [
i for i in logs if re.search(f"delete \"{pv_name}\": succeeded", i)
]
end = end[0].split(' ')[1]
end_time = datetime.datetime.strptime(end, format)
total = end_time - start_time
pv_dict[pv_name] = total.total_seconds()
return pv_dict
def get_start_deletion_time(interface, pv_name):
"""
Get the starting deletion time of a PVC based on provisioner logs
Args:
        interface (str): The interface backing the PVC
        pv_name (str): Name of the PV bound to the PVC, for deletion time measurement
Returns:
datetime object: Start time of PVC deletion
"""
format = '%H:%M:%S.%f'
# Get the correct provisioner pod based on the interface
pod_name = pod.get_csi_provisioner_pod(interface)
# get the logs from the csi-provisioner containers
logs = pod.get_pod_logs(pod_name[0], 'csi-provisioner')
logs += pod.get_pod_logs(pod_name[1], 'csi-provisioner')
logs = logs.split("\n")
# Extract the starting time for the PVC deletion
start = [
i for i in logs if re.search(f"delete \"{pv_name}\": started", i)
]
start = start[0].split(' ')[1]
return datetime.datetime.strptime(start, format)
def get_end_deletion_time(interface, pv_name):
"""
Get the ending deletion time of a PVC based on provisioner logs
Args:
        interface (str): The interface backing the PVC
        pv_name (str): Name of the PV bound to the PVC, for deletion time measurement
Returns:
datetime object: End time of PVC deletion
"""
format = '%H:%M:%S.%f'
# Get the correct provisioner pod based on the interface
pod_name = pod.get_csi_provisioner_pod(interface)
# get the logs from the csi-provisioner containers
logs = pod.get_pod_logs(pod_name[0], 'csi-provisioner')
logs += pod.get_pod_logs(pod_name[1], 'csi-provisioner')
logs = logs.split("\n")
    # Extract the end time for the PV deletion
end = [
i for i in logs if re.search(f"delete \"{pv_name}\": succeeded", i)
]
end = end[0].split(' ')[1]
return datetime.datetime.strptime(end, format)
def measure_pvc_deletion_time(interface, pv_name):
"""
Measure PVC deletion time based on logs
Args:
        interface (str): The interface backing the PVC
        pv_name (str): Name of the PV for deletion time measurement
Returns:
float: Deletion time for the PVC
"""
start = get_start_deletion_time(interface=interface, pv_name=pv_name)
end = get_end_deletion_time(interface=interface, pv_name=pv_name)
total = end - start
return total.total_seconds()
def pod_start_time(pod_obj):
"""
Function to measure time taken for container(s) to get into running state
by measuring the difference between container's start time (when container
went into running state) and started time (when container was actually
started)
Args:
pod_obj(obj): pod object to measure start time
Returns:
containers_start_time(dict):
Returns the name and start time of container(s) in a pod
"""
time_format = '%Y-%m-%dT%H:%M:%SZ'
containers_start_time = {}
start_time = pod_obj.data['status']['startTime']
start_time = datetime.datetime.strptime(start_time, time_format)
for container in range(len(pod_obj.data['status']['containerStatuses'])):
started_time = pod_obj.data[
'status']['containerStatuses'][container]['state'][
'running']['startedAt']
started_time = datetime.datetime.strptime(started_time, time_format)
container_name = pod_obj.data[
'status']['containerStatuses'][container]['name']
container_start_time = (started_time - start_time).seconds
containers_start_time[container_name] = container_start_time
return containers_start_time
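# Illustrative usage sketch (not part of the original module): reporting the
# per-container start times returned by pod_start_time(). The pod object is
# assumed to be an OCS pod object such as those returned by pod.get_all_pods().
def _example_report_pod_start_time(pod_obj):
    containers_start_time = pod_start_time(pod_obj)
    for container_name, seconds in containers_start_time.items():
        logger.info(
            f"Container {container_name} of pod {pod_obj.name} took "
            f"{seconds} seconds to reach Running state"
        )
    return containers_start_time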
def get_default_storage_class():
"""
Get the default StorageClass(es)
Returns:
list: default StorageClass(es) list
"""
default_sc_obj = ocp.OCP(kind='StorageClass')
storage_classes = default_sc_obj.get().get('items')
storage_classes = [
sc for sc in storage_classes if 'annotations' in sc.get('metadata')
]
return [
sc.get('metadata').get('name') for sc in storage_classes if sc.get(
'metadata'
).get('annotations').get(
'storageclass.kubernetes.io/is-default-class'
) == 'true'
]
def change_default_storageclass(scname):
"""
Change the default StorageClass to the given SC name
Args:
scname (str): StorageClass name
Returns:
bool: True on success
"""
default_sc = get_default_storage_class()
ocp_obj = ocp.OCP(kind='StorageClass')
    if default_sc:
        # Change the existing default StorageClass annotation(s) to false.
        # get_default_storage_class() returns a list, so patch each entry.
        patch = " '{\"metadata\": {\"annotations\":" \
                "{\"storageclass.kubernetes.io/is-default-class\"" \
                ":\"false\"}}}' "
        for sc in default_sc:
            patch_cmd = f"patch storageclass {sc} -p" + patch
            ocp_obj.exec_oc_cmd(command=patch_cmd)
# Change the new storageclass to default
patch = " '{\"metadata\": {\"annotations\":" \
"{\"storageclass.kubernetes.io/is-default-class\"" \
":\"true\"}}}' "
patch_cmd = f"patch storageclass {scname} -p" + patch
ocp_obj.exec_oc_cmd(command=patch_cmd)
return True
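# Illustrative usage sketch (not part of the original module): switching the
# default StorageClass and confirming the annotation was applied. The
# StorageClass name is hypothetical.
def _example_switch_default_storageclass(scname='my-rbd-storageclass'):
    logger.info(f"Current default StorageClass(es): {get_default_storage_class()}")
    change_default_storageclass(scname)
    assert scname in get_default_storage_class(), (
        f"StorageClass {scname} was not annotated as the default"
    )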
def verify_volume_deleted_in_backend(interface, image_uuid, pool_name=None):
"""
Verify that Image/Subvolume is not present in the backend.
Args:
        interface (str): The interface backing the PVC
image_uuid (str): Part of VolID which represents
corresponding image/subvolume in backend
eg: oc get pv/<volumeName> -o jsonpath='{.spec.csi.volumeHandle}'
Output is the CSI generated VolID and looks like:
'0001-000c-rook-cluster-0000000000000001-
f301898c-a192-11e9-852a-1eeeb6975c91' where
image_uuid is 'f301898c-a192-11e9-852a-1eeeb6975c91'
pool_name (str): Name of the rbd-pool if interface is CephBlockPool
Returns:
bool: True if volume is not present. False if volume is present
"""
ct_pod = pod.get_ceph_tools_pod()
if interface == constants.CEPHBLOCKPOOL:
valid_error = f"error opening image csi-vol-{image_uuid}"
cmd = f"rbd info -p {pool_name} csi-vol-{image_uuid}"
if interface == constants.CEPHFILESYSTEM:
valid_error = f"Subvolume 'csi-vol-{image_uuid}' not found"
cmd = (
f"ceph fs subvolume getpath {defaults.CEPHFILESYSTEM_NAME}"
f" csi-vol-{image_uuid} csi"
)
try:
ct_pod.exec_ceph_cmd(ceph_cmd=cmd, format='json')
return False
except CommandFailed as ecf:
assert valid_error in str(ecf), (
f"Error occurred while verifying volume is deleted in backend: "
f"{str(ecf)} ImageUUID: {image_uuid}. Interface type: {interface}"
)
logger.info(
f"Verified: Volume corresponding to uuid {image_uuid} is deleted "
f"in backend"
)
return True
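# Illustrative usage sketch (not part of the original module): deriving the
# image_uuid from a PV's CSI volumeHandle (see the docstring above for the
# handle layout) and verifying the backing RBD image is gone. The pv_obj and
# pool name are hypothetical inputs supplied by the caller.
def _example_verify_rbd_image_deleted(pv_obj, pool_name='my-cephblockpool'):
    volume_handle = pv_obj.get()['spec']['csi']['volumeHandle']
    # The image UUID is the last five dash-separated groups of the handle
    image_uuid = '-'.join(volume_handle.split('-')[-5:])
    assert verify_volume_deleted_in_backend(
        interface=constants.CEPHBLOCKPOOL,
        image_uuid=image_uuid,
        pool_name=pool_name
    ), f"Image csi-vol-{image_uuid} is still present in pool {pool_name}"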
def create_serviceaccount(namespace):
"""
Create a Serviceaccount
Args:
namespace (str): The namespace for the serviceaccount creation
Returns:
OCS: An OCS instance for the service_account
"""
service_account_data = templating.load_yaml(
constants.SERVICE_ACCOUNT_YAML
)
service_account_data['metadata']['name'] = create_unique_resource_name(
'sa', 'serviceaccount'
)
service_account_data['metadata']['namespace'] = namespace
return create_resource(**service_account_data)
def get_serviceaccount_obj(sa_name, namespace):
"""
Get serviceaccount obj
Args:
sa_name (str): Service Account name
namespace (str): The namespace for the serviceaccount creation
Returns:
OCS: An OCS instance for the service_account
"""
ocp_sa_obj = ocp.OCP(kind=constants.SERVICE_ACCOUNT, namespace=namespace)
try:
sa_dict = ocp_sa_obj.get(resource_name=sa_name)
return OCS(**sa_dict)
except CommandFailed:
logger.error("ServiceAccount not found in specified namespace")
def validate_scc_policy(sa_name, namespace):
"""
Validate serviceaccount is added to scc of privileged
Args:
sa_name (str): Service Account name
namespace (str): The namespace for the serviceaccount creation
Returns:
bool: True if sc_name is present in scc of privileged else False
"""
sa_name = f"system:serviceaccount:{namespace}:{sa_name}"
logger.info(sa_name)
ocp_scc_obj = ocp.OCP(kind=constants.SCC, namespace=namespace)
scc_dict = ocp_scc_obj.get(resource_name=constants.PRIVILEGED)
scc_users_list = scc_dict.get('users')
for scc_user in scc_users_list:
if scc_user == sa_name:
return True
return False
def add_scc_policy(sa_name, namespace):
"""
Adding ServiceAccount to scc privileged
Args:
sa_name (str): ServiceAccount name
namespace (str): The namespace for the scc_policy creation
"""
    ocp_obj = OCP()
    out = ocp_obj.exec_oc_cmd(
        command=f"adm policy add-scc-to-user privileged system:serviceaccount:{namespace}:{sa_name}",
        out_yaml_format=False
    )
logger.info(out)
def remove_scc_policy(sa_name, namespace):
"""
Removing ServiceAccount from scc privileged
Args:
sa_name (str): ServiceAccount name
namespace (str): The namespace for the scc_policy deletion
"""
    ocp_obj = OCP()
    out = ocp_obj.exec_oc_cmd(
        command=f"adm policy remove-scc-from-user privileged system:serviceaccount:{namespace}:{sa_name}",
        out_yaml_format=False
    )
logger.info(out)
def delete_deploymentconfig_pods(pod_obj):
"""
Delete deploymentconfig pod
Args:
pod_obj (object): Pod object
"""
dc_ocp_obj = ocp.OCP(kind=constants.DEPLOYMENTCONFIG, namespace=pod_obj.namespace)
pod_data_list = dc_ocp_obj.get()['items']
if pod_data_list:
for pod_data in pod_data_list:
if pod_obj.get_labels().get('name') == pod_data.get('metadata').get('name'):
dc_ocp_obj.delete(resource_name=pod_obj.get_labels().get('name'))
dc_ocp_obj.wait_for_delete(resource_name=pod_obj.get_labels().get('name'))
def craft_s3_command(mcg_obj, cmd):
"""
Crafts the AWS CLI S3 command including the
    login credentials and the command to be run
Args:
mcg_obj: An MCG object containing the MCG S3 connection credentials
cmd: The AWSCLI command to run
Returns:
str: The crafted command, ready to be executed on the pod
"""
if mcg_obj:
base_command = (
f"sh -c \"AWS_ACCESS_KEY_ID={mcg_obj.access_key_id} "
f"AWS_SECRET_ACCESS_KEY={mcg_obj.access_key} "
f"AWS_DEFAULT_REGION={mcg_obj.region} "
f"aws s3 "
f"--endpoint={mcg_obj.s3_endpoint} "
f"--no-verify-ssl "
)
string_wrapper = "\""
else:
base_command = (
f"aws s3 --no-verify-ssl --no-sign-request "
)
string_wrapper = ''
return f"{base_command}{cmd}{string_wrapper}"
def wait_for_resource_count_change(
func_to_use, previous_num, namespace, change_type='increase',
min_difference=1, timeout=20, interval=2, **func_kwargs
):
"""
Wait for a change in total count of PVC or pod
Args:
func_to_use (function): Function to be used to fetch resource info
Supported functions: pod.get_all_pvcs(), pod.get_all_pods()
previous_num (int): Previous number of pods/PVCs for comparison
namespace (str): Name of the namespace
change_type (str): Type of change to check. Accepted values are
'increase' and 'decrease'. Default is 'increase'.
min_difference (int): Minimum required difference in PVC/pod count
timeout (int): Maximum wait time in seconds
interval (int): Time in seconds to wait between consecutive checks
Returns:
True if difference in count is greater than or equal to
'min_difference'. False in case of timeout.
"""
try:
for sample in TimeoutSampler(
timeout, interval, func_to_use, namespace, **func_kwargs
):
if func_to_use == pod.get_all_pods:
current_num = len(sample)
else:
current_num = len(sample['items'])
if change_type == 'increase':
count_diff = current_num - previous_num
else:
count_diff = previous_num - current_num
if count_diff >= min_difference:
return True
except TimeoutExpiredError:
return False
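# Illustrative usage sketch (not part of the original module): waiting for the
# pod count in a namespace to grow by at least 3 after scaling up. The namespace
# value is hypothetical.
def _example_wait_for_new_pods(previous_num, namespace='my-namespace'):
    assert wait_for_resource_count_change(
        func_to_use=pod.get_all_pods,
        previous_num=previous_num,
        namespace=namespace,
        change_type='increase',
        min_difference=3,
        timeout=120,
        interval=5
    ), "Pod count did not increase by 3 within 120 seconds"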
def verify_pv_mounted_on_node(node_pv_dict):
"""
Check if mount point of a PV exists on a node
Args:
node_pv_dict (dict): Node to PV list mapping
eg: {'node1': ['pv1', 'pv2', 'pv3'], 'node2': ['pv4', 'pv5']}
Returns:
dict: Node to existing PV list mapping
eg: {'node1': ['pv1', 'pv3'], 'node2': ['pv5']}
"""
existing_pvs = {}
for node_name, pvs in node_pv_dict.items():
cmd = f'oc debug nodes/{node_name} -- df'
df_on_node = run_cmd(cmd)
existing_pvs[node_name] = []
for pv_name in pvs:
if f"/pv/{pv_name}/" in df_on_node:
existing_pvs[node_name].append(pv_name)
return existing_pvs
def converge_lists(list_to_converge):
"""
Function to flatten and remove the sublist created during future obj
Args:
list_to_converge (list): arg list of lists, eg: [[1,2],[3,4]]
Returns:
list (list): return converged list eg: [1,2,3,4]
"""
return [item for sublist in list_to_converge for item in sublist]
def create_multiple_pvc_parallel(
sc_obj, namespace, number_of_pvc, size, access_modes
):
"""
    Function to create multiple PVCs in parallel using threads
Function will create PVCs based on the available access modes
Args:
        sc_obj (obj): StorageClass object
        namespace (str): The namespace for creating the PVCs
        number_of_pvc (int): Number of PVCs to be created
        size (str): Size of the PVC, eg: '10Gi'
        access_modes (list): List of access modes for PVC creation
Returns:
pvc_objs_list (list): List of pvc objs created in function
"""
obj_status_list, result_lists = ([] for i in range(2))
with ThreadPoolExecutor() as executor:
for mode in access_modes:
result_lists.append(
executor.submit(
create_multiple_pvcs, sc_name=sc_obj.name,
namespace=namespace, number_of_pvc=number_of_pvc,
access_mode=mode, size=size)
)
result_list = [result.result() for result in result_lists]
pvc_objs_list = converge_lists(result_list)
# Check for all the pvcs in Bound state
with ThreadPoolExecutor() as executor:
for objs in pvc_objs_list:
obj_status_list.append(
executor.submit(wait_for_resource_state, objs, 'Bound')
)
if False in [obj.result() for obj in obj_status_list]:
raise TimeoutExpiredError
return pvc_objs_list
def create_pods_parallel(
pvc_list, namespace, interface, pod_dict_path=None, sa_name=None, raw_block_pv=False,
dc_deployment=False, node_selector=None
):
"""
Function to create pods in parallel
Args:
pvc_list (list): List of pvcs to be attached in pods
namespace (str): The namespace for creating pod
        interface (str): The interface backing the PVC
        pod_dict_path (str): Path to the pod yaml template
        sa_name (str): ServiceAccount name used to grant permissions
        raw_block_pv (bool): True if the PVC is consumed as a raw block device
        dc_deployment (bool): True to create the pods via DeploymentConfig
node_selector (dict): dict of key-value pair to be used for nodeSelector field
eg: {'nodetype': 'app-pod'}
Returns:
pod_objs (list): Returns list of pods created
"""
future_pod_objs = []
    # Use a 300 sec wait time: in scale tests, once the setup has many pods,
    # the time a pod takes to come up depends on the resources available
wait_time = 300
if raw_block_pv and not pod_dict_path:
pod_dict_path = constants.CSI_RBD_RAW_BLOCK_POD_YAML
with ThreadPoolExecutor() as executor:
for pvc_obj in pvc_list:
future_pod_objs.append(executor.submit(
create_pod, interface_type=interface,
pvc_name=pvc_obj.name, do_reload=False, namespace=namespace,
raw_block_pv=raw_block_pv, pod_dict_path=pod_dict_path,
sa_name=sa_name, dc_deployment=dc_deployment, node_selector=node_selector
))
    pod_objs = [future_obj.result() for future_obj in future_pod_objs]
    # Check that all the pods reach Running state. The pod creation above does
    # not wait for the pods to come up because it is done via threads.
with ThreadPoolExecutor() as executor:
for obj in pod_objs:
future_pod_objs.append(
executor.submit(wait_for_resource_state, obj, 'Running', timeout=wait_time)
)
# If pods not up raise exception/failure
if False in [obj.result() for obj in future_pod_objs]:
raise TimeoutExpiredError
return pod_objs
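# Illustrative usage sketch (not part of the original module): provisioning PVCs
# in parallel and attaching one pod per PVC. The StorageClass object and namespace
# are hypothetical, and constants.ACCESS_MODE_RWO is assumed to exist alongside
# the other constants referenced in this module.
def _example_create_pvcs_and_pods(sc_obj, namespace='my-namespace'):
    pvc_objs = create_multiple_pvc_parallel(
        sc_obj=sc_obj, namespace=namespace, number_of_pvc=5,
        size='10Gi', access_modes=[constants.ACCESS_MODE_RWO]
    )
    pod_objs = create_pods_parallel(
        pvc_list=pvc_objs, namespace=namespace,
        interface=constants.CEPHBLOCKPOOL
    )
    return pvc_objs, pod_objs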
def delete_objs_parallel(obj_list):
"""
Function to delete objs specified in list
Args:
obj_list(list): List can be obj of pod, pvc, etc
Returns:
        bool: True once all delete threads have completed
"""
threads = list()
for obj in obj_list:
process = threading.Thread(target=obj.delete)
process.start()
threads.append(process)
for process in threads:
process.join()
return True
def memory_leak_analysis(median_dict):
"""
    Function to analyse memory leak after execution of a test case.
    The memory leak is analyzed based on the top output "RES" value of the
    ceph-osd daemon, i.e. fields[7] in the code below
Args:
median_dict (dict): dict of worker nodes and respective median value
eg: median_dict = {'worker_node_1':102400, 'worker_node_2':204800, ...}
More Detail on Median value:
    Calculating the memory leak requires a constant reference value, which should
    not be taken at the start or end of the test; it is therefore obtained by
    sampling memory usage for 180 sec before TC execution and taking the median.
    The memory value can differ between nodes, so a constant value is identified
    for each node and stored in median_dict
Usage:
test_case(.., memory_leak_function):
.....
median_dict = helpers.get_memory_leak_median_value()
.....
TC execution part, memory_leak_fun will capture data
....
helpers.memory_leak_analysis(median_dict)
....
"""
# dict to store memory leak difference for each worker
diff = {}
for worker in get_worker_nodes():
memory_leak_data = []
if os.path.exists(f"/tmp/{worker}-top-output.txt"):
with open(f"/tmp/{worker}-top-output.txt", "r") as f:
data = f.readline()
                fields = [i for i in data.split(" ") if i]
                memory_leak_data.append(fields[7])
else:
logging.info(f"worker {worker} memory leak file not found")
raise UnexpectedBehaviour
number_of_lines = len(memory_leak_data) - 1
# Get the start value form median_dict arg for respective worker
start_value = median_dict[f"{worker}"]
end_value = memory_leak_data[number_of_lines]
logging.info(f"Median value {start_value}")
logging.info(f"End value {end_value}")
# Convert the values to kb for calculations
        if 'g' in start_value:
            start_value = float(1024 ** 2 * float(start_value[:-1]))
        elif 'm' in start_value:
            start_value = float(1024 * float(start_value[:-1]))
        else:
            start_value = float(start_value)
        if 'g' in end_value:
            end_value = float(1024 ** 2 * float(end_value[:-1]))
        elif 'm' in end_value:
            end_value = float(1024 * float(end_value[:-1]))
        else:
            end_value = float(end_value)
# Calculate the percentage of diff between start and end value
# Based on value decide TC pass or fail
diff[worker] = ((end_value - start_value) / start_value) * 100
logging.info(f"Percentage diff in start and end value {diff[worker]}")
if diff[worker] <= 20:
logging.info(f"No memory leak in worker {worker} passing the test")
else:
logging.info(f"There is a memory leak in worker {worker}")
logging.info(f"Memory median value start of the test {start_value}")
logging.info(f"Memory value end of the test {end_value}")
raise UnexpectedBehaviour
def get_memory_leak_median_value():
"""
    Function to calculate the memory leak median value by collecting data for 180 sec
    and finding the median, which is considered the starting point for evaluating the
    memory leak using the "RES" value of the ceph-osd daemon, i.e. fields[7] in the code below
Returns:
median_dict (dict): dict of worker nodes and respective median value
"""
median_dict = {}
timeout = 180 # wait for 180 sec to evaluate memory leak median data.
logger.info(f"waiting for {timeout} sec to evaluate the median value")
time.sleep(timeout)
for worker in get_worker_nodes():
memory_leak_data = []
if os.path.exists(f"/tmp/{worker}-top-output.txt"):
with open(f"/tmp/{worker}-top-output.txt", "r") as f:
data = f.readline()
                fields = [i for i in data.split(" ") if i]
                memory_leak_data.append(fields[7])
else:
logging.info(f"worker {worker} memory leak file not found")
raise UnexpectedBehaviour
median_dict[f"{worker}"] = statistics.median(memory_leak_data)
return median_dict
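# Illustrative usage sketch (not part of the original module): the typical
# ordering of the two memory-leak helpers around a test body, as outlined in the
# memory_leak_analysis() docstring. run_workload is a hypothetical callable that
# stands in for the actual test steps.
def _example_memory_leak_check(run_workload):
    median_dict = get_memory_leak_median_value()
    run_workload()
    # Raises UnexpectedBehaviour if RES grew by more than 20% on any worker
    memory_leak_analysis(median_dict)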
def refresh_oc_login_connection(user=None, password=None):
"""
Function to refresh oc user login
Default login using kubeadmin user and password
Args:
user (str): Username to login
password (str): Password to login
"""
user = user or config.RUN['username']
if not password:
filename = os.path.join(
config.ENV_DATA['cluster_path'],
config.RUN['password_location']
)
with open(filename) as f:
password = f.read()
ocs_obj = ocp.OCP()
ocs_obj.login(user=user, password=password)
def rsync_kubeconf_to_node(node):
"""
Function to copy kubeconfig to OCP node
Args:
node (str): OCP node to copy kubeconfig if not present
"""
filename = os.path.join(
config.ENV_DATA['cluster_path'],
config.RUN['kubeconfig_location']
)
file_path = os.path.dirname(filename)
master_list = get_master_nodes()
ocp_obj = ocp.OCP()
check_auth = 'auth'
check_conf = 'kubeconfig'
node_path = '/home/core/'
if check_auth not in ocp_obj.exec_oc_debug_cmd(node=master_list[0], cmd_list=[f"ls {node_path}"]):
ocp.rsync(
src=file_path, dst=f"{node_path}", node=node, dst_node=True
)
elif check_conf not in ocp_obj.exec_oc_debug_cmd(node=master_list[0], cmd_list=[f"ls {node_path}auth"]):
ocp.rsync(
src=file_path, dst=f"{node_path}", node=node, dst_node=True
)
def create_dummy_osd(deployment):
"""
    Replace one of the OSD pods with a pod that contains all data from the
    original OSD but doesn't run the osd daemon. This can be used e.g. for
    direct access to Ceph Placement Groups.
Args:
deployment (str): Name of deployment to use
Returns:
        tuple: first item is the dummy deployment name (str), second item is
            the dummy pod object
"""
oc = OCP(
kind=constants.DEPLOYMENT,
namespace=config.ENV_DATA.get('cluster_namespace')
)
osd_data = oc.get(deployment)
dummy_deployment = create_unique_resource_name('dummy', 'osd')
osd_data['metadata']['name'] = dummy_deployment
osd_containers = osd_data.get('spec').get('template').get('spec').get(
'containers'
)
# get osd container spec
original_osd_args = osd_containers[0].get('args')
osd_data['spec']['template']['spec']['containers'][0]['args'] = []
osd_data['spec']['template']['spec']['containers'][0]['command'] = [
'/bin/bash',
'-c',
'sleep infinity'
]
osd_file = tempfile.NamedTemporaryFile(
mode='w+', prefix=dummy_deployment, delete=False
)
with open(osd_file.name, "w") as temp:
yaml.dump(osd_data, temp)
oc.create(osd_file.name)
# downscale the original deployment and start dummy deployment instead
oc.exec_oc_cmd(f"scale --replicas=0 deployment/{deployment}")
oc.exec_oc_cmd(f"scale --replicas=1 deployment/{dummy_deployment}")
osd_list = pod.get_osd_pods()
    dummy_pod = [osd_pod for osd_pod in osd_list if dummy_deployment in osd_pod.name][0]
wait_for_resource_state(
resource=dummy_pod,
state=constants.STATUS_RUNNING,
timeout=60
)
ceph_init_cmd = '/rook/tini' + ' ' + ' '.join(original_osd_args)
try:
logger.info('Following command should expire after 7 seconds')
dummy_pod.exec_cmd_on_pod(ceph_init_cmd, timeout=7)
except TimeoutExpired:
logger.info('Killing /rook/tini process')
try:
dummy_pod.exec_sh_cmd_on_pod(
"kill $(ps aux | grep '[/]rook/tini' | awk '{print $2}')"
)
except CommandFailed:
pass
return dummy_deployment, dummy_pod
def get_failure_domin():
"""
    Function to get the failure domain of the pool
Returns:
str: Failure domain from cephblockpool
"""
ct_pod = pod.get_ceph_tools_pod()
out = ct_pod.exec_ceph_cmd(ceph_cmd="ceph osd crush rule dump", format='json')
assert out, "Failed to get cmd output"
for crush_rule in out:
if constants.CEPHBLOCKPOOL.lower() in crush_rule.get("rule_name"):
for steps in crush_rule.get("steps"):
if "type" in steps:
return steps.get("type")
def wait_for_ct_pod_recovery():
"""
    In node failure scenarios where the selected node is running the ceph
    tools pod, we'll want to wait for the pod to recover
Returns:
bool: True in case the ceph tools pod was recovered, False otherwise
"""
try:
_ = get_admin_key()
except CommandFailed as ex:
logger.info(str(ex))
if "connection timed out" in str(ex):
logger.info(
"Ceph tools box was running on the node that had a failure. "
"Hence, waiting for a new Ceph tools box pod to spin up"
)
wait_for_resource_count_change(
func_to_use=pod.get_all_pods, previous_num=1,
namespace=config.ENV_DATA['cluster_namespace'], timeout=120,
selector=constants.TOOL_APP_LABEL
)
return True
else:
return False
return True
def label_worker_node(node_list, label_key, label_value):
"""
Function to label worker node for running app pods on specific worker nodes.
Args:
node_list (list): List of node name
        label_key (str): Label key to be added to the worker nodes
        label_value (str): Label value
"""
ocp_obj = OCP()
out = ocp_obj.exec_oc_cmd(
command=f"label node {' '.join(node_list)} {label_key}={label_value}", out_yaml_format=False
)
logger.info(out)
def remove_label_from_worker_node(node_list, label_key):
"""
Function to remove label from worker node.
Args:
node_list (list): List of node name
        label_key (str): Label key to be removed from the worker nodes
"""
ocp_obj = OCP()
out = ocp_obj.exec_oc_cmd(
command=f"label node {' '.join(node_list)} {label_key}-", out_yaml_format=False
)
logger.info(out)
def get_pods_nodes_logs():
"""
Get logs from all pods and nodes
Returns:
dict: node/pod name as key, logs content as value (string)
"""
all_logs = {}
all_pods = pod.get_all_pods()
all_nodes = node.get_node_objs()
for node_obj in all_nodes:
node_name = node_obj.name
log_content = node.get_node_logs(node_name)
all_logs.update({node_name: log_content})
for pod_obj in all_pods:
try:
pod_name = pod_obj.name
log_content = pod.get_pod_logs(pod_name)
all_logs.update({pod_name: log_content})
except CommandFailed:
pass
return all_logs
def get_logs_with_errors(errors=None):
"""
From logs of all pods and nodes, get only logs
containing any of specified errors
Args:
errors (list): List of errors to look for
Returns:
dict: node/pod name as key, logs content as value; may be empty
"""
all_logs = get_pods_nodes_logs()
output_logs = {}
errors_list = constants.CRITICAL_ERRORS
if errors:
errors_list = errors_list + errors
for name, log_content in all_logs.items():
for error_msg in errors_list:
if error_msg in log_content:
logger.debug(f"Found '{error_msg}' in log of {name}")
output_logs.update({name: log_content})
log_path = f"{ocsci_log_path()}/{name}.log"
with open(log_path, 'w') as fh:
fh.write(log_content)
return output_logs
def modify_osd_replica_count(resource_name, replica_count):
"""
Function to modify osd replica count to 0 or 1
Args:
        resource_name (str): Name of the OSD pod, e.g. 'rook-ceph-osd-0-c9c4bc7c-bkf4b'
replica_count (int): osd replica count to be changed to
Returns:
bool: True in case if changes are applied. False otherwise
"""
ocp_obj = ocp.OCP(kind=constants.DEPLOYMENT, namespace=defaults.ROOK_CLUSTER_NAMESPACE)
params = f'{{"spec": {{"replicas": {replica_count}}}}}'
resource_name = '-'.join(resource_name.split('-')[0:4])
return ocp_obj.patch(resource_name=resource_name, params=params)
|
test_imperative_signal_handler.py
|
# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
import signal
import unittest
import multiprocessing
import time
import paddle.compat as cpt
from paddle.fluid import core
def set_child_signal_handler(self, child_pid):
core._set_process_pid(id(self), child_pid)
current_handler = signal.getsignal(signal.SIGCHLD)
if not callable(current_handler):
current_handler = None
def __handler__(signum, frame):
core._throw_error_if_process_failed()
if current_handler is not None:
current_handler(signum, frame)
signal.signal(signal.SIGCHLD, __handler__)
class TestDygraphDataLoaderSingalHandler(unittest.TestCase):
def test_child_process_exit_will_error(self):
def __test_process__():
core._set_process_signal_handler()
sys.exit(1)
exception = None
try:
test_process = multiprocessing.Process(target=__test_process__)
test_process.start()
set_child_signal_handler(id(self), test_process.pid)
time.sleep(3)
except core.EnforceNotMet as ex:
self.assertIn("FatalError", cpt.get_exception_message(ex))
exception = ex
self.assertIsNotNone(exception)
def test_child_process_killed_by_sigsegv(self):
def __test_process__():
core._set_process_signal_handler()
os.kill(os.getpid(), signal.SIGSEGV)
exception = None
try:
test_process = multiprocessing.Process(target=__test_process__)
test_process.start()
set_child_signal_handler(id(self), test_process.pid)
time.sleep(3)
except core.EnforceNotMet as ex:
self.assertIn("FatalError", cpt.get_exception_message(ex))
exception = ex
self.assertIsNotNone(exception)
def test_child_process_killed_by_sigterm(self):
def __test_process__():
core._set_process_signal_handler()
time.sleep(10)
test_process = multiprocessing.Process(target=__test_process__)
test_process.daemon = True
test_process.start()
set_child_signal_handler(id(self), test_process.pid)
time.sleep(1)
if __name__ == '__main__':
unittest.main()
|
helper.py
|
from pathlib import Path
import sys
import time
import threading
import itertools
import re
from texttable import Texttable
from termcolor import colored
class Logger:
show_warnings = True
@classmethod
def error(cls, text):
Logger.print(text, "ERROR:", "red")
@classmethod
def clear(cls):
sys.stdout.write("\r" + " " * 100 + "\r")
@classmethod
def warning(cls, text):
if cls.show_warnings:
Logger.print(text, "WARNING:", "yellow")
@classmethod
def info(cls, text):
Logger.print(text, "INFO:", "green")
@classmethod
def print(cls, text, head, color=None, background=None, end="\n"):
Logger.clear()
print(colored(f"{head}", color, background), text, end=end)
@classmethod
def print_table(cls, rows):
Logger.clear()
table = Texttable()
table.set_max_width(100)
table.add_rows(rows)
print(table.draw())
def animate_wait(f):
done = False
def animate():
        for c in itertools.cycle(r"/—\|"):
if done:
sys.stdout.write("\r")
break
sys.stdout.write("\rPlease wait " + c)
time.sleep(0.1)
sys.stdout.flush()
def wrapper(*args):
nonlocal done
done = False
t = threading.Thread(target=animate)
t.daemon = True
t.start()
output = f(*args)
done = True
return output
return wrapper
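# Illustrative usage sketch (not part of the original module): decorating a slow
# call so the spinner animates until it returns. The sleep stands in for any
# long-running operation.
@animate_wait
def _example_slow_operation(seconds=3):
    time.sleep(seconds)
    return "done"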
def correct_path(path: str):
return re.sub("[^-a-zA-Z0-9_.() /]+", "", path)
def download_file(
session, link: str, path: Path, progress=True, max_retry=10, overwrite=False
):
# start = time.clock()
if not overwrite and path.exists():
Logger.warning(f"{path.absolute()} is already downloaded")
return
    response = None
    for i in range(max_retry):
        try:
            response = session.get(link, stream=True)
            break
        except Exception:
            Logger.print("", f"Retry [{i + 1}/{max_retry}]", "magenta", end="")
    if response is None:
        Logger.error(f"Failed to download {link}")
        return
path.parent.mkdir(exist_ok=True, parents=True)
total_length = response.headers.get("content-length")
with path.open("wb") as f:
if total_length is None: # no content length header
f.write(response.content)
else:
dl = 0
total_length = int(total_length)
for data in response.iter_content(chunk_size=1024 * 1024): # 1MB
dl += len(data)
f.write(data)
if progress:
print_progress(dl, total_length, path.name)
if progress:
sys.stdout.write("\n")
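# Illustrative usage sketch (not part of the original module): downloading a file
# with a requests session. The URL and destination are hypothetical, and the
# 'requests' package is assumed to be available since download_file() expects a
# requests-like session object.
def _example_download(link="https://example.com/archive.zip"):
    import requests  # assumed dependency providing the session object
    session = requests.Session()
    download_file(session, link, Path("downloads") / correct_path("archive.zip"))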
def print_progress(progress, total, name, max=50):
done = int(max * progress / total)
Logger.print(
"[%s%s] %d%%" % ("=" * done, " " * (max - done), done * 2),
f"Downloading [{name}]",
"blue",
end="\r",
)
sys.stdout.flush()
def save_text(path: Path, content: str, overwrite=False):
    if path.exists() and not path.is_file():
        Logger.error(f"{path.absolute()} isn't a file")
        return
    if not overwrite and path.exists():
        Logger.warning(f"{path.absolute()} already exists")
return
path.parent.mkdir(exist_ok=True, parents=True)
path.write_text(content, encoding="utf8")
# Logger.info(f"{path.name} has been saved.")
def fix_track_link(link):
if "?" in link:
link += "&embedded=true"
else:
link += "?embedded=true"
return link
|
__init__.py
|
import builtins
import contextlib
import errno
import glob
import importlib.util
from importlib._bootstrap_external import _get_sourcefile
import marshal
import os
import py_compile
import random
import shutil
import stat
import subprocess
import sys
import textwrap
import threading
import time
import unittest
from unittest import mock
from test.support import os_helper
from test.support import (is_jython, swap_attr, swap_item, cpython_only)
from test.support.import_helper import (
forget, make_legacy_pyc, unlink, unload, DirsOnSysPath)
from test.support.os_helper import (
TESTFN, rmtree, temp_umask, TESTFN_UNENCODABLE, temp_dir)
from test.support import script_helper
from test.support import threading_helper
from test.test_importlib.util import uncache
from types import ModuleType
skip_if_dont_write_bytecode = unittest.skipIf(
sys.dont_write_bytecode,
"test meaningful only when writing bytecode")
def remove_files(name):
for f in (name + ".py",
name + ".pyc",
name + ".pyw",
name + "$py.class"):
unlink(f)
rmtree('__pycache__')
@contextlib.contextmanager
def _ready_to_import(name=None, source=""):
# sets up a temporary directory and removes it
# creates the module file
# temporarily clears the module from sys.modules (if any)
# reverts or removes the module when cleaning up
name = name or "spam"
with temp_dir() as tempdir:
path = script_helper.make_script(tempdir, name, source)
old_module = sys.modules.pop(name, None)
try:
sys.path.insert(0, tempdir)
yield name, path
sys.path.remove(tempdir)
finally:
if old_module is not None:
sys.modules[name] = old_module
elif name in sys.modules:
del sys.modules[name]
class ImportTests(unittest.TestCase):
def setUp(self):
remove_files(TESTFN)
importlib.invalidate_caches()
def tearDown(self):
unload(TESTFN)
def test_import_raises_ModuleNotFoundError(self):
with self.assertRaises(ModuleNotFoundError):
import something_that_should_not_exist_anywhere
def test_from_import_missing_module_raises_ModuleNotFoundError(self):
with self.assertRaises(ModuleNotFoundError):
from something_that_should_not_exist_anywhere import blah
def test_from_import_missing_attr_raises_ImportError(self):
with self.assertRaises(ImportError):
from importlib import something_that_should_not_exist_anywhere
def test_from_import_missing_attr_has_name_and_path(self):
with self.assertRaises(ImportError) as cm:
from os import i_dont_exist
self.assertEqual(cm.exception.name, 'os')
self.assertEqual(cm.exception.path, os.__file__)
self.assertRegex(str(cm.exception), r"cannot import name 'i_dont_exist' from 'os' \(.*os.py\)")
@cpython_only
def test_from_import_missing_attr_has_name_and_so_path(self):
import _testcapi
with self.assertRaises(ImportError) as cm:
from _testcapi import i_dont_exist
self.assertEqual(cm.exception.name, '_testcapi')
self.assertEqual(cm.exception.path, _testcapi.__file__)
self.assertRegex(str(cm.exception), r"cannot import name 'i_dont_exist' from '_testcapi' \(.*\.(so|pyd)\)")
def test_from_import_missing_attr_has_name(self):
with self.assertRaises(ImportError) as cm:
# _warning has no path as it's a built-in module.
from _warning import i_dont_exist
self.assertEqual(cm.exception.name, '_warning')
self.assertIsNone(cm.exception.path)
def test_from_import_missing_attr_path_is_canonical(self):
with self.assertRaises(ImportError) as cm:
from os.path import i_dont_exist
self.assertIn(cm.exception.name, {'posixpath', 'ntpath'})
self.assertIsNotNone(cm.exception)
def test_from_import_star_invalid_type(self):
import re
with _ready_to_import() as (name, path):
with open(path, 'w') as f:
f.write("__all__ = [b'invalid_type']")
globals = {}
with self.assertRaisesRegex(
TypeError, f"{re.escape(name)}\\.__all__ must be str"
):
exec(f"from {name} import *", globals)
self.assertNotIn(b"invalid_type", globals)
with _ready_to_import() as (name, path):
with open(path, 'w') as f:
f.write("globals()[b'invalid_type'] = object()")
globals = {}
with self.assertRaisesRegex(
TypeError, f"{re.escape(name)}\\.__dict__ must be str"
):
exec(f"from {name} import *", globals)
self.assertNotIn(b"invalid_type", globals)
def test_case_sensitivity(self):
# Brief digression to test that import is case-sensitive: if we got
# this far, we know for sure that "random" exists.
with self.assertRaises(ImportError):
import RAnDoM
def test_double_const(self):
# Another brief digression to test the accuracy of manifest float
# constants.
from test import double_const # don't blink -- that *was* the test
def test_import(self):
def test_with_extension(ext):
# The extension is normally ".py", perhaps ".pyw".
source = TESTFN + ext
if is_jython:
pyc = TESTFN + "$py.class"
else:
pyc = TESTFN + ".pyc"
with open(source, "w") as f:
print("# This tests Python's ability to import a",
ext, "file.", file=f)
a = random.randrange(1000)
b = random.randrange(1000)
print("a =", a, file=f)
print("b =", b, file=f)
if TESTFN in sys.modules:
del sys.modules[TESTFN]
importlib.invalidate_caches()
try:
try:
mod = __import__(TESTFN)
except ImportError as err:
self.fail("import from %s failed: %s" % (ext, err))
self.assertEqual(mod.a, a,
"module loaded (%s) but contents invalid" % mod)
self.assertEqual(mod.b, b,
"module loaded (%s) but contents invalid" % mod)
finally:
forget(TESTFN)
unlink(source)
unlink(pyc)
sys.path.insert(0, os.curdir)
try:
test_with_extension(".py")
if sys.platform.startswith("win"):
for ext in [".PY", ".Py", ".pY", ".pyw", ".PYW", ".pYw"]:
test_with_extension(ext)
finally:
del sys.path[0]
def test_module_with_large_stack(self, module='longlist'):
# Regression test for http://bugs.python.org/issue561858.
filename = module + '.py'
# Create a file with a list of 65000 elements.
with open(filename, 'w') as f:
f.write('d = [\n')
for i in range(65000):
f.write('"",\n')
f.write(']')
try:
# Compile & remove .py file; we only need .pyc.
# Bytecode must be relocated from the PEP 3147 bytecode-only location.
py_compile.compile(filename)
finally:
unlink(filename)
# Need to be able to load from current dir.
sys.path.append('')
importlib.invalidate_caches()
namespace = {}
try:
make_legacy_pyc(filename)
# This used to crash.
exec('import ' + module, None, namespace)
finally:
# Cleanup.
del sys.path[-1]
unlink(filename + 'c')
unlink(filename + 'o')
# Remove references to the module (unload the module)
namespace.clear()
try:
del sys.modules[module]
except KeyError:
pass
def test_failing_import_sticks(self):
source = TESTFN + ".py"
with open(source, "w") as f:
print("a = 1/0", file=f)
# New in 2.4, we shouldn't be able to import that no matter how often
# we try.
sys.path.insert(0, os.curdir)
importlib.invalidate_caches()
if TESTFN in sys.modules:
del sys.modules[TESTFN]
try:
for i in [1, 2, 3]:
self.assertRaises(ZeroDivisionError, __import__, TESTFN)
self.assertNotIn(TESTFN, sys.modules,
"damaged module in sys.modules on %i try" % i)
finally:
del sys.path[0]
remove_files(TESTFN)
def test_import_name_binding(self):
# import x.y.z binds x in the current namespace
import test as x
import test.support
self.assertIs(x, test, x.__name__)
self.assertTrue(hasattr(test.support, "__file__"))
# import x.y.z as w binds z as w
import test.support as y
self.assertIs(y, test.support, y.__name__)
def test_issue31286(self):
# import in a 'finally' block resulted in SystemError
try:
x = ...
finally:
import test.support.script_helper as x
# import in a 'while' loop resulted in stack overflow
i = 0
while i < 10:
import test.support.script_helper as x
i += 1
# import in a 'for' loop resulted in segmentation fault
for i in range(2):
import test.support.script_helper as x
def test_failing_reload(self):
# A failing reload should leave the module object in sys.modules.
source = TESTFN + os.extsep + "py"
with open(source, "w") as f:
f.write("a = 1\nb=2\n")
sys.path.insert(0, os.curdir)
try:
mod = __import__(TESTFN)
self.assertIn(TESTFN, sys.modules)
self.assertEqual(mod.a, 1, "module has wrong attribute values")
self.assertEqual(mod.b, 2, "module has wrong attribute values")
# On WinXP, just replacing the .py file wasn't enough to
# convince reload() to reparse it. Maybe the timestamp didn't
# move enough. We force it to get reparsed by removing the
# compiled file too.
remove_files(TESTFN)
# Now damage the module.
with open(source, "w") as f:
f.write("a = 10\nb=20//0\n")
self.assertRaises(ZeroDivisionError, importlib.reload, mod)
# But we still expect the module to be in sys.modules.
mod = sys.modules.get(TESTFN)
self.assertIsNotNone(mod, "expected module to be in sys.modules")
# We should have replaced a w/ 10, but the old b value should
# stick.
self.assertEqual(mod.a, 10, "module has wrong attribute values")
self.assertEqual(mod.b, 2, "module has wrong attribute values")
finally:
del sys.path[0]
remove_files(TESTFN)
unload(TESTFN)
@skip_if_dont_write_bytecode
def test_file_to_source(self):
# check if __file__ points to the source file where available
source = TESTFN + ".py"
with open(source, "w") as f:
f.write("test = None\n")
sys.path.insert(0, os.curdir)
try:
mod = __import__(TESTFN)
self.assertTrue(mod.__file__.endswith('.py'))
os.remove(source)
del sys.modules[TESTFN]
make_legacy_pyc(source)
importlib.invalidate_caches()
mod = __import__(TESTFN)
base, ext = os.path.splitext(mod.__file__)
self.assertEqual(ext, '.pyc')
finally:
del sys.path[0]
remove_files(TESTFN)
if TESTFN in sys.modules:
del sys.modules[TESTFN]
def test_import_by_filename(self):
path = os.path.abspath(TESTFN)
encoding = sys.getfilesystemencoding()
try:
path.encode(encoding)
except UnicodeEncodeError:
self.skipTest('path is not encodable to {}'.format(encoding))
with self.assertRaises(ImportError) as c:
__import__(path)
def test_import_in_del_does_not_crash(self):
# Issue 4236
testfn = script_helper.make_script('', TESTFN, textwrap.dedent("""\
import sys
class C:
def __del__(self):
import importlib
sys.argv.insert(0, C())
"""))
script_helper.assert_python_ok(testfn)
@skip_if_dont_write_bytecode
def test_timestamp_overflow(self):
# A modification timestamp larger than 2**32 should not be a problem
# when importing a module (issue #11235).
sys.path.insert(0, os.curdir)
try:
source = TESTFN + ".py"
compiled = importlib.util.cache_from_source(source)
with open(source, 'w') as f:
pass
try:
os.utime(source, (2 ** 33 - 5, 2 ** 33 - 5))
except OverflowError:
self.skipTest("cannot set modification time to large integer")
except OSError as e:
if e.errno not in (getattr(errno, 'EOVERFLOW', None),
getattr(errno, 'EINVAL', None)):
raise
self.skipTest("cannot set modification time to large integer ({})".format(e))
__import__(TESTFN)
# The pyc file was created.
os.stat(compiled)
finally:
del sys.path[0]
remove_files(TESTFN)
def test_bogus_fromlist(self):
try:
__import__('http', fromlist=['blah'])
except ImportError:
self.fail("fromlist must allow bogus names")
@cpython_only
def test_delete_builtins_import(self):
args = ["-c", "del __builtins__.__import__; import os"]
popen = script_helper.spawn_python(*args)
stdout, stderr = popen.communicate()
self.assertIn(b"ImportError", stdout)
def test_from_import_message_for_nonexistent_module(self):
with self.assertRaisesRegex(ImportError, "^No module named 'bogus'"):
from bogus import foo
def test_from_import_message_for_existing_module(self):
with self.assertRaisesRegex(ImportError, "^cannot import name 'bogus'"):
from re import bogus
def test_from_import_AttributeError(self):
# Issue #24492: trying to import an attribute that raises an
# AttributeError should lead to an ImportError.
class AlwaysAttributeError:
def __getattr__(self, _):
raise AttributeError
module_name = 'test_from_import_AttributeError'
self.addCleanup(unload, module_name)
sys.modules[module_name] = AlwaysAttributeError()
with self.assertRaises(ImportError) as cm:
from test_from_import_AttributeError import does_not_exist
self.assertEqual(str(cm.exception),
"cannot import name 'does_not_exist' from '<unknown module name>' (unknown location)")
@cpython_only
def test_issue31492(self):
# There shouldn't be an assertion failure in case of failing to import
# from a module with a bad __name__ attribute, or in case of failing
# to access an attribute of such a module.
with swap_attr(os, '__name__', None):
with self.assertRaises(ImportError):
from os import does_not_exist
with self.assertRaises(AttributeError):
os.does_not_exist
def test_concurrency(self):
# bpo 38091: this is a hack to slow down the code that calls
# has_deadlock(); the logic was itself sometimes deadlocking.
def delay_has_deadlock(frame, event, arg):
if event == 'call' and frame.f_code.co_name == 'has_deadlock':
time.sleep(0.1)
sys.path.insert(0, os.path.join(os.path.dirname(__file__), 'data'))
try:
exc = None
def run():
sys.settrace(delay_has_deadlock)
event.wait()
try:
import package
except BaseException as e:
nonlocal exc
exc = e
sys.settrace(None)
for i in range(10):
event = threading.Event()
threads = [threading.Thread(target=run) for x in range(2)]
try:
with threading_helper.start_threads(threads, event.set):
time.sleep(0)
finally:
sys.modules.pop('package', None)
sys.modules.pop('package.submodule', None)
if exc is not None:
raise exc
finally:
del sys.path[0]
@unittest.skipUnless(sys.platform == "win32", "Windows-specific")
def test_dll_dependency_import(self):
from _winapi import GetModuleFileName
dllname = GetModuleFileName(sys.dllhandle)
pydname = importlib.util.find_spec("_sqlite3").origin
depname = os.path.join(
os.path.dirname(pydname),
"sqlite3{}.dll".format("_d" if "_d" in pydname else ""))
with os_helper.temp_dir() as tmp:
tmp2 = os.path.join(tmp, "DLLs")
os.mkdir(tmp2)
pyexe = os.path.join(tmp, os.path.basename(sys.executable))
shutil.copy(sys.executable, pyexe)
shutil.copy(dllname, tmp)
for f in glob.glob(os.path.join(glob.escape(sys.prefix), "vcruntime*.dll")):
shutil.copy(f, tmp)
shutil.copy(pydname, tmp2)
env = None
env = {k.upper(): os.environ[k] for k in os.environ}
env["PYTHONPATH"] = tmp2 + ";" + os.path.dirname(os.__file__)
# Test 1: import with added DLL directory
subprocess.check_call([
pyexe, "-Sc", ";".join([
"import os",
"p = os.add_dll_directory({!r})".format(
os.path.dirname(depname)),
"import _sqlite3",
"p.close"
])],
stderr=subprocess.STDOUT,
env=env,
cwd=os.path.dirname(pyexe))
# Test 2: import with DLL adjacent to PYD
shutil.copy(depname, tmp2)
subprocess.check_call([pyexe, "-Sc", "import _sqlite3"],
stderr=subprocess.STDOUT,
env=env,
cwd=os.path.dirname(pyexe))
@skip_if_dont_write_bytecode
class FilePermissionTests(unittest.TestCase):
# tests for file mode on cached .pyc files
@unittest.skipUnless(os.name == 'posix',
"test meaningful only on posix systems")
def test_creation_mode(self):
mask = 0o022
with temp_umask(mask), _ready_to_import() as (name, path):
cached_path = importlib.util.cache_from_source(path)
module = __import__(name)
if not os.path.exists(cached_path):
self.fail("__import__ did not result in creation of "
"a .pyc file")
stat_info = os.stat(cached_path)
# Check that the umask is respected, and the executable bits
# aren't set.
self.assertEqual(oct(stat.S_IMODE(stat_info.st_mode)),
oct(0o666 & ~mask))
@unittest.skipUnless(os.name == 'posix',
"test meaningful only on posix systems")
def test_cached_mode_issue_2051(self):
# permissions of .pyc should match those of .py, regardless of mask
mode = 0o600
with temp_umask(0o022), _ready_to_import() as (name, path):
cached_path = importlib.util.cache_from_source(path)
os.chmod(path, mode)
__import__(name)
if not os.path.exists(cached_path):
self.fail("__import__ did not result in creation of "
"a .pyc file")
stat_info = os.stat(cached_path)
self.assertEqual(oct(stat.S_IMODE(stat_info.st_mode)), oct(mode))
@unittest.skipUnless(os.name == 'posix',
"test meaningful only on posix systems")
def test_cached_readonly(self):
mode = 0o400
with temp_umask(0o022), _ready_to_import() as (name, path):
cached_path = importlib.util.cache_from_source(path)
os.chmod(path, mode)
__import__(name)
if not os.path.exists(cached_path):
self.fail("__import__ did not result in creation of "
"a .pyc file")
stat_info = os.stat(cached_path)
expected = mode | 0o200 # Account for fix for issue #6074
self.assertEqual(oct(stat.S_IMODE(stat_info.st_mode)), oct(expected))
def test_pyc_always_writable(self):
# Initially read-only .pyc files on Windows used to cause problems
# with later updates, see issue #6074 for details
with _ready_to_import() as (name, path):
# Write a Python file, make it read-only and import it
with open(path, 'w') as f:
f.write("x = 'original'\n")
# Tweak the mtime of the source to ensure pyc gets updated later
s = os.stat(path)
os.utime(path, (s.st_atime, s.st_mtime-100000000))
os.chmod(path, 0o400)
m = __import__(name)
self.assertEqual(m.x, 'original')
# Change the file and then reimport it
os.chmod(path, 0o600)
with open(path, 'w') as f:
f.write("x = 'rewritten'\n")
unload(name)
importlib.invalidate_caches()
m = __import__(name)
self.assertEqual(m.x, 'rewritten')
# Now delete the source file and check the pyc was rewritten
unlink(path)
unload(name)
importlib.invalidate_caches()
bytecode_only = path + "c"
os.rename(importlib.util.cache_from_source(path), bytecode_only)
m = __import__(name)
self.assertEqual(m.x, 'rewritten')
class PycRewritingTests(unittest.TestCase):
# Test that the `co_filename` attribute on code objects always points
# to the right file, even when various things happen (e.g. both the .py
# and the .pyc file are renamed).
module_name = "unlikely_module_name"
module_source = """
import sys
code_filename = sys._getframe().f_code.co_filename
module_filename = __file__
constant = 1
def func():
pass
func_filename = func.__code__.co_filename
"""
dir_name = os.path.abspath(TESTFN)
file_name = os.path.join(dir_name, module_name) + os.extsep + "py"
compiled_name = importlib.util.cache_from_source(file_name)
def setUp(self):
self.sys_path = sys.path[:]
self.orig_module = sys.modules.pop(self.module_name, None)
os.mkdir(self.dir_name)
with open(self.file_name, "w") as f:
f.write(self.module_source)
sys.path.insert(0, self.dir_name)
importlib.invalidate_caches()
def tearDown(self):
sys.path[:] = self.sys_path
if self.orig_module is not None:
sys.modules[self.module_name] = self.orig_module
else:
unload(self.module_name)
unlink(self.file_name)
unlink(self.compiled_name)
rmtree(self.dir_name)
def import_module(self):
ns = globals()
__import__(self.module_name, ns, ns)
return sys.modules[self.module_name]
def test_basics(self):
mod = self.import_module()
self.assertEqual(mod.module_filename, self.file_name)
self.assertEqual(mod.code_filename, self.file_name)
self.assertEqual(mod.func_filename, self.file_name)
del sys.modules[self.module_name]
mod = self.import_module()
self.assertEqual(mod.module_filename, self.file_name)
self.assertEqual(mod.code_filename, self.file_name)
self.assertEqual(mod.func_filename, self.file_name)
def test_incorrect_code_name(self):
py_compile.compile(self.file_name, dfile="another_module.py")
mod = self.import_module()
self.assertEqual(mod.module_filename, self.file_name)
self.assertEqual(mod.code_filename, self.file_name)
self.assertEqual(mod.func_filename, self.file_name)
def test_module_without_source(self):
target = "another_module.py"
py_compile.compile(self.file_name, dfile=target)
os.remove(self.file_name)
pyc_file = make_legacy_pyc(self.file_name)
importlib.invalidate_caches()
mod = self.import_module()
self.assertEqual(mod.module_filename, pyc_file)
self.assertEqual(mod.code_filename, target)
self.assertEqual(mod.func_filename, target)
def test_foreign_code(self):
py_compile.compile(self.file_name)
with open(self.compiled_name, "rb") as f:
header = f.read(16)
code = marshal.load(f)
constants = list(code.co_consts)
foreign_code = importlib.import_module.__code__
pos = constants.index(1)
constants[pos] = foreign_code
code = code.replace(co_consts=tuple(constants))
with open(self.compiled_name, "wb") as f:
f.write(header)
marshal.dump(code, f)
mod = self.import_module()
self.assertEqual(mod.constant.co_filename, foreign_code.co_filename)
class PathsTests(unittest.TestCase):
SAMPLES = ('test', 'test\u00e4\u00f6\u00fc\u00df', 'test\u00e9\u00e8',
'test\u00b0\u00b3\u00b2')
path = TESTFN
def setUp(self):
os.mkdir(self.path)
self.syspath = sys.path[:]
def tearDown(self):
rmtree(self.path)
sys.path[:] = self.syspath
# Regression test for http://bugs.python.org/issue1293.
def test_trailing_slash(self):
with open(os.path.join(self.path, 'test_trailing_slash.py'), 'w') as f:
f.write("testdata = 'test_trailing_slash'")
sys.path.append(self.path+'/')
mod = __import__("test_trailing_slash")
self.assertEqual(mod.testdata, 'test_trailing_slash')
unload("test_trailing_slash")
# Regression test for http://bugs.python.org/issue3677.
@unittest.skipUnless(sys.platform == 'win32', 'Windows-specific')
def test_UNC_path(self):
with open(os.path.join(self.path, 'test_unc_path.py'), 'w') as f:
f.write("testdata = 'test_unc_path'")
importlib.invalidate_caches()
# Create the UNC path, like \\myhost\c$\foo\bar.
path = os.path.abspath(self.path)
import socket
hn = socket.gethostname()
drive = path[0]
unc = "\\\\%s\\%s$"%(hn, drive)
unc += path[2:]
try:
os.listdir(unc)
except OSError as e:
if e.errno in (errno.EPERM, errno.EACCES, errno.ENOENT):
# See issue #15338
self.skipTest("cannot access administrative share %r" % (unc,))
raise
sys.path.insert(0, unc)
try:
mod = __import__("test_unc_path")
except ImportError as e:
self.fail("could not import 'test_unc_path' from %r: %r"
% (unc, e))
self.assertEqual(mod.testdata, 'test_unc_path')
self.assertTrue(mod.__file__.startswith(unc), mod.__file__)
unload("test_unc_path")
class RelativeImportTests(unittest.TestCase):
def tearDown(self):
unload("test.relimport")
setUp = tearDown
def test_relimport_star(self):
# This will import * from .test_import.
from .. import relimport
self.assertTrue(hasattr(relimport, "RelativeImportTests"))
def test_issue3221(self):
# Note for mergers: the 'absolute' tests from the 2.x branch
# are missing in Py3k because implicit relative imports are
# a thing of the past
#
# Regression test for http://bugs.python.org/issue3221.
def check_relative():
exec("from . import relimport", ns)
# Check relative import OK with __package__ and __name__ correct
ns = dict(__package__='test', __name__='test.notarealmodule')
check_relative()
# Check relative import OK with only __name__ wrong
ns = dict(__package__='test', __name__='notarealpkg.notarealmodule')
check_relative()
# Check relative import fails with only __package__ wrong
ns = dict(__package__='foo', __name__='test.notarealmodule')
self.assertRaises(ModuleNotFoundError, check_relative)
# Check relative import fails with __package__ and __name__ wrong
ns = dict(__package__='foo', __name__='notarealpkg.notarealmodule')
self.assertRaises(ModuleNotFoundError, check_relative)
# Check relative import fails with package set to a non-string
ns = dict(__package__=object())
self.assertRaises(TypeError, check_relative)
def test_parentless_import_shadowed_by_global(self):
# Test as if this were done from the REPL where this error most commonly occurs (bpo-37409).
script_helper.assert_python_failure('-W', 'ignore', '-c',
"foo = 1; from . import foo")
def test_absolute_import_without_future(self):
# If explicit relative import syntax is used, then do not try
# to perform an absolute import in the face of failure.
# Issue #7902.
with self.assertRaises(ImportError):
from .os import sep
self.fail("explicit relative import triggered an "
"implicit absolute import")
def test_import_from_non_package(self):
path = os.path.join(os.path.dirname(__file__), 'data', 'package2')
with uncache('submodule1', 'submodule2'), DirsOnSysPath(path):
with self.assertRaises(ImportError):
import submodule1
self.assertNotIn('submodule1', sys.modules)
self.assertNotIn('submodule2', sys.modules)
def test_import_from_unloaded_package(self):
with uncache('package2', 'package2.submodule1', 'package2.submodule2'), \
DirsOnSysPath(os.path.join(os.path.dirname(__file__), 'data')):
import package2.submodule1
package2.submodule1.submodule2
class OverridingImportBuiltinTests(unittest.TestCase):
def test_override_builtin(self):
# Test that overriding builtins.__import__ can bypass sys.modules.
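# Background note (added for clarity): the import statement looks __import__ up
# in the builtins of the running frame each time it executes, so rebinding
# builtins.__import__ reroutes every import in this test and bypasses the
# sys.modules fast path; a global named __import__ is never consulted, which is
# what the second half of the test pins down.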
import os
def foo():
import os
return os
self.assertEqual(foo(), os) # Quick sanity check.
with swap_attr(builtins, "__import__", lambda *x: 5):
self.assertEqual(foo(), 5)
# Test what happens when we shadow __import__ in globals(); this
# currently does not impact the import process, but if this changes,
# other code will need to change, so keep this test as a tripwire.
with swap_item(globals(), "__import__", lambda *x: 5):
self.assertEqual(foo(), os)
class PycacheTests(unittest.TestCase):
# Test the various PEP 3147/488-related behaviors.
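# Background note (added for clarity): PEP 3147 places the bytecode for foo.py
# at __pycache__/foo.<tag>.pyc instead of foo.pyc, where <tag> names the
# interpreter (e.g. 'cpython-311'; the exact tag varies by version), and
# PEP 488 folded the old .pyo files into the same scheme by encoding the
# optimization level in the tag (foo.<tag>.opt-1.pyc and so on).
# importlib.util.cache_from_source() computes that cached path, e.g.:
#     importlib.util.cache_from_source('foo.py')
#     # -> os.path.join('__pycache__', 'foo.' + sys.implementation.cache_tag + '.pyc')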
def _clean(self):
forget(TESTFN)
rmtree('__pycache__')
unlink(self.source)
def setUp(self):
self.source = TESTFN + '.py'
self._clean()
with open(self.source, 'w') as fp:
print('# This is a test file written by test_import.py', file=fp)
sys.path.insert(0, os.curdir)
importlib.invalidate_caches()
def tearDown(self):
assert sys.path[0] == os.curdir, 'Unexpected sys.path[0]'
del sys.path[0]
self._clean()
@skip_if_dont_write_bytecode
def test_import_pyc_path(self):
self.assertFalse(os.path.exists('__pycache__'))
__import__(TESTFN)
self.assertTrue(os.path.exists('__pycache__'))
pyc_path = importlib.util.cache_from_source(self.source)
self.assertTrue(os.path.exists(pyc_path),
'bytecode file {!r} for {!r} does not '
'exist'.format(pyc_path, TESTFN))
@unittest.skipUnless(os.name == 'posix',
"test meaningful only on posix systems")
@unittest.skipIf(hasattr(os, 'geteuid') and os.geteuid() == 0,
"due to varying filesystem permission semantics (issue #11956)")
@skip_if_dont_write_bytecode
def test_unwritable_directory(self):
# When the umask causes the new __pycache__ directory to be
# unwritable, the import still succeeds but no .pyc file is written.
with temp_umask(0o222):
__import__(TESTFN)
self.assertTrue(os.path.exists('__pycache__'))
pyc_path = importlib.util.cache_from_source(self.source)
self.assertFalse(os.path.exists(pyc_path),
'bytecode file {!r} for {!r} '
'exists'.format(pyc_path, TESTFN))
@skip_if_dont_write_bytecode
def test_missing_source(self):
# With PEP 3147 cache layout, removing the source but leaving the pyc
# file does not satisfy the import.
__import__(TESTFN)
pyc_file = importlib.util.cache_from_source(self.source)
self.assertTrue(os.path.exists(pyc_file))
os.remove(self.source)
forget(TESTFN)
importlib.invalidate_caches()
self.assertRaises(ImportError, __import__, TESTFN)
@skip_if_dont_write_bytecode
def test_missing_source_legacy(self):
# Like test_missing_source() except that for backward compatibility,
# when the pyc file lives where the py file would have been (and named
# without the tag), it is importable. The __file__ of the imported
# module is the pyc location.
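# Note: make_legacy_pyc() (a test.support helper) moves the PEP 3147 pyc out of
# __pycache__ and places it next to where the source file was, named without
# the interpreter tag (foo.py -> foo.pyc), recreating the pre-PEP 3147 layout.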
__import__(TESTFN)
# pyc_file gets removed in _clean() via tearDown().
pyc_file = make_legacy_pyc(self.source)
os.remove(self.source)
unload(TESTFN)
importlib.invalidate_caches()
m = __import__(TESTFN)
try:
self.assertEqual(m.__file__,
os.path.join(os.curdir, os.path.relpath(pyc_file)))
finally:
os.remove(pyc_file)
def test___cached__(self):
# Modules now also have an __cached__ that points to the pyc file.
m = __import__(TESTFN)
pyc_file = importlib.util.cache_from_source(TESTFN + '.py')
self.assertEqual(m.__cached__, os.path.join(os.curdir, pyc_file))
@skip_if_dont_write_bytecode
def test___cached___legacy_pyc(self):
# Like test___cached__() except that for backward compatibility,
# when the pyc file lives where the py file would have been (and named
# without the tag), it is importable. The __cached__ of the imported
# module is the pyc location.
__import__(TESTFN)
# pyc_file gets removed in _clean() via tearDown().
pyc_file = make_legacy_pyc(self.source)
os.remove(self.source)
unload(TESTFN)
importlib.invalidate_caches()
m = __import__(TESTFN)
self.assertEqual(m.__cached__,
os.path.join(os.curdir, os.path.relpath(pyc_file)))
@skip_if_dont_write_bytecode
def test_package___cached__(self):
# Like test___cached__ but for packages.
def cleanup():
rmtree('pep3147')
unload('pep3147.foo')
unload('pep3147')
os.mkdir('pep3147')
self.addCleanup(cleanup)
# Touch the __init__.py
with open(os.path.join('pep3147', '__init__.py'), 'w'):
pass
with open(os.path.join('pep3147', 'foo.py'), 'w'):
pass
importlib.invalidate_caches()
m = __import__('pep3147.foo')
init_pyc = importlib.util.cache_from_source(
os.path.join('pep3147', '__init__.py'))
self.assertEqual(m.__cached__, os.path.join(os.curdir, init_pyc))
foo_pyc = importlib.util.cache_from_source(os.path.join('pep3147', 'foo.py'))
self.assertEqual(sys.modules['pep3147.foo'].__cached__,
os.path.join(os.curdir, foo_pyc))
def test_package___cached___from_pyc(self):
# Like test___cached__ but ensuring __cached__ when imported from a
# PEP 3147 pyc file.
def cleanup():
rmtree('pep3147')
unload('pep3147.foo')
unload('pep3147')
os.mkdir('pep3147')
self.addCleanup(cleanup)
# Touch the __init__.py
with open(os.path.join('pep3147', '__init__.py'), 'w'):
pass
with open(os.path.join('pep3147', 'foo.py'), 'w'):
pass
importlib.invalidate_caches()
m = __import__('pep3147.foo')
unload('pep3147.foo')
unload('pep3147')
importlib.invalidate_caches()
m = __import__('pep3147.foo')
init_pyc = importlib.util.cache_from_source(
os.path.join('pep3147', '__init__.py'))
self.assertEqual(m.__cached__, os.path.join(os.curdir, init_pyc))
foo_pyc = importlib.util.cache_from_source(os.path.join('pep3147', 'foo.py'))
self.assertEqual(sys.modules['pep3147.foo'].__cached__,
os.path.join(os.curdir, foo_pyc))
def test_recompute_pyc_same_second(self):
# Even when the source file doesn't change timestamp, a change in
# source size is enough to trigger recomputation of the pyc file.
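# Rationale: the pyc header records both the source mtime and the source size
# (plus, since PEP 552, optionally a source hash), so growing the file is
# detected even when the rewrite lands within the same mtime granularity.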
__import__(TESTFN)
unload(TESTFN)
with open(self.source, 'a') as fp:
print("x = 5", file=fp)
m = __import__(TESTFN)
self.assertEqual(m.x, 5)
class TestSymbolicallyLinkedPackage(unittest.TestCase):
package_name = 'sample'
tagged = package_name + '-tagged'
def setUp(self):
os_helper.rmtree(self.tagged)
os_helper.rmtree(self.package_name)
self.orig_sys_path = sys.path[:]
# create a sample package; imagine you have a package with a tag and
# you want to symbolically link it from its untagged name.
os.mkdir(self.tagged)
self.addCleanup(os_helper.rmtree, self.tagged)
init_file = os.path.join(self.tagged, '__init__.py')
os_helper.create_empty_file(init_file)
assert os.path.exists(init_file)
# now create a symlink to the tagged package
# sample -> sample-tagged
os.symlink(self.tagged, self.package_name, target_is_directory=True)
self.addCleanup(os_helper.unlink, self.package_name)
importlib.invalidate_caches()
self.assertEqual(os.path.isdir(self.package_name), True)
assert os.path.isfile(os.path.join(self.package_name, '__init__.py'))
def tearDown(self):
sys.path[:] = self.orig_sys_path
# regression test for issue6727
@unittest.skipUnless(
not hasattr(sys, 'getwindowsversion')
or sys.getwindowsversion() >= (6, 0),
"Windows Vista or later required")
@os_helper.skip_unless_symlink
def test_symlinked_dir_importable(self):
# make sure sample can only be imported from the current directory.
sys.path[:] = ['.']
assert os.path.exists(self.package_name)
assert os.path.exists(os.path.join(self.package_name, '__init__.py'))
# Try to import the package
importlib.import_module(self.package_name)
@cpython_only
class ImportlibBootstrapTests(unittest.TestCase):
# These tests check that importlib is bootstrapped.
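# Background note (added for clarity): during startup the interpreter installs
# a frozen copy of importlib._bootstrap as sys.modules['_frozen_importlib']
# (and of importlib._bootstrap_external as '_frozen_importlib_external');
# importing the pure-Python importlib package afterwards must hand back those
# exact module objects rather than loading fresh copies.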
def test_frozen_importlib(self):
mod = sys.modules['_frozen_importlib']
self.assertTrue(mod)
def test_frozen_importlib_is_bootstrap(self):
from importlib import _bootstrap
mod = sys.modules['_frozen_importlib']
self.assertIs(mod, _bootstrap)
self.assertEqual(mod.__name__, 'importlib._bootstrap')
self.assertEqual(mod.__package__, 'importlib')
self.assertTrue(mod.__file__.endswith('_bootstrap.py'), mod.__file__)
def test_frozen_importlib_external_is_bootstrap_external(self):
from importlib import _bootstrap_external
mod = sys.modules['_frozen_importlib_external']
self.assertIs(mod, _bootstrap_external)
self.assertEqual(mod.__name__, 'importlib._bootstrap_external')
self.assertEqual(mod.__package__, 'importlib')
self.assertTrue(mod.__file__.endswith('_bootstrap_external.py'), mod.__file__)
def test_there_can_be_only_one(self):
# Issue #15386 revealed a tricky loophole in the bootstrapping
# This test is technically redundant, since the bug caused importing
# this test module to crash completely, but it helps prove the point
from importlib import machinery
mod = sys.modules['_frozen_importlib']
self.assertIs(machinery.ModuleSpec, mod.ModuleSpec)
@cpython_only
class GetSourcefileTests(unittest.TestCase):
"""Test importlib._bootstrap_external._get_sourcefile() as used by the C API.
Because of the peculiar way the C API relies on this function, the tests are
deliberately whitebox tests.
"""
def test_get_sourcefile(self):
# Given a valid bytecode path, return the path to the corresponding
# source file if it exists.
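# As the class docstring notes, _get_sourcefile() backs the C import machinery
# (it derives a module's source path from a bytecode path when __file__ is
# being set), which is why these tests call it directly and mock importlib
# internals instead of performing a real import.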
with mock.patch('importlib._bootstrap_external._path_isfile') as _path_isfile:
_path_isfile.return_value = True
path = TESTFN + '.pyc'
expect = TESTFN + '.py'
self.assertEqual(_get_sourcefile(path), expect)
def test_get_sourcefile_no_source(self):
# Given a valid bytecode path without a corresponding source path,
# return the original bytecode path.
with mock.patch('importlib._bootstrap_external._path_isfile') as _path_isfile:
_path_isfile.return_value = False
path = TESTFN + '.pyc'
self.assertEqual(_get_sourcefile(path), path)
def test_get_sourcefile_bad_ext(self):
# Given a path with an invalid bytecode extension, return the
# bytecode path passed as the argument.
path = TESTFN + '.bad_ext'
self.assertEqual(_get_sourcefile(path), path)
class ImportTracebackTests(unittest.TestCase):
def setUp(self):
os.mkdir(TESTFN)
self.old_path = sys.path[:]
sys.path.insert(0, TESTFN)
def tearDown(self):
sys.path[:] = self.old_path
rmtree(TESTFN)
def create_module(self, mod, contents, ext=".py"):
fname = os.path.join(TESTFN, mod + ext)
with open(fname, "w") as f:
f.write(contents)
self.addCleanup(unload, mod)
importlib.invalidate_caches()
return fname
def assert_traceback(self, tb, files):
deduped_files = []
while tb:
code = tb.tb_frame.f_code
fn = code.co_filename
if not deduped_files or fn != deduped_files[-1]:
deduped_files.append(fn)
tb = tb.tb_next
self.assertEqual(len(deduped_files), len(files), deduped_files)
for fn, pat in zip(deduped_files, files):
self.assertIn(pat, fn)
def test_nonexistent_module(self):
try:
# assertRaises() clears __traceback__
import nonexistent_xyzzy
except ImportError as e:
tb = e.__traceback__
else:
self.fail("ImportError should have been raised")
self.assert_traceback(tb, [__file__])
def test_nonexistent_module_nested(self):
self.create_module("foo", "import nonexistent_xyzzy")
try:
import foo
except ImportError as e:
tb = e.__traceback__
else:
self.fail("ImportError should have been raised")
self.assert_traceback(tb, [__file__, 'foo.py'])
def test_exec_failure(self):
self.create_module("foo", "1/0")
try:
import foo
except ZeroDivisionError as e:
tb = e.__traceback__
else:
self.fail("ZeroDivisionError should have been raised")
self.assert_traceback(tb, [__file__, 'foo.py'])
def test_exec_failure_nested(self):
self.create_module("foo", "import bar")
self.create_module("bar", "1/0")
try:
import foo
except ZeroDivisionError as e:
tb = e.__traceback__
else:
self.fail("ZeroDivisionError should have been raised")
self.assert_traceback(tb, [__file__, 'foo.py', 'bar.py'])
# A few more examples from issue #15425
def test_syntax_error(self):
self.create_module("foo", "invalid syntax is invalid")
try:
import foo
except SyntaxError as e:
tb = e.__traceback__
else:
self.fail("SyntaxError should have been raised")
self.assert_traceback(tb, [__file__])
def _setup_broken_package(self, parent, child):
pkg_name = "_parent_foo"
self.addCleanup(unload, pkg_name)
pkg_path = os.path.join(TESTFN, pkg_name)
os.mkdir(pkg_path)
# Touch the __init__.py
init_path = os.path.join(pkg_path, '__init__.py')
with open(init_path, 'w') as f:
f.write(parent)
bar_path = os.path.join(pkg_path, 'bar.py')
with open(bar_path, 'w') as f:
f.write(child)
importlib.invalidate_caches()
return init_path, bar_path
def test_broken_submodule(self):
init_path, bar_path = self._setup_broken_package("", "1/0")
try:
import _parent_foo.bar
except ZeroDivisionError as e:
tb = e.__traceback__
else:
self.fail("ZeroDivisionError should have been raised")
self.assert_traceback(tb, [__file__, bar_path])
def test_broken_from(self):
init_path, bar_path = self._setup_broken_package("", "1/0")
try:
from _parent_foo import bar
except ZeroDivisionError as e:
tb = e.__traceback__
else:
self.fail("ZeroDivisionError should have been raised")
self.assert_traceback(tb, [__file__, bar_path])
def test_broken_parent(self):
init_path, bar_path = self._setup_broken_package("1/0", "")
try:
import _parent_foo.bar
except ZeroDivisionError as e:
tb = e.__traceback__
else:
self.fail("ZeroDivisionError should have been raised")
self.assert_traceback(tb, [__file__, init_path])
def test_broken_parent_from(self):
init_path, bar_path = self._setup_broken_package("1/0", "")
try:
from _parent_foo import bar
except ZeroDivisionError as e:
tb = e.__traceback__
else:
self.fail("ZeroDivisionError should have been raised")
self.assert_traceback(tb, [__file__, init_path])
@cpython_only
def test_import_bug(self):
# We simulate a bug in importlib and check that it's not stripped
# away from the traceback.
self.create_module("foo", "")
importlib = sys.modules['_frozen_importlib_external']
if 'load_module' in vars(importlib.SourceLoader):
old_exec_module = importlib.SourceLoader.exec_module
else:
old_exec_module = None
try:
def exec_module(*args):
1/0
importlib.SourceLoader.exec_module = exec_module
try:
import foo
except ZeroDivisionError as e:
tb = e.__traceback__
else:
self.fail("ZeroDivisionError should have been raised")
self.assert_traceback(tb, [__file__, '<frozen importlib', __file__])
finally:
if old_exec_module is None:
del importlib.SourceLoader.exec_module
else:
importlib.SourceLoader.exec_module = old_exec_module
@unittest.skipUnless(TESTFN_UNENCODABLE, 'need TESTFN_UNENCODABLE')
def test_unencodable_filename(self):
# Issue #11619: The Python parser and the import machinery must not
# encode filenames, especially on Windows
pyname = script_helper.make_script('', TESTFN_UNENCODABLE, 'pass')
self.addCleanup(unlink, pyname)
name = pyname[:-3]
script_helper.assert_python_ok("-c", "mod = __import__(%a)" % name,
__isolated=False)
class CircularImportTests(unittest.TestCase):
"""See the docstrings of the modules being imported for the purpose of the
test."""
def tearDown(self):
"""Make sure no modules pre-exist in sys.modules which are being used to
test."""
for key in list(sys.modules.keys()):
if key.startswith('test.test_import.data.circular_imports'):
del sys.modules[key]
def test_direct(self):
try:
import test.test_import.data.circular_imports.basic
except ImportError:
self.fail('circular import through relative imports failed')
def test_indirect(self):
try:
import test.test_import.data.circular_imports.indirect
except ImportError:
self.fail('relative import in module contributing to circular '
'import failed')
def test_subpackage(self):
try:
import test.test_import.data.circular_imports.subpackage
except ImportError:
self.fail('circular import involving a subpackage failed')
def test_rebinding(self):
try:
import test.test_import.data.circular_imports.rebinding as rebinding
except ImportError:
self.fail('circular import with rebinding of module attribute failed')
from test.test_import.data.circular_imports.subpkg import util
self.assertIs(util.util, rebinding.util)
def test_binding(self):
try:
import test.test_import.data.circular_imports.binding
except ImportError:
self.fail('circular import with binding a submodule to a name failed')
def test_crossreference1(self):
import test.test_import.data.circular_imports.use
import test.test_import.data.circular_imports.source
def test_crossreference2(self):
with self.assertRaises(AttributeError) as cm:
import test.test_import.data.circular_imports.source
errmsg = str(cm.exception)
self.assertIn('test.test_import.data.circular_imports.source', errmsg)
self.assertIn('spam', errmsg)
self.assertIn('partially initialized module', errmsg)
self.assertIn('circular import', errmsg)
def test_circular_from_import(self):
with self.assertRaises(ImportError) as cm:
import test.test_import.data.circular_imports.from_cycle1
self.assertIn(
"cannot import name 'b' from partially initialized module "
"'test.test_import.data.circular_imports.from_cycle1' "
"(most likely due to a circular import)",
str(cm.exception),
)
def test_unwritable_module(self):
self.addCleanup(unload, "test.test_import.data.unwritable")
self.addCleanup(unload, "test.test_import.data.unwritable.x")
import test.test_import.data.unwritable as unwritable
with self.assertWarns(ImportWarning):
from test.test_import.data.unwritable import x
self.assertNotEqual(type(unwritable), ModuleType)
self.assertEqual(type(x), ModuleType)
with self.assertRaises(AttributeError):
unwritable.x = 42
if __name__ == '__main__':
# Test needs to be a package, so we can do relative imports.
unittest.main()
|
test_sys.py
|
# -*- coding: iso-8859-1 -*-
import unittest, test.support
import sys, io, os
import struct
import subprocess
import textwrap
class SysModuleTest(unittest.TestCase):
def setUp(self):
self.orig_stdout = sys.stdout
self.orig_stderr = sys.stderr
self.orig_displayhook = sys.displayhook
def tearDown(self):
sys.stdout = self.orig_stdout
sys.stderr = self.orig_stderr
sys.displayhook = self.orig_displayhook
def test_original_displayhook(self):
import builtins
out = io.StringIO()
sys.stdout = out
dh = sys.__displayhook__
self.assertRaises(TypeError, dh)
if hasattr(builtins, "_"):
del builtins._
dh(None)
self.assertEqual(out.getvalue(), "")
self.assert_(not hasattr(builtins, "_"))
dh(42)
self.assertEqual(out.getvalue(), "42\n")
self.assertEqual(builtins._, 42)
del sys.stdout
self.assertRaises(RuntimeError, dh, 42)
def test_lost_displayhook(self):
del sys.displayhook
code = compile("42", "<string>", "single")
self.assertRaises(RuntimeError, eval, code)
def test_custom_displayhook(self):
def baddisplayhook(obj):
raise ValueError
sys.displayhook = baddisplayhook
code = compile("42", "<string>", "single")
self.assertRaises(ValueError, eval, code)
def test_original_excepthook(self):
err = io.StringIO()
sys.stderr = err
eh = sys.__excepthook__
self.assertRaises(TypeError, eh)
try:
raise ValueError(42)
except ValueError as exc:
eh(*sys.exc_info())
self.assert_(err.getvalue().endswith("ValueError: 42\n"))
def test_excepthook(self):
with test.support.captured_output("stderr") as stderr:
sys.excepthook(1, '1', 1)
self.assert_("TypeError: print_exception(): Exception expected for " \
"value, str found" in stderr.getvalue())
# FIXME: testing the code for a lost or replaced excepthook in
# Python/pythonrun.c::PyErr_PrintEx() is tricky.
def test_exit(self):
self.assertRaises(TypeError, sys.exit, 42, 42)
# call without argument
try:
sys.exit(0)
except SystemExit as exc:
self.assertEquals(exc.code, 0)
except:
self.fail("wrong exception")
else:
self.fail("no exception")
# call with integer argument
try:
sys.exit(42)
except SystemExit as exc:
self.assertEquals(exc.code, 42)
except:
self.fail("wrong exception")
else:
self.fail("no exception")
# call with tuple argument with one entry
# entry will be unpacked
try:
sys.exit((42,))
except SystemExit as exc:
self.assertEquals(exc.code, 42)
except:
self.fail("wrong exception")
else:
self.fail("no exception")
# call with string argument
try:
sys.exit("exit")
except SystemExit as exc:
self.assertEquals(exc.code, "exit")
except:
self.fail("wrong exception")
else:
self.fail("no exception")
# call with tuple argument with two entries
try:
sys.exit((17, 23))
except SystemExit as exc:
self.assertEquals(exc.code, (17, 23))
except:
self.fail("wrong exception")
else:
self.fail("no exception")
# test that the exit machinery handles SystemExits properly
import subprocess
rc = subprocess.call([sys.executable, "-c",
"raise SystemExit(47)"])
self.assertEqual(rc, 47)
def test_getdefaultencoding(self):
self.assertRaises(TypeError, sys.getdefaultencoding, 42)
# can't check more than the type, as the user might have changed it
self.assert_(isinstance(sys.getdefaultencoding(), str))
# testing sys.settrace() is done in test_trace.py
# testing sys.setprofile() is done in test_profile.py
def test_setcheckinterval(self):
self.assertRaises(TypeError, sys.setcheckinterval)
orig = sys.getcheckinterval()
for n in 0, 100, 120, orig: # orig last to restore starting state
sys.setcheckinterval(n)
self.assertEquals(sys.getcheckinterval(), n)
def test_recursionlimit(self):
self.assertRaises(TypeError, sys.getrecursionlimit, 42)
oldlimit = sys.getrecursionlimit()
self.assertRaises(TypeError, sys.setrecursionlimit)
self.assertRaises(ValueError, sys.setrecursionlimit, -42)
sys.setrecursionlimit(10000)
self.assertEqual(sys.getrecursionlimit(), 10000)
sys.setrecursionlimit(oldlimit)
def test_recursionlimit_recovery(self):
# NOTE: this test is slightly fragile in that it depends on the current
# recursion count when executing the test being low enough so as to
# trigger the recursion recovery detection in the _Py_MakeEndRecCheck
# macro (see ceval.h).
oldlimit = sys.getrecursionlimit()
def f():
f()
try:
for i in (50, 1000):
# Issue #5392: stack overflow after hitting recursion limit twice
sys.setrecursionlimit(i)
self.assertRaises(RuntimeError, f)
self.assertRaises(RuntimeError, f)
finally:
sys.setrecursionlimit(oldlimit)
def test_recursionlimit_fatalerror(self):
# A fatal error occurs if the recursion limit is hit a second time while the
# interpreter is still recovering from the first stack overflow.
if os.name == "nt":
raise unittest.SkipTest(
"under Windows, test would generate a spurious crash dialog")
code = textwrap.dedent("""
import sys
def f():
try:
f()
except RuntimeError:
f()
sys.setrecursionlimit(%d)
f()""")
for i in (50, 1000):
sub = subprocess.Popen([sys.executable, '-c', code % i],
stderr=subprocess.PIPE)
err = sub.communicate()[1]
self.assertTrue(sub.returncode, sub.returncode)
self.assertTrue(
b"Fatal Python error: Cannot recover from stack overflow" in err,
err)
def test_getwindowsversion(self):
if hasattr(sys, "getwindowsversion"):
v = sys.getwindowsversion()
self.assert_(isinstance(v, tuple))
self.assertEqual(len(v), 5)
self.assert_(isinstance(v[0], int))
self.assert_(isinstance(v[1], int))
self.assert_(isinstance(v[2], int))
self.assert_(isinstance(v[3], int))
self.assert_(isinstance(v[4], str))
def test_call_tracing(self):
self.assertRaises(TypeError, sys.call_tracing, type, 2)
def test_dlopenflags(self):
if hasattr(sys, "setdlopenflags"):
self.assert_(hasattr(sys, "getdlopenflags"))
self.assertRaises(TypeError, sys.getdlopenflags, 42)
oldflags = sys.getdlopenflags()
self.assertRaises(TypeError, sys.setdlopenflags)
sys.setdlopenflags(oldflags+1)
self.assertEqual(sys.getdlopenflags(), oldflags+1)
sys.setdlopenflags(oldflags)
def test_refcount(self):
# n here must be a global in order for this test to pass while
# tracing with a python function. Tracing calls PyFrame_FastToLocals
# which will add a copy of any locals to the frame object, causing
# the reference count to increase by 2 instead of 1.
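# Also note that sys.getrefcount(obj) reports one reference more than might be
# expected, because the argument temporarily holds its own reference for the
# duration of the call.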
global n
self.assertRaises(TypeError, sys.getrefcount)
c = sys.getrefcount(None)
n = None
self.assertEqual(sys.getrefcount(None), c+1)
del n
self.assertEqual(sys.getrefcount(None), c)
if hasattr(sys, "gettotalrefcount"):
self.assert_(isinstance(sys.gettotalrefcount(), int))
def test_getframe(self):
self.assertRaises(TypeError, sys._getframe, 42, 42)
self.assertRaises(ValueError, sys._getframe, 2000000000)
self.assert_(
SysModuleTest.test_getframe.__code__ \
is sys._getframe().f_code
)
# sys._current_frames() is a CPython-only gimmick.
def test_current_frames(self):
have_threads = True
try:
import _thread
except ImportError:
have_threads = False
if have_threads:
self.current_frames_with_threads()
else:
self.current_frames_without_threads()
# Test sys._current_frames() in a WITH_THREADS build.
def current_frames_with_threads(self):
import threading, _thread
import traceback
# Spawn a thread that blocks at a known place. Then the main
# thread does sys._current_frames(), and verifies that the frames
# returned make sense.
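# sys._current_frames() returns a dict mapping each thread identifier (as
# reported by _thread.get_ident()) to that thread's topmost stack frame at the
# moment of the call, so both the main thread and the worker must show up.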
entered_g = threading.Event()
leave_g = threading.Event()
thread_info = [] # the thread's id
def f123():
g456()
def g456():
thread_info.append(_thread.get_ident())
entered_g.set()
leave_g.wait()
t = threading.Thread(target=f123)
t.start()
entered_g.wait()
# At this point, t has finished its entered_g.set(), although it's
# impossible to guess whether it's still on that line or has moved on
# to its leave_g.wait().
self.assertEqual(len(thread_info), 1)
thread_id = thread_info[0]
d = sys._current_frames()
main_id = _thread.get_ident()
self.assert_(main_id in d)
self.assert_(thread_id in d)
# Verify that the captured main-thread frame is _this_ frame.
frame = d.pop(main_id)
self.assert_(frame is sys._getframe())
# Verify that the captured thread frame is blocked in g456, called
# from f123. This is a little tricky, since various bits of
# threading.py are also in the thread's call stack.
frame = d.pop(thread_id)
stack = traceback.extract_stack(frame)
for i, (filename, lineno, funcname, sourceline) in enumerate(stack):
if funcname == "f123":
break
else:
self.fail("didn't find f123() on thread's call stack")
self.assertEqual(sourceline, "g456()")
# And the next record must be for g456().
filename, lineno, funcname, sourceline = stack[i+1]
self.assertEqual(funcname, "g456")
self.assert_(sourceline in ["leave_g.wait()", "entered_g.set()"])
# Reap the spawned thread.
leave_g.set()
t.join()
# Test sys._current_frames() when thread support doesn't exist.
def current_frames_without_threads(self):
# Not much happens here: there is only one thread, with artificial
# "thread id" 0.
d = sys._current_frames()
self.assertEqual(len(d), 1)
self.assert_(0 in d)
self.assert_(d[0] is sys._getframe())
def test_attributes(self):
self.assert_(isinstance(sys.api_version, int))
self.assert_(isinstance(sys.argv, list))
self.assert_(sys.byteorder in ("little", "big"))
self.assert_(isinstance(sys.builtin_module_names, tuple))
self.assert_(isinstance(sys.copyright, str))
self.assert_(isinstance(sys.exec_prefix, str))
self.assert_(isinstance(sys.executable, str))
self.assertEqual(len(sys.float_info), 11)
self.assertEqual(sys.float_info.radix, 2)
self.assertEqual(len(sys.int_info), 2)
self.assert_(sys.int_info.bits_per_digit % 5 == 0)
self.assert_(sys.int_info.sizeof_digit >= 1)
self.assertEqual(type(sys.int_info.bits_per_digit), int)
self.assertEqual(type(sys.int_info.sizeof_digit), int)
self.assert_(isinstance(sys.hexversion, int))
self.assert_(isinstance(sys.maxsize, int))
self.assert_(isinstance(sys.maxunicode, int))
self.assert_(isinstance(sys.platform, str))
self.assert_(isinstance(sys.prefix, str))
self.assert_(isinstance(sys.version, str))
vi = sys.version_info
self.assert_(isinstance(vi[:], tuple))
self.assertEqual(len(vi), 5)
self.assert_(isinstance(vi[0], int))
self.assert_(isinstance(vi[1], int))
self.assert_(isinstance(vi[2], int))
self.assert_(vi[3] in ("alpha", "beta", "candidate", "final"))
self.assert_(isinstance(vi[4], int))
self.assert_(isinstance(vi.major, int))
self.assert_(isinstance(vi.minor, int))
self.assert_(isinstance(vi.micro, int))
self.assert_(vi.releaselevel in
("alpha", "beta", "candidate", "final"))
self.assert_(isinstance(vi.serial, int))
self.assertEqual(vi[0], vi.major)
self.assertEqual(vi[1], vi.minor)
self.assertEqual(vi[2], vi.micro)
self.assertEqual(vi[3], vi.releaselevel)
self.assertEqual(vi[4], vi.serial)
self.assert_(vi > (1,0,0))
def test_43581(self):
# Can't use sys.stdout, as this is a StringIO object when
# the test runs under regrtest.
self.assertEqual(sys.__stdout__.encoding, sys.__stderr__.encoding)
def test_intern(self):
self.assertRaises(TypeError, sys.intern)
s = "never interned before"
self.assert_(sys.intern(s) is s)
s2 = s.swapcase().swapcase()
self.assert_(sys.intern(s2) is s)
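# Interned strings live in a single interpreter-wide table, so equal interned
# strings are the same object; that is why the swapcase().swapcase() round
# trip above still yields the original object.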
# Subclasses of string can't be interned, because they
# provide too much opportunity for insane things to happen.
# We don't want them in the interned dict and if they aren't
# actually interned, we don't want to create the appearance
# that they are by allowing intern() to succeed.
class S(str):
def __hash__(self):
return 123
self.assertRaises(TypeError, sys.intern, S("abc"))
def test_sys_flags(self):
self.failUnless(sys.flags)
attrs = ("debug", "division_warning",
"inspect", "interactive", "optimize", "dont_write_bytecode",
"no_user_site", "no_site", "ignore_environment", "verbose",
"bytes_warning")
for attr in attrs:
self.assert_(hasattr(sys.flags, attr), attr)
self.assertEqual(type(getattr(sys.flags, attr)), int, attr)
self.assert_(repr(sys.flags))
self.assertEqual(len(sys.flags), len(attrs))
def test_clear_type_cache(self):
sys._clear_type_cache()
def test_ioencoding(self):
import subprocess,os
env = dict(os.environ)
# Test character: cent sign, encoded as 0x4A (ASCII J) in CP424,
# not representable in ASCII.
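# PYTHONIOENCODING takes the form <encoding>[:<errors>] and overrides the
# encoding (and error handler) of the child interpreter's standard streams,
# which is why the second run below, with 'ascii:replace', prints '?' for the
# cent sign instead of failing.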
env["PYTHONIOENCODING"] = "cp424"
p = subprocess.Popen([sys.executable, "-c", 'print(chr(0xa2))'],
stdout = subprocess.PIPE, env=env)
out = p.stdout.read()
self.assertEqual(out, "\xa2\n".encode("cp424"))
env["PYTHONIOENCODING"] = "ascii:replace"
p = subprocess.Popen([sys.executable, "-c", 'print(chr(0xa2))'],
stdout = subprocess.PIPE, env=env)
out = p.stdout.read().strip()
self.assertEqual(out, b'?')
class SizeofTest(unittest.TestCase):
TPFLAGS_HAVE_GC = 1<<14
TPFLAGS_HEAPTYPE = 1<<9
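# These mirror Py_TPFLAGS_HAVE_GC (bit 14) and Py_TPFLAGS_HEAPTYPE (bit 9)
# from CPython's object.h; check_sizeof() below uses them to decide whether
# sys.getsizeof() will have included the GC head in the reported size.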
def setUp(self):
self.c = len(struct.pack('c', ' '))
self.H = len(struct.pack('H', 0))
self.i = len(struct.pack('i', 0))
self.l = len(struct.pack('l', 0))
self.P = len(struct.pack('P', 0))
# due to missing size_t information from struct, it is assumed that
# sizeof(Py_ssize_t) = sizeof(void*)
self.header = 'PP'
self.vheader = self.header + 'P'
if hasattr(sys, "gettotalrefcount"):
self.header += '2P'
self.vheader += '2P'
self.longdigit = sys.int_info.sizeof_digit
import _testcapi
self.gc_headsize = _testcapi.SIZEOF_PYGC_HEAD
self.file = open(test.support.TESTFN, 'wb')
def tearDown(self):
self.file.close()
test.support.unlink(test.support.TESTFN)
def check_sizeof(self, o, size):
result = sys.getsizeof(o)
# add GC header size
if ((type(o) == type) and (o.__flags__ & self.TPFLAGS_HEAPTYPE) or\
((type(o) != type) and (type(o).__flags__ & self.TPFLAGS_HAVE_GC))):
size += self.gc_headsize
msg = 'wrong size for %s: got %d, expected %d' \
% (type(o), result, size)
self.assertEqual(result, size, msg)
def calcsize(self, fmt):
"""Wrapper around struct.calcsize which enforces the alignment of the
end of a structure to the alignment requirement of pointer.
Note: This wrapper should only be used if a pointer member is included
and no member with a size larger than a pointer exists.
"""
return struct.calcsize(fmt + '0P')
def test_gc_head_size(self):
# Check that the gc header size is added to objects tracked by the gc.
h = self.header
vh = self.vheader
size = self.calcsize
gc_header_size = self.gc_headsize
# bool objects are not gc tracked
self.assertEqual(sys.getsizeof(True), size(vh) + self.longdigit)
# but lists are
self.assertEqual(sys.getsizeof([]), size(vh + 'PP') + gc_header_size)
def test_default(self):
h = self.header
vh = self.vheader
size = self.calcsize
self.assertEqual(sys.getsizeof(True), size(vh) + self.longdigit)
self.assertEqual(sys.getsizeof(True, -1), size(vh) + self.longdigit)
def test_objecttypes(self):
# check all types defined in Objects/
h = self.header
vh = self.vheader
size = self.calcsize
check = self.check_sizeof
# bool
check(True, size(vh) + self.longdigit)
# buffer
# XXX
# builtin_function_or_method
check(len, size(h + '3P'))
# bytearray
samples = [b'', b'u'*100000]
for sample in samples:
x = bytearray(sample)
check(x, size(vh + 'iPP') + x.__alloc__() * self.c)
# bytearray_iterator
check(iter(bytearray()), size(h + 'PP'))
# cell
def get_cell():
x = 42
def inner():
return x
return inner
check(get_cell().__closure__[0], size(h + 'P'))
# code
check(get_cell().__code__, size(h + '5i8Pi2P'))
# complex
check(complex(0,1), size(h + '2d'))
# method_descriptor (descriptor object)
check(str.lower, size(h + '2PP'))
# classmethod_descriptor (descriptor object)
# XXX
# member_descriptor (descriptor object)
import datetime
check(datetime.timedelta.days, size(h + '2PP'))
# getset_descriptor (descriptor object)
import collections
check(collections.defaultdict.default_factory, size(h + '2PP'))
# wrapper_descriptor (descriptor object)
check(int.__add__, size(h + '2P2P'))
# method-wrapper (descriptor object)
check({}.__iter__, size(h + '2P'))
# dict
check({}, size(h + '3P2P' + 8*'P2P'))
longdict = {1:1, 2:2, 3:3, 4:4, 5:5, 6:6, 7:7, 8:8}
check(longdict, size(h + '3P2P' + 8*'P2P') + 16*size('P2P'))
# dictionary-keyiterator
check({}.keys(), size(h + 'P'))
# dictionary-valueiterator
check({}.values(), size(h + 'P'))
# dictionary-itemiterator
check({}.items(), size(h + 'P'))
# dictproxy
class C(object): pass
check(C.__dict__, size(h + 'P'))
# BaseException
check(BaseException(), size(h + '5P'))
# UnicodeEncodeError
check(UnicodeEncodeError("", "", 0, 0, ""), size(h + '5P 2P2PP'))
# UnicodeDecodeError
# XXX
# check(UnicodeDecodeError("", "", 0, 0, ""), size(h + '5P2PP'))
# UnicodeTranslateError
check(UnicodeTranslateError("", 0, 1, ""), size(h + '5P 2P2PP'))
# Ellipsis
check(Ellipsis, size(h + ''))
# EncodingMap
import codecs, encodings.iso8859_3
x = codecs.charmap_build(encodings.iso8859_3.decoding_table)
check(x, size(h + '32B2iB'))
# enumerate
check(enumerate([]), size(h + 'l3P'))
# reverse
check(reversed(''), size(h + 'PP'))
# float
check(float(0), size(h + 'd'))
# sys.float_info
check(sys.float_info, size(vh) + self.P * len(sys.float_info))
# frame
import inspect
CO_MAXBLOCKS = 20
x = inspect.currentframe()
ncells = len(x.f_code.co_cellvars)
nfrees = len(x.f_code.co_freevars)
extras = x.f_code.co_stacksize + x.f_code.co_nlocals +\
ncells + nfrees - 1
check(x, size(vh + '12P3i' + CO_MAXBLOCKS*'3i' + 'P' + extras*'P'))
# function
def func(): pass
check(func, size(h + '11P'))
class c():
@staticmethod
def foo():
pass
@classmethod
def bar(cls):
pass
# staticmethod
check(foo, size(h + 'P'))
# classmethod
check(bar, size(h + 'P'))
# generator
def get_gen(): yield 1
check(get_gen(), size(h + 'Pi2P'))
# iterator
check(iter('abc'), size(h + 'lP'))
# callable-iterator
import re
check(re.finditer('',''), size(h + '2P'))
# list
samples = [[], [1,2,3], ['1', '2', '3']]
for sample in samples:
check(sample, size(vh + 'PP') + len(sample)*self.P)
# sortwrapper (list)
# XXX
# cmpwrapper (list)
# XXX
# listiterator (list)
check(iter([]), size(h + 'lP'))
# listreverseiterator (list)
check(reversed([]), size(h + 'lP'))
# long
check(0, size(vh))
check(1, size(vh) + self.longdigit)
check(-1, size(vh) + self.longdigit)
PyLong_BASE = 2**sys.int_info.bits_per_digit
check(int(PyLong_BASE), size(vh) + 2*self.longdigit)
check(int(PyLong_BASE**2-1), size(vh) + 2*self.longdigit)
check(int(PyLong_BASE**2), size(vh) + 3*self.longdigit)
# memory
check(memoryview(b''), size(h + 'P PP2P2i7P'))
# module
check(unittest, size(h + '3P'))
# None
check(None, size(h + ''))
# NotImplementedType
check(NotImplemented, size(h))
# object
check(object(), size(h + ''))
# property (descriptor object)
class C(object):
def getx(self): return self.__x
def setx(self, value): self.__x = value
def delx(self): del self.__x
x = property(getx, setx, delx, "")
check(x, size(h + '4Pi'))
# PyCapsule
# XXX
# rangeiterator
check(iter(range(1)), size(h + '4l'))
# reverse
check(reversed(''), size(h + 'PP'))
# range
check(range(1), size(h + '3P'))
check(range(66000), size(h + '3P'))
# set
# frozenset
PySet_MINSIZE = 8
samples = [[], range(10), range(50)]
s = size(h + '3P2P' + PySet_MINSIZE*'lP' + 'lP')
for sample in samples:
minused = len(sample)
if minused == 0: tmp = 1
# the computation of minused is actually a bit more complicated
# but this suffices for the sizeof test
minused = minused*2
newsize = PySet_MINSIZE
while newsize <= minused:
newsize = newsize << 1
if newsize <= PySet_MINSIZE:
check(set(sample), s)
check(frozenset(sample), s)
else:
check(set(sample), s + newsize*struct.calcsize('lP'))
check(frozenset(sample), s + newsize*struct.calcsize('lP'))
# setiterator
check(iter(set()), size(h + 'P3P'))
# slice
check(slice(0), size(h + '3P'))
# super
check(super(int), size(h + '3P'))
# tuple
check((), size(vh))
check((1,2,3), size(vh) + 3*self.P)
# type
# (PyTypeObject + PyNumberMethods + PyMappingMethods +
# PySequenceMethods + PyBufferProcs)
s = size(vh + 'P2P15Pl4PP9PP11PI') + size('16Pi17P 3P 10P 2P 2P')
check(int, s)
# class
class newstyleclass(object): pass
check(newstyleclass, s)
# unicode
usize = len('\0'.encode('unicode-internal'))
samples = ['', '1'*100]
# we need to test for both sizes, because we don't know if the string
# has been cached
for s in samples:
basicsize = size(h + 'PPliP') + usize * (len(s) + 1)
check(s, basicsize)
# weakref
import weakref
check(weakref.ref(int), size(h + '2Pl2P'))
# weakproxy
# XXX
# weakcallableproxy
check(weakref.proxy(int), size(h + '2Pl2P'))
def test_pythontypes(self):
# check all types defined in Python/
h = self.header
vh = self.vheader
size = self.calcsize
check = self.check_sizeof
# _ast.AST
import _ast
check(_ast.AST(), size(h + ''))
# imp.NullImporter
import imp
check(imp.NullImporter(self.file.name), size(h + ''))
try:
raise TypeError
except TypeError:
tb = sys.exc_info()[2]
# traceback
if tb != None:
check(tb, size(h + '2P2i'))
# symtable entry
# XXX
# sys.flags
check(sys.flags, size(vh) + self.P * len(sys.flags))
def test_setfilesystemencoding(self):
old = sys.getfilesystemencoding()
sys.setfilesystemencoding("iso-8859-1")
self.assertEqual(sys.getfilesystemencoding(), "iso-8859-1")
sys.setfilesystemencoding(old)
def test_main():
test.support.run_unittest(SysModuleTest, SizeofTest)
if __name__ == "__main__":
test_main()
|